text
stringlengths
5.09k
178k
domain
stringclasses
106 values
from datetime import ( date, datetime, timedelta, ) from itertools import product import numpy as np import pytest import pandas as pd from pandas import ( Categorical, DataFrame, Grouper, Index, MultiIndex, Series, concat, date_range, ) import pandas._testing as tm from pandas.api.types import CategoricalDtype as CDT from pandas.core.reshape.pivot import pivot_table @pytest.fixture(params=[True, False]) def dropna(request): return request.param @pytest.fixture(params=[([0] * 4, [1] * 4), (range(0, 3), range(1, 4))]) def interval_values(request, closed): left, right = request.param return Categorical(pd.IntervalIndex.from_arrays(left, right, closed)) class TestPivotTable: def setup_method(self, method): self.data = DataFrame( { "A": [ "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar", "foo", "foo", "foo", ], "B": [ "one", "one", "one", "two", "one", "one", "one", "two", "two", "two", "one", ], "C": [ "dull", "dull", "shiny", "dull", "dull", "shiny", "shiny", "dull", "shiny", "shiny", "shiny", ], "D": np.random.randn(11), "E": np.random.randn(11), "F": np.random.randn(11), } ) def test_pivot_table(self, observed): index = ["A", "B"] columns = "C" table = pivot_table( self.data, values="D", index=index, columns=columns, observed=observed ) table2 = self.data.pivot_table( values="D", index=index, columns=columns, observed=observed ) tm.assert_frame_equal(table, table2) # this works pivot_table(self.data, values="D", index=index, observed=observed) if len(index) > 1: assert table.index.names == tuple(index) else: assert table.index.name == index[0] if len(columns) > 1: assert table.columns.names == columns else: assert table.columns.name == columns[0] expected = self.data.groupby(index + [columns])["D"].agg(np.mean).unstack() tm.assert_frame_equal(table, expected) def test_pivot_table_categorical_observed_equal(self, observed): # issue #24923 df = DataFrame( {"col1": list("abcde"), "col2": list("fghij"), "col3": [1, 2, 3, 4, 5]} ) expected = df.pivot_table( 
index="col1", values="col3", columns="col2", aggfunc=np.sum, fill_value=0 ) expected.index = expected.index.astype("category") expected.columns = expected.columns.astype("category") df.col1 = df.col1.astype("category") df.col2 = df.col2.astype("category") result = df.pivot_table( index="col1", values="col3", columns="col2", aggfunc=np.sum, fill_value=0, observed=observed, ) tm.assert_frame_equal(result, expected) def test_pivot_table_nocols(self): df = DataFrame( {"rows": ["a", "b", "c"], "cols": ["x", "y", "z"], "values": [1, 2, 3]} ) rs = df.pivot_table(columns="cols", aggfunc=np.sum) xp = df.pivot_table(index="cols", aggfunc=np.sum).T tm.assert_frame_equal(rs, xp) rs = df.pivot_table(columns="cols", aggfunc={"values": "mean"}) xp = df.pivot_table(index="cols", aggfunc={"values": "mean"}).T tm.assert_frame_equal(rs, xp) def test_pivot_table_dropna(self): df = DataFrame( { "amount": {0: 60000, 1: 100000, 2: 50000, 3: 30000}, "customer": {0: "A", 1: "A", 2: "B", 3: "C"}, "month": {0: 201307, 1: 201309, 2: 201308, 3: 201310}, "product": {0: "a", 1: "b", 2: "c", 3: "d"}, "quantity": {0: 2000000, 1: 500000, 2: 1000000, 3: 1000000}, } ) pv_col = df.pivot_table( "quantity", "month", ["customer", "product"], dropna=False ) pv_ind = df.pivot_table( "quantity", ["customer", "product"], "month", dropna=False ) m = MultiIndex.from_tuples( [ ("A", "a"), ("A", "b"), ("A", "c"), ("A", "d"), ("B", "a"), ("B", "b"), ("B", "c"), ("B", "d"), ("C", "a"), ("C", "b"), ("C", "c"), ("C", "d"), ], names=["customer", "product"], ) tm.assert_index_equal(pv_col.columns, m) tm.assert_index_equal(pv_ind.index, m) def test_pivot_table_categorical(self): cat1 = Categorical( ["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True ) cat2 = Categorical( ["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True ) df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]}) result = pivot_table(df, values="values", index=["A", "B"], dropna=True) exp_index = 
MultiIndex.from_arrays([cat1, cat2], names=["A", "B"]) expected = DataFrame({"values": [1, 2, 3, 4]}, index=exp_index) tm.assert_frame_equal(result, expected) def test_pivot_table_dropna_categoricals(self, dropna): # GH 15193 categories = ["a", "b", "c", "d"] df = DataFrame( { "A": ["a", "a", "a", "b", "b", "b", "c", "c", "c"], "B": [1, 2, 3, 1, 2, 3, 1, 2, 3], "C": range(0, 9), } ) df["A"] = df["A"].astype(CDT(categories, ordered=False)) result = df.pivot_table(index="B", columns="A", values="C", dropna=dropna) expected_columns = Series(["a", "b", "c"], name="A") expected_columns = expected_columns.astype(CDT(categories, ordered=False)) expected_index = Series([1, 2, 3], name="B") expected = DataFrame( [[0, 3, 6], [1, 4, 7], [2, 5, 8]], index=expected_index, columns=expected_columns, ) if not dropna: # add back the non observed to compare expected = expected.reindex(columns=Categorical(categories)).astype("float") tm.assert_frame_equal(result, expected) def test_pivot_with_non_observable_dropna(self, dropna): # gh-21133 df = DataFrame( { "A": Categorical( [np.nan, "low", "high", "low", "high"], categories=["low", "high"], ordered=True, ), "B": [0.0, 1.0, 2.0, 3.0, 4.0], } ) result = df.pivot_table(index="A", values="B", dropna=dropna) expected = DataFrame( {"B": [2.0, 3.0]}, index=Index( Categorical.from_codes( [0, 1], categories=["low", "high"], ordered=True ), name="A", ), ) tm.assert_frame_equal(result, expected) # gh-21378 df = DataFrame( { "A": Categorical( ["left", "low", "high", "low", "high"], categories=["low", "high", "left"], ordered=True, ), "B": range(5), } ) result = df.pivot_table(index="A", values="B", dropna=dropna) expected = DataFrame( {"B": [2, 3, 0]}, index=Index( Categorical.from_codes( [0, 1, 2], categories=["low", "high", "left"], ordered=True ), name="A", ), ) if not dropna: expected["B"] = expected["B"].astype(float) tm.assert_frame_equal(result, expected) def test_pivot_with_interval_index(self, interval_values, dropna): # GH 25814 df = 
DataFrame({"A": interval_values, "B": 1}) result = df.pivot_table(index="A", values="B", dropna=dropna) expected = DataFrame({"B": 1}, index=Index(interval_values.unique(), name="A")) if not dropna: expected = expected.astype(float) tm.assert_frame_equal(result, expected) def test_pivot_with_interval_index_margins(self): # GH 25815 ordered_cat = pd.IntervalIndex.from_arrays([0, 0, 1, 1], [1, 1, 2, 2]) df = DataFrame( { "A": np.arange(4, 0, -1, dtype=np.intp), "B": ["a", "b", "a", "b"], "C": Categorical(ordered_cat, ordered=True).sort_values( ascending=False ), } ) pivot_tab = pivot_table( df, index="C", columns="B", values="A", aggfunc="sum", margins=True ) result = pivot_tab["All"] expected = Series( [3, 7, 10], index=Index([pd.Interval(0, 1), pd.Interval(1, 2), "All"], name="C"), name="All", dtype=np.intp, ) tm.assert_series_equal(result, expected) def test_pass_array(self): result = self.data.pivot_table("D", index=self.data.A, columns=self.data.C) expected = self.data.pivot_table("D", index="A", columns="C") tm.assert_frame_equal(result, expected) def test_pass_function(self): result = self.data.pivot_table("D", index=lambda x: x // 5, columns=self.data.C) expected = self.data.pivot_table("D", index=self.data.index // 5, columns="C") tm.assert_frame_equal(result, expected) def test_pivot_table_multiple(self): index = ["A", "B"] columns = "C" table = pivot_table(self.data, index=index, columns=columns) expected = self.data.groupby(index + [columns]).agg(np.mean).unstack() tm.assert_frame_equal(table, expected) def test_pivot_dtypes(self): # can convert dtypes f = DataFrame( { "a": ["cat", "bat", "cat", "bat"], "v": [1, 2, 3, 4], "i": ["a", "b", "a", "b"], } ) assert f.dtypes["v"] == "int64" z = pivot_table( f, values="v", index=["a"], columns=["i"], fill_value=0, aggfunc=np.sum ) result = z.dtypes expected = Series([np.dtype("int64")] * 2, index=Index(list("ab"), name="i")) tm.assert_series_equal(result, expected) # cannot convert dtypes f = DataFrame( { "a": 
["cat", "bat", "cat", "bat"], "v": [1.5, 2.5, 3.5, 4.5], "i": ["a", "b", "a", "b"], } ) assert f.dtypes["v"] == "float64" z = pivot_table( f, values="v", index=["a"], columns=["i"], fill_value=0, aggfunc=np.mean ) result = z.dtypes expected = Series([np.dtype("float64")] * 2, index=Index(list("ab"), name="i")) tm.assert_series_equal(result, expected) @pytest.mark.parametrize( "columns,values", [ ("bool1", ["float1", "float2"]), ("bool1", ["float1", "float2", "bool1"]), ("bool2", ["float1", "float2", "bool1"]), ], ) def test_pivot_preserve_dtypes(self, columns, values): # GH 7142 regression test v = np.arange(5, dtype=np.float64) df = DataFrame( {"float1": v, "float2": v + 2.0, "bool1": v <= 2, "bool2": v <= 3} ) df_res = df.reset_index().pivot_table( index="index", columns=columns, values=values ) result = dict(df_res.dtypes) expected = {col: np.dtype("float64") for col in df_res} assert result == expected def test_pivot_no_values(self): # GH 14380 idx = pd.DatetimeIndex( ["2011-01-01", "2011-02-01", "2011-01-02", "2011-01-01", "2011-01-02"] ) df = DataFrame({"A": [1, 2, 3, 4, 5]}, index=idx) res = df.pivot_table(index=df.index.month, columns=df.index.day) exp_columns = MultiIndex.from_tuples([("A", 1), ("A", 2)]) exp = DataFrame([[2.5, 4.0], [2.0, np.nan]], index=[1, 2], columns=exp_columns) tm.assert_frame_equal(res, exp) df = DataFrame( { "A": [1, 2, 3, 4, 5], "dt": date_range("2011-01-01", freq="D", periods=5), }, index=idx, ) res = df.pivot_table(index=df.index.month, columns=Grouper(key="dt", freq="M")) exp_columns = MultiIndex.from_tuples([("A", pd.Timestamp("2011-01-31"))]) exp_columns.names = [None, "dt"] exp = DataFrame([3.25, 2.0], index=[1, 2], columns=exp_columns) tm.assert_frame_equal(res, exp) res = df.pivot_table( index=Grouper(freq="A"), columns=Grouper(key="dt", freq="M") ) exp = DataFrame( [3], index=pd.DatetimeIndex(["2011-12-31"], freq="A"), columns=exp_columns ) tm.assert_frame_equal(res, exp) def test_pivot_multi_values(self): result = 
pivot_table( self.data, values=["D", "E"], index="A", columns=["B", "C"], fill_value=0 ) expected = pivot_table( self.data.drop(["F"], axis=1), index="A", columns=["B", "C"], fill_value=0 ) tm.assert_frame_equal(result, expected) def test_pivot_multi_functions(self): f = lambda func: pivot_table( self.data, values=["D", "E"], index=["A", "B"], columns="C", aggfunc=func ) result = f([np.mean, np.std]) means = f(np.mean) stds = f(np.std) expected = concat([means, stds], keys=["mean", "std"], axis=1) tm.assert_frame_equal(result, expected) # margins not supported?? f = lambda func: pivot_table( self.data, values=["D", "E"], index=["A", "B"], columns="C", aggfunc=func, margins=True, ) result = f([np.mean, np.std]) means = f(np.mean) stds = f(np.std) expected = concat([means, stds], keys=["mean", "std"], axis=1) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("method", [True, False]) def test_pivot_index_with_nan(self, method): # GH 3588 nan = np.nan df = DataFrame( { "a": ["R1", "R2", nan, "R4"], "b": ["C1", "C2", "C3", "C4"], "c": [10, 15, 17, 20], } ) if method: result = df.pivot("a", "b", "c") else: result = pd.pivot(df, "a", "b", "c") expected = DataFrame( [ [nan, nan, 17, nan], [10, nan, nan, nan], [nan, 15, nan, nan], [nan, nan, nan, 20], ], index=Index([nan, "R1", "R2", "R4"], name="a"), columns=Index(["C1", "C2", "C3", "C4"], name="b"), ) tm.assert_frame_equal(result, expected) tm.assert_frame_equal(df.pivot("b", "a", "c"), expected.T) # GH9491 df = DataFrame( { "a": date_range("2014-02-01", periods=6, freq="D"), "c": 100 + np.arange(6), } ) df["b"] = df["a"] - pd.Timestamp("2014-02-02") df.loc[1, "a"] = df.loc[3, "a"] = nan df.loc[1, "b"] = df.loc[4, "b"] = nan if method: pv = df.pivot("a", "b", "c") else: pv = pd.pivot(df, "a", "b", "c") assert pv.notna().values.sum() == len(df) for _, row in df.iterrows(): assert pv.loc[row["a"], row["b"]] == row["c"] if method: result = df.pivot("b", "a", "c") else: result = pd.pivot(df, "b", "a", "c") 
tm.assert_frame_equal(result, pv.T) @pytest.mark.parametrize("method", [True, False]) def test_pivot_with_tz(self, method): # GH 5878 df = DataFrame( { "dt1": [ datetime(2013, 1, 1, 9, 0), datetime(2013, 1, 2, 9, 0), datetime(2013, 1, 1, 9, 0), datetime(2013, 1, 2, 9, 0), ], "dt2": [ datetime(2014, 1, 1, 9, 0), datetime(2014, 1, 1, 9, 0), datetime(2014, 1, 2, 9, 0), datetime(2014, 1, 2, 9, 0), ], "data1": np.arange(4, dtype="int64"), "data2": np.arange(4, dtype="int64"), } ) df["dt1"] = df["dt1"].apply(lambda d: pd.Timestamp(d, tz="US/Pacific")) df["dt2"] = df["dt2"].apply(lambda d: pd.Timestamp(d, tz="Asia/Tokyo")) exp_col1 = Index(["data1", "data1", "data2", "data2"]) exp_col2 = pd.DatetimeIndex( ["2014/01/01 09:00", "2014/01/02 09:00"] * 2, name="dt2", tz="Asia/Tokyo" ) exp_col = MultiIndex.from_arrays([exp_col1, exp_col2]) expected = DataFrame( [[0, 2, 0, 2], [1, 3, 1, 3]], index=pd.DatetimeIndex( ["2013/01/01 09:00", "2013/01/02 09:00"], name="dt1", tz="US/Pacific" ), columns=exp_col, ) if method: pv = df.pivot(index="dt1", columns="dt2") else: pv = pd.pivot(df, index="dt1", columns="dt2") tm.assert_frame_equal(pv, expected) expected = DataFrame( [[0, 2], [1, 3]], index=pd.DatetimeIndex( ["2013/01/01 09:00", "2013/01/02 09:00"], name="dt1", tz="US/Pacific" ), columns=pd.DatetimeIndex( ["2014/01/01 09:00", "2014/01/02 09:00"], name="dt2", tz="Asia/Tokyo" ), ) if method: pv = df.pivot(index="dt1", columns="dt2", values="data1") else: pv = pd.pivot(df, index="dt1", columns="dt2", values="data1") tm.assert_frame_equal(pv, expected) def test_pivot_tz_in_values(self): # GH 14948 df = DataFrame( [ { "uid": "aa", "ts": pd.Timestamp("2016-08-12 13:00:00-0700", tz="US/Pacific"), }, { "uid": "aa", "ts": pd.Timestamp("2016-08-12 08:00:00-0700", tz="US/Pacific"), }, { "uid": "aa", "ts": pd.Timestamp("2016-08-12 14:00:00-0700", tz="US/Pacific"), }, { "uid": "aa", "ts": pd.Timestamp("2016-08-25 11:00:00-0700", tz="US/Pacific"), }, { "uid": "aa", "ts": 
pd.Timestamp("2016-08-25 13:00:00-0700", tz="US/Pacific"), }, ] ) df = df.set_index("ts").reset_index() mins = df.ts.map(lambda x: x.replace(hour=0, minute=0, second=0, microsecond=0)) result = pivot_table( df.set_index("ts").reset_index(), values="ts", index=["uid"], columns=[mins], aggfunc=np.min, ) expected = DataFrame( [ [ pd.Timestamp("2016-08-12 08:00:00-0700", tz="US/Pacific"), pd.Timestamp("2016-08-25 11:00:00-0700", tz="US/Pacific"), ] ], index=Index(["aa"], name="uid"), columns=pd.DatetimeIndex( [ pd.Timestamp("2016-08-12 00:00:00", tz="US/Pacific"), pd.Timestamp("2016-08-25 00:00:00", tz="US/Pacific"), ], name="ts", ), ) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("method", [True, False]) def test_pivot_periods(self, method): df = DataFrame( { "p1": [ pd.Period("2013-01-01", "D"), pd.Period("2013-01-02", "D"), pd.Period("2013-01-01", "D"), pd.Period("2013-01-02", "D"), ], "p2": [ pd.Period("2013-01", "M"), pd.Period("2013-01", "M"), pd.Period("2013-02", "M"), pd.Period("2013-02", "M"), ], "data1": np.arange(4, dtype="int64"), "data2": np.arange(4, dtype="int64"), } ) exp_col1 = Index(["data1", "data1", "data2", "data2"]) exp_col2 = pd.PeriodIndex(["2013-01", "2013-02"] * 2, name="p2", freq="M") exp_col = MultiIndex.from_arrays([exp_col1, exp_col2]) expected = DataFrame( [[0, 2, 0, 2], [1, 3, 1, 3]], index=pd.PeriodIndex(["2013-01-01", "2013-01-02"], name="p1", freq="D"), columns=exp_col, ) if method: pv = df.pivot(index="p1", columns="p2") else: pv = pd.pivot(df, index="p1", columns="p2") tm.assert_frame_equal(pv, expected) expected = DataFrame( [[0, 2], [1, 3]], index=pd.PeriodIndex(["2013-01-01", "2013-01-02"], name="p1", freq="D"), columns=pd.PeriodIndex(["2013-01", "2013-02"], name="p2", freq="M"), ) if method: pv = df.pivot(index="p1", columns="p2", values="data1") else: pv = pd.pivot(df, index="p1", columns="p2", values="data1") tm.assert_frame_equal(pv, expected) def test_pivot_periods_with_margins(self): # GH 28323 df = 
DataFrame( { "a": [1, 1, 2, 2], "b": [ pd.Period("2019Q1"), pd.Period("2019Q2"), pd.Period("2019Q1"), pd.Period("2019Q2"), ], "x": 1.0, } ) expected = DataFrame( data=1.0, index=Index([1, 2, "All"], name="a"), columns=Index([pd.Period("2019Q1"), pd.Period("2019Q2"), "All"], name="b"), ) result = df.pivot_table(index="a", columns="b", values="x", margins=True) tm.assert_frame_equal(expected, result) @pytest.mark.parametrize( "values", [ ["baz", "zoo"], np.array(["baz", "zoo"]), Series(["baz", "zoo"]), Index(["baz", "zoo"]), ], ) @pytest.mark.parametrize("method", [True, False]) def test_pivot_with_list_like_values(self, values, method): # issue #17160 df = DataFrame( { "foo": ["one", "one", "one", "two", "two", "two"], "bar": ["A", "B", "C", "A", "B", "C"], "baz": [1, 2, 3, 4, 5, 6], "zoo": ["x", "y", "z", "q", "w", "t"], } ) if method: result = df.pivot(index="foo", columns="bar", values=values) else: result = pd.pivot(df, index="foo", columns="bar", values=values) data = [[1, 2, 3, "x", "y", "z"], [4, 5, 6, "q", "w", "t"]] index = Index(data=["one", "two"], name="foo") columns = MultiIndex( levels=[["baz", "zoo"], ["A", "B", "C"]], codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]], names=[None, "bar"], ) expected = DataFrame(data=data, index=index, columns=columns, dtype="object") tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "values", [ ["bar", "baz"], np.array(["bar", "baz"]), Series(["bar", "baz"]), Index(["bar", "baz"]), ], ) @pytest.mark.parametrize("method", [True, False]) def test_pivot_with_list_like_values_nans(self, values, method): # issue #17160 df = DataFrame( { "foo": ["one", "one", "one", "two", "two", "two"], "bar": ["A", "B", "C", "A", "B", "C"], "baz": [1, 2, 3, 4, 5, 6], "zoo": ["x", "y", "z", "q", "w", "t"], } ) if method: result = df.pivot(index="zoo", columns="foo", values=values) else: result = pd.pivot(df, index="zoo", columns="foo", values=values) data = [ [np.nan, "A", np.nan, 4], [np.nan, "C", np.nan, 6], [np.nan, "B", 
np.nan, 5], ["A", np.nan, 1, np.nan], ["B", np.nan, 2, np.nan], ["C", np.nan, 3, np.nan], ] index = Index(data=["q", "t", "w", "x", "y", "z"], name="zoo") columns = MultiIndex( levels=[["bar", "baz"], ["one", "two"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[None, "foo"], ) expected = DataFrame(data=data, index=index, columns=columns, dtype="object") tm.assert_frame_equal(result, expected) def test_pivot_columns_none_raise_error(self): # GH 30924 df = DataFrame({"col1": ["a", "b", "c"], "col2": [1, 2, 3], "col3": [1, 2, 3]}) msg = r"pivot\(\) missing 1 required argument: 'columns'" with pytest.raises(TypeError, match=msg): df.pivot(index="col1", values="col3") @pytest.mark.xfail( reason="MultiIndexed unstack with tuple names fails with KeyError GH#19966" ) @pytest.mark.parametrize("method", [True, False]) def test_pivot_with_multiindex(self, method): # issue #17160 index = Index(data=[0, 1, 2, 3, 4, 5]) data = [ ["one", "A", 1, "x"], ["one", "B", 2, "y"], ["one", "C", 3, "z"], ["two", "A", 4, "q"], ["two", "B", 5, "w"], ["two", "C", 6, "t"], ] columns = MultiIndex( levels=[["bar", "baz"], ["first", "second"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], ) df = DataFrame(data=data, index=index, columns=columns, dtype="object") if method: result = df.pivot( index=("bar", "first"), columns=("bar", "second"), values=("baz", "first"), ) else: result = pd.pivot( df, index=("bar", "first"), columns=("bar", "second"), values=("baz", "first"), ) data = { "A": Series([1, 4], index=["one", "two"]), "B": Series([2, 5], index=["one", "two"]), "C": Series([3, 6], index=["one", "two"]), } expected = DataFrame(data) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("method", [True, False]) def test_pivot_with_tuple_of_values(self, method): # issue #17160 df = DataFrame( { "foo": ["one", "one", "one", "two", "two", "two"], "bar": ["A", "B", "C", "A", "B", "C"], "baz": [1, 2, 3, 4, 5, 6], "zoo": ["x", "y", "z", "q", "w", "t"], } ) with pytest.raises(KeyError, 
match=r"^\('bar', 'baz'\)$"): # tuple is seen as a single column name if method: df.pivot(index="zoo", columns="foo", values=("bar", "baz")) else: pd.pivot(df, index="zoo", columns="foo", values=("bar", "baz")) def test_margins(self): def _check_output( result, values_col, index=["A", "B"], columns=["C"], margins_col="All" ): col_margins = result.loc[result.index[:-1], margins_col] expected_col_margins = self.data.groupby(index)[values_col].mean() tm.assert_series_equal(col_margins, expected_col_margins, check_names=False) assert col_margins.name == margins_col result = result.sort_index() index_margins = result.loc[(margins_col, "")].iloc[:-1] expected_ix_margins = self.data.groupby(columns)[values_col].mean() tm.assert_series_equal( index_margins, expected_ix_margins, check_names=False ) assert index_margins.name == (margins_col, "") grand_total_margins = result.loc[(margins_col, ""), margins_col] expected_total_margins = self.data[values_col].mean() assert grand_total_margins == expected_total_margins # column specified result = self.data.pivot_table( values="D", index=["A", "B"], columns="C", margins=True, aggfunc=np.mean ) _check_output(result, "D") # Set a different margins_name (not 'All') result = self.data.pivot_table( values="D", index=["A", "B"], columns="C", margins=True, aggfunc=np.mean, margins_name="Totals", ) _check_output(result, "D", margins_col="Totals") # no column specified table = self.data.pivot_table( index=["A", "B"], columns="C", margins=True, aggfunc=np.mean ) for value_col in table.columns.levels[0]: _check_output(table[value_col], value_col) # no col # to help with a buglet self.data.columns = [k * 2 for k in self.data.columns] table = self.data.pivot_table(index=["AA", "BB"], margins=True, aggfunc=np.mean) for value_col in table.columns: totals = table.loc[("All", ""), value_col] assert totals == self.data[value_col].mean() table = self.data.pivot_table(index=["AA", "BB"], margins=True, aggfunc="mean") for item in ["DD", "EE", "FF"]: 
totals = table.loc[("All", ""), item] assert totals == self.data[item].mean() @pytest.mark.parametrize( "columns, aggfunc, values, expected_columns", [ ( "A", np.mean, [[5.5, 5.5, 2.2, 2.2], [8.0, 8.0, 4.4, 4.4]], Index(["bar", "All", "foo", "All"], name="A"), ), ( ["A", "B"], "sum", [[9, 13, 22, 5, 6, 11], [14, 18, 32, 11, 11, 22]], MultiIndex.from_tuples( [ ("bar", "one"), ("bar", "two"), ("bar", "All"), ("foo", "one"), ("foo", "two"), ("foo", "All"), ], names=["A", "B"], ), ), ], ) def test_margin_with_only_columns_defined( self, columns, aggfunc, values, expected_columns ): # GH 31016 df = DataFrame( { "A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"], "B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"], "C": [ "small", "large", "large", "small", "small", "large", "small", "small", "large", ], "D": [1, 2, 2, 3, 3, 4, 5, 6, 7], "E": [2, 4, 5, 5, 6, 6, 8, 9, 9], } ) result = df.pivot_table(columns=columns, margins=True, aggfunc=aggfunc) expected = DataFrame(values, index=Index(["D", "E"]), columns=expected_columns) tm.assert_frame_equal(result, expected) def test_margins_dtype(self): # GH 17013 df = self.data.copy() df[["D", "E", "F"]] = np.arange(len(df) * 3).reshape(len(df), 3).astype("i8") mi_val = list(product(["bar", "foo"], ["one", "two"])) + [("All", "")] mi = MultiIndex.from_tuples(mi_val, names=("A", "B")) expected = DataFrame( {"dull": [12, 21, 3, 9, 45], "shiny": [33, 0, 36, 51, 120]}, index=mi ).rename_axis("C", axis=1) expected["All"] = expected["dull"] + expected["shiny"] result = df.pivot_table( values="D", index=["A", "B"], columns="C", margins=True, aggfunc=np.sum, fill_value=0, ) tm.assert_frame_equal(expected, result) def test_margins_dtype_len(self): mi_val = list(product(["bar", "foo"], ["one", "two"])) + [("All", "")] mi = MultiIndex.from_tuples(mi_val, names=("A", "B")) expected = DataFrame( {"dull": [1, 1, 2, 1, 5], "shiny": [2, 0, 2, 2, 6]}, index=mi ).rename_axis("C", axis=1) expected["All"] = 
expected["dull"] + expected["shiny"] result = self.data.pivot_table( values="D", index=["A", "B"], columns="C", margins=True, aggfunc=len, fill_value=0, ) tm.assert_frame_equal(expected, result) @pytest.mark.parametrize("cols", [(1, 2), ("a", "b"), (1, "b"), ("a", 1)]) def test_pivot_table_multiindex_only(self, cols): # GH 17038 df2 = DataFrame({cols[0]: [1, 2, 3], cols[1]: [1, 2, 3], "v": [4, 5, 6]}) result = df2.pivot_table(values="v", columns=cols) expected = DataFrame( [[4, 5, 6]], columns=MultiIndex.from_tuples([(1, 1), (2, 2), (3, 3)], names=cols), index=Index(["v"]), ) tm.assert_frame_equal(result, expected) def test_pivot_table_retains_tz(self): dti = date_range("2016-01-01", periods=3, tz="Europe/Amsterdam") df = DataFrame({"A": np.random.randn(3), "B": np.random.randn(3), "C": dti}) result = df.pivot_table(index=["B", "C"], dropna=False) # check tz retention assert result.index.levels[1].equals(dti) def test_pivot_integer_columns(self): # caused by upstream bug in unstack d = date.min data = list( product( ["foo", "bar"], ["A", "B", "C"], ["x1", "x2"], [d + timedelta(i) for i in range(20)], [1.0], ) ) df = DataFrame(data) table = df.pivot_table(values=4, index=[0, 1, 3], columns=[2]) df2 = df.rename(columns=str) table2 = df2.pivot_table(values="4", index=["0", "1", "3"], columns=["2"]) tm.assert_frame_equal(table, table2, check_names=False) def test_pivot_no_level_overlap(self): # GH #1181 data = DataFrame( { "a": ["a", "a", "a", "a", "b", "b", "b", "b"] * 2, "b": [0, 0, 0, 0, 1, 1, 1, 1] * 2, "c": (["foo"] * 4 + ["bar"] * 4) * 2, "value": np.random.randn(16), } ) table = data.pivot_table("value", index="a", columns=["b", "c"]) grouped = data.groupby(["a", "b", "c"])["value"].mean() expected = grouped.unstack("b").unstack("c").dropna(axis=1, how="all") tm.assert_frame_equal(table, expected) def test_pivot_columns_lexsorted(self): n = 10000 dtype = np.dtype( [ ("Index", object), ("Symbol", object), ("Year", int), ("Month", int), ("Day", int), ("Quantity", 
int), ("Price", float), ] ) products = np.array( [ ("SP500", "ADBE"), ("SP500", "NVDA"), ("SP500", "ORCL"), ("NDQ100", "AAPL"), ("NDQ100", "MSFT"), ("NDQ100", "GOOG"), ("FTSE", "DGE.L"), ("FTSE", "TSCO.L"), ("FTSE", "GSK.L"), ], dtype=[("Index", object), ("Symbol", object)], ) items = np.empty(n, dtype=dtype) iproduct = np.random.randint(0, len(products), n) items["Index"] = products["Index"][iproduct] items["Symbol"] = products["Symbol"][iproduct] dr = date_range(date(2000, 1, 1), date(2010, 12, 31)) dates = dr[np.random.randint(0, len(dr), n)] items["Year"] = dates.year items["Month"] = dates.month items["Day"] = dates.day items["Price"] = np.random.lognormal(4.0, 2.0, n) df = DataFrame(items) pivoted = df.pivot_table( "Price", index=["Month", "Day"], columns=["Index", "Symbol", "Year"], aggfunc="mean", ) assert pivoted.columns.is_monotonic def test_pivot_complex_aggfunc(self): f = {"D": ["std"], "E": ["sum"]} expected = self.data.groupby(["A", "B"]).agg(f).unstack("B") result = self.data.pivot_table(index="A", columns="B", aggfunc=f) tm.assert_frame_equal(result, expected) def test_margins_no_values_no_cols(self): # Regression test on pivot table: no values or cols passed. 
result = self.data[["A", "B"]].pivot_table( index=["A", "B"], aggfunc=len, margins=True ) result_list = result.tolist() assert sum(result_list[:-1]) == result_list[-1] def test_margins_no_values_two_rows(self): # Regression test on pivot table: no values passed but rows are a # multi-index result = self.data[["A", "B", "C"]].pivot_table( index=["A", "B"], columns="C", aggfunc=len, margins=True ) assert result.All.tolist() == [3.0, 1.0, 4.0, 3.0, 11.0] def test_margins_no_values_one_row_one_col(self): # Regression test on pivot table: no values passed but row and col # defined result = self.data[["A", "B"]].pivot_table( index="A", columns="B", aggfunc=len, margins=True ) assert result.All.tolist() == [4.0, 7.0, 11.0] def test_margins_no_values_two_row_two_cols(self): # Regression test on pivot table: no values passed but rows and cols # are multi-indexed self.data["D"] = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k"] result = self.data[["A", "B", "C", "D"]].pivot_table( index=["A", "B"], columns=["C", "D"], aggfunc=len, margins=True ) assert result.All.tolist() == [3.0, 1.0, 4.0, 3.0, 11.0] @pytest.mark.parametrize("margin_name", ["foo", "one", 666, None, ["a", "b"]]) def test_pivot_table_with_margins_set_margin_name(self, margin_name): # see gh-3335 msg = ( f'Conflicting name "{margin_name}" in margins|' "margins_name argument must be a string" ) with pytest.raises(ValueError, match=msg): # multi-index index pivot_table( self.data, values="D", index=["A", "B"], columns=["C"], margins=True, margins_name=margin_name, ) with pytest.raises(ValueError, match=msg): # multi-index column pivot_table( self.data, values="D", index=["C"], columns=["A", "B"], margins=True, margins_name=margin_name, ) with pytest.raises(ValueError, match=msg): # non-multi-index index/column pivot_table( self.data, values="D", index=["A"], columns=["B"], margins=True, margins_name=margin_name, ) def test_pivot_timegrouper(self, using_array_manager): df = DataFrame( { "Branch": "A A A A 
A A A B".split(), "Buyer": "Carl Mark Carl Carl Joe Joe Joe Carl".split(), "Quantity": [1, 3, 5, 1, 8, 1, 9, 3], "Date": [ datetime(2013, 1, 1), datetime(2013, 1, 1), datetime(2013, 10, 1), datetime(2013, 10, 2), datetime(2013, 10, 1), datetime(2013, 10, 2), datetime(2013, 12, 2), datetime(2013, 12, 2), ], } ).set_index("Date") expected = DataFrame( np.array([10, 18, 3], dtype="int64").reshape(1, 3), index=pd.DatetimeIndex([datetime(2013, 12, 31)], freq="A"), columns="Carl Joe Mark".split(), ) expected.index.name = "Date" expected.columns.name = "Buyer" result = pivot_table( df, index=Grouper(freq="A"), columns="Buyer", values="Quantity", aggfunc=np.sum, ) tm.assert_frame_equal(result, expected) result = pivot_table( df, index="Buyer", columns=Grouper(freq="A"), values="Quantity", aggfunc=np.sum, ) tm.assert_frame_equal(result, expected.T) expected = DataFrame( np.array([1, np.nan, 3, 9, 18, np.nan]).reshape(2, 3), index=pd.DatetimeIndex( [datetime(2013, 1, 1), datetime(2013, 7, 1)], freq="6MS" ), columns="Carl Joe Mark".split(), ) expected.index.name = "Date" expected.columns.name = "Buyer" if using_array_manager: # INFO(ArrayManager) column without NaNs can preserve int dtype expected["Carl"] = expected["Carl"].astype("int64") result = pivot_table( df, index=Grouper(freq="6MS"), columns="Buyer", values="Quantity", aggfunc=np.sum, ) tm.assert_frame_equal(result, expected) result = pivot_table( df, index="Buyer", columns=Grouper(freq="6MS"), values="Quantity", aggfunc=np.sum, ) tm.assert_frame_equal(result, expected.T) # passing the name df = df.reset_index() result = pivot_table( df, index=Grouper(freq="6MS", key="Date"), columns="Buyer", values="Quantity", aggfunc=np.sum, ) tm.assert_frame_equal(result, expected) result = pivot_table( df, index="Buyer", columns=Grouper(freq="6MS", key="Date"), values="Quantity", aggfunc=np.sum, ) tm.assert_frame_equal(result, expected.T) msg = "'The grouper name foo is not found'" with pytest.raises(KeyError, match=msg): 
pivot_table( df, index=Grouper(freq="6MS", key="foo"), columns="Buyer", values="Quantity", aggfunc=np.sum, ) with pytest.raises(KeyError, match=msg): pivot_table( df, index="Buyer", columns=Grouper(freq="6MS", key="foo"), values="Quantity", aggfunc=np.sum, ) # passing the level df = df.set_index("Date") result = pivot_table( df, index=Grouper(freq="6MS", level="Date"), columns="Buyer", values="Quantity", aggfunc=np.sum, ) tm.assert_frame_equal(result, expected) result = pivot_table( df, index="Buyer", columns=Grouper(freq="6MS", level="Date"), values="Quantity", aggfunc=np.sum, ) tm.assert_frame_equal(result, expected.T) msg = "The level foo is not valid" with pytest.raises(ValueError, match=msg): pivot_table( df, index=Grouper(freq="6MS", level="foo"), columns="Buyer", values="Quantity", aggfunc=np.sum, ) with pytest.raises(ValueError, match=msg): pivot_table( df, index="Buyer", columns=Grouper(freq="6MS", level="foo"), values="Quantity", aggfunc=np.sum, ) # double grouper df = DataFrame( { "Branch": "A A A A A A A B".split(), "Buyer": "Carl Mark Carl Carl Joe Joe Joe Carl".split(), "Quantity": [1, 3, 5, 1, 8, 1, 9, 3], "Date": [ datetime(2013, 11, 1, 13, 0), datetime(2013, 9, 1, 13, 5), datetime(2013, 10, 1, 20, 0), datetime(2013, 10, 2, 10, 0), datetime(2013, 11, 1, 20, 0), datetime(2013, 10, 2, 10, 0), datetime(2013, 10, 2, 12, 0), datetime(2013, 12, 5, 14, 0), ], "PayDay": [ datetime(2013, 10, 4, 0, 0), datetime(2013, 10, 15, 13, 5), datetime(2013, 9, 5, 20, 0), datetime(2013, 11, 2, 10, 0), datetime(2013, 10, 7, 20, 0), datetime(2013, 9, 5, 10, 0), datetime(2013, 12, 30, 12, 0), datetime(2013, 11, 20, 14, 0), ], } ) result = pivot_table( df, index=Grouper(freq="M", key="Date"), columns=Grouper(freq="M", key="PayDay"), values="Quantity", aggfunc=np.sum, ) expected = DataFrame( np.array( [ np.nan, 3, np.nan, np.nan, 6, np.nan, 1, 9, np.nan, 9, np.nan, np.nan, np.nan, np.nan, 3, np.nan, ] ).reshape(4, 4), index=pd.DatetimeIndex( [ datetime(2013, 9, 30), 
datetime(2013, 10, 31), datetime(2013, 11, 30), datetime(2013, 12, 31), ], freq="M", ), columns=pd.DatetimeIndex( [ datetime(2013, 9, 30), datetime(2013, 10, 31), datetime(2013, 11, 30), datetime(2013, 12, 31), ], freq="M", ), ) expected.index.name = "Date" expected.columns.name = "PayDay" tm.assert_frame_equal(result, expected) result = pivot_table( df, index=Grouper(freq="M", key="PayDay"), columns=Grouper(freq="M", key="Date"), values="Quantity", aggfunc=np.sum, ) tm.assert_frame_equal(result, expected.T) tuples = [ (datetime(2013, 9, 30), datetime(2013, 10, 31)), (datetime(2013, 10, 31), datetime(2013, 9, 30)), (datetime(2013, 10, 31), datetime(2013, 11, 30)), (datetime(2013, 10, 31), datetime(2013, 12, 31)), (datetime(2013, 11, 30), datetime(2013, 10, 31)), (datetime(2013, 12, 31), datetime(2013, 11, 30)), ] idx = MultiIndex.from_tuples(tuples, names=["Date", "PayDay"]) expected = DataFrame( np.array( [3, np.nan, 6, np.nan, 1, np.nan, 9, np.nan, 9, np.nan, np.nan, 3] ).reshape(6, 2), index=idx, columns=["A", "B"], ) expected.columns.name = "Branch" result = pivot_table( df, index=[Grouper(freq="M", key="Date"), Grouper(freq="M", key="PayDay")], columns=["Branch"], values="Quantity", aggfunc=np.sum, ) tm.assert_frame_equal(result, expected) result = pivot_table( df, index=["Branch"], columns=[Grouper(freq="M", key="Date"), Grouper(freq="M", key="PayDay")], values="Quantity", aggfunc=np.sum, ) tm.assert_frame_equal(result, expected.T) def test_pivot_datetime_tz(self): dates1 = [ "2011-07-19 07:00:00", "2011-07-19 08:00:00", "2011-07-19 09:00:00", "2011-07-19 07:00:00", "2011-07-19 08:00:00", "2011-07-19 09:00:00", ] dates2 = [ "2013-01-01 15:00:00", "2013-01-01 15:00:00", "2013-01-01 15:00:00", "2013-02-01 15:00:00", "2013-02-01 15:00:00", "2013-02-01 15:00:00", ] df = DataFrame( { "label": ["a", "a", "a", "b", "b", "b"], "dt1": dates1, "dt2": dates2, "value1": np.arange(6, dtype="int64"), "value2": [1, 2] * 3, } ) df["dt1"] = df["dt1"].apply(lambda d: 
pd.Timestamp(d, tz="US/Pacific")) df["dt2"] = df["dt2"].apply(lambda d: pd.Timestamp(d, tz="Asia/Tokyo")) exp_idx = pd.DatetimeIndex( ["2011-07-19 07:00:00", "2011-07-19 08:00:00", "2011-07-19 09:00:00"], tz="US/Pacific", name="dt1", ) exp_col1 = Index(["value1", "value1"]) exp_col2 = Index(["a", "b"], name="label") exp_col = MultiIndex.from_arrays([exp_col1, exp_col2]) expected = DataFrame([[0, 3], [1, 4], [2, 5]], index=exp_idx, columns=exp_col) result = pivot_table(df, index=["dt1"], columns=["label"], values=["value1"]) tm.assert_frame_equal(result, expected) exp_col1 = Index(["sum", "sum", "sum", "sum", "mean", "mean", "mean", "mean"]) exp_col2 = Index(["value1", "value1", "value2", "value2"] * 2) exp_col3 = pd.DatetimeIndex( ["2013-01-01 15:00:00", "2013-02-01 15:00:00"] * 4, tz="Asia/Tokyo", name="dt2", ) exp_col = MultiIndex.from_arrays([exp_col1, exp_col2, exp_col3]) expected = DataFrame( np.array( [ [0, 3, 1, 2, 0, 3, 1, 2], [1, 4, 2, 1, 1, 4, 2, 1], [2, 5, 1, 2, 2, 5, 1, 2], ], dtype="int64", ), index=exp_idx, columns=exp_col, ) result = pivot_table( df, index=["dt1"], columns=["dt2"], values=["value1", "value2"], aggfunc=[np.sum, np.mean], ) tm.assert_frame_equal(result, expected) def test_pivot_dtaccessor(self): # GH 8103 dates1 = [ "2011-07-19 07:00:00", "2011-07-19 08:00:00", "2011-07-19 09:00:00", "2011-07-19 07:00:00", "2011-07-19 08:00:00", "2011-07-19 09:00:00", ] dates2 = [ "2013-01-01 15:00:00", "2013-01-01 15:00:00", "2013-01-01 15:00:00", "2013-02-01 15:00:00", "2013-02-01 15:00:00", "2013-02-01 15:00:00", ] df = DataFrame( { "label": ["a", "a", "a", "b", "b", "b"], "dt1": dates1, "dt2": dates2, "value1": np.arange(6, dtype="int64"), "value2": [1, 2] * 3, } ) df["dt1"] = df["dt1"].apply(lambda d: pd.Timestamp(d)) df["dt2"] = df["dt2"].apply(lambda d: pd.Timestamp(d)) result = pivot_table( df, index="label", columns=df["dt1"].dt.hour, values="value1" ) exp_idx = Index(["a", "b"], name="label") expected = DataFrame( {7: [0, 3], 8: [1, 4], 9: 
[2, 5]},
            index=exp_idx,
            columns=Index([7, 8, 9], name="dt1"),
        )
        tm.assert_frame_equal(result, expected)

        # Both axes may be Series-derived .dt accessors rather than column
        # labels: rows keyed by dt2's month, columns by dt1's hour.
        result = pivot_table(
            df, index=df["dt2"].dt.month, columns=df["dt1"].dt.hour, values="value1"
        )
        expected = DataFrame(
            {7: [0, 3], 8: [1, 4], 9: [2, 5]},
            index=Index([1, 2], name="dt2"),
            columns=Index([7, 8, 9], name="dt1"),
        )
        tm.assert_frame_equal(result, expected)

        # A raw ndarray index (dt2.year.values) combined with a list of two
        # .dt accessors for the columns produces a (dt1, dt2) column MultiIndex.
        result = pivot_table(
            df,
            index=df["dt2"].dt.year.values,
            columns=[df["dt1"].dt.hour, df["dt2"].dt.month],
            values="value1",
        )
        exp_col = MultiIndex.from_arrays(
            [[7, 7, 8, 8, 9, 9], [1, 2] * 3], names=["dt1", "dt2"]
        )
        expected = DataFrame(
            np.array([[0, 3, 1, 4, 2, 5]], dtype="int64"), index=[2013], columns=exp_col
        )
        tm.assert_frame_equal(result, expected)

        # Grouping by an unrelated label array leaves NaN holes where a
        # (label, hour, month) combination has no underlying row.
        result = pivot_table(
            df,
            index=np.array(["X", "X", "X", "X", "Y", "Y"]),
            columns=[df["dt1"].dt.hour, df["dt2"].dt.month],
            values="value1",
        )
        expected = DataFrame(
            np.array(
                [[0, 3, 1, np.nan, 2, np.nan], [np.nan, np.nan, np.nan, 4, np.nan, 5]]
            ),
            index=["X", "Y"],
            columns=exp_col,
        )
        tm.assert_frame_equal(result, expected)

    def test_daily(self):
        # Pivot a 5-year daily series into a year x day-of-year table and check
        # every day-of-year column against a direct boolean-mask selection of
        # the original series.
        rng = date_range("1/1/2000", "12/31/2004", freq="D")
        ts = Series(np.random.randn(len(rng)), index=rng)

        annual = pivot_table(
            DataFrame(ts), index=ts.index.year, columns=ts.index.dayofyear
        )
        # Drop the outer values level (DataFrame(ts) has a single column, 0).
        annual.columns = annual.columns.droplevel(0)

        doy = np.asarray(ts.index.dayofyear)

        for i in range(1, 367):
            subset = ts[doy == i]
            subset.index = subset.index.year
            result = annual[i].dropna()
            tm.assert_series_equal(result, subset, check_names=False)
            assert result.name == i

    def test_monthly(self):
        # Same scheme as test_daily, at month-end frequency: year x month table
        # verified column-by-column against a mask on the original series.
        rng = date_range("1/1/2000", "12/31/2004", freq="M")
        ts = Series(np.random.randn(len(rng)), index=rng)

        annual = pivot_table(DataFrame(ts), index=ts.index.year, columns=ts.index.month)
        annual.columns = annual.columns.droplevel(0)

        month = ts.index.month
        for i in range(1, 13):
            subset = ts[month == i]
            subset.index = subset.index.year
            result = annual[i].dropna()
            tm.assert_series_equal(result, subset, check_names=False)
            assert result.name
== i

    def test_pivot_table_with_iterator_values(self):
        # GH 12017: ``values`` may be any iterable (list, dict key view,
        # generator) — all three spellings must produce the same table.
        aggs = {"D": "sum", "E": "mean"}

        pivot_values_list = pivot_table(
            self.data, index=["A"], values=list(aggs.keys()), aggfunc=aggs
        )

        pivot_values_keys = pivot_table(
            self.data, index=["A"], values=aggs.keys(), aggfunc=aggs
        )
        tm.assert_frame_equal(pivot_values_keys, pivot_values_list)

        agg_values_gen = (value for value in aggs.keys())
        pivot_values_gen = pivot_table(
            self.data, index=["A"], values=agg_values_gen, aggfunc=aggs
        )
        tm.assert_frame_equal(pivot_values_gen, pivot_values_list)

    def test_pivot_table_margins_name_with_aggfunc_list(self):
        # GH 13354: a custom ``margins_name`` must appear in both the row
        # index and the column MultiIndex when ``aggfunc`` is a list.
        margins_name = "Weekly"
        costs = DataFrame(
            {
                "item": ["bacon", "cheese", "bacon", "cheese"],
                "cost": [2.5, 4.5, 3.2, 3.3],
                "day": ["M", "M", "T", "T"],
            }
        )
        table = costs.pivot_table(
            index="item",
            columns="day",
            margins=True,
            margins_name=margins_name,
            aggfunc=[np.mean, max],
        )
        ix = Index(["bacon", "cheese", margins_name], dtype="object", name="item")
        tups = [
            ("mean", "cost", "M"),
            ("mean", "cost", "T"),
            ("mean", "cost", margins_name),
            ("max", "cost", "M"),
            ("max", "cost", "T"),
            ("max", "cost", margins_name),
        ]
        cols = MultiIndex.from_tuples(tups, names=[None, None, "day"])
        # Only the axis labels are under test here, so reuse ``table.values``.
        expected = DataFrame(table.values, index=ix, columns=cols)
        tm.assert_frame_equal(table, expected)

    def test_categorical_margins(self, observed, request):
        if observed:
            request.node.add_marker(
                pytest.mark.xfail(
                    reason="GH#17035 (np.mean of ints is casted back to ints)"
                )
            )
        # GH 10989
        df = DataFrame(
            {"x": np.arange(8), "y": np.arange(8) // 4, "z": np.arange(8) % 2}
        )

        expected = DataFrame([[1.0, 2.0, 1.5], [5, 6, 5.5], [3, 4, 3.5]])
        expected.index = Index([0, 1, "All"], name="y")
        expected.columns = Index([0, 1, "All"], name="z")

        table = df.pivot_table("x", "y", "z", dropna=observed, margins=True)
        tm.assert_frame_equal(table, expected)

    def test_categorical_margins_category(self, observed, request):
        if observed:
            request.node.add_marker(
                pytest.mark.xfail(
                    reason="GH#17035 (np.mean of ints
is casted back to ints)" ) ) df = DataFrame( {"x": np.arange(8), "y": np.arange(8) // 4, "z": np.arange(8) % 2} ) expected = DataFrame([[1.0, 2.0, 1.5], [5, 6, 5.5], [3, 4, 3.5]]) expected.index = Index([0, 1, "All"], name="y") expected.columns = Index([0, 1, "All"], name="z") df.y = df.y.astype("category") df.z = df.z.astype("category") table = df.pivot_table("x", "y", "z", dropna=observed, margins=True) tm.assert_frame_equal(table, expected) def test_margins_casted_to_float(self, observed): # GH 24893 df = DataFrame( { "A": [2, 4, 6, 8], "B": [1, 4, 5, 8], "C": [1, 3, 4, 6], "D": ["X", "X", "Y", "Y"], } ) result = pivot_table(df, index="D", margins=True) expected = DataFrame( {"A": [3, 7, 5], "B": [2.5, 6.5, 4.5], "C": [2, 5, 3.5]}, index=Index(["X", "Y", "All"], name="D"), ) tm.assert_frame_equal(result, expected) def test_pivot_with_categorical(self, observed, ordered): # gh-21370 idx = [np.nan, "low", "high", "low", np.nan] col = [np.nan, "A", "B", np.nan, "A"] df = DataFrame( { "In": Categorical(idx, categories=["low", "high"], ordered=ordered), "Col": Categorical(col, categories=["A", "B"], ordered=ordered), "Val": range(1, 6), } ) # case with index/columns/value result = df.pivot_table( index="In", columns="Col", values="Val", observed=observed ) expected_cols = pd.CategoricalIndex(["A", "B"], ordered=ordered, name="Col") expected = DataFrame(data=[[2.0, np.nan], [np.nan, 3.0]], columns=expected_cols) expected.index = Index( Categorical(["low", "high"], categories=["low", "high"], ordered=ordered), name="In", ) tm.assert_frame_equal(result, expected) # case with columns/value result = df.pivot_table(columns="Col", values="Val", observed=observed) expected = DataFrame( data=[[3.5, 3.0]], columns=expected_cols, index=Index(["Val"]) ) tm.assert_frame_equal(result, expected) def test_categorical_aggfunc(self, observed): # GH 9534 df = DataFrame( {"C1": ["A", "B", "C", "C"], "C2": ["a", "a", "b", "b"], "V": [1, 2, 3, 4]} ) df["C1"] = df["C1"].astype("category") 
result = df.pivot_table(
            "V", index="C1", columns="C2", dropna=observed, aggfunc="count"
        )

        expected_index = pd.CategoricalIndex(
            ["A", "B", "C"], categories=["A", "B", "C"], ordered=False, name="C1"
        )
        expected_columns = Index(["a", "b"], name="C2")
        expected_data = np.array([[1, 0], [1, 0], [0, 2]], dtype=np.int64)
        expected = DataFrame(
            expected_data, index=expected_index, columns=expected_columns
        )
        tm.assert_frame_equal(result, expected)

    def test_categorical_pivot_index_ordering(self, observed):
        # GH 8731: with a categorical index, unobserved categories (months with
        # no sales) are kept as all-zero rows unless ``observed=True``.
        df = DataFrame(
            {
                "Sales": [100, 120, 220],
                "Month": ["January", "January", "January"],
                "Year": [2013, 2014, 2013],
            }
        )
        months = [
            "January",
            "February",
            "March",
            "April",
            "May",
            "June",
            "July",
            "August",
            "September",
            "October",
            "November",
            "December",
        ]
        df["Month"] = df["Month"].astype("category").cat.set_categories(months)
        result = df.pivot_table(
            values="Sales",
            index="Month",
            columns="Year",
            observed=observed,
            aggfunc="sum",
        )
        expected_columns = pd.Int64Index([2013, 2014], name="Year")
        expected_index = pd.CategoricalIndex(
            months, categories=months, ordered=False, name="Month"
        )
        # Only January has data; the other 11 category rows aggregate to zero.
        expected_data = [[320, 120]] + [[0, 0]] * 11
        expected = DataFrame(
            expected_data, index=expected_index, columns=expected_columns
        )
        if observed:
            expected = expected.loc[["January"]]

        tm.assert_frame_equal(result, expected)

    def test_pivot_table_not_series(self):
        # GH 4386
        # pivot_table always returns a DataFrame
        # when values is not list like and columns is None
        # and aggfunc is not instance of list
        df = DataFrame({"col1": [3, 4, 5], "col2": ["C", "D", "E"], "col3": [1, 3, 9]})

        result = df.pivot_table("col1", index=["col3", "col2"], aggfunc=np.sum)
        m = MultiIndex.from_arrays([[1, 3, 9], ["C", "D", "E"]], names=["col3", "col2"])
        expected = DataFrame([3, 4, 5], index=m, columns=["col1"])
        tm.assert_frame_equal(result, expected)

        result = df.pivot_table("col1", index="col3", columns="col2", aggfunc=np.sum)
        expected = DataFrame(
            [[3, np.NaN, np.NaN], [np.NaN, 4, np.NaN], [np.NaN, np.NaN, 5]],
index=Index([1, 3, 9], name="col3"), columns=Index(["C", "D", "E"], name="col2"), ) tm.assert_frame_equal(result, expected) result = df.pivot_table("col1", index="col3", aggfunc=[np.sum]) m = MultiIndex.from_arrays([["sum"], ["col1"]]) expected = DataFrame([3, 4, 5], index=Index([1, 3, 9], name="col3"), columns=m) tm.assert_frame_equal(result, expected) def test_pivot_margins_name_unicode(self): # issue #13292 greek = "\u0394\u03bf\u03ba\u03b9\u03bc\u03ae" frame = DataFrame({"foo": [1, 2, 3]}) table = pivot_table( frame, index=["foo"], aggfunc=len, margins=True, margins_name=greek ) index = Index([1, 2, 3, greek], dtype="object", name="foo") expected = DataFrame(index=index) tm.assert_frame_equal(table, expected) def test_pivot_string_as_func(self): # GH #18713 # for correctness purposes data = DataFrame( { "A": [ "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar", "foo", "foo", "foo", ], "B": [ "one", "one", "one", "two", "one", "one", "one", "two", "two", "two", "one", ], "C": range(11), } ) result = pivot_table(data, index="A", columns="B", aggfunc="sum") mi = MultiIndex( levels=[["C"], ["one", "two"]], codes=[[0, 0], [0, 1]], names=[None, "B"] ) expected = DataFrame( {("C", "one"): {"bar": 15, "foo": 13}, ("C", "two"): {"bar": 7, "foo": 20}}, columns=mi, ).rename_axis("A") tm.assert_frame_equal(result, expected) result = pivot_table(data, index="A", columns="B", aggfunc=["sum", "mean"]) mi = MultiIndex( levels=[["sum", "mean"], ["C"], ["one", "two"]], codes=[[0, 0, 1, 1], [0, 0, 0, 0], [0, 1, 0, 1]], names=[None, None, "B"], ) expected = DataFrame( { ("mean", "C", "one"): {"bar": 5.0, "foo": 3.25}, ("mean", "C", "two"): {"bar": 7.0, "foo": 6.666666666666667}, ("sum", "C", "one"): {"bar": 15, "foo": 13}, ("sum", "C", "two"): {"bar": 7, "foo": 20}, }, columns=mi, ).rename_axis("A") tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "f, f_numpy", [ ("sum", np.sum), ("mean", np.mean), ("std", np.std), (["sum", "mean"], [np.sum, np.mean]), 
(["sum", "std"], [np.sum, np.std]), (["std", "mean"], [np.std, np.mean]), ], ) def test_pivot_string_func_vs_func(self, f, f_numpy): # GH #18713 # for consistency purposes result = pivot_table(self.data, index="A", columns="B", aggfunc=f) expected = pivot_table(self.data, index="A", columns="B", aggfunc=f_numpy) tm.assert_frame_equal(result, expected) @pytest.mark.slow def test_pivot_number_of_levels_larger_than_int32(self): # GH 20601 df = DataFrame( {"ind1": np.arange(2 ** 16), "ind2": np.arange(2 ** 16), "count": 0} ) msg = "Unstacked DataFrame is too big, causing int32 overflow" with pytest.raises(ValueError, match=msg): df.pivot_table( index="ind1", columns="ind2", values="count", aggfunc="count" ) def test_pivot_table_aggfunc_dropna(self, dropna): # GH 22159 df = DataFrame( { "fruit": ["apple", "peach", "apple"], "size": [1, 1, 2], "taste": [7, 6, 6], } ) def ret_one(x): return 1 def ret_sum(x): return sum(x) def ret_none(x): return np.nan result = pivot_table( df, columns="fruit", aggfunc=[ret_sum, ret_none, ret_one], dropna=dropna ) data = [[3, 1, np.nan, np.nan, 1, 1], [13, 6, np.nan, np.nan, 1, 1]] col = MultiIndex.from_product( [["ret_sum", "ret_none", "ret_one"], ["apple", "peach"]], names=[None, "fruit"], ) expected = DataFrame(data, index=["size", "taste"], columns=col) if dropna: expected = expected.dropna(axis="columns") tm.assert_frame_equal(result, expected) def test_pivot_table_aggfunc_scalar_dropna(self, dropna): # GH 22159 df = DataFrame( {"A": ["one", "two", "one"], "x": [3, np.nan, 2], "y": [1, np.nan, np.nan]} ) result = pivot_table(df, columns="A", aggfunc=np.mean, dropna=dropna) data = [[2.5, np.nan], [1, np.nan]] col = Index(["one", "two"], name="A") expected = DataFrame(data, index=["x", "y"], columns=col) if dropna: expected = expected.dropna(axis="columns") tm.assert_frame_equal(result, expected) def test_pivot_table_empty_aggfunc(self): # GH 9186 & GH 13483 df = DataFrame( { "A": [2, 2, 3, 3, 2], "id": [5, 6, 7, 8, 9], "C": ["p", "q", 
"q", "p", "q"], "D": [None, None, None, None, None], } ) result = df.pivot_table(index="A", columns="D", values="id", aggfunc=np.size) expected = DataFrame(index=Index([], dtype="int64", name="A")) expected.columns.name = "D" tm.assert_frame_equal(result, expected) def test_pivot_table_no_column_raises(self): # GH 10326 def agg(arr): return np.mean(arr) foo = DataFrame({"X": [0, 0, 1, 1], "Y": [0, 1, 0, 1], "Z": [10, 20, 30, 40]}) with pytest.raises(KeyError, match="notpresent"): foo.pivot_table("notpresent", "X", "Y", aggfunc=agg) def test_pivot_table_doctest_case(self): # TODO: better name. the relevant characteristic is that # the call to maybe_downcast_to_dtype(agged[v], data[v].dtype) in # __internal_pivot_table has `agged[v]` a DataFrame instead of Series, # i.e agged.columns is not unique df = DataFrame( { "A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"], "B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"], "C": [ "small", "large", "large", "small", "small", "large", "small", "small", "large", ], "D": [1, 2, 2, 3, 3, 4, 5, 6, 7], "E": [2, 4, 5, 5, 6, 6, 8, 9, 9], } ) table = pivot_table( df, values=["D", "E"], index=["A", "C"], aggfunc={"D": np.mean, "E": [min, max, np.mean]}, ) cols = MultiIndex.from_tuples( [("D", "mean"), ("E", "max"), ("E", "mean"), ("E", "min")] ) index = MultiIndex.from_tuples( [("bar", "large"), ("bar", "small"), ("foo", "large"), ("foo", "small")], names=["A", "C"], ) vals = np.array( [ [5.5, 9.0, 7.5, 6.0], [5.5, 9.0, 8.5, 8.0], [2.0, 5.0, 4.5, 4.0], [2.33333333, 6.0, 4.33333333, 2.0], ] ) expected = DataFrame(vals, columns=cols, index=index) tm.assert_frame_equal(table, expected) def test_pivot_table_sort_false(self): # GH#39143 df = DataFrame( { "a": ["d1", "d4", "d3"], "col": ["a", "b", "c"], "num": [23, 21, 34], "year": ["2018", "2018", "2019"], } ) result = df.pivot_table( index=["a", "col"], columns="year", values="num", aggfunc="sum", sort=False ) expected = DataFrame( [[23, np.nan], [21, 
np.nan], [np.nan, 34]],
            columns=Index(["2018", "2019"], name="year"),
            index=MultiIndex.from_arrays(
                [["d1", "d4", "d3"], ["a", "b", "c"]], names=["a", "col"]
            ),
        )
        tm.assert_frame_equal(result, expected)


class TestPivot:
    """Tests for DataFrame.pivot (reshape without aggregation)."""

    def test_pivot(self):
        # Basic index/columns/values pivot plus axis-name propagation.
        data = {
            "index": ["A", "B", "C", "C", "B", "A"],
            "columns": ["One", "One", "One", "Two", "Two", "Two"],
            "values": [1.0, 2.0, 3.0, 3.0, 2.0, 1.0],
        }

        frame = DataFrame(data)
        pivoted = frame.pivot(index="index", columns="columns", values="values")

        expected = DataFrame(
            {
                "One": {"A": 1.0, "B": 2.0, "C": 3.0},
                "Two": {"A": 1.0, "B": 2.0, "C": 3.0},
            }
        )
        expected.index.name, expected.columns.name = "index", "columns"
        tm.assert_frame_equal(pivoted, expected)

        # name tracking
        assert pivoted.index.name == "index"
        assert pivoted.columns.name == "columns"

        # don't specify values
        pivoted = frame.pivot(index="index", columns="columns")
        assert pivoted.index.name == "index"
        assert pivoted.columns.names == (None, "columns")

    def test_pivot_duplicates(self):
        # Duplicate (a, b) pairs cannot be pivoted into a unique cell grid.
        data = DataFrame(
            {
                "a": ["bar", "bar", "foo", "foo", "foo"],
                "b": ["one", "two", "one", "one", "two"],
                "c": [1.0, 2.0, 3.0, 3.0, 4.0],
            }
        )
        with pytest.raises(ValueError, match="duplicate entries"):
            data.pivot("a", "b", "c")

    def test_pivot_empty(self):
        # Pivoting an empty frame yields an empty frame rather than raising.
        df = DataFrame(columns=["a", "b", "c"])
        result = df.pivot("a", "b", "c")
        expected = DataFrame()
        tm.assert_frame_equal(result, expected, check_names=False)

    def test_pivot_integer_bug(self):
        # Integer positional column labels must survive as index/column keys.
        df = DataFrame(data=[("A", "1", "A1"), ("B", "2", "B2")])

        result = df.pivot(index=1, columns=0, values=2)
        # repr() once triggered the original failure; smoke-check it.
        repr(result)

        tm.assert_index_equal(result.columns, Index(["A", "B"], name=0))

    def test_pivot_index_none(self):
        # GH#3962
        # With index=None, the existing frame index is used as the pivot rows.
        data = {
            "index": ["A", "B", "C", "C", "B", "A"],
            "columns": ["One", "One", "One", "Two", "Two", "Two"],
            "values": [1.0, 2.0, 3.0, 3.0, 2.0, 1.0],
        }
        frame = DataFrame(data).set_index("index")
        result = frame.pivot(columns="columns", values="values")
        expected = DataFrame(
            {
                "One": {"A": 1.0, "B": 2.0, "C": 3.0},
                "Two": {"A": 1.0, "B": 2.0, "C": 3.0},
            }
        )
        expected.index.name, expected.columns.name = "index", "columns"
        tm.assert_frame_equal(result, expected)

        # omit values
        result = frame.pivot(columns="columns")

        # Without values, remaining columns form an extra (unnamed) level.
        expected.columns = MultiIndex.from_tuples(
            [("values", "One"), ("values", "Two")], names=[None, "columns"]
        )
        expected.index.name = "index"
        tm.assert_frame_equal(result, expected, check_names=False)
        assert result.index.name == "index"
        assert result.columns.names == (None, "columns")
        expected.columns = expected.columns.droplevel(0)

        result = frame.pivot(columns="columns", values="values")

        expected.columns.name = "columns"
        tm.assert_frame_equal(result, expected)

    def test_pivot_index_list_values_none_immutable_args(self):
        # GH37635: pivot must not mutate the list arguments passed by callers.
        df = DataFrame(
            {
                "lev1": [1, 1, 1, 2, 2, 2],
                "lev2": [1, 1, 2, 1, 1, 2],
                "lev3": [1, 2, 1, 2, 1, 2],
                "lev4": [1, 2, 3, 4, 5, 6],
                "values": [0, 1, 2, 3, 4, 5],
            }
        )
        index = ["lev1", "lev2"]
        columns = ["lev3"]
        result = df.pivot(index=index, columns=columns, values=None)

        expected = DataFrame(
            np.array(
                [
                    [1.0, 2.0, 0.0, 1.0],
                    [3.0, np.nan, 2.0, np.nan],
                    [5.0, 4.0, 4.0, 3.0],
                    [np.nan, 6.0, np.nan, 5.0],
                ]
            ),
            index=MultiIndex.from_arrays(
                [(1, 1, 2, 2), (1, 2, 1, 2)], names=["lev1", "lev2"]
            ),
            columns=MultiIndex.from_arrays(
                [("lev4", "lev4", "values", "values"), (1, 2, 1, 2)],
                names=[None, "lev3"],
            ),
        )
        tm.assert_frame_equal(result, expected)

        # Caller-supplied lists are unchanged after the call.
        assert index == ["lev1", "lev2"]
        assert columns == ["lev3"]
codeparrot/github-code-clean
# -*- coding: utf-8 -*- ''' GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/> Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU General Public License is a free, copyleft license for software and other kinds of works. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. 
Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. “This License” refers to version 3 of the GNU General Public License. “Copyright” also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. “The Program” refers to any copyrightable work licensed under this License. 
Each licensee is addressed as “you”. “Licensees” and “recipients” may be individuals or organizations. To “modify” a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a “modified version” of the earlier work or a work “based on” the earlier work. A “covered work” means either the unmodified Program or a work based on the Program. To “propagate” a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To “convey” a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays “Appropriate Legal Notices” to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The “source code” for a work means the preferred form of the work for making modifications to it. “Object code” means any non-source form of a work. 
A “Standard Interface” means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The “System Libraries” of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A “Major Component”, in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The “Corresponding Source” for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. 
All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. 
When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: * a) The work must carry prominent notices stating that you modified it, and giving a relevant date. * b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to “keep intact all notices”. * c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. 
This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. * d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an “aggregate” if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: * a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. 
* b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. * c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. * d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. * e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. 
A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A “User Product” is either (1) a “consumer product”, which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, “normally used” refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. “Installation Information” for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. 
But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. “Additional permissions” are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: * a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or * b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or * c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or * d) Limiting the use for publicity purposes of names of licensors or authors of the material; or * e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or * f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered “further restrictions” within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. 
If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. 
Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An “entity transaction” is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A “contributor” is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. 
The work thus licensed is called the contributor's “contributor version”. A contributor's “essential patent claims” are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, “control” includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a “patent license” is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To “grant” such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. 
“Knowingly relying” means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is “discriminatory” if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. 
If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Use with the GNU Affero General Public License. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License “or any later version” applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. 
If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. 
If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS ''' # fix a bug in uuid, import it first !! import uuid import os import sys import ConfigParser #sys.path.append('/usr/lib/python/') #sys.path.append('/usr/lib64/python2.4/site-packages') #sys.path.append('/usr/lib64/python2.4/site-packages/gtk-2.0') #sys.path.append(os.environ['CUON_PATH']) #try: import pygtk #except: # print 'No python-module pygtk found. please install first' # sys.exit(0) # test 1 import os.path import shlex, subprocess pygtk.require('2.0') import gtk try: import gtk.glade except: pass import gobject import locale from locale import gettext as _ import cuon.Addresses.addresses import cuon.Addresses.SingleAddress import cuon.Addresses.SinglePartner import cuon.Addresses.SingleScheduling print 'import Address' from xmlrpclib import ServerProxy import cuon.Articles.articles import cuon.Bank.bank import cuon.Addresses.contact try: import cuon.Clients.clients except Exception, params: print 'import failed' print Exception, params import cuon.Leasing.leasing try: import cuon.Order.order import cuon.Proposal.proposal import cuon.Enquiry.enquiry except Exception, params: print 'some Order modul missing' import cuon.User.user import cuon.Preferences.preferences import cuon.PrefsFinance.prefsFinance import cuon.Stock.stock import cuon.XML.MyXML from cuon.TypeDefs.typedefs import typedefs from cuon.Windows.windows import windows import cPickle import cuon.Databases.dumps from cuon.TypeDefs.typedefs_server import typedefs_server import cuon.Databases.cyr_load_table #import threading import cuon.Staff.staff import cuon.Project.project import commands import 
cuon.Databases.SingleDataTreeModel import cuon.Databases.SingleCuon import cuon.Finances.invoicebook import cuon.Finances.bookkeeping import cuon.Finances.cashAccountBook import cuon.Calendar.calendar try: import cuon.Web2.web2 except: print 'no Module Web2' try: from PIL import Image except: print 'no PIL Image found' try: import cuon.SQL_Shell.sql_shell except: pass # localisation #import locale, gettext import locale from locale import gettext as _ try: #import gtkhtml2 import webkit except: print 'python webkit not found' import time import cuon.E_Mail.imap_dms #http connections import httplib, urllib #try: # import profile #except: # print "no Profile" import cuon.DMS.documentTools import bz2 try: import atexit from guppy import hpy except: pass #try: # _last_log_time = time.time() # _logfile = open('logfile.txt', 'w') #except Exception, params: # print Exception, params # print 'profile error' #print 'profile vars: ', _last_log_time class MainWindow(windows): """ @author: Juergen Hamel @organization: Cyrus-Computer GmbH, D-32584 Loehne @copyright: by Juergen Hamel @license: GPL V3 ( GNU GENERAL PUBLIC LICENSE ) @contact: jh@cyrus.de """ def __init__(self, sT): """ @param sT: set the Starttype of C.U.O.N., normaly client or server """ windows.__init__(self) self.sStartType = sT self.Version = {'Major': 14, 'Minor': 10, 'Rev': 27, 'Species': 0, 'Maschine': 'Linux,BSD,Windows,Mac'} self.sTitle = `self.Version['Major']` + '.' 
+ `self.Version['Minor']` + '-' + `self.Version['Rev']` self.t0 = None self.t1 = None self.t2 = None self.t3 = None self.dicParams = {} self.allTables = {} self.sDebug = 'NO' self.ModulNumber = self.MN['Mainwindow'] self.extMenucommand = {} self.store = None self.connectTreeId = None self.singleAddress = None self.singlePartner = None self.singleSchedul = None self.schedulHash1 = None self.schedulHash2 = None self.schedulHash3 = None self.ClientID = 0 self.sv = ServerProxy("http://localhost:" + `self.td.ipc_port1`,allow_none = 1) self.webview = None #self.extMenucommand['ext1'] = 'Test' #set this Functions to None def loadUserInfo(self): pass def checkClient(self): pass def delete_event(self, widget, event, data=None): self.on_end1_activate(None) return False def destroy(self, widget, data=None): print "destroy signal occurred" self.on_end1_activate(None) def on_end1_activate(self,event): print "exit cuon" #clean up the tmp-files try: os.system( 'rm ' + os.path.normpath(self.td.cuon_path + '/cuon__*' )) except Exception, params: print 'prm1', Exception, params #pass try: os.system( 'rm ' + os.path.normpath(self.td.cuon_path + '/cuon_data/dms/cuon__*' )) except Exception, params: print 'prm1', Exception, params #pass try: os.system( 'rm ' + os.path.normpath(self.td.cuon_path + '/cuon_data/dms/*__dms*' )) except Exception, params: print 'prm1', Exception, params #pass try: os.system( 'rm ' + os.path.normpath(self.dicUser['prefPath']['tmp'] + '/*__dms*' )) except Exception, params: print 'prm2',Exception, params #pass try: os.system( 'rm ' + os.path.normpath( self.td.cuon_path + '/*__dms*' )) except Exception, params: print 'prm3',Exception, params #pass try: os.system( 'rm ' + os.path.normpath( './*__dms*' )) except Exception, params: print 'prm3',Exception, params self.on_logout1_activate(None) self.gtk_main_quit() def on_databases1_activate(self,event): import cuon.Databases.databases daba = cuon.Databases.databases.databaseswindow() def 
on_login1_activate(self,event): import cuon.Login.login print 'lgi client id = ', self.ClientID lgi = cuon.Login.login.loginwindow( [self.getWidget('eUserName')], None, Username, PASSWORD, self.ClientID) self.openDB() self.oUser = self.loadObject('User') self.oUser.setParams(self.dicParams) self.saveObject('User', self.oUser) self.closeDB() if self.oUser.getUserName()== 'EMPTY': pass else: self.getWidget('eServer').set_text(self.td.server) #choose the client #sys.exit() self.on_clients1_activate(None) print 'Hallo - client' self.checkMenus() print 'ShowNews = ', self.dicUser['showNews'] if self.dicUser['showNews'] : self.activateClick('onlineNews') print "oUserParams = ", self.oUser.Params def checkMenus(self): liModullist = self.rpc.callRP('User.getModulList', self.oUser.getSqlDicUser()) #print liModullist if self.sStartType == 'server': self.enableMenuItem('serverMode') self.disableMenuItem('user') self.enableMenuItem('login') misc_menu = False #print 'LI_MODULELIST' print liModullist for iL in liModullist: #print iL if 'all' in iL: #print 'key all found' #data self.addEnabledMenuItems('work','mi_addresses1') self.addEnabledMenuItems('work','mi_articles1') self.addEnabledMenuItems('work','mi_bibliographic') self.addEnabledMenuItems('work','mi_clients1') self.addEnabledMenuItems('work','contracts1') self.addEnabledMenuItems('work','mi_leasing1') print 'enableMenuItem staff' self.addEnabledMenuItems('work','mi_staff1') print 'enableMenuItem staff end' #action self.addEnabledMenuItems('work','mi_order1') self.addEnabledMenuItems('work','mi_stock1') self.addEnabledMenuItems('work','mi_dms1') self.addEnabledMenuItems('work','mi_supportticket1') #accounting self.addEnabledMenuItems('work','mi_cash_account_book1') # extras self.addEnabledMenuItems('work','mi_expert_system1') self.addEnabledMenuItems('work','mi_project1') ## self.addEnabledMenuItems('work','mi_forms1') ## self.addEnabledMenuItems('work','mi_forms_addresses1') #tools 
self.addEnabledMenuItems('work','mi_preferences1') self.addEnabledMenuItems('work','mi_user1') self.addEnabledMenuItems('work','mi_finances1') #self.addEnabledMenuItems('work','mi_project1') self.addEnabledMenuItems('work','mi_import_data1') self.enableMenuItem('work') if iL.has_key('addresses'): self.addEnabledMenuItems('misc','mi_addresses1') misc_menu = True if iL.has_key('articles'): self.addEnabledMenuItems('misc','mi_articles1') misc_menu = True if iL.has_key('biblio'): self.addEnabledMenuItems('misc','mi_bibliographic') misc_menu = True if iL.has_key('clients'): self.addEnabledMenuItems('misc','mi_clients1') misc_menu = True if iL.has_key('staff'): self.addEnabledMenuItems('misc','mi_staff1') misc_menu = True if iL.has_key('order'): self.addEnabledMenuItems('misc','mi_order1') misc_menu = True if iL.has_key('stock'): self.addEnabledMenuItems('misc','mi_stock1') misc_menu = True if iL.has_key('dms'): self.addEnabledMenuItems('misc','mi_dms1') misc_menu = True if iL.has_key('account_book'): self.addEnabledMenuItems('misc','mi_cash_account_book1') misc_menu = True if iL.has_key('expert_system'): self.addEnabledMenuItems('misc','mi_expert_system1') misc_menu = True if iL.has_key('project'): print 'key project found ' self.addEnabledMenuItems('misc','mi_project1') misc_menu = True if iL.has_key('web2'): print 'key web2 found ' self.addEnabledMenuItems('misc','web2') misc_menu = True if iL.has_key('forms'): print 'key forms found ' self.addEnabledMenuItems('misc','forms1') misc_menu = True print '-----------------------' if iL.has_key('forms_addresses'): print 'key forms_addresses found ' self.addEnabledMenuItems('misc','forms_addresses1') self.addEnabledMenuItems('misc','mi_addresses_notes_misc1') self.addEnabledMenuItems('misc','mi_addresses_notes_contacter1') self.addEnabledMenuItems('misc','mi_addresses_notes_representant1') self.addEnabledMenuItems('misc','mi_addresses_notes_salesman1') misc_menu = True if iL.has_key('experimental'): print 'key experimental 
found' self.addEnabledMenuItems('experimental','mi_mayavi1') self.addEnabledMenuItems('experimental','mi_test1') self.enableMenuItem('experimental') if iL.has_key('extendet_gpl'): try: liExtGpl = iL['extendet_gpl'] print 'Ext.GPL =', liExtGpl for newProgram in liExtGpl: print newProgram mi1 = self.addMenuItem(self.getWidget(newProgram['MenuItem']['Main']),newProgram['MenuItem']['Sub']) try: print 'new Item = ', `mi1` if newProgram['MenuItem']['ExternalNumber'] == 'ext1': mi1.connect("activate", self.on_ext1_activate) elif newProgram['MenuItem']['ExternalNumber'] == 'ext2': mi1.connect("activate", self.on_ext2_activate) elif newProgram['MenuItem']['ExternalNumber'] == 'ext3': mi1.connect("activate", self.on_ext3_activate) elif newProgram['MenuItem']['ExternalNumber'] == 'ext4': mi1.connect("activate", self.on_ext4_activate) if newProgram.has_key('Imports'): newImports = newProgram['Imports'] for nI in newImports: try: print 'import ext Module 1', nI exec('import ' + nI) print 'import extendet module 2', nI except Exception, params: print Exception, params if newProgram.has_key('MenuStart'): print 'MenuStart = ', newProgram['MenuItem']['ExternalNumber'] self.extMenucommand[newProgram['MenuItem']['ExternalNumber']] = newProgram['MenuStart'] if newProgram.has_key('Start'): exec(newProgram['Start']) print 'EXEC = ', newProgram['Start'] except Exception,params: print Exception,params except Exception,params: print Exception,params if misc_menu: self.enableMenuItem('misc') def on_logout1_activate(self, event): print 'Logout' try: self.rpc.callRP('Database.logout', self.oUser.getUserName()) except: print 'Exception' self.disableMenuItem('login') self.enableMenuItem('user') def on_eUserName_changed(self, event): if self.getWidget('eUserName').get_text() != 'EMPTY': print 'User changed 22' self.oUser = self.loadObject('User') print 'sDebug (Cuon) = ' + self.sDebug self.oUser.setDebug(self.sDebug) self.saveObject('User', self.oUser) # self.openDB() #if 
self.startProgressBar(): if not self.allTables: self.generateLocalSqlObjects() # self.stopProgressBar() #print self.oUser.getDicUser() # now start scheduling print 'Client = ', self.oUser.getSqlDicUser()['client'] def generateSqlObjects(self): self.setProgressBar( 0.4) entryList = self.rpc.callRP('Database.executeNormalQuery',"select skey from cuon where skey ~* 'entry_' ") #print entryList self.openDB() for i in entryList: #print i['skey'] sk = self.rpc.callRP('Database.getInfo', i['skey']) self.saveObject(i['skey'],sk) #print sk self.closeDB() #self.rpc.callRP('src.Databases.py_getInfoOfTable', 'allTables') at = self.rpc.callRP('Database.getInfo', 'allTables') #print 'at23 = ', `at` self.setProgressBar( 3.0) liAllTables = cPickle.loads(eval(self.doDecode(at))) #sys.exit(0) #print 'liAllTables = ' #print liAllTables iCount = len(liAllTables) for i in range(iCount): self.loadSqlDefs(liAllTables, i) self.setProgressBar( (float(i) * 1.0/float(iCount) * 100.0) + 5.0) #print 'Progress-Value = ' + str(float(i) * 1.0/float(iCount) * 100.0) #print self.allTables def generateLocalSqlObjects(self): at = self.rpc.callRP('Database.getInfo', 'allTables') #print 'at24 = ', `at` liAllTables = cPickle.loads(eval(self.doDecode(at))) #liAllTables = cPickle.loads(self.rpc.callRP('src.Databases.py_getInfoOfTable', 'allTables')) #print 'liAllTables = ', liAllTables #print liAllTables iCount = len(liAllTables) #print 'iCount = ', iCount for i in range(iCount): self.loadLocalSqlDefs(liAllTables, i) #self.setProgressBar(float(i) * 1.0/float(iCount) * 100.0) #print 'Progress-Value = ' + str(float(i) * 1.0/float(iCount) * 100.0) #print self.allTables def loadSqlDefs(self, liAllTables, i ): try: clt = cuon.Databases.cyr_load_table.cyr_load_table() #print 'Table0 = ', liAllTables[i] if liAllTables[i].find('_history') < 0: #print 'Table = ', liAllTables[i] self.allTables[liAllTables[i]] = clt.loadTable(liAllTables[i]) except Exception, param: print 'ERROR SQL Defs' print Exception print param 
print liAllTables[i] def loadLocalSqlDefs(self, liAllTables, i ): #print 'loadLocalSQL1 ', liAllTables #print 'loadLocalSQL2 ', i clt = cuon.Databases.cyr_load_table.cyr_load_table() self.allTables[liAllTables[i]] = clt.loadLocalTable(liAllTables[i]) #print 'loadLocalSQL3 ', `self.allTables` # Data-Menu #--> def on_addresses1_activate(self,event): print "ipc-port", self.td.ipc_port1 if ipc_port1 and "ad" in td.ipc_list: adr = self.sv.address.initSN() else: adr = cuon.Addresses.addresses.addresswindow(self.allTables) def on_articles1_activate(self,event): art = cuon.Articles.articles.articleswindow(self.allTables) def on_bank1_activate(self,event): bank = cuon.Bank.bank.bankwindow(self.allTables) def on_bibliographic_activate(self, event): import cuon.Biblio.biblio bib = cuon.Biblio.biblio.bibliowindow(self.allTables) def on_clients1_activate(self, event): #print self.allTables self.dicUser = self.oUser.getDicUser() if event: self.ClientID = 0 print 'cli = ', self.ClientID cli = cuon.Clients.clients.clientswindow(self.allTables, self.ClientID, eClient = self.getWidget('eClient')) def on_staff1_activate(self, event): staff = cuon.Staff.staff.staffwindow(self.allTables) # submenu contracts1 def on_leasing1_activate(self, event): leasing = cuon.Leasing.leasing.leasingwindow(self.allTables) # Action-Menu def on_enquiry_activate(self, event): enq = cuon.Enquiry.enquiry.enquirywindow(self.allTables) def on_proposal1_activate(self,event): ord = cuon.Proposal.proposal.proposalwindow(self.allTables) def on_order1_activate(self,event): ord = cuon.Order.order.orderwindow(self.allTables) def on_stock1_activate(self,event): print "ipc-port", self.td.ipc_port1 if ipc_port1 and "st" in td.ipc_list: ord = self.sv.stock.initSN() else: ord = cuon.Stock.stock.stockwindow(self.allTables) def on_mi_supportticket1_activate(self, event): import cuon.SupportTicket.supportTicket supt = cuon.SupportTicket.supportTicket.supportticketwindow(self.allTables) def on_dms1_activate(self,event): 
import cuon.DMS.dms print "ipc-port", self.td.ipc_port1 if ipc_port1 and "dms" in td.ipc_list: dms = self.sv.dms.initSN() else: dms = cuon.DMS.dms.dmswindow(self.allTables) # Finances # Cash Account Book def on_cash_account_book1_activate(self, event): cab = cuon.Finances.cashAccountBook.cashAccountBookwindow(self.allTables) def on_bookkeeping1_activate(self, event): bk = cuon.Finances.bookkeeping.bookkeepingwindow(self.allTables) def on_listOfInvoices1_activate(self, event): loi = cuon.Finances.invoicebook.invoicebookwindow(self.allTables) def on_analyse_costs1_activate(self, event ): pass # Extras def on_expert_system1_activate(self, event): import cuon.AI.ai cai = cuon.AI.ai.aiwindow(self.allTables) def on_project1_activate(self, event): cpro = cuon.Project.project.projectwindow(self.allTables) def on_sourcenav1_activate(self, event): print "ipc-port", self.td.ipc_port1 if ipc_port1 and "sn" in td.ipc_list: sn = self.sv.sourcenavigator.initSN() def on_web2_activate(self, event): web2 = cuon.Web2.web2.web2window(self.allTables) def on_stats1_activate(self, event): import cuon.Stats.stats stats = cuon.Stats.stats.statswindow(self.allTables) def on_calendar_activate(self, event): ccal = cuon.Calendar.calendar.calendarwindow(self.allTables) def on_mindmap1_activate(self, event): import cuon.Think.think think = cuon.Think.think.thinkwindow(self.allTables) # Tools def on_addresses_notes_misc1_activate(self,event): dms = cuon.DMS.dms.dmswindow(self.allTables, self.MN['Forms_Address_Notes_Misc']) def on_addresses_notes_contacter1_activate(self,event): dms = cuon.DMS.dms.dmswindow(self.allTables, self.MN['Forms_Address_Notes_Contacter']) def on_addresses_notes_representant1_activate(self,event): dms = cuon.DMS.dms.dmswindow(self.allTables, self.MN['Forms_Address_Notes_Rep']) def on_addresses_notes_salesman1_activate(self,event): dms = cuon.DMS.dms.dmswindow(self.allTables, self.MN['Forms_Address_Notes_Salesman']) def on_update1_activate(self, event): self.updateVersion() 
def on_pref_user1_activate(self,event): prefs = cuon.Preferences.preferences.preferenceswindow(self.allTables) def on_prefs_finances_activate(self,event): prefs = cuon.PrefsFinance.prefsFinance.prefsFinancewindow(self.allTables) def on_report_generator1_activate(self, event): import cuon.ReportBuilder.report reps = cuon.ReportBuilder.report.report() def on_webshop1_activate(self,event): import cuon.WebShop.webshop print 'Webshop' prefs = cuon.WebShop.webshop.webshopwindow(self.allTables) def updateVersion(self): if self.startProgressBar(): self.generateSqlObjects() self.writeAllGladeFiles() self.stopProgressBar() def on_import_data1_activate(self, event): import cuon.Databases.import_generic1 imp1 = cuon.Databases.import_generic1.import_generic1(self.allTables) def on_ExportData_activate(self, event): print 'export Data' import cuon.Databases.export_generic1 exp1 = cuon.Databases.export_generic1.export_generic1(self.allTables) def on_sql_shell_activated(self, event): sqlw = cuon.SQL_Shell.sql_shell.sql_shell() def on_test1_activate(self, event): import cuon.VTK.mainLogo import cuon.VTK.test te = cuon.VTK.test.test() te.show() # Logs def on_logs_mail1_activate(self, event): import cuon.Editor.editor dicFile = {'TYPE':'FILE','NAME':os.path.normpath(self.td.cuon_path + '/' + 'cuonmail.log'),'Rights':'RO'} em = cuon.Editor.editor.editorwindow(dicFile) # help and info def on_about1_activate(self, event): about1 = self.getWidget('aCuon') about1.show() def on_onlinehelp_activate(self, event): import cuon.Help.help he1 = cuon.Help.help.helpwindow() # hide about-info def on_okAbout1_clicked(self, event): about1 = self.getWidget('aCuon') about1.hide() # extendet Menu def on_ext1_activate(self, event): print 'ext1 menu activated !!!!!' ext1 = eval(self.extMenucommand['ext1']) try: ext1.start() except: print 'No StartModule' def on_ext2_activate(self, event): print 'ext2 menu activated !!!!!' 
ext2 = eval(self.extMenucommand['ext2']) try: ext2.start() except: print 'No StartModule' def on_ext3_activate(self, event): print 'ext3 menu activated !!!!!' ext3 = eval(self.extMenucommand['ext3']) try: ext3.start() except: print 'No StartModule' def on_ext4_activate(self, event): print 'ext4 menu activated !!!!!' print self.extMenucommand['ext4'] ext4 = eval(self.extMenucommand['ext4']) try: ext4.start() except: print 'No StartModule ext4' def getNewClientSoftware(self, id): cuonpath = '..' self.infoMsg('C.U.O.N. will now try to load the new Clientversion. ') shellcommand = 'rm ' + cuonpath + '/newclient' liStatus = commands.getstatusoutput(shellcommand) print shellcommand, liStatus shellcommand = 'rm -R ' + cuonpath + '/iClient' liStatus = commands.getstatusoutput(shellcommand) print shellcommand, liStatus sc = cuon.Databases.SingleCuon.SingleCuon(self.allTables) sc.saveNewVersion(id) shellcommand = 'cd '+cuonpath+' ; tar -xvjf newclient' liStatus = commands.getstatusoutput(shellcommand) print shellcommand, liStatus #shellcommand = 'sh ' + cuonpath + '/iClient/iCuon ' #liStatus = commands.getstatusoutput(shellcommand) #print shellcommand, liStatus f = open('newversion','a') f.write(`self.Version`) f.close() # Plugins # Dia shellcommand = 'if [ ! -d ~/.dia/python ] ; then mkdir ~/.dia/python ; fi ' liStatus = commands.getstatusoutput(shellcommand) print shellcommand, liStatus shellcommand = 'cd ' + cuonpath +'/Plugins/Dia ; cp cuon_dia.py ~/.dia/python ' liStatus = commands.getstatusoutput(shellcommand) print shellcommand, liStatus self.infoMsg('Update complete. Please start C.U.O.N. 
new ') def startT0(self): try: print 'First T0' self.openDB() oUser = self.loadObject('User') self.closeDB() if oUser: #print 'T0 Client = ', oUser.client if oUser.client > 0: self.singleAddress = cuon.Addresses.SingleAddress.SingleAddress(self.allTables) self.singlePartner = cuon.Addresses.SinglePartner.SinglePartner(self.allTables) self.singleSchedul = cuon.Addresses.SingleScheduling.SingleScheduling(self.allTables) self.startTiming() except Exception, params: print Exception, params return True def startTiming(self): #'print start Timer' # 60*1000 = 1 minute time_contact = 2*60*1000 time_schedul = 15*60*1000 time_imap_dms = 30*60*1000 if self.t0: gobject.source_remove(self.t0) if self.t1: gobject.source_remove(self.t1) if self.t2: gobject.source_remove(self.t2) if self.t3: gobject.source_remove(self.t3) try: if not self.t1: self.startChecking() self.t1 = gobject.timeout_add(time_contact, self.startChecking) except Exception, params: print Exception, params try: if not self.t2: self.setSchedulTree() self.t2 = gobject.timeout_add(time_schedul,self.setSchedulTree) except Exception, params: print Exception, params try: if not self.t3: self.checkImapDMS() self.t3 = gobject.timeout_add(time_imap_dms,self.checkImapDMS) except Exception, params: print Exception, params def checkImapDMS(self): #print '######################################### EMail #########################' self.openDB() oUser = self.loadObject('User') self.closeDB() imapD = cuon.E_Mail.imap_dms.imap_dms(self.allTables, oUser.getDicUser()) imapD.run() #print '######################################### EMail END #########################' return True def startChecking(self): #gtk.gdk.threads_enter() try: #print 'start scheduling' #print self.Version self.openDB() oUser = self.loadObject('User') liSchedul = self.loadObject('Scheduling') self.closeDB() #print `self.oUser.getDicUser()` #print 'Client = ', oUser.getDicUser()['client'] liContacts = self.rpc.callRP('Address.getAllActiveContacts', 
oUser.getSqlDicUser()) #print liContacts try: if not liSchedul: liSchedul = [] for contacts in liContacts: ok = False for oldSchedul in liSchedul: if oldSchedul == contacts['id']: ok = True if not ok: cuon.Addresses.contact.contactwindow(self.allTables, contacts['address_id'], contacts['partner_id']) liSchedul.append(contacts['id']) except Exception, params: print Exception, params self.openDB() self.saveObject('Scheduling', liSchedul) self.closeDB() #cuon.Addresses.contact.contactwindow(self.allTables) finally: # print 'Ende' pass return True #gtk.gdk.threads_leave() #self.startTimer(10) def on_rbScheduls_activate(self, event): print 'rbScheduls clicked' self.setSchedulTree() def disconnectTree(self): try: self.getWidget('treeSchedul').get_selection().disconnect(self.connectTreeId) except: pass def connectTree(self): try: self.connectTreeId = self.getWidget('treeSchedul').get_selection().connect("changed", self.tree_select_callback) except: pass def tree_select_callback(self, treeSelection): listStore, iter = treeSelection.get_selected() print listStore,iter if listStore and len(listStore) > 0: row = listStore[0] else: row = -1 if iter != None: sNewId = listStore.get_value(iter, 0) print sNewId try: newID = int(sNewId[sNewId.find('###')+ 3:]) self.setDateValues(newID) except: pass #self.fillEntries(newId) def on_treeSchedul_row_activated(self, event): print 'event' self.on_bGotoAddress_clicked(event) def setSchedulTree(self): self.openDB() oUser = self.loadObject('User') self.closeDB() # Data sChoice = 'All' if self.getWidget('rbSchedulsNew').get_active(): sChoice = 'New' elif self.getWidget('rbSchedulsCancel').get_active(): sChoice = 'Cancel' elif self.getWidget('rbSchedulsActualWeek').get_active(): sChoice = 'actualWeek' print 'sChoice = ', sChoice liDates, newHash = self.rpc.callRP('Address.getAllActiveSchedul', oUser.getSqlDicUser(),'Name','All', sChoice, self.schedulHash1) #print 'lidates = ', liDates #print 'newHash = ', newHash if liDates == 
['NO_NEW_DATA']: print 'liDates = no Data' return True # new data arrived, go on #liststore = gtk.ListStore(str) self.schedulHash1 = newHash self.disconnectTree() treeview = self.getWidget('treeSchedul') #treeview.set_model(liststore) #renderer = gtk.CellRendererText() #column = gtk.TreeViewColumn("Scheduls", renderer, text=0) #treeview.append_column(column) treestore = gtk.TreeStore(object) treestore = gtk.TreeStore(str) ## renderer = gtk.CellRendererText() ## ## column = gtk.TreeViewColumn("Zweite Spalte", renderer, text=0) ## treeview.append_column(column) treeview.set_model(treestore) print 'Schedul by names: ', liDates if liDates: lastRep = None lastSalesman = None Schedulname = None lastSchedulname = None iter = treestore.append(None,[_('Names')]) iter2 = None iter3 = None for oneDate in liDates: Schedulname = oneDate['schedul_name'] if lastSchedulname != Schedulname: lastSchedulname = Schedulname iter2 = treestore.insert_after(iter,None,[lastSchedulname]) sTime = self.getTimeString(oneDate['time_begin'] ) sTime2 = self.getTimeString(oneDate['time_end'] ) iter3 = treestore.insert_after(iter2,None,[oneDate['date'] +'--' + sTime + '-' +sTime2 +', ' + oneDate['a_lastname'] + ', ' + oneDate['a_city'] + ' ###' + `oneDate['id']`]) ## try: ## iter = treestore.append(None,['Names']) ## iter2 = treestore.insert_after(iter,None,['jhamel']) ## iter3 = treestore.insert_after(iter2,None,['termin1']) ## iter = treestore.append(None,['Scheduls']) ## iter2 = treestore.insert_after(iter,None,['date']) ## iter3 = treestore.insert_after(iter2,None,['termin1']) ## except Exception,params: ## print Exception,params ## #liDates, self.schedulHash2 = self.rpc.callRP('Address.getAllActiveSchedul', oUser.getSqlDicUser(),'Schedul','All',sChoice) #liTest.sort(key=(lambda x: (x['test1'], lambda x: x['testA']) )) liDates.sort(key=(lambda x: (x['date_norm'], x['schedul_name'], x['time_begin'] )), reverse = True) #print 'Schedul by schedul_date 2 : ', liDates if liDates: lastRep = None 
lastSalesman = None Schedulname = None lastSchedulname = None iter = treestore.append(None,[_('Schedul')]) iter2 = None iter3 = None for oneDate in liDates: Schedulname = oneDate['date'] if lastSchedulname != Schedulname: lastSchedulname = Schedulname iter2 = treestore.insert_after(iter,None,[lastSchedulname]) sTime = self.getTimeString(oneDate['time_begin'] ) sTime2 = self.getTimeString(oneDate['time_end'] ) iter3 = treestore.insert_after(iter2,None,[oneDate['schedul_name'] +'--' + sTime + '-' +sTime2 +', ' + oneDate['a_lastname'] + ', ' + oneDate['a_city'] +' ###' + `oneDate['id']`]) # reps and Saleman # #liDates, self.schedulHash3 = self.rpc.callRP('Address.getAllActiveSchedul', oUser.getSqlDicUser(),'rep_salesman','All', sChoice) #liDates.sort(key=(lambda x: (x['date_norm'], lambda x: x['rep_lastname'], lambda x: x['salesman_lastname'], lambda x: x['date_norm'])), reverse = True) liDates.sort(key=(lambda x: (x['salesman_lastname'], x['rep_lastname'], x['date_norm']) ), reverse = True) #print 'Schedul by names: 3', liDates if liDates and liDates not in ['NONE']: lastRep = None lastSalesman = None Schedulname = None lastSchedulname = None iter = treestore.append(None,[_('Salesman')]) iter2 = None iter3 = None for oneDate in liDates: Schedulname = oneDate['schedul_name'] if lastSchedulname != Schedulname: lastSchedulname = Schedulname iter2 = treestore.insert_after(iter,None,[lastSchedulname]) sTime = self.getTimeString(oneDate['time_begin'] ) sTime2 = self.getTimeString(oneDate['time_end'] ) iter3 = treestore.insert_after(iter2,None,[oneDate['date'] +'--' + sTime + '-' +sTime2 +', ' + oneDate['a_lastname'] + ', ' + oneDate['a_city'] + ' ###' + `oneDate['id']`]) treeview.show() self.connectTree() return True def setDateValues(self, id): widgetTVAddress = self.getWidget('tvAddress') widgetTVPartner = self.getWidget('tvPartner') widgetEShortRemark = self.getWidget('eShortRemark') widgetTvEvent = self.getWidget('tvEvent') self.singleSchedul.load(id) partnerid = 
self.singleSchedul.getPartnerID() self.singlePartner.load(partnerid) addressid = self.singlePartner.getAddressID() self.singleAddress.load(addressid) print partnerid print addressid s = self.singleSchedul.getShortRemark() print 's=', s if s: widgetEShortRemark.set_text(s) else: widgetEShortRemark.set_text('') s = self.singleSchedul.getNotes() print 's=', s if s: self.add2Textbuffer(widgetTvEvent,s,'Overwrite') else: self.add2Textbuffer(widgetTvEvent,' ','Overwrite') s = self.singleAddress.getMailAddress() if s: self.add2Textbuffer(widgetTVAddress,s,'Overwrite') else: self.add2Textbuffer(widgetTVAddress,' ','Overwrite') s = self.singlePartner.getMailAddress() if s: self.add2Textbuffer(widgetTVPartner,s,'Overwrite') else: self.add2Textbuffer(widgetTVPartner,' ','Overwrite') def on_bGotoAddress_clicked(self, event): if self.singleAddress.ID > 0: adr = cuon.Addresses.addresses.addresswindow(self.allTables, addrid = self.singleAddress.ID) def on_bChat_clicked(self, event): print self.dicUser['Communications'] if self.dicUser['Communications']['textChat'] and self.dicUser['Communications']['textChat'] != 'OWN': shellcommand = shlex.split(self.dicUser['Communications']['textChat'] ) liStatus = subprocess.Popen(shellcommand) #liStatus = commands.getstatusoutput(shellcommand) print shellcommand, liStatus else: import cuon.Chat.chat cw = cuon.Chat.chat.chatWindow() def on_b3DChat_clicked(self, event): import cuon.Chat.world wo = cuon.Chat.world.panda_chat() def on_bEmail_clicked(self, event): shellcommand = shlex.split(self.dicUser['Communications']['emailPrg']) liStatus = subprocess.Popen(shellcommand) #liStatus = commands.getstatusoutput(shellcommand) print shellcommand, liStatus #def startTimer(self, seconds): # self.t1 = threading.Timer(seconds, self.startChecking) # self.t1.start() def on_eClient_changed(self, event): ''' client ID changed ''' try: dt = cuon.DMS.documentTools.documentTools() sFile = dt.load_mainwindow_logo(self.allTables) if sFile: print "image found" 
logo = self.getWidget("company_logo") # # newIm = Image.fromstring('RGB',[1024, 1024], bz2.decompress( image)) # newIm.thumbnail([208,208]) # sFile = self.dicUser['prefPath']['tmp'] + 'cuon_mainwindow_logo.png' # save(sFile) print 'sFile = ', sFile pixbuf = gtk.gdk.pixbuf_new_from_file(sFile) scaled_buf = pixbuf.scale_simple(208,208,gtk.gdk.INTERP_BILINEAR) logo.set_from_pixbuf(scaled_buf) logo.show() except: pass #logo.set_from_file(sFile) # now start the cash_desk, if it is a cash_desk user try: print "#############################################################################" print "eClient changed" print "User params = ", self.oUser.Params print "#############################################################################" for sCash in ["startCash1","startCash2","startCash3"]: print sCash, self.oUser.Params[sCash] if self.oUser.Params[sCash]: self.on_order1_activate(None) break except Exception, params: print Exception, params def on_onlineNews_activate(self, event): self.winNews.remove(self.swMap) self.webview = None self.webview = webkit.WebView() self.viewMap = gtk.Viewport() self.swMap = gtk.ScrolledWindow() self.viewMap.add(self.webview) self.swMap.add(self.viewMap) self.winNews.add(self.swMap) print 'mapmoz = ', self.webview if self.webview: if self.dicUser['Locales'].lower() == 'de': sUrl = 'http://cuon.org/Cuon/news.html' else: sUrl = 'http://cuon.org/en_Cuon/news.html' print sUrl self.webview.open(sUrl) #self.webview.set_size_request(816,600) self.viewMap.show() self.swMap.show() self.webview.show() self.winNews.show_all() def closeOnlineNews(self, event, data=None): self.winNews.hide() def startMain(self, sStartType, sDebug,sLocal='NO', Username='EMPTY', PASSWORD='Test', ClientID=0, dicParams = {}): #ML = cuon.VTK.mainLogo.mainLogo() #ML.startLogo() self.dicParams = dicParams self.ClientID = ClientID if sDebug: self.sDebug = sDebug else: self.sDebug = 'NO' if sStartType == 'server': print 'Server-Modus' td = typedefs_server() # create widget tree 
... self.gladeName = '/usr/share/cuon/glade/cuon.glade2' self.loadGladeFile(self.gladeName) else: id, version = self.rpc.callRP('Database.getLastVersion') print 'Version', version print 'id', id ## self.openDB() ## version = self.loadObject('ProgramVersion') ## self.closeDB() ## print 'Version:' + str(version) print self.Version['Major'], version['Major'] print self.Version['Minor'], version['Minor'] print self.Version['Rev'], version['Rev'] print self.Version, version self.openDB() oUser = self.loadObject('User') if not oUser: oUser = cuon.User.user.User() oUser.client = 0 self.saveObject('User',oUser) self.closeDB() if not version: print 'no Version, please inform Cuon-Administrator' sys.exit(0) if self.rpc.callRP('Database.checkVersion', self.Version, version) == 'Wrong': print ' ungleiche Versionen' print 'load new version of pyCuon' self.getNewClientSoftware(id) cuonpath = '..' shellcommand = 'rm ' + cuonpath + '/cuonObjects' liStatus = commands.getstatusoutput(shellcommand) print shellcommand, liStatus self.openDB() version = self.saveObject('newClientVersion',True) self.closeDB() sys.exit(0) newClientExist = self.loadObject('newClientVersion') if newClientExist: self.updateVersion() self.openDB() self.saveObject('ProgramVersion', self.Version) version = self.saveObject('newClientVersion',False) self.closeDB() version = self.rpc.callRP('Database.getLastVersion') print 'Version', version if sLocal != 'NO' and self.rpc.callRP('Database.checkVersion', self.Version, version[1]) == 'Wrong': self.getNewClientSoftware(id) sys.exit(0) # create widget tree ... 
# self.gladeName = td.main_glade_name self.loadGlade('cuon.xml','window1') self.win1 = self.getWidget("window1") self.win1.connect("delete_event", self.delete_event) self.win1.connect("destroy", self.destroy) # Online news self.winNews= self.getWidget('OnlineNews') self.winNews.connect("delete_event", self.closeOnlineNews) self.swMap = self.getWidget('swOnlineNews') # Menu-items self.initMenuItemsMain() self.disableAllMenuItems() self.addEnabledMenuItems('login','logout1') self.addEnabledMenuItems('login','data') self.addEnabledMenuItems('login','action1') self.addEnabledMenuItems('login','accounting1') self.addEnabledMenuItems('login','extras') self.addEnabledMenuItems('login','tools') self.addEnabledMenuItems('serverMode','databases1') self.addEnabledMenuItems('user','login1') self.addEnabledMenuItems('user','tools') self.addEnabledMenuItems('user','update1') self.disableMenuItem('login') self.disableMenuItem('serverMode') self.enableMenuItem('user') sTitle = self.getWidget('window1').get_title() + self.sTitle self.setTitle('window1',sTitle) self.openDB() oUser = self.loadObject('User') if not oUser: oUser = cuon.User.user.User() oUser.client = 0 self.saveObject('User',oUser) self.saveObject('Scheduling', []) self.closeDB() # set initial columns treeview = self.getWidget('treeSchedul') #treeview.set_model(liststore) renderer = gtk.CellRendererText() column = gtk.TreeViewColumn("Scheduls", renderer, text=0) treeview.append_column(column) self.t0 = gobject.timeout_add(2000, self.startT0) if Username != "empty": print "Username = ", Username self.activateClick("login1") def gtk_main_quit(self): if self.t1: gobject.source_remove(self.t1) gtk.main_quit() #gtk.gdk.threads_init() def getConfigOption(cpParser, section, option): value = None if cpParser.has_option(section,option): value = cpParser.get(section, option) print 'getConfigOption', section + ', ' + option + ' = ' + value if not value: value = " " return value sStartType = 'client' sDebug = 'NO' sLocal = 'NO' 
print sys.argv # Args: # 1 server http://host:post # 2 client/server # 3 Debug = ON/OFF # 4 Path to Locale/ default # 5 cuon_path try: cpParser = ConfigParser.ConfigParser() sFile = 'cuon.ini' f = open(sFile) print "open ini File = ", f cpParser.readfp(f) f.close() except Exception, param: print "oooops - no ini File" print Exception,param Description = None WorkingDir= 'NO' Host = None Port = None Proto = None sStartType = 'client' sLocal = 'NO' sDebug = 'NO' AlternateGui = 'LINUX-Standard' Username = "EMPTY" PASSWORD = "TEST" ClientID = 0 ReportUser ="EMPTY" SSH_PORT = 22 sSect = 'Client' Description = getConfigOption(cpParser, sSect,'DESCRIPTION') WorkingDir = getConfigOption(cpParser, sSect,'WORKINGDIR') Host = getConfigOption(cpParser, sSect,'HOST') Port = getConfigOption(cpParser, sSect,'PORT') Proto = getConfigOption(cpParser, sSect,'PROTOCOL') sStartType = getConfigOption(cpParser, sSect,'TYPE') sLocal = WorkingDir + getConfigOption(cpParser, sSect,'LOCALE') sDebug = getConfigOption(cpParser, sSect,'DEBUG') AlternateGui = getConfigOption(cpParser, sSect,'ALTERNATEGUI') Username = getConfigOption(cpParser, sSect, "USERNAME").strip() PASSWORD = getConfigOption(cpParser, sSect, "PASSWORD").strip() ReportUser = getConfigOption(cpParser, sSect, "REPORTUSER").strip() startCash1 = getConfigOption(cpParser, sSect, "START_CASH1").strip() startCash2 = getConfigOption(cpParser, sSect, "START_CASH2").strip() startCash3 = getConfigOption(cpParser, sSect, "START_CASH3").strip() cash_user_shortkey = getConfigOption(cpParser, sSect, "CASH_KEY").strip() cash_user_pincode = getConfigOption(cpParser, sSect, "CASH_PINCODE").strip() ipc_port1 = getConfigOption(cpParser, sSect, "IPC_PORT1").strip() ipc_list = getConfigOption(cpParser, sSect, "IPC_LIST").strip() print "startcash 1, 2, 3 = ", startCash1, startCash2, startCash3 if not startCash1: startCash1 = "NO" if not startCash2: startCash2 = "NO" if not startCash3: startCash3 = "NO" if not ipc_list: ipc_list = [] else: ipc_list 
= ipc_list.split(",") if not ipc_port1: ipc_port1 = 0 else: ipc_port1 = int(ipc_port1) dicParams = {} if startCash1.upper() == "YES": dicParams["startCash1"] = True else: dicParams["startCash1"] = False if startCash2.upper() == "YES": dicParams["startCash2"] = True else: dicParams["startCash2"] = False if startCash3.upper() == "YES": dicParams["startCash3"] = True else: dicParams["startCash3"] = False dicParams["cash_user_shortkey"] = cash_user_shortkey dicParams["cash_user_pincode"] = cash_user_pincode print "dicParams = ", dicParams try: ClientID = int(getConfigOption(cpParser, sSect, "CLIENT_ID")) except: ClientID = 0 try: SSH_PORT = int(getConfigOption(cpParser, sSect, "SSH_PORT")) except: SSH_PORT = 22 if not Username or not PASSWORD or not ClientID: Username = "EMPTY" PASSWORD = "TEST" ClientID = 0 if not ReportUser or not SSH_PORT: ReportUser ="EMPTY" SSH_PORT = 22 print "AlternateGui = ", AlternateGui if not AlternateGui or AlternateGui == 'NO' : AlternateGui = 'LINUX-Standard' if len(sys.argv) > 4: if len(sys.argv[4]) > 1: sLocal = sys.argv[4] if len(sys.argv) > 3: if len(sys.argv[3]) > 1: sDebug = sys.argv[3] if len(sys.argv) > 2: if len(sys.argv[2]) > 1: sStartType = sys.argv[2] print sStartType if sStartType == 'server': td = cuon.TypeDefs.typedefs_server.typedefs_server() else: td = cuon.TypeDefs.typedefs.typedefs() td.SystemName = AlternateGui td.cuon_path = WorkingDir td.SSH_PORT = SSH_PORT td.ReportUser = ReportUser td.ipc_port1 = ipc_port1 td.ipc_list = ipc_list td.server = Proto.strip() +'://' + Host.strip() +':' + Port.strip() if len(sys.argv) > 1: if len(sys.argv[1]) > 1: if sys.argv[1] != 'NO': td.server = sys.argv[1] print 'td-server =', td.server if len(sys.argv) > 5: if len(sys.argv[5]) > 1: if sys.argv[5] != 'NO': td.cuon_path = sys.argv[5] WorkingDir = td.cuon_path print 'td.cuon_path =', td.cuon_path if len(sys.argv) > 6: if len(sys.argv[6]) > 1: if sys.argv[6] != 'NO': td.SystemName = sys.argv[6] print 'td.System =', td.SystemName else: 
td.SystemName = 'LINUX-Standard' else: td.SystemName = 'LINUX-Standard' else: td.SystemName = AlternateGui print 'now -> ', td.SystemName if len(sys.argv) > 7: if len(sys.argv[7]) > 1: Username = sys.argv[7] print 'Username =', Username print len(sys.argv) if len(sys.argv) > 8: if len(sys.argv[8]) > 1: PASSWORD = sys.argv[8] print 'password =', PASSWORD if len(sys.argv) > 9: if len(sys.argv[9]) > 0: ClientID = int(sys.argv[9].strip()) print 'clientID =', ClientID # set some pathes try: print 'WorkingDir', WorkingDir if not os.path.exists(WorkingDir + '/cuon_data'): print 'make dir cuon_data' os.mkdir(WorkingDir +'/cuon_data') if not os.path.exists(WorkingDir +'/cuon_data/dms'): print 'make dir cuon_data/dms' os.mkdir(WorkingDir +'/cuon_data/dms') if not os.path.exists(WorkingDir +'/cuon_data/import'): print 'make dir cuon_data/dms' os.mkdir(WorkingDir +'/cuon_data/import') if not os.path.exists(WorkingDir +'/cuon_data/export'): print 'make dir cuon_data/dms' os.mkdir(WorkingDir +'/cuon_data/export') except Exception, params: print Exception, params os.environ['CUON_PATH'] = td.cuon_path td.ClinetID = ClientID td.saveTD() if sLocal == 'NO': DIR = '/usr/share/locale' else: DIR = sLocal #locale.setlocale (locale.LC_ALL, '') APP = 'cuon' try: locale.bindtextdomain(APP,DIR) locale.textdomain(APP) except: gettext.bindtextdomain (APP, DIR) gettext.textdomain (APP) gettext.install (APP, DIR, unicode=1) try: Gtk.glade.bindtextdomain(APP,DIR) Gtk.glade.textdomain(APP) except: pass print _('Debug by C.U.O.N. 
= ' ), sDebug # ######################################################################################################################## #checkProfile = True # #def heapy_profile(frame, event, arg): # currtime = time.time() # if currtime - _last_log_time < 1: # return # _last_log_time = currtime # code = frame.f_code # filename = code.co_filename # lineno = code.co_firstlineno # idset = hpy().heap() # logfile.write('%s %s:%s\n%s\n\n' % (currtime, filename, lineno, idset)) # logfile.flush() # #if checkProfile: # atexit.register(_logfile.close) # sys.settrace(heapy_profile) ######################################################################################################################## m = MainWindow(sStartType) m.startMain(sStartType, sDebug,sLocal, Username, PASSWORD, ClientID, dicParams) #profile.run('m.startMain(sStartType, sDebug,sLocal)','cuonprofile') # Import Psyco if available #try: # import psyco #psyco.full() #print ' start psyco' #except ImportError: # print 'no psyco found' #gtk.gdk.threads_enter() gtk.main() #gtk.gdk.threads_leave() #gtk.main()
# NOTE(review): "codeparrot/github-code-clean" — dataset-name residue left by
# the scrape that concatenated two unrelated source files at this point; it is
# not part of either program (and would raise NameError if executed). Remove
# this marker once the two files are split apart again.
#!/usr/bin/python # -*- coding: utf8 -*- # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 3, or (at your option) any later # version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License # for more details. """Módulo para obtener código de operación electrónico (COE) para Liquidación Primaria Electrónica de Granos del web service WSLPG de AFIP """ __author__ = "Mariano Reingart <reingart@gmail.com>" __copyright__ = "Copyright (C) 2013-2015 Mariano Reingart" __license__ = "GPL 3.0" __version__ = "1.27h" LICENCIA = """ wslpg.py: Interfaz para generar Código de Operación Electrónica para Liquidación Primaria de Granos (LpgService) Copyright (C) 2013-2015 Mariano Reingart reingart@gmail.com http://www.sistemasagiles.com.ar/trac/wiki/LiquidacionPrimariaGranos Este progarma es software libre, se entrega ABSOLUTAMENTE SIN GARANTIA y es bienvenido a redistribuirlo respetando la licencia GPLv3. Para información adicional sobre garantía, soporte técnico comercial e incorporación/distribución en programas propietarios ver PyAfipWs: http://www.sistemasagiles.com.ar/trac/wiki/PyAfipWs """ AYUDA=""" Opciones: --ayuda: este mensaje --debug: modo depuración (detalla y confirma las operaciones) --formato: muestra el formato de los archivos de entrada/salida --prueba: genera y autoriza una liquidación de prueba (no usar en producción!) 
--xml: almacena los requerimientos y respuestas XML (depuración) --dbf: utilizar tablas DBF (xBase) para los archivos de intercambio --json: utilizar formato json para el archivo de intercambio --dummy: consulta estado de servidores --autorizar: Autorizar Liquidación Primaria de Granos (liquidacionAutorizar) --ajustar: Ajustar Liquidación Primaria de Granos (liquidacionAjustar) --anular: Anular una Liquidación Primaria de Granos (liquidacionAnular) --consultar: Consulta una liquidación (parámetros: nro de orden y COE) --ult: Consulta el último número de orden registrado en AFIP (liquidacionUltimoNroOrdenConsultar) --pdf: genera el formulario C 1116 B en formato PDF --mostrar: muestra el documento PDF generado (usar con --pdf) --imprimir: imprime el documento PDF generado (usar con --mostrar y --pdf) --autorizar-lsg: Autoriza una Liquidación Secundaria de Granos (lsgAutorizar) --lsg --anular: Anula una LSG (lsgAnular) --lsg --consular: Consulta una LSG por pto_emision, nro_orden o COE --lsg --ult: Consulta el último Nº LSG emitida (lsgConsultarUltimoNroOrden) --lsg --asociar: Asocia una liq. sec. 
a un contrato (lsgAsociarAContrato) --ajustar-lsg: Ajusta una liquidación secundaria (lsgAjustar por COE/Contrato) --autorizar-cg: Autorizar Certificación de Granos (cgAutorizar) --cg --anular: Solicita anulación de un CG (cgSolicitarAnulacion) --cg --consultar: Consulta una CG por pto_emision, nro_orden o COE --cg --ult: Consulta el último Nº LSG emitida (cgConsultarUltimoNroOrden) --informar-calidad: Informa la calidad de una CG (cgInformarCalidad) --buscar-ctg: devuelve los datos de la CTG a certificar espera tipo_certificado, cuit_depositante, nro_planta, cod_grano, campania --buscar-cert-con-saldo-disp: CG disponible para liquidar/retirar/transferir espera cuit_depositante, cod_grano, campania, coe fecha_emision_des/has --provincias: obtiene el listado de provincias --localidades: obtiene el listado de localidades por provincia --tipograno: obtiene el listado de los tipos de granos disponibles --campanias: obtiene el listado de las campañas --gradoref: obtiene el listado de los grados de referencias --gradoent: obtiene el listado de los grados y valores entregados --certdeposito: obtiene el listado de los tipos de certificados de depósito --deducciones: obtiene el listado de los tipos de deducciones --retenciones: obtiene el listado de los tipos de retenciones --puertos: obtiene el listado de los puertos habilitados --actividades: obtiene el listado de las actividades habilitados --actividadesrep: devuelve las actividades en las que emisor/representado se encuentra inscripto en RUOCA --operaciones: obtiene el listado de las operaciones para el representado Ver wslpg.ini para parámetros de configuración (URL, certificados, etc.)" """ import os, sys, shelve import decimal, datetime import traceback import pprint from pysimplesoap.client import SoapFault from fpdf import Template import utils # importo funciones compartidas: from utils import leer, escribir, leer_dbf, guardar_dbf, N, A, I, json, BaseWS, inicializar_y_capturar_excepciones, get_install_dir WSDL = 
"https://fwshomo.afip.gov.ar/wslpg/LpgService?wsdl" #WSDL = "https://serviciosjava.afip.gob.ar/wslpg/LpgService?wsdl" #WSDL = "file:wslpg.wsdl" DEBUG = False XML = False CONFIG_FILE = "wslpg.ini" HOMO = False # definición del formato del archivo de intercambio: ENCABEZADO = [ ('tipo_reg', 1, A), # 0: encabezado liquidación ('nro_orden', 18, N), ('cuit_comprador', 11, N), ('nro_act_comprador', 5, N), ('nro_ing_bruto_comprador', 15, N), ('cod_tipo_operacion', 2, N), ('es_liquidacion_propia', 1, A), # S o N ('es_canje', 1, A), # S o N ('cod_puerto', 4, N), ('des_puerto_localidad', 240, A), ('cod_grano', 3, N), ('cuit_vendedor', 11, N), ('nro_ing_bruto_vendedor', 15, N), ('actua_corredor', 1, A), # S o N ('liquida_corredor', 1, A), # S o N ('cuit_corredor', 11, N), ('nro_ing_bruto_corredor', 15, N), ('comision_corredor', 5, I, 2), # 3.2 ('fecha_precio_operacion', 10, A), # 26/02/2013 ('precio_ref_tn', 8, I, 3), # 4.3 ('cod_grado_ref', 2, A), ('cod_grado_ent', 2, A), ('factor_ent', 6, I, 3), # 3.3 ('precio_flete_tn', 7, I, 2), # 5.2 ('cont_proteico', 6, I, 3), # 3.3 ('alic_iva_operacion', 5, I, 2), # 3.2 ('campania_ppal', 4, N), ('cod_localidad_procedencia', 6, N), ('reservado1', 200, A), # datos_adicionales (compatibilidad hacia atras) ('coe', 12, N), ('coe_ajustado', 12, N), ('estado', 2, A), ('total_deduccion', 17, I, 2), # 17.2 ('total_retencion', 17, I, 2), # 17.2 ('total_retencion_afip', 17, I, 2), # 17.2 ('total_otras_retenciones', 17, I, 2), # 17.2 ('total_neto_a_pagar', 17, I, 2), # 17.2 ('total_iva_rg_2300_07', 17, I, 2), # 17.2 ('total_pago_segun_condicion', 17, I, 2), # 17.2 ('fecha_liquidacion', 10, A), ('nro_op_comercial', 10, N), ('precio_operacion', 17, I, 3), # 17.3 ('subtotal', 17, I, 2), # 17.2 ('importe_iva', 17, I, 2), # 17.2 ('operacion_con_iva', 17, I, 2), # 17.2 ('total_peso_neto', 8, N), # 17.2 # Campos WSLPGv1.1: ('pto_emision', 4, N), ('cod_prov_procedencia', 2, N), ('peso_neto_sin_certificado', 8, N), ('cod_tipo_ajuste', 2, N), 
('val_grado_ent', 4, I, 3), # 1.3 # Campos WSLPGv1.3: ('cod_prov_procedencia_sin_certificado', 2, N), ('cod_localidad_procedencia_sin_certificado', 6, N), # Campos WSLPGv1.4 (ajustes): ('nro_contrato', 15, N), ('tipo_formulario', 2, N), ('nro_formulario', 12, N), # datos devuetos: ('total_iva_10_5', 17, I, 2), # 17.2 ('total_iva_21', 17, I, 2), # 17.2 ('total_retenciones_ganancias', 17, I, 2), # 17.2 ('total_retenciones_iva', 17, I, 2), # 17.2 ('datos_adicionales', 400, A), # max 400 desde WSLPGv1.2 # Campos agregados WSLPGv1.5 (ajustes): ('iva_deducciones', 17, I, 2), # 17.2 ('subtotal_deb_cred', 17, I, 2), # 17.2 ('total_base_deducciones', 17, I, 2), # 17.2 # Campos agregados WSLPGv1.6 (liquidación secundaria base): ('cantidad_tn', 11, I, 3), # 8.3 ('nro_act_vendedor', 5, N), # Campos agregados WSLPGv1.9 (liquidación secundaria base): ('total_deducciones', 19, I , 2), ('total_percepciones', 19, I , 2), ] CERTIFICADO = [ ('tipo_reg', 1, A), # 1: Certificado ('reservado1', 2, N), # en WSLPGv1.7 se amplio el campo ('nro_certificado_deposito', 12, N), ('peso_neto', 8, N), ('cod_localidad_procedencia', 6, N), ('cod_prov_procedencia', 2, N), ('reservado', 2, N), ('campania', 4, N), ('fecha_cierre', 10, A), ('peso_neto_total_certificado', 8, N), # para ajuste unificado (WSLPGv1.4) ('coe_certificado_deposito', 12, N), # para certificacion (WSLPGv1.6) ('tipo_certificado_deposito', 3, N), # wSLPGv1.7 agrega valor 332 ] RETENCION = [ ('tipo_reg', 1, A), # 2: Retencion ('codigo_concepto', 2, A), ('detalle_aclaratorio', 30, A), ('base_calculo', 10, I, 2), # 8.2 ('alicuota', 6, I, 2), # 3.2 ('nro_certificado_retencion', 14, N), ('fecha_certificado_retencion', 10, A), ('importe_certificado_retencion', 17, I, 2), # 17.2 ('importe_retencion', 17, I, 2), # 17.2 ] DEDUCCION = [ ('tipo_reg', 1, A), # 3: Deducción ('codigo_concepto', 2, A), ('detalle_aclaratorio', 30, A), # max 50 por WSLPGv1.2 ('dias_almacenaje', 4, N), ('reservado1', 6, I, 3), ('comision_gastos_adm', 5, I, 2), # 
3.2 ('base_calculo', 10, I, 2), # 8.2 ('alicuota', 6, I, 2), # 3.2 ('importe_iva', 17, I, 2), # 17.2 ('importe_deduccion', 17, I, 2), # 17.2 ('precio_pkg_diario', 11, I, 8), # 3.8, ajustado WSLPGv1.2 ] PERCEPCION = [ ('tipo_reg', 1, A), # P: Percepcion ('detalle_aclaratoria', 50, A), # max 50 por WSLPGv1.8 ('base_calculo', 10, I, 2), # 8.2 ('alicuota', 6, I, 2), # 3.2 ] OPCIONAL = [ ('tipo_reg', 1, A), # O: Opcional ('codigo', 50, A), ('descripcion', 250, A), ] AJUSTE = [ ('tipo_reg', 1, A), # 4: ajuste débito / 5: crédito (WSLPGv1.4) ('concepto_importe_iva_0', 20, A), ('importe_ajustar_iva_0', 15, I, 2), # 11.2 ('concepto_importe_iva_105', 20, A), ('importe_ajustar_iva_105', 15, I, 2), # 11.2 ('concepto_importe_iva_21', 20, A), ('importe_ajustar_iva_21', 15, I, 2), # 11.2 ('diferencia_peso_neto', 8, N), ('diferencia_precio_operacion', 17, I, 3), # 17.3 ('cod_grado', 2, A), ('val_grado', 4, I, 3), # 1.3 ('factor', 6, I, 3), # 3.3 ('diferencia_precio_flete_tn', 7, I, 2), # 5.2 ('datos_adicionales', 400, A), # datos devueltos: ('fecha_liquidacion', 10, A), ('nro_op_comercial', 10, N), ('precio_operacion', 17, I, 3), # 17.3 ('subtotal', 17, I, 2), # 17.2 ('importe_iva', 17, I, 2), # 17.2 ('operacion_con_iva', 17, I, 2), # 17.2 ('total_peso_neto', 8, N), # 17.2 ('total_deduccion', 17, I, 2), # 17.2 ('total_retencion', 17, I, 2), # 17.2 ('total_retencion_afip', 17, I, 2), # 17.2 ('total_otras_retenciones', 17, I, 2), # 17.2 ('total_neto_a_pagar', 17, I, 2), # 17.2 ('total_iva_rg_2300_07', 17, I, 2), # 17.2 ('total_pago_segun_condicion', 17, I, 2), # 17.2 ('iva_calculado_iva_0', 15, I, 2), # 15.2 ('iva_calculado_iva_105', 15, I, 2), # 15.2 ('iva_calculado_iva_21', 15, I, 2), # 15.2 ] CERTIFICACION = [ ('tipo_reg', 1, A), # 7: encabezado certificación # campos de la cabecera para todas las certificaciones (WSLPGv1.6) ('pto_emision', 4, N), ('nro_orden', 8, N), ('tipo_certificado', 1, A), # P:Primaria,R:Retiro,T:Transferencia,E:Preexistente ('nro_planta', 6, N), 
('nro_ing_bruto_depositario', 15, N), ('titular_grano', 1, A), # "P" (Propio) "T" (Tercero) ('cuit_depositante', 11, N), # obligatorio si titular_grano es T ('nro_ing_bruto_depositante', 15, N), ('cuit_corredor', 11, N), ('cod_grano', 3, N), ('campania', 4, N), ('datos_adicionales', 400, A), ('reservado1', 14, A), # reservado para futuros campos (no usar) # campos para CgAutorizarPrimariaType ex-cgAutorizarDeposito (WSLPGv1.6-1.8) ('nro_act_depositario', 5, N), # nuevo WSLPGv1.8 tambien R/T ('descripcion_tipo_grano', 20, A), ('monto_almacenaje', 10, I, 2), ('monto_acarreo', 10, I, 2), ('monto_gastos_generales', 10, I, 2), ('monto_zarandeo', 10, I, 2), ('porcentaje_secado_de', 5, I, 2), ('porcentaje_secado_a', 5, I, 2), ('monto_secado', 10, I, 2), ('monto_por_cada_punto_exceso', 10, I, 2), ('monto_otros', 10, I, 2), ('reservado_calidad', 35, A), # ver subestructura WSLPGv1.10 ('peso_neto_merma_volatil', 10, I , 2), ('porcentaje_merma_secado', 5, I, 2), ('peso_neto_merma_secado', 10, I, 2), ('porcentaje_merma_zarandeo', 5, I, 2), ('peso_neto_merma_zarandeo', 10, I, 2), ('peso_neto_certificado', 10, I, 2), # WSLPGv1.9 2 decimales! ('servicios_secado', 8, I, 3), ('servicios_zarandeo', 8, I, 3), ('servicios_otros', 7, I, 3), ('servicios_forma_de_pago', 20, A), # campos para cgAutorizarRetiroTransferencia (WSLPGv1.6): ('cuit_receptor', 11, N), ('fecha', 10, A), # no usado WSLPGv1.8 ('nro_carta_porte_a_utilizar', 9, N), # obligatorio para retiro ('cee_carta_porte_a_utilizar', 14, N), # no usado WSLPGv1.8 # para cgAutorizarPreexistente (WSLPGv1.6): ('tipo_certificado_deposito_preexistente', 1, N), # "R": Retiro "T": Tra. 
('nro_certificado_deposito_preexistente', 12, N), ('cac_certificado_deposito_preexistente', 14, N), # cambio WSLPGv1.8 ('fecha_emision_certificado_deposito_preexistente', 10, A), ('peso_neto', 8, N), # nro_planta definido previamente - agregado WSLPGv1.8 # datos devueltos por el webservice: ('reservado2', 183, N), # padding para futuros campos (no usar) ('coe', 12, N), ('fecha_certificacion', 10, A), ('estado', 2, A), ('reservado3', 101, A), # padding para futuros campos (no usar) # otros campos devueltos (opcionales) # 'pesosResumen' ('peso_bruto_certificado', 10, I , 2), ('peso_merma_secado', 10, I , 2), ('peso_merma_zarandeo', 10, I , 2), # peso_neto_certificado definido arriba # serviciosResumen ('importe_iva', 10, I , 2), ('servicio_gastos_generales', 10, I , 2), ('servicio_otros', 10, I , 2), ('servicio_total', 10, I , 2), ('servicio_zarandeo', 10, I , 2), # planta ('cuit_titular_planta', 11, N), ('razon_social_titular_planta', 11, A), ] CTG = [ # para cgAutorizarDeposito (WSLPGv1.6) ('tipo_reg', 1, A), # C: CTG ('nro_ctg', 8, N), ('nro_carta_porte', 9, N), ('porcentaje_secado_humedad', 5, I, 2), ('importe_secado', 10, I, 2), ('peso_neto_merma_secado', 10, I, 2), ('tarifa_secado', 10, I, 2), ('importe_zarandeo', 10, I, 2), ('peso_neto_merma_zarandeo', 10, I, 2), ('tarifa_zarandeo', 10, I, 2), ('peso_neto_confirmado_definitivo', 10, I, 2), ] DET_MUESTRA_ANALISIS = [ # para cgAutorizarDeposito (WSLPGv1.6) ('tipo_reg', 1, A), # D: detalle muestra analisis ('descripcion_rubro', 400, A), ('tipo_rubro', 1, A), # "B" (Bonificación) y "R" (Rebaja) ('porcentaje', 5, I, 2), ('valor', 5, I, 2), ] CALIDAD = [ # para cgAutorizar y cgInformarCalidad (WSLPGv1.10) ('tipo_reg', 1, A), # Q: caldiad ('analisis_muestra', 10, N), ('nro_boletin', 10, N), ('cod_grado', 2, A), # nuevo WSLPGv1.10: G1 G2 .... ('valor_grado', 4, I, 3), # solo para cod_grado F1 F2 ... 
('valor_contenido_proteico', 5, I, 3), ('valor_factor', 6, I, 3), ] FACTURA_PAPEL = [ # para lsgAjustar (WSLPGv1.15) ('tipo_reg', 1, A), # F: factura papel ('nro_cai', 14, N), ('nro_factura_papel', 12, N), ('fecha_factura', 10, A), ('tipo_comprobante', 3, N), ] EVENTO = [ ('tipo_reg', 1, A), # E: Evento ('codigo', 4, A), ('descripcion', 250, A), ] ERROR = [ ('tipo_reg', 1, A), # R: Error ('codigo', 4, A), ('descripcion', 250, A), ] DATO = [ ('tipo_reg', 1, A), # 9: Dato adicional ('campo', 25, A), ('valor', 250, A), ] class WSLPG(BaseWS): "Interfaz para el WebService de Liquidación Primaria de Granos" _public_methods_ = ['Conectar', 'Dummy', 'SetTicketAcceso', 'DebugLog', 'AutorizarLiquidacion', 'AutorizarLiquidacionSecundaria', 'AnularLiquidacionSecundaria','AnularLiquidacion', 'CrearLiquidacion', 'CrearLiqSecundariaBase', 'AgregarCertificado', 'AgregarRetencion', 'AgregarDeduccion', 'AgregarPercepcion', 'AgregarOpcional', 'AgregarCalidad', 'AgregarFacturaPapel', 'ConsultarLiquidacion', 'ConsultarUltNroOrden', 'ConsultarLiquidacionSecundaria', 'ConsultarLiquidacionSecundariaUltNroOrden', 'CrearAjusteBase', 'CrearAjusteDebito', 'CrearAjusteCredito', 'AjustarLiquidacionUnificado', 'AjustarLiquidacionUnificadoPapel', 'AjustarLiquidacionContrato', 'AjustarLiquidacionSecundaria', 'AnalizarAjusteDebito', 'AnalizarAjusteCredito', 'AsociarLiquidacionAContrato', 'ConsultarAjuste', 'ConsultarLiquidacionesPorContrato', 'ConsultarLiquidacionesSecundariasPorContrato', 'AsociarLiquidacionSecundariaAContrato', 'CrearCertificacionCabecera', 'AgregarCertificacionPrimaria', 'AgregarCertificacionRetiroTransferencia', 'AgregarCertificacionPreexistente', 'AgregarDetalleMuestraAnalisis', 'AgregarCTG', 'AutorizarCertificacion', 'InformarCalidadCertificacion', 'BuscarCTG', 'AnularCertificacion', 'ConsultarCertificacion', 'ConsultarCertificacionUltNroOrden', 'BuscarCertConSaldoDisponible', 'LeerDatosLiquidacion', 'ConsultarCampanias', 'ConsultarTipoGrano', 
'ConsultarGradoEntregadoXTipoGrano', 'ConsultarCodigoGradoReferencia', 'ConsultarTipoCertificadoDeposito', 'ConsultarTipoDeduccion', 'ConsultarTipoRetencion', 'ConsultarPuerto', 'ConsultarTipoActividad', 'ConsultarTipoActividadRepresentado', 'ConsultarProvincias', 'ConsultarLocalidadesPorProvincia', 'ConsultarTiposOperacion', 'BuscarLocalidades', 'AnalizarXml', 'ObtenerTagXml', 'LoadTestXML', 'SetParametros', 'SetParametro', 'GetParametro', 'CargarFormatoPDF', 'AgregarCampoPDF', 'AgregarDatoPDF', 'CrearPlantillaPDF', 'ProcesarPlantillaPDF', 'GenerarPDF', 'MostrarPDF', ] _public_attrs_ = ['Token', 'Sign', 'Cuit', 'AppServerStatus', 'DbServerStatus', 'AuthServerStatus', 'Excepcion', 'ErrCode', 'ErrMsg', 'LanzarExcepciones', 'Errores', 'XmlRequest', 'XmlResponse', 'Version', 'Traceback', 'InstallDir', 'COE', 'COEAjustado', 'Estado', 'Resultado', 'NroOrden', 'TotalDeduccion', 'TotalRetencion', 'TotalRetencionAfip', 'TotalOtrasRetenciones', 'TotalNetoAPagar', 'TotalPagoSegunCondicion', 'TotalIvaRg2300_07', 'Subtotal', 'TotalIva105', 'TotalIva21', 'TotalRetencionesGanancias', 'TotalRetencionesIVA', 'NroContrato', 'FechaCertificacion', ] _reg_progid_ = "WSLPG" _reg_clsid_ = "{9D21C513-21A6-413C-8592-047357692608}" # Variables globales para BaseWS: HOMO = HOMO WSDL = WSDL LanzarExcepciones = False Version = "%s %s" % (__version__, HOMO and 'Homologación' or '') def inicializar(self): BaseWS.inicializar(self) self.AppServerStatus = self.DbServerStatus = self.AuthServerStatus = None self.errores = [] self.COE = self.COEAjustado = "" self.Estado = self.Resultado = self.NroOrden = self.NroContrato = '' self.TotalDeduccion = "" self.TotalRetencion = "" self.TotalRetencionAfip = "" self.TotalOtrasRetenciones = "" self.TotalNetoAPagar = "" self.TotalIvaRg2300_07 = "" self.TotalPagoSegunCondicion = "" self.Subtotal = self.TotalIva105 = self.TotalIva21 = "" self.TotalRetencionesGanancias = self.TotalRetencionesIVA = "" self.TotalPercepcion = "" self.FechaCertificacion = "" 
self.datos = {} @inicializar_y_capturar_excepciones def Conectar(self, cache=None, url="", proxy="", wrapper="", cacert=None, timeout=30): "Establecer la conexión a los servidores de la AFIP" # llamo al constructor heredado: ok = BaseWS.Conectar(self, cache, url, proxy, wrapper, cacert, timeout) if ok: # corrijo ubicación del servidor (puerto htttp 80 en el WSDL) location = self.client.services['LpgService']['ports']['LpgEndPoint']['location'] if location.startswith("http://"): print "Corrigiendo WSDL ...", location, location = location.replace("http://", "https://").replace(":80", ":443") self.client.services['LpgService']['ports']['LpgEndPoint']['location'] = location print location try: # intento abrir el diccionario persistente de localidades import wslpg_datos localidades_db = os.path.join(self.cache, "localidades.dat") # verificar que puede escribir en el dir, sino abrir solo lectura flag = os.access(self.cache, os.W_OK) and 'c' or 'r' wslpg_datos.LOCALIDADES = shelve.open(localidades_db, flag=flag) if DEBUG: print "Localidades en BD:", len(wslpg_datos.LOCALIDADES) self.Traceback = "Localidades en BD: %s" % len(wslpg_datos.LOCALIDADES) except Exception, e: print "ADVERTENCIA: No se pudo abrir la bbdd de localidades:", e self.Excepcion = str(e) return ok def __analizar_errores(self, ret): "Comprueba y extrae errores si existen en la respuesta XML" errores = [] if 'errores' in ret: errores.extend(ret['errores']) if 'erroresFormato' in ret: errores.extend(ret['erroresFormato']) if errores: self.Errores = ["%(codigo)s: %(descripcion)s" % err['error'] for err in errores] self.errores = [ {'codigo': err['error']['codigo'], 'descripcion': err['error']['descripcion'].replace("\n", "") .replace("\r", "")} for err in errores] self.ErrCode = ' '.join(self.Errores) self.ErrMsg = '\n'.join(self.Errores) @inicializar_y_capturar_excepciones def Dummy(self): "Obtener el estado de los servidores de la AFIP" results = self.client.dummy()['return'] self.AppServerStatus = 
str(results['appserver']) self.DbServerStatus = str(results['dbserver']) self.AuthServerStatus = str(results['authserver']) return True @inicializar_y_capturar_excepciones def CrearLiquidacion(self, nro_orden=None, cuit_comprador=None, nro_act_comprador=None, nro_ing_bruto_comprador=None, cod_tipo_operacion=None, es_liquidacion_propia=None, es_canje=None, cod_puerto=None, des_puerto_localidad=None, cod_grano=None, cuit_vendedor=None, nro_ing_bruto_vendedor=None, actua_corredor=None, liquida_corredor=None, cuit_corredor=None, comision_corredor=None, nro_ing_bruto_corredor=None, fecha_precio_operacion=None, precio_ref_tn=None, cod_grado_ref=None, cod_grado_ent=None, factor_ent=None, precio_flete_tn=None, cont_proteico=None, alic_iva_operacion=None, campania_ppal=None, cod_localidad_procedencia=None, datos_adicionales=None, pto_emision=1, cod_prov_procedencia=None, peso_neto_sin_certificado=None, val_grado_ent=None, cod_localidad_procedencia_sin_certificado=None, cod_prov_procedencia_sin_certificado=None, nro_contrato=None, **kwargs ): "Inicializa internamente los datos de una liquidación para autorizar" # limpio los campos especiales (segun validaciones de AFIP) if alic_iva_operacion == 0: alic_iva_operacion = None # no informar alicuota p/ monotributo if val_grado_ent == 0: val_grado_ent = None # borrando datos corredor si no corresponden if actua_corredor == "N": cuit_corredor = None comision_corredor = None nro_ing_bruto_corredor = None # si no corresponde elimino el peso neto certificado campo opcional if not peso_neto_sin_certificado or not int(peso_neto_sin_certificado): peso_neto_sin_certificado = None if cod_puerto and int(cod_puerto) != 14: des_puerto_localidad = None # validacion 1630 # limpio los campos opcionales para no enviarlos si no corresponde: if cod_grado_ref == "": cod_grado_ref = None if cod_grado_ent == "": cod_grado_ent = None if val_grado_ent == 0: val_grado_ent = None # creo el diccionario con los campos generales de la liquidación: 
self.liquidacion = dict( ptoEmision=pto_emision, nroOrden=nro_orden, cuitComprador=cuit_comprador, nroActComprador=nro_act_comprador, nroIngBrutoComprador=nro_ing_bruto_comprador, codTipoOperacion=cod_tipo_operacion, esLiquidacionPropia=es_liquidacion_propia, esCanje=es_canje, codPuerto=cod_puerto, desPuertoLocalidad=des_puerto_localidad, codGrano=cod_grano, cuitVendedor=cuit_vendedor, nroIngBrutoVendedor=nro_ing_bruto_vendedor, actuaCorredor=actua_corredor, liquidaCorredor=liquida_corredor, cuitCorredor=cuit_corredor, comisionCorredor=comision_corredor, nroIngBrutoCorredor=nro_ing_bruto_corredor, fechaPrecioOperacion=fecha_precio_operacion, precioRefTn=precio_ref_tn, codGradoRef=cod_grado_ref, codGradoEnt=cod_grado_ent, valGradoEnt=val_grado_ent, factorEnt=factor_ent, precioFleteTn=precio_flete_tn, contProteico=cont_proteico, alicIvaOperacion=alic_iva_operacion, campaniaPPal=campania_ppal, codLocalidadProcedencia=cod_localidad_procedencia, codProvProcedencia=cod_prov_procedencia, datosAdicionales=datos_adicionales, pesoNetoSinCertificado=peso_neto_sin_certificado, numeroContrato=nro_contrato or None, certificados=[], ) # para compatibilidad hacia atras, "copiar" los campos si no hay cert: if peso_neto_sin_certificado: if cod_localidad_procedencia_sin_certificado is None: cod_localidad_procedencia_sin_certificado = cod_localidad_procedencia if cod_prov_procedencia_sin_certificado is None: cod_prov_procedencia_sin_certificado = cod_prov_procedencia self.liquidacion.update(dict( codLocalidadProcedenciaSinCertificado=cod_localidad_procedencia_sin_certificado, codProvProcedenciaSinCertificado=cod_prov_procedencia_sin_certificado, )) # inicializo las listas que contentran las retenciones y deducciones: self.retenciones = [] self.deducciones = [] # limpio las estructuras internas no utilizables en este caso self.certificacion = None return True @inicializar_y_capturar_excepciones def CrearLiqSecundariaBase(self, pto_emision=1, nro_orden=None, nro_contrato=None, 
cuit_comprador=None, nro_ing_bruto_comprador=None, cod_puerto=None, des_puerto_localidad=None, cod_grano=None, cantidad_tn=None, cuit_vendedor=None, nro_act_vendedor=None, # nuevo!! nro_ing_bruto_vendedor=None, actua_corredor=None, liquida_corredor=None, cuit_corredor=None, nro_ing_bruto_corredor=None, fecha_precio_operacion=None, precio_ref_tn=None, precio_operacion=None, alic_iva_operacion=None, campania_ppal=None, cod_localidad_procedencia=None, cod_prov_procedencia=None, datos_adicionales=None, **kwargs): "Inicializa los datos de una liquidación secundaria de granos (base)" # creo el diccionario con los campos generales de la liquidación: self.liquidacion = dict( ptoEmision=pto_emision, nroOrden=nro_orden, numeroContrato=nro_contrato or None, cuitComprador=cuit_comprador, nroIngBrutoComprador=nro_ing_bruto_comprador, codPuerto=cod_puerto, desPuertoLocalidad=des_puerto_localidad, codGrano=cod_grano, cantidadTn=cantidad_tn, cuitVendedor=cuit_vendedor, nroActVendedor=nro_act_vendedor, nroIngBrutoVendedor=nro_ing_bruto_vendedor, actuaCorredor=actua_corredor, liquidaCorredor=liquida_corredor, cuitCorredor=cuit_corredor or None, nroIngBrutoCorredor=nro_ing_bruto_corredor or None, fechaPrecioOperacion=fecha_precio_operacion, precioRefTn=precio_ref_tn, precioOperacion=precio_operacion, alicIvaOperacion=alic_iva_operacion or None, campaniaPPal=campania_ppal, codLocalidad=cod_localidad_procedencia, codProvincia=cod_prov_procedencia, datosAdicionales=datos_adicionales, ) # inicializo las listas que contentran las retenciones y deducciones: self.deducciones = [] self.percepciones = [] self.opcionales = [] self.factura_papel = None return True @inicializar_y_capturar_excepciones def AgregarCertificado(self, tipo_certificado_deposito=None, nro_certificado_deposito=None, peso_neto=None, cod_localidad_procedencia=None, cod_prov_procedencia=None, campania=None, fecha_cierre=None, peso_neto_total_certificado=None, coe_certificado_deposito=None, # WSLPGv1.6 **kwargs): "Agrego el 
certificado a la liquidación / certificación de granos" # limpio campos opcionales: if not peso_neto_total_certificado: peso_neto_total_certificado = None # 0 no es válido # coe_certificado_deposito no es para LPG, unificar en futuras versiones if tipo_certificado_deposito and int(tipo_certificado_deposito) == 332: if coe_certificado_deposito and long(coe_certificado_deposito): nro_certificado_deposito = coe_certificado_deposito coe_certificado_deposito = None cert = dict( tipoCertificadoDeposito=tipo_certificado_deposito, nroCertificadoDeposito=nro_certificado_deposito, pesoNeto=peso_neto, codLocalidadProcedencia=cod_localidad_procedencia, codProvProcedencia=cod_prov_procedencia, campania=campania, fechaCierre=fecha_cierre, pesoNetoTotalCertificado=peso_neto_total_certificado, coeCertificadoDeposito=coe_certificado_deposito, ) if self.liquidacion: self.liquidacion['certificados'].append({'certificado': cert}) else: self.certificacion['retiroTransferencia']['certificadoDeposito'] = cert return True @inicializar_y_capturar_excepciones def AgregarRetencion(self, codigo_concepto, detalle_aclaratorio, base_calculo, alicuota, nro_certificado_retencion=None, fecha_certificado_retencion=None, importe_certificado_retencion=None, **kwargs): "Agrega la información referente a las retenciones de la liquidación" # limpio los campos opcionales: if fecha_certificado_retencion is not None and not fecha_certificado_retencion.strip(): fecha_certificado_retencion = None if importe_certificado_retencion is not None and not float(importe_certificado_retencion): importe_certificado_retencion = None if nro_certificado_retencion is not None and not int(nro_certificado_retencion): nro_certificado_retencion = None self.retenciones.append(dict( retencion=dict( codigoConcepto=codigo_concepto, detalleAclaratorio=detalle_aclaratorio, baseCalculo=base_calculo, alicuota=alicuota, nroCertificadoRetencion=nro_certificado_retencion, fechaCertificadoRetencion=fecha_certificado_retencion, 
importeCertificadoRetencion=importe_certificado_retencion, )) ) return True @inicializar_y_capturar_excepciones def AgregarDeduccion(self, codigo_concepto=None, detalle_aclaratorio=None, dias_almacenaje=None, precio_pkg_diario=None, comision_gastos_adm=None, base_calculo=None, alicuota=None, **kwargs): "Agrega la información referente a las deducciones de la liquidación." # limpiar campo según validación (comision_gastos_adm puede ser 0.00!) if codigo_concepto != "CO" and comision_gastos_adm is not None \ and float(comision_gastos_adm) == 0: comision_gastos_adm = None # no enviar campos para prevenir errores AFIP 1705, 1707, 1708 if base_calculo is not None: if codigo_concepto == "AL": base_calculo = None if codigo_concepto == "CO" and float(base_calculo) == 0: base_calculo = None # no enviar, por retrocompatibilidad if codigo_concepto != "AL": dias_almacenaje = None precio_pkg_diario = None self.deducciones.append(dict( deduccion=dict( codigoConcepto=codigo_concepto, detalleAclaratorio=detalle_aclaratorio, diasAlmacenaje=dias_almacenaje, precioPKGdiario=precio_pkg_diario, comisionGastosAdm=comision_gastos_adm, baseCalculo=base_calculo, alicuotaIva=alicuota, )) ) return True @inicializar_y_capturar_excepciones def AgregarPercepcion(self, codigo_concepto=None, detalle_aclaratoria=None, base_calculo=None, alicuota=None, **kwargs): "Agrega la información referente a las percepciones de la liq. sec." self.percepciones.append(dict( percepcion=dict( detalleAclaratoria=detalle_aclaratoria, baseCalculo=base_calculo, alicuota=alicuota, )) ) return True @inicializar_y_capturar_excepciones def AgregarOpcional(self, codigo=None, descripcion=None, **kwargs): "Agrega la información referente a los opcionales de la liq. seq." 
self.opcionales.append(dict( opcional=dict( codigo=codigo, descripcion=descripcion, )) ) return True @inicializar_y_capturar_excepciones def AgregarFacturaPapel(self, nro_cai=None, nro_factura_papel=None, fecha_factura=None, tipo_comprobante=None, **kwargs): self.factura_papel = dict( nroCAI=nro_cai, nroFacturaPapel=nro_factura_papel, fechaFactura=fecha_factura, tipoComprobante=tipo_comprobante, ) return True @inicializar_y_capturar_excepciones def AutorizarLiquidacion(self): "Autorizar Liquidación Primaria Electrónica de Granos" # limpio los elementos que no correspondan por estar vacios: if not self.liquidacion['certificados']: del self.liquidacion['certificados'] if not self.retenciones: self.retenciones = None if not self.deducciones: self.deducciones = None # llamo al webservice: ret = self.client.liquidacionAutorizar( auth={ 'token': self.Token, 'sign': self.Sign, 'cuit': self.Cuit, }, liquidacion=self.liquidacion, retenciones=self.retenciones, deducciones=self.deducciones, ) # analizo la respusta ret = ret['liqReturn'] self.__analizar_errores(ret) self.AnalizarLiquidacion(ret.get('autorizacion'), self.liquidacion) return True @inicializar_y_capturar_excepciones def AutorizarLiquidacionSecundaria(self): "Autorizar Liquidación Secundaria Electrónica de Granos" # extraer y adaptar los campos para liq. sec. 
if self.deducciones: self.liquidacion['deduccion'] = [] for it in self.deducciones: ded = it['deduccion'] # no se agrupa self.liquidacion['deduccion'].append({ 'detalleAclaratoria': ded['detalleAclaratorio'], 'baseCalculo': ded['baseCalculo'], 'alicuotaIVA': ded['alicuotaIva']}) if self.percepciones: self.liquidacion['percepcion'] = [] for it in self.percepciones: per = it['percepcion'] # no se agrupa self.liquidacion['percepcion'].append(per) if self.opcionales: self.liquidacion['opcionales'] = self.opcionales # agrupado ok # llamo al webservice: ret = self.client.lsgAutorizar( auth={ 'token': self.Token, 'sign': self.Sign, 'cuit': self.Cuit, }, liqSecundariaBase=self.liquidacion, facturaPapel=self.factura_papel, ) # analizo la respusta ret = ret['oReturn'] self.__analizar_errores(ret) self.AnalizarLiquidacion(ret.get('autorizacion'), self.liquidacion) return True def AnalizarLiquidacion(self, aut, liq=None, ajuste=False): "Método interno para analizar la respuesta de AFIP" # proceso los datos básicos de la liquidación (devuelto por consultar): if liq: self.params_out = dict( pto_emision=liq.get('ptoEmision'), nro_orden=liq.get('nroOrden'), cuit_comprador=liq.get('cuitComprador'), nro_act_comprador=liq.get('nroActComprador'), nro_ing_bruto_comprador=liq.get('nroIngBrutoComprador'), cod_tipo_operacion=liq.get('codTipoOperacion'), es_liquidacion_propia=liq.get('esLiquidacionPropia'), es_canje=liq.get('esCanje'), cod_puerto=liq.get('codPuerto'), des_puerto_localidad=liq.get('desPuertoLocalidad'), cod_grano=liq.get('codGrano'), cuit_vendedor=liq.get('cuitVendedor'), nro_ing_bruto_vendedor=liq.get('nroIngBrutoVendedor'), actua_corredor=liq.get('actuaCorredor'), liquida_corredor=liq.get('liquidaCorredor'), cuit_corredor=liq.get('cuitCorredor'), comision_corredor=liq.get('comisionCorredor'), nro_ing_bruto_corredor=liq.get('nroIngBrutoCorredor'), fecha_precio_operacion=liq.get('fechaPrecioOperacion'), precio_ref_tn=liq.get('precioRefTn'), 
            # (continuation of AnalizarLiquidacion: copy the remaining
            # liquidation fields from the SOAP response dict into params_out)
            cod_grado_ref=liq.get('codGradoRef'),
            cod_grado_ent=liq.get('codGradoEnt'),
            factor_ent=liq.get('factorEnt'),
            precio_flete_tn=liq.get('precioFleteTn'),
            cont_proteico=liq.get('contProteico'),
            alic_iva_operacion=liq.get('alicIvaOperacion'),
            campania_ppal=liq.get('campaniaPPal'),
            cod_localidad_procedencia=liq.get('codLocalidadProcedencia'),
            cod_prov_procedencia=liq.get('codProvProcedencia'),
            datos_adicionales=liq.get('datosAdicionales'),
            peso_neto_sin_certificado=liq.get('pesoNetoSinCertificado'),
            cod_localidad_procedencia_sin_certificado=liq.get('codLocalidadProcedenciaSinCertificado'),
            cod_prov_procedencia_sin_certificado=liq.get('codProvProcedenciaSinCertificado'),
            certificados=[],
            )
        if ajuste:
            self.params_out.update(
                # adjustment-specific fields:
                diferencia_peso_neto=liq.get('diferenciaPesoNeto'),
                diferencia_precio_operacion=liq.get('diferenciaPrecioOperacion'),
                cod_grado=liq.get('codGrado'),
                val_grado=liq.get('valGrado'),
                factor=liq.get('factor'),
                diferencia_precio_flete_tn=liq.get('diferenciaPrecioFleteTn'),
                concepto_importe_iva_0=liq.get('conceptoImporteIva0'),
                importe_ajustar_iva_0=liq.get('importeAjustarIva0'),
                concepto_importe_iva_105=liq.get('conceptoImporteIva105'),
                importe_ajustar_iva_105=liq.get('importeAjustarIva105'),
                concepto_importe_iva_21=liq.get('conceptoImporteIva21'),
                importe_ajustar_iva_21=liq.get('importeAjustarIva21'),
                )
            # break down the adjusted amounts per VAT rate
            # (the same field names are reused for compatibility/consistency)
            for it in liq.get("importes", liq.get("importe")):
                # LSG adjustments do not group the amounts in a subtype...
                if 'importeReturn' in it:
                    it = it['importeReturn'][0]  # TODO: review SOAP
                # e.g. alicuota 10.5 -> key suffix "iva_105"
                tasa = "iva_%s" % str(it['alicuota']).replace(".", "").strip()
                self.params_out["concepto_importe_%s" % tasa] = it['concepto']
                self.params_out["importe_ajustar_%s" % tasa] = it['importe']
                self.params_out["iva_calculado_%s" % tasa] = it['ivaCalculado']
        if 'certificados' in liq:
            for c in liq['certificados']:
                cert = c['certificado']
                self.params_out['certificados'].append(dict(
                    tipo_certificado_deposito=cert['tipoCertificadoDeposito'],
                    nro_certificado_deposito=cert['nroCertificadoDeposito'],
                    peso_neto=cert['pesoNeto'],
                    cod_localidad_procedencia=cert['codLocalidadProcedencia'],
                    cod_prov_procedencia=cert['codProvProcedencia'],
                    campania=cert['campania'],
                    fecha_cierre=cert['fechaCierre'],
                ))
        self.params_out['errores'] = self.errores
        # process the authorize / adjust (and query) response:
        if aut:
            self.TotalDeduccion = aut.get('totalDeduccion')
            self.TotalRetencion = aut.get('totalRetencion')
            self.TotalRetencionAfip = aut.get('totalRetencionAfip')
            self.TotalOtrasRetenciones = aut.get('totalOtrasRetenciones')
            self.TotalNetoAPagar = aut.get('totalNetoAPagar')
            self.TotalIvaRg2300_07 = aut.get('totalIvaRg2300_07')
            self.TotalPagoSegunCondicion = aut.get('totalPagoSegunCondicion')
            self.COE = str(aut.get('coe', ''))
            self.COEAjustado = aut.get('coeAjustado')
            self.Estado = aut.get('estado', '')
            self.NroContrato = aut.get('numeroContrato', '')
            # update output parameters:
            self.params_out['coe'] = self.COE
            self.params_out['coe_ajustado'] = self.COEAjustado
            self.params_out['estado'] = self.Estado
            self.params_out['total_deduccion'] = self.TotalDeduccion
            self.params_out['total_retencion'] = self.TotalRetencion
            self.params_out['total_retencion_afip'] = self.TotalRetencionAfip
            self.params_out['total_otras_retenciones'] = self.TotalOtrasRetenciones
            self.params_out['total_neto_a_pagar'] = self.TotalNetoAPagar
            self.params_out['total_iva_rg_2300_07'] = self.TotalIvaRg2300_07
            self.params_out['total_pago_segun_condicion'] = self.TotalPagoSegunCondicion
            # additional data:
            self.NroOrden = self.params_out['nro_orden'] = aut.get('nroOrden')
            self.params_out['cod_tipo_ajuste'] = aut.get('codTipoAjuste')
            fecha = aut.get('fechaLiquidacion')
            if fecha:
                fecha = str(fecha)
            self.params_out['fecha_liquidacion'] = fecha
            self.params_out['importe_iva'] = aut.get('importeIva')
            self.params_out['nro_op_comercial'] = aut.get('nroOpComercial')
            self.params_out['operacion_con_iva'] = aut.get('operacionConIva')
            self.params_out['precio_operacion'] = aut.get('precioOperacion')
            self.params_out['total_peso_neto'] = aut.get('totalPesoNeto')
            self.params_out['subtotal'] = aut.get('subTotal')
            # LSG-specific fields:
            self.params_out['total_deducciones'] = aut.get('totalDeducciones')
            if 'todalPercepciones' in aut:
                # typo in AFIP's WSDL ("todal")...
                self.params_out['total_percepciones'] = aut.get('todalPercepciones')
            else:
                self.params_out['total_percepciones'] = aut.get('totalPercepciones')
            # sub-structures:
            self.params_out['retenciones'] = []
            self.params_out['deducciones'] = []
            for retret in aut.get("retenciones", []):
                retret = retret['retencionReturn']
                self.params_out['retenciones'].append({
                    'importe_retencion': retret['importeRetencion'],
                    'alicuota': retret['retencion'].get('alicuota'),
                    'base_calculo': retret['retencion'].get('baseCalculo'),
                    'codigo_concepto': retret['retencion'].get('codigoConcepto'),
                    'detalle_aclaratorio': retret['retencion'].get('detalleAclaratorio', "").replace("\n", ""),
                    'importe_certificado_retencion': retret['retencion'].get('importeCertificadoRetencion'),
                    'nro_certificado_retencion': retret['retencion'].get('nroCertificadoRetencion'),
                    'fecha_certificado_retencion': retret['retencion'].get('fechaCertificadoRetencion'),
                })
            for dedret in aut.get("deducciones", []):
                dedret = dedret['deduccionReturn']
                self.params_out['deducciones'].append({
                    'importe_deduccion': dedret['importeDeduccion'],
                    'importe_iva': dedret.get('importeIva'),
                    'alicuota': dedret['deduccion'].get('alicuotaIva'),
                    'base_calculo': dedret['deduccion'].get('baseCalculo'),
                    'codigo_concepto': dedret['deduccion'].get('codigoConcepto'),
                    'detalle_aclaratorio': dedret['deduccion'].get('detalleAclaratorio', "").replace("\n", ""),
                    'dias_almacenaje': dedret['deduccion'].get('diasAlmacenaje'),
                    'precio_pkg_diario': dedret['deduccion'].get('precioPKGdiario'),
                    'comision_gastos_adm': dedret['deduccion'].get('comisionGastosAdm'),
                })

    @inicializar_y_capturar_excepciones
    def CrearAjusteBase(self, pto_emision=1, nro_orden=None,   # unificado, contrato, papel
                        coe_ajustado=None,                     # unificado
                        nro_contrato=None,                     # contrato
                        tipo_formulario=None,                  # papel
                        nro_formulario=None,                   # papel
                        actividad=None,                        # contrato / papel
                        cod_grano=None,                        # contrato / papel
                        cuit_vendedor=None,                    # contrato / papel
                        cuit_comprador=None,                   # contrato / papel
                        cuit_corredor=None,                    # contrato / papel
                        nro_ing_bruto_vendedor=None,           # papel
                        nro_ing_bruto_comprador=None,          # papel
                        nro_ing_bruto_corredor=None,           # papel
                        tipo_operacion=None,                   # papel
                        precio_ref_tn=None,                    # contrato
                        cod_grado_ent=None,                    # contrato
                        val_grado_ent=None,                    # contrato
                        precio_flete_tn=None,                  # contrato
                        cod_puerto=None,                       # contrato
                        des_puerto_localidad=None,             # contrato
                        cod_provincia=None,                    # unificado, contrato, papel
                        cod_localidad=None,                    # unificado, contrato, papel
                        comision_corredor=None,                # papel
                        **kwargs
                        ):
        "Internally initialize the data of a liquidation to be adjusted"
        # rename fields for backwards compatibility (header):
        if 'cod_localidad_procedencia' in kwargs:
            cod_localidad = kwargs['cod_localidad_procedencia']
        if 'cod_provincia_procedencia' in kwargs:
            cod_provincia = kwargs['cod_provincia_procedencia']
        if 'nro_act_comprador' in kwargs:
            actividad = kwargs['nro_act_comprador']
        if 'cod_tipo_operacion' in kwargs:
            tipo_operacion = kwargs['cod_tipo_operacion']
        # clean up special fields (per AFIP validations)
        if val_grado_ent == 0:
            val_grado_ent = None
        # drop data when it does not apply
        if cuit_corredor and int(cuit_corredor) == 0:
            cuit_corredor = None
            comision_corredor = None
            nro_ing_bruto_corredor = None
        if cod_puerto and int(cod_puerto) != 14:
            des_puerto_localidad = None  # validation 1630
        # clear optional fields so they are not sent when not applicable:
        if cod_grado_ent == "":
            cod_grado_ent = None
        if val_grado_ent == 0:
            val_grado_ent = None
        # build the dict with the general fields of the base adjustment:
        self.ajuste = {'ajusteBase': {
            'ptoEmision': pto_emision,
            'nroOrden': nro_orden,
            'coeAjustado': coe_ajustado,
            'nroContrato': nro_contrato,
            'tipoFormulario': tipo_formulario,
            'nroFormulario': nro_formulario,
            'actividad': actividad,
            'codGrano': cod_grano,
            'cuitVendedor': cuit_vendedor,
            'cuitComprador': cuit_comprador,
            'cuitCorredor': cuit_corredor,
            'nroIngBrutoVendedor': nro_ing_bruto_vendedor,
            'nroIngBrutoComprador': nro_ing_bruto_comprador,
            'nroIngBrutoCorredor': nro_ing_bruto_corredor,
            'tipoOperacion': tipo_operacion,
            'codPuerto': cod_puerto,
            'desPuertoLocalidad': des_puerto_localidad,
            'comisionCorredor': comision_corredor,
            'precioRefTn': precio_ref_tn,
            'codGradoEnt': cod_grado_ent,
            'valGradoEnt': val_grado_ent,
            'precioFleteTn': precio_flete_tn,
            'codLocalidad': cod_localidad,
            'codProv': cod_provincia,
            'certificados': [],
        }}
        # for compatibility with AgregarCertificado
        self.liquidacion = self.ajuste['ajusteBase']
        # initialize temporaries
        self.__ajuste_base = None
        self.__ajuste_debito = None
        self.__ajuste_credito = None
        return True

    @inicializar_y_capturar_excepciones
    def CrearAjusteCredito(self, datos_adicionales=None,       # unificado, contrato, papel
                           concepto_importe_iva_0=None,        # unificado, contrato, papel
                           importe_ajustar_iva_0=None,         # unificado, contrato, papel
                           concepto_importe_iva_105=None,      # unificado, contrato, papel
                           importe_ajustar_iva_105=None,       # unificado, contrato, papel
                           concepto_importe_iva_21=None,       # unificado, contrato, papel
                           importe_ajustar_iva_21=None,        # unificado, contrato, papel
                           diferencia_peso_neto=None,          # unificado
                           diferencia_precio_operacion=None,   # unificado
                           cod_grado=None,                     # unificado
                           val_grado=None,                     # unificado
                           factor=None,                        # unificado
                           diferencia_precio_flete_tn=None,    # unificado
                           **kwargs
                           ):
        "Internally initialize the credit adjustment data"
        self.ajuste['ajusteCredito'] = {
            'diferenciaPesoNeto': diferencia_peso_neto,
            'diferenciaPrecioOperacion': diferencia_precio_operacion,
            'codGrado': cod_grado,
            'valGrado': val_grado,
            'factor': factor,
            'diferenciaPrecioFleteTn': diferencia_precio_flete_tn,
            'datosAdicionales': datos_adicionales,
            'opcionales': None,
            'conceptoImporteIva0': concepto_importe_iva_0,
            'importeAjustarIva0': importe_ajustar_iva_0,
            'conceptoImporteIva105': concepto_importe_iva_105,
            'importeAjustarIva105': importe_ajustar_iva_105,
            'conceptoImporteIva21': concepto_importe_iva_21,
            'importeAjustarIva21': importe_ajustar_iva_21,
            'deducciones': [],
            'retenciones': [],
            'percepciones': [],
        }
        # link for AgregarOpcional:
        self.opcionales = self.ajuste['ajusteCredito']['opcionales']
        # link for AgregarRetencion and AgregarDeduccion
        self.deducciones = self.ajuste['ajusteCredito']['deducciones']
        self.retenciones = self.ajuste['ajusteCredito']['retenciones']
        # for LSG:
        self.percepciones = self.ajuste['ajusteCredito']['percepciones']
        return True

    @inicializar_y_capturar_excepciones
    def CrearAjusteDebito(self, datos_adicionales=None,        # unificado, contrato, papel
                          concepto_importe_iva_0=None,         # unificado, contrato, papel
                          importe_ajustar_iva_0=None,          # unificado, contrato, papel
                          concepto_importe_iva_105=None,       # unificado, contrato, papel
                          importe_ajustar_iva_105=None,        # unificado, contrato, papel
                          concepto_importe_iva_21=None,        # unificado, contrato, papel
                          importe_ajustar_iva_21=None,         # unificado, contrato, papel
                          diferencia_peso_neto=None,           # unificado
                          diferencia_precio_operacion=None,    # unificado
                          cod_grado=None,                      # unificado
                          val_grado=None,                      # unificado
                          factor=None,                         # unificado
                          diferencia_precio_flete_tn=None,     # unificado
                          **kwargs
                          ):
        "Internally initialize the debit adjustment data"
        # (body of CrearAjusteDebito: mirrors CrearAjusteCredito, stored under
        # the 'ajusteDebito' key)
        self.ajuste['ajusteDebito'] = {
            'diferenciaPesoNeto': diferencia_peso_neto,
            'diferenciaPrecioOperacion': diferencia_precio_operacion,
            'codGrado': cod_grado,
            'valGrado': val_grado,
            'factor': factor,
            'diferenciaPrecioFleteTn': diferencia_precio_flete_tn,
            'datosAdicionales': datos_adicionales,
            'opcionales': None,
            'conceptoImporteIva0': concepto_importe_iva_0,
            'importeAjustarIva0': importe_ajustar_iva_0,
            'conceptoImporteIva105': concepto_importe_iva_105,
            'importeAjustarIva105': importe_ajustar_iva_105,
            'conceptoImporteIva21': concepto_importe_iva_21,
            'importeAjustarIva21': importe_ajustar_iva_21,
            'deducciones': [],
            'retenciones': [],
            'percepciones': [],
        }
        # link for AgregarOpcional:
        self.opcionales = self.ajuste['ajusteDebito']['opcionales']
        # link for AgregarRetencion and AgregarDeduccion
        self.deducciones = self.ajuste['ajusteDebito']['deducciones']
        self.retenciones = self.ajuste['ajusteDebito']['retenciones']
        # for LSG:
        self.percepciones = self.ajuste['ajusteDebito']['percepciones']
        return True

    @inicializar_y_capturar_excepciones
    def AjustarLiquidacionUnificado(self):
        "Adjust a Primary Grain Liquidation (unified)"
        # remove unused structures (if there are no deductions / withholdings)
        for k in ('ajusteDebito', 'ajusteCredito'):
            if not any(self.ajuste[k].values()):
                del self.ajuste[k]
            else:
                if not self.ajuste[k]['deducciones']:
                    del self.ajuste[k]['deducciones']
                if not self.ajuste[k]['retenciones']:
                    del self.ajuste[k]['retenciones']
        # call the webservice:
        ret = self.client.liquidacionAjustarUnificado(
            auth={
                'token': self.Token, 'sign': self.Sign,
                'cuit': self.Cuit, },
            **self.ajuste
        )
        # analyze the result:
        ret = ret['ajusteUnifReturn']
        self.__analizar_errores(ret)
        if 'ajusteUnificado' in ret:
            aut = ret['ajusteUnificado']
            self.AnalizarAjuste(aut)
        return True

    @inicializar_y_capturar_excepciones
    def AjustarLiquidacionUnificadoPapel(self):
        "Adjust a liquidation made on an F1116 B / C paper form"
        # remove arrays that are not being sent:
        if not self.ajuste['ajusteBase']['certificados']:
            del self.ajuste['ajusteBase']['certificados']
        for k1 in ('ajusteCredito', 'ajusteDebito'):
            for k2 in ('retenciones', 'deducciones'):
                if not self.ajuste[k1][k2]:
                    del self.ajuste[k1][k2]
        ret = self.client.liquidacionAjustarUnificadoPapel(
            auth={
                'token': self.Token, 'sign': self.Sign,
                'cuit': self.Cuit, },
            **self.ajuste
        )
        ret = ret['ajustePapelReturn']
        self.__analizar_errores(ret)
        if 'ajustePapel' in ret:
            aut = ret['ajustePapel']
            self.AnalizarAjuste(aut)
        return True

    @inicializar_y_capturar_excepciones
    def AjustarLiquidacionContrato(self):
        "Adjust active liquidations related to a contract"
        # remove arrays that are not being sent:
        if not self.ajuste['ajusteBase']['certificados']:
            del self.ajuste['ajusteBase']['certificados']
        for k1 in ('ajusteCredito', 'ajusteDebito'):
            for k2 in ('retenciones', 'deducciones'):
                if not self.ajuste[k1][k2]:
                    del self.ajuste[k1][k2]
        ret = self.client.liquidacionAjustarContrato(
            auth={
                'token': self.Token, 'sign': self.Sign,
                'cuit': self.Cuit, },
            **self.ajuste
        )
        ret = ret['ajusteContratoReturn']
        self.__analizar_errores(ret)
        if 'ajusteContrato' in ret:
            aut = ret['ajusteContrato']
            self.AnalizarAjuste(aut)
        return True

    @inicializar_y_capturar_excepciones
    def AjustarLiquidacionSecundaria(self):
        "Adjust a Secondary Grain Liquidation"
        # remove unused structures (if there are no deductions / withholdings)
        for k in ('ajusteDebito', 'ajusteCredito'):
            if k not in self.ajuste:
                # skip if the credit / debit adjustment structure was not added
                continue
            elif not any(self.ajuste[k].values()):
                # remove empty credit / debit structure
                del self.ajuste[k]
            else:
                # adapt field-name differences between LSG and LPG
                # (note LSG uses "10" where LPG uses "105" for the 10.5% rate)
                for tasa in ("0", "105", "21"):
                    tasa_lsg = "10" if tasa == "105" else tasa
                    self.ajuste[k]['importeAjustar%s' % tasa_lsg] = self.ajuste[k]['importeAjustarIva%s' % tasa]
                    self.ajuste[k]['conceptoIva%s' % tasa_lsg] = self.ajuste[k]['conceptoImporteIva%s' % tasa]
                # do not send an empty percepciones tag
                if not self.ajuste[k]['percepciones']:
                    del self.ajuste[k]['percepciones']
        base = self.ajuste['ajusteBase']
        base['coe'] = base['coeAjustado']
        base['codProvincia'] = base['codProv']
        # call the webservice:
        # (long() is Python 2 — this module targets py2)
        if base['nroContrato'] is not None and long(base['nroContrato']):
            metodo = self.client.lsgAjustarXContrato
        else:
            metodo = self.client.lsgAjustarXCoe
        ret = metodo(
            auth={
                'token': self.Token, 'sign': self.Sign,
                'cuit': self.Cuit, },
            ajusteCredito=self.ajuste.get('ajusteCredito'),
            ajusteDebito=self.ajuste.get('ajusteDebito'),
            **base
        )
        # analyze the result:
        ret = ret['oReturn']
        self.__analizar_errores(ret)
        if ret:
            self.AnalizarAjuste(ret)
        return True

    def AnalizarAjuste(self, aut, base=True):
        "Internal method to analyze AFIP's response (adjustments)"
        self.__ajuste_base = None
        self.__ajuste_debito = None
        self.__ajuste_credito = None
        # for compatibility with PDF generation (fill in data)
        if hasattr(self, "liquidacion") and self.liquidacion and base:
            self.AnalizarLiquidacion(aut=None, liq=self.liquidacion)
        self.params_out['errores'] = self.errores
        # process the authorize / adjust (and query) response:
        if aut:
            # on cancellation, or when it is not an adjustment, no data is returned now:
            self.COE = str(aut.get('coe', ""))
            self.COEAjustado = aut.get('coeAjustado')
            self.NroContrato = aut.get('nroContrato')
            self.Estado = aut.get('estado', "")
            totunif = aut.get("totalesUnificados") or {}
            self.Subtotal = totunif.get('subTotalGeneral')
            self.TotalIva105 = totunif.get('iva105')
            self.TotalIva21 = totunif.get('iva21')
            self.TotalRetencionesGanancias = totunif.get('retencionesGanancias')
            self.TotalRetencionesIVA = totunif.get('retencionesIVA')
            self.TotalOtrasRetenciones = totunif.get('importeOtrasRetenciones')
            self.TotalNetoAPagar = totunif.get('importeNeto')
            self.TotalIvaRg2300_07 = totunif.get('ivaRG2300_2007')
            self.TotalPagoSegunCondicion = totunif.get('pagoSCondicion')
            # update output parameters:
            self.params_out['coe'] = self.COE
            self.params_out['coe_ajustado'] = self.COEAjustado
            self.params_out['estado'] = self.Estado
            self.params_out['nro_orden'] = aut.get('nroOrden')
            self.params_out['cod_tipo_operacion'] = aut.get('codTipoOperacion')
            self.params_out['nro_contrato'] = aut.get('nroContrato')
            self.params_out['nro_op_comercial'] = aut.get('nroOpComercial', "")
            # update the totals only for the base adjustment (general liquidation)
            if base:
                self.params_out['subtotal'] = self.Subtotal
                self.params_out['iva_deducciones'] = totunif.get('ivaDeducciones')
                self.params_out['subtotal_deb_cred'] = totunif.get('subTotalDebCred')
                self.params_out['total_base_deducciones'] = totunif.get('totalBaseDeducciones')
                self.params_out['total_iva_10_5'] = self.TotalIva105
                self.params_out['total_iva_21'] = self.TotalIva21
                self.params_out['total_retenciones_ganancias'] = self.TotalRetencionesGanancias
                self.params_out['total_retenciones_iva'] = self.TotalRetencionesIVA
                self.params_out['total_otras_retenciones'] = self.TotalOtrasRetenciones
                self.params_out['total_neto_a_pagar'] = self.TotalNetoAPagar
                self.params_out['total_iva_rg_2300_07'] = self.TotalIvaRg2300_07
                self.params_out['total_pago_segun_condicion'] = self.TotalPagoSegunCondicion
            # store the credit and debit adjustment data for later use
            self.__ajuste_base = aut
            self.__ajuste_debito = aut.get('ajusteDebito') or {}
            self.__ajuste_credito = aut.get('ajusteCredito') or {}
        return True

    @inicializar_y_capturar_excepciones
    def AnalizarAjusteDebito(self):
        "Method to analyze AFIP's response for a Debit Adjustment"
        # for compatibility with PDF generation (fill in data)
        liq = {}
        if hasattr(self, "liquidacion") and self.liquidacion:
            liq.update(self.liquidacion)
        if hasattr(self, "ajuste") and 'ajusteDebito' in self.ajuste:
            liq.update(self.ajuste['ajusteDebito'])
        if self.__ajuste_debito:
            liq.update(self.__ajuste_debito)
        self.AnalizarLiquidacion(aut=self.__ajuste_debito, liq=liq, ajuste=True)
        self.AnalizarAjuste(self.__ajuste_base, base=False)  # general data
        return True
    @inicializar_y_capturar_excepciones
    def AnalizarAjusteCredito(self):
        "Method to analyze AFIP's response for a Credit Adjustment"
        # for compatibility with PDF generation (fill in data)
        liq = {}
        if hasattr(self, "liquidacion") and self.liquidacion:
            liq.update(self.liquidacion)
        if hasattr(self, "ajuste") and 'ajusteCredito' in self.ajuste:
            liq.update(self.ajuste['ajusteCredito'])
        if self.__ajuste_credito:
            liq.update(self.__ajuste_credito)
        self.AnalizarLiquidacion(aut=self.__ajuste_credito, liq=liq, ajuste=True)
        self.AnalizarAjuste(self.__ajuste_base, base=False)  # general data
        return True

    @inicializar_y_capturar_excepciones
    def CrearCertificacionCabecera(self, pto_emision=1, nro_orden=None,
                                   tipo_certificado=None, nro_planta=None,
                                   nro_ing_bruto_depositario=None,
                                   titular_grano=None,
                                   cuit_depositante=None,
                                   nro_ing_bruto_depositante=None,
                                   cuit_corredor=None,
                                   cod_grano=None, campania=None,
                                   datos_adicionales=None,
                                   **kwargs):
        "Initialize the data of a grain certification (header)"
        self.certificacion = {}
        self.certificacion['cabecera'] = dict(
            ptoEmision=pto_emision,
            nroOrden=nro_orden,
            tipoCertificado=tipo_certificado,
            nroPlanta=nro_planta or None,                           # optional
            nroIngBrutoDepositario=nro_ing_bruto_depositario,
            titularGrano=titular_grano,
            cuitDepositante=cuit_depositante or None,               # optional
            nroIngBrutoDepositante=nro_ing_bruto_depositante or None,  # optional
            cuitCorredor=cuit_corredor or None,                     # optional
            codGrano=cod_grano,
            campania=campania,
            datosAdicionales=datos_adicionales,                     # optional
        )
        # clear the internal structures that are not usable in this case
        self.liquidacion = None
        return True

    @inicializar_y_capturar_excepciones
    def AgregarCertificacionPrimaria(self,
                                     nro_act_depositario=None,
                                     descripcion_tipo_grano=None,
                                     monto_almacenaje=None,
                                     monto_acarreo=None,
                                     monto_gastos_generales=None,
                                     monto_zarandeo=None,
                                     porcentaje_secado_de=None,
                                     porcentaje_secado_a=None,
                                     monto_secado=None,
                                     monto_por_cada_punto_exceso=None,
                                     monto_otros=None,
                                     porcentaje_merma_volatil=None,
                                     peso_neto_merma_volatil=None,
                                     porcentaje_merma_secado=None,
                                     peso_neto_merma_secado=None,
                                     porcentaje_merma_zarandeo=None,
                                     peso_neto_merma_zarandeo=None,
                                     peso_neto_certificado=None,
                                     servicios_secado=None,
                                     servicios_zarandeo=None,
                                     servicios_otros=None,
                                     servicios_forma_de_pago=None,
                                     **kwargs):
        # Fill in the "primaria" section of the certification payload
        self.certificacion['primaria'] = dict(
            nroActDepositario=nro_act_depositario,
            ctg=[],  # <!--0 or more repetitions:-->
            descripcionTipoGrano=descripcion_tipo_grano,
            montoAlmacenaje=monto_almacenaje,
            montoAcarreo=monto_acarreo,
            montoGastosGenerales=monto_gastos_generales,
            montoZarandeo=monto_zarandeo,
            porcentajeSecadoDe=porcentaje_secado_de,
            porcentajeSecadoA=porcentaje_secado_a,
            montoSecado=monto_secado,
            montoPorCadaPuntoExceso=monto_por_cada_punto_exceso,
            montoOtros=monto_otros,
            porcentajeMermaVolatil=porcentaje_merma_volatil,
            pesoNetoMermaVolatil=peso_neto_merma_volatil,
            porcentajeMermaSecado=porcentaje_merma_secado,
            pesoNetoMermaSecado=peso_neto_merma_secado,
            porcentajeMermaZarandeo=porcentaje_merma_zarandeo,
            pesoNetoMermaZarandeo=peso_neto_merma_zarandeo,
            pesoNetoCertificado=peso_neto_certificado,
            serviciosSecado=servicios_secado or None,               # optional
            serviciosZarandeo=servicios_zarandeo or None,
            serviciosOtros=servicios_otros or None,
            serviciosFormaDePago=servicios_forma_de_pago or None,
        )
        return True

    @inicializar_y_capturar_excepciones
    def AgregarCertificacionRetiroTransferencia(self,
                                                nro_act_depositario=None,
                                                cuit_receptor=None,
                                                fecha=None,
                                                nro_carta_porte_a_utilizar=None,
                                                cee_carta_porte_a_utilizar=None,
                                                **kwargs):
        # Fill in the "retiroTransferencia" section of the certification payload
        self.certificacion['retiroTransferencia'] = dict(
            nroActDepositario=nro_act_depositario,
            cuitReceptor=cuit_receptor or None,                     # optional
            fecha=fecha,
            nroCartaPorteAUtilizar=nro_carta_porte_a_utilizar or None,
            ceeCartaPorteAUtilizar=cee_carta_porte_a_utilizar or None,
            certificadoDeposito=[],  # <!--0 or more repetitions:-->
        )
        return True

    @inicializar_y_capturar_excepciones
    def AgregarCertificacionPreexistente(self,
                                         tipo_certificado_deposito_preexistente=None,
                                         nro_certificado_deposito_preexistente=None,
                                         cac_certificado_deposito_preexistente=None,
                                         fecha_emision_certificado_deposito_preexistente=None,
                                         peso_neto=None, nro_planta=None,
                                         **kwargs):
        # Fill in the "preexistente" section of the certification payload
        self.certificacion['preexistente'] = dict(
            tipoCertificadoDepositoPreexistente=tipo_certificado_deposito_preexistente,
            nroCertificadoDepositoPreexistente=nro_certificado_deposito_preexistente,
            cacCertificadoDepositoPreexistente=cac_certificado_deposito_preexistente,
            fechaEmisionCertificadoDepositoPreexistente=fecha_emision_certificado_deposito_preexistente,
            pesoNeto=peso_neto,
            nroPlanta=nro_planta,
        )
        return True

    @inicializar_y_capturar_excepciones
    def AgregarCalidad(self, analisis_muestra=None, nro_boletin=None,
                       cod_grado=None, valor_grado=None,
                       valor_contenido_proteico=None, valor_factor=None,
                       **kwargs):
        "Add the quality information, when authorizing or afterwards"
        self.certificacion['primaria']['calidad'] = dict(
            analisisMuestra=analisis_muestra,
            nroBoletin=nro_boletin,
            codGrado=cod_grado,                                     # G1 G2 G3 F1 F2 F3
            valorGrado=valor_grado or None,                         # optional
            valorContProteico=valor_contenido_proteico,
            valorFactor=valor_factor,
            detalleMuestraAnalisis=[],  # <!--1 or more repetitions:-->
        )
        return True

    @inicializar_y_capturar_excepciones
    def AgregarDetalleMuestraAnalisis(self, descripcion_rubro=None,
                                      tipo_rubro=None,
                                      porcentaje=None,
                                      valor=None,
                                      **kwargs):
        "Add the information for the certification detail"
        # requires AgregarCalidad to have been called first
        det = dict(
            descripcionRubro=descripcion_rubro,
            tipoRubro=tipo_rubro,
            porcentaje=porcentaje,
            valor=valor,
        )
        self.certificacion['primaria']['calidad']['detalleMuestraAnalisis'].append(det)
        return True

    @inicializar_y_capturar_excepciones
    def BuscarCTG(self, tipo_certificado="P", cuit_depositante=None,
                  nro_planta=None, cod_grano=2, campania=1314,
                  nro_ctg=None, tipo_ctg=None, nro_carta_porte=None,
                  fecha_confirmacion_ctg_des=None,
                  fecha_confirmacion_ctg_has=None,
                  ):
        "Return the CTG / waybills that can be included in a certificate"
        ret = self.client.cgBuscarCtg(
            auth={
                'token': self.Token, 'sign': self.Sign,
                'cuit': self.Cuit, },
            tipoCertificado=tipo_certificado,
            cuitDepositante=cuit_depositante or self.Cuit,
            nroPlanta=nro_planta,
            codGrano=cod_grano, campania=campania,
            nroCtg=nro_ctg, tipoCtg=tipo_ctg,
            nroCartaPorte=nro_carta_porte,
            fechaConfirmacionCtgDes=fecha_confirmacion_ctg_des,
            fechaConfirmacionCtgHas=fecha_confirmacion_ctg_has,
        )['oReturn']
        self.__analizar_errores(ret)
        array = ret.get('ctg', [])
        self.Excepcion = self.Traceback = ""
        self.params_out['ctgs'] = []
        for ctg in array:
            self.params_out['ctgs'].append({
                'campania': ctg.get('campania'),
                'nro_planta': ctg.get('nroPlanta'),
                'nro_ctg': ctg.get('nroCtg'),
                'tipo_ctg': ctg.get('tipoCtg'),
                'nro_carta_porte': ctg.get('nroCartaPorte'),
                'kilos_confirmados': ctg.get('kilosConfirmados'),
                'fecha_confirmacion_ctg': ctg.get('fechaConfirmacionCtg'),
                'cod_grano': ctg.get('codGrano'),
                'cuit_remitente_comercial': ctg.get('cuitRemitenteComercial'),
                'cuit_liquida': ctg.get('cuitLiquida'),
                'cuit_certifica': ctg.get('cuitCertifica'),
            })
        return True

    @inicializar_y_capturar_excepciones
    def AgregarCTG(self, nro_ctg=None, nro_carta_porte=None,
                   porcentaje_secado_humedad=None, importe_secado=None,
                   peso_neto_merma_secado=None, tarifa_secado=None,
                   importe_zarandeo=None, peso_neto_merma_zarandeo=None,
                   tarifa_zarandeo=None,
                   peso_neto_confirmado_definitivo=None,
                   **kwargs):
        "Add the information for a CTG of the certification"
        ctg = dict(
            nroCTG=nro_ctg,
            nroCartaDePorte=nro_carta_porte,
            pesoNetoConfirmadoDefinitivo=peso_neto_confirmado_definitivo,
            porcentajeSecadoHumedad=porcentaje_secado_humedad,
            importeSecado=importe_secado,
            pesoNetoMermaSecado=peso_neto_merma_secado,
            tarifaSecado=tarifa_secado,
            importeZarandeo=importe_zarandeo,
            pesoNetoMermaZarandeo=peso_neto_merma_zarandeo,
            tarifaZarandeo=tarifa_zarandeo,
        )
        self.certificacion['primaria']['ctg'].append(ctg)
        return True

    @inicializar_y_capturar_excepciones
    def BuscarCertConSaldoDisponible(self, cuit_depositante=None,
                                     cod_grano=2, campania=1314, coe=None,
                                     fecha_emision_des=None,
                                     fecha_emision_has=None,
                                     ):
        """Return the deposit certificates on which a producer has an
        available balance to Liquidate/Withdraw/Transfer"""
        ret = self.client.cgBuscarCertConSaldoDisponible(
            auth={
                'token': self.Token, 'sign': self.Sign,
                'cuit': self.Cuit, },
            cuitDepositante=cuit_depositante or self.Cuit,
            codGrano=cod_grano, campania=campania,
            coe=coe,
            fechaEmisionDes=fecha_emision_des,
            fechaEmisionHas=fecha_emision_has,
        )['oReturn']
        self.__analizar_errores(ret)
        array = ret.get('certificado', [])
        self.Excepcion = self.Traceback = ""
        self.params_out['certificados'] = []
        for cert in array:
            self.params_out['certificados'].append(dict(
                coe=cert['coe'],
                tipo_certificado=cert['tipoCertificado'],
                campania=cert['campania'],
                cuit_depositante=cert['cuitDepositante'],
                cuit_depositario=cert['cuitDepositario'],
                nro_planta=cert['nroPlanta'],
                kilos_disponibles=cert['kilosDisponibles'],
                cod_grano=cert['codGrano'],
            ))
        return True

    @inicializar_y_capturar_excepciones
    def AutorizarCertificacion(self):
        "Authorize a Primary Grain Deposit Certification (C1116A/RT)"
        # remove the elements that do not apply because they are empty:
        for k1 in ('primaria', 'retiroTransferencia'):
            dic = self.certificacion.get(k1)
            if not dic:
                continue
            for k2 in ('ctg', 'detalleMuestraAnalisis', 'certificadoDeposito'):
                if k2 in dic and not dic[k2]:
                    del dic[k2]
        # call the webservice:
        ret = self.client.cgAutorizar(
            auth={
                'token': self.Token, 'sign': self.Sign,
                'cuit': self.Cuit, },
            **self.certificacion
        )
        # analyze the response
        ret = ret['oReturn']
        self.__analizar_errores(ret)
        self.AnalizarAutorizarCertificadoResp(ret)
        return True

    def AnalizarAutorizarCertificadoResp(self, ret):
        "Internal method to extract data from the Certification Response"
        aut = ret.get('autorizacion')
        if aut:
            self.PtoEmision = aut['ptoEmision']
            self.NroOrden = aut['nroOrden']
            self.FechaCertificacion = str(aut.get('fechaCertificacion', ""))
            self.COE = aut['coe']
            self.Estado = aut['estado']
            # update output parameters:
            self.params_out['coe'] = self.COE
            self.params_out['estado'] = self.Estado
            self.params_out['nro_orden'] = self.NroOrden
            self.params_out['fecha_certificacion'] = self.FechaCertificacion.replace("-", "")
            if "planta" in aut:
                p = aut.get("planta")
                self.params_out['nro_planta'] = p.get("nroPlanta")
                self.params_out['cuit_titular_planta'] = p.get("cuitTitularPlanta")
                self.params_out['razon_social_titular_planta'] = p.get("razonSocialTitularPlanta")
            # other returned fields (optional)
            p = aut.get('pesosResumen', {})
            self.params_out['peso_bruto_certificado'] = p.get("pesoBrutoCertificado")
            self.params_out['peso_merma_secado'] = p.get("pesoMermaSecado")
            self.params_out['peso_merma_volatil'] = p.get("pesoMermaVolatil")
            self.params_out['peso_merma_zarandeo'] = p.get("pesoMermaZarandeo")
            self.params_out['peso_neto_certificado'] = p.get("pesoNetoCertificado")
            p = aut.get('serviciosResumen', {})
            self.params_out['importe_iva'] = p.get("importeIVA")
            self.params_out['servicio_gastos_generales'] = p.get("servicioGastosGenerales")
            self.params_out['servicio_otros'] = p.get("servicioOtros")
            self.params_out['servicio_total'] = p.get("servicioTotal")
            self.params_out['servicio_zarandeo'] = p.get("servicioZarandeo")
        # data returned depending on the certification type (queries):
        cab = ret.get('cabecera')
        if cab:
            self.params_out['pto_emision'] = cab.get('ptoEmision')
            self.params_out['nro_orden'] = cab.get('nroOrden')
            self.params_out['tipo_certificado'] = cab.get('tipoCertificado')
            self.params_out['nro_planta'] = cab.get('nroPlanta')
            self.params_out['nro_ing_bruto_depositario'] = cab.get('nroIngBrutoDepositario')
            self.params_out['titular_grano'] = cab.get('titularGrano')
            self.params_out['cuit_depositante'] = cab.get('cuitDepositante')
            self.params_out['nro_ing_bruto_depositante'] = cab.get('nroIngBrutoDepositante')
            self.params_out['cuit_corredor'] = cab.get('cuitCorredor')
            self.params_out['cod_grano'] = cab.get('codGrano')
            self.params_out['campania'] = cab.get('campania')
            self.params_out['datos_adicionales'] = cab.get('datosAdicionales')
        pri = ret.get('primaria')
        if pri:
            self.params_out['nro_act_depositario'] = pri.get('nroActDepositario')
            self.params_out['descripcion_tipo_grano'] = pri.get('descripcionTipoGrano')
            self.params_out['monto_almacenaje'] = pri.get('montoAlmacenaje')
            self.params_out['monto_acarreo'] = pri.get('montoAcarreo')
            self.params_out['monto_gastos_generales'] = pri.get('montoGastosGenerales')
            self.params_out['monto_zarandeo'] = pri.get('montoZarandeo')
            self.params_out['porcentaje_secado_de'] = pri.get('porcentajeSecadoDe')
            self.params_out['porcentaje_secado_a'] = pri.get('porcentajeSecadoA')
            self.params_out['monto_secado'] = pri.get('montoSecado')
            self.params_out['monto_por_cada_punto_exceso'] = pri.get('montoPorCadaPuntoExceso')
            self.params_out['monto_otros'] = pri.get('montoOtros')
            self.params_out['porcentaje_merma_volatil'] = pri.get('porcentajeMermaVolatil')
            self.params_out['porcentaje_merma_secado'] = pri.get('porcentajeMermaSecado')
            self.params_out['peso_neto_merma_secado'] = pri.get('pesoNetoMermaSecado')
            # NOTE(review): key/value mismatch below — 'porcentaje_merma_zarandeo'
            # is filled from 'pesoNetoMermaZarandeo' and 'peso_neto_merma_zarandeo'
            # is never set in this section; looks like a bug, confirm against the
            # WSDL before changing (callers may depend on the current output).
            self.params_out['porcentaje_merma_zarandeo'] = pri.get('pesoNetoMermaZarandeo')
            self.params_out['peso_neto_certificado'] = pri.get('pesoNetoCertificado')
            self.params_out['servicios_secado'] = pri.get('serviciosSecado')
            self.params_out['servicios_zarandeo'] = pri.get('serviciosZarandeo')
            self.params_out['servicios_otros'] = pri.get('serviciosOtros')
            self.params_out['servicios_forma_de_pago'] = pri.get('serviciosFormaDePago')
            # sub-structures:
            self.params_out['ctgs'] = []
            self.params_out['det_muestra_analisis'] = []
            for ctg in pri.get("ctg", []):
                self.params_out['ctgs'].append({
                    'nro_ctg': ctg.get('nroCTG'),
                    'nro_carta_porte': ctg.get('nroCartaDePorte'),
                    'peso_neto_confirmado_definitivo': ctg.get('pesoNetoConfirmadoDefinitivo'),
                    'porcentaje_secado_humedad': ctg.get('porcentajeSecadoHumedad'),
                    'importe_secado': ctg.get('importeSecado'),
                    'peso_neto_merma_secado': ctg.get('pesoNetoMermaSecado'),
                    'importe_zarandeo': ctg.get('importeZarandeo'),
                    'peso_neto_merma_zarandeo': ctg.get('pesoNetoMermaZarandeo'),
                    'tarifa_zarandeo': ctg.get('tarifaZarandeo'),
                })
            self.params_out['calidad'] = []
            # single 'calidad' dict wrapped in a list to reuse the loop shape
            for cal in [pri.get("calidad", {})]:
                self.params_out['calidad'].append({
                    'analisis_muestra': cal.get('analisisMuestra'),
                    'nro_boletin': cal.get('nroBoletin'),
                    'nro_act_depositario': cal.get('nroActDepositario'),
                    'cod_grado': cal.get('codGrado'),
                    'valor_grado': cal.get('valorGrado'),
                    'valor_contenido_proteico': cal.get('valorContProteico'),
                    'valor_factor': cal.get('valorFactor')
                })
                for det in cal.get("detalleMuestraAnalisis", []):
                    self.params_out['det_muestra_analisis'].append({
                        'descripcion_rubro': det.get('descripcionRubro'),
                        'tipo_rubro': det.get('tipoRubro'),
                        'porcentaje': det.get('porcentaje'),
                        'valor': det.get('valor'),
                    })
        rt = ret.get('retiroTransferencia')
        if rt:
            self.params_out['nro_act_depositario'] = rt.get('nroActDepositario')
            self.params_out['cuit_receptor'] = rt.get('cuitReceptor')
            self.params_out['nro_carta_porte_a_utilizar'] = rt.get('nroCartaPorteAUtilizar')
            # sub-structures:
            self.params_out['certificados'] = []
            cert = rt.get("certificadoDeposito")
            if cert:
                self.params_out['certificados'].append({
                    'coe_certificado_deposito': cert.get('coeCertificadoDeposito'),
                    'peso_neto': cert.get('pesoNeto'),
                })
        pre = ret.get('preexistente')
        if pre:
            self.params_out['nro_planta'] = pre.get('nroPlanta')
            self.params_out['tipo_certificado_deposito_preexistente'] = pre.get('tipoCertificadoDepositoPreexistente')
            self.params_out['nro_certificado_deposito_preexistente'] = pre.get('nroCertificadoDepositoPreexistente')
            self.params_out['cac_certificado_deposito_preexistente'] = pre.get('cacCertificadoDepositoPreexistente')
            self.params_out['fecha_emision_certificado_deposito_preexistente'] = pre.get('fechaEmisionCertificadoDepositoPreexistente')
            self.params_out['peso_neto'] = pre.get('pesoNeto')

    @inicializar_y_capturar_excepciones
    def InformarCalidadCertificacion(self, coe):
        "Report the quality of a certificate (C1116A/RT)"
        # call the webservice:
        # (requires AgregarCalidad to have populated the 'calidad' structure)
        ret = self.client.cgInformarCalidad(
            auth={
                'token': self.Token, 'sign': self.Sign,
                'cuit': self.Cuit, },
            coe=coe,
            calidad=self.certificacion['primaria']['calidad'],
        )
        # analyze the response
        ret = ret['oReturn']
        self.__analizar_errores(ret)
        self.AnalizarAutorizarCertificadoResp(ret)
        return True

    @inicializar_y_capturar_excepciones
    def AnularCertificacion(self, coe):
        "Cancel an active liquidation (request certificate annulment)"
        ret = self.client.cgSolicitarAnulacion(
            auth={
                'token': self.Token, 'sign': self.Sign,
                'cuit': self.Cuit, },
            coe=coe,
        )
        ret = ret['oReturn']
        self.__analizar_errores(ret)
        self.Estado = ret.get('estadoCertificado', "")
        # NOTE(review): self.COE is not updated by this call, so the value
        # returned here is whatever a previous operation left — confirm intended.
        return self.COE

    @inicializar_y_capturar_excepciones
    def AsociarLiquidacionAContrato(self, coe=None, nro_contrato=None,
                                    cuit_comprador=None,
                                    cuit_vendedor=None,
                                    cuit_corredor=None,
                                    cod_grano=None,
                                    **kwargs):
        "Associate a Liquidation with a contract"
        ret = self.client.asociarLiquidacionAContrato(
            auth={
                'token': self.Token, 'sign': self.Sign,
                'cuit': self.Cuit, },
            coe=coe,
            nroContrato=nro_contrato,
            cuitComprador=cuit_comprador,
            cuitVendedor=cuit_vendedor,
            cuitCorredor=cuit_corredor,
            codGrano=cod_grano,
        )
        ret = ret['liquidacion']
        self.__analizar_errores(ret)
        if 'liquidacion' in ret:
            # analyze the response
            liq = ret['liquidacion']
            aut = ret['autorizacion']
            self.AnalizarLiquidacion(aut, liq)
        return True

    @inicializar_y_capturar_excepciones
    def ConsultarLiquidacionesPorContrato(self, nro_contrato=None,
                                          cuit_comprador=None,
                                          cuit_vendedor=None,
                                          cuit_corredor=None,
                                          cod_grano=None,
                                          **kwargs):
        "Get the COEs of the liquidations related to a contract"
        ret = self.client.liquidacionPorContratoConsultar(
            auth={
                'token': self.Token, 'sign': self.Sign,
                'cuit': self.Cuit, },
            nroContrato=nro_contrato,
            cuitComprador=cuit_comprador,
            cuitVendedor=cuit_vendedor,
            cuitCorredor=cuit_corredor,
            codGrano=cod_grano,
        )
        ret = ret['liqPorContratoCons']
        self.__analizar_errores(ret)
        if 'coeRelacionados' in ret:
            # analyze the response = [{'coe': "...."}]
            self.DatosLiquidacion = sorted(ret['coeRelacionados'])
            # set the first COE
            self.LeerDatosLiquidacion()
        return True

    @inicializar_y_capturar_excepciones
    def ConsultarLiquidacion(self, pto_emision=None, nro_orden=None, coe=None,
                             pdf=None):
        "Query a liquidation by order number (or COE)"
        if coe:
            ret = self.client.liquidacionXCoeConsultar(
                auth={
                    'token': self.Token, 'sign': self.Sign,
                    'cuit': self.Cuit, },
                coe=coe,
                pdf='S' if pdf else 'N',
            )
        else:
            ret = self.client.liquidacionXNroOrdenConsultar(
                auth={
                    'token': self.Token, 'sign': self.Sign,
                    'cuit': self.Cuit, },
                ptoEmision=pto_emision,
                nroOrden=nro_orden,
            )
        ret = ret['liqConsReturn']
        self.__analizar_errores(ret)
        if 'liquidacion' in ret:
            aut = ret['autorizacion']
            liq = ret['liquidacion']
            self.AnalizarLiquidacion(aut, liq)
        # save the PDF if a filename was given and it came in the response:
        # (file handle is not explicitly closed; relies on CPython refcounting)
        if pdf and 'pdf' in ret:
            open(pdf, "wb").write(ret['pdf'])
        return True

    @inicializar_y_capturar_excepciones
    def ConsultarLiquidacionSecundaria(self, pto_emision=None, nro_orden=None,
                                       coe=None, pdf=None):
        "Query a secondary liquidation by order number or COE"
        if coe:
            ret = self.client.lsgConsultarXCoe(
                auth={
                    'token': self.Token, 'sign': self.Sign,
                    'cuit': self.Cuit, },
                coe=coe,
                pdf='S' if pdf else 'N',
            )
        else:
            ret = self.client.lsgConsultarXNroOrden(
                auth={
                    'token': self.Token, 'sign': self.Sign,
                    'cuit': self.Cuit, },
                ptoEmision=pto_emision,
                nroOrden=nro_orden,
            )
        ret = ret['oReturn']
        self.__analizar_errores(ret)
        for it in ret['liquidaciones']:
            aut = it['autorizacion']
            # NOTE(review): 'liq' would be unbound if neither key is present —
            # assumes AFIP always returns 'liquidacion' or 'ajuste'; confirm.
            if 'liquidacion' in it:
                liq = it['liquidacion']
            elif 'ajuste' in it:
                liq = it['ajuste']
            self.AnalizarLiquidacion(aut, liq)
        # save the PDF if a filename was given and it came in the response:
        if pdf and 'pdf' in ret:
            open(pdf, "wb").write(ret['pdf'])
        return True

    @inicializar_y_capturar_excepciones
    def ConsultarLiquidacionesSecundariasPorContrato(self, nro_contrato=None,
                                                     cuit_comprador=None,
                                                     cuit_vendedor=None,
                                                     cuit_corredor=None,
                                                     cod_grano=None,
                                                     **kwargs):
"Obtener los COE de liquidaciones relacionadas a un contrato" ret = self.client.lsgConsultarXContrato( auth={ 'token': self.Token, 'sign': self.Sign, 'cuit': self.Cuit, }, nroContrato=nro_contrato, cuitComprador=cuit_comprador, cuitVendedor=cuit_vendedor, cuitCorredor=cuit_corredor, codGrano=cod_grano, ) ret = ret['liqPorContratoCons'] self.__analizar_errores(ret) if 'coeRelacionados' in ret: # analizo la respuesta = [{'coe': "...."}] self.DatosLiquidacion = sorted(ret['coeRelacionados']) # establezco el primer COE self.LeerDatosLiquidacion() return True @inicializar_y_capturar_excepciones def AsociarLiquidacionSecundariaAContrato(self, coe=None, nro_contrato=None, cuit_comprador=None, cuit_vendedor=None, cuit_corredor=None, cod_grano=None, **kwargs): "Asociar una Liquidación a un contrato" ret = self.client.lsgAsociarAContrato( auth={ 'token': self.Token, 'sign': self.Sign, 'cuit': self.Cuit, }, coe=coe, nroContrato=nro_contrato, cuitComprador=cuit_comprador, cuitVendedor=cuit_vendedor, cuitCorredor=cuit_corredor, codGrano=cod_grano, ) ret = ret['oReturn'] self.__analizar_errores(ret) if 'liquidacion' in ret: # analizo la respusta liq = ret['liquidacion'] aut = ret['autorizacion'] self.AnalizarLiquidacion(aut, liq) return True @inicializar_y_capturar_excepciones def ConsultarCertificacion(self, pto_emision=None, nro_orden=None, coe=None, pdf=None): "Consulta una certificacion por No de orden o COE" if coe: ret = self.client.cgConsultarXCoe( auth={ 'token': self.Token, 'sign': self.Sign, 'cuit': self.Cuit, }, coe=coe, pdf='S' if pdf else 'N', ) else: ret = self.client.cgConsultarXNroOrden( auth={ 'token': self.Token, 'sign': self.Sign, 'cuit': self.Cuit, }, ptoEmision=pto_emision, nroOrden=nro_orden, ) ret = ret['oReturn'] self.__analizar_errores(ret) if 'autorizacion' in ret: self.AnalizarAutorizarCertificadoResp(ret) # guardo el PDF si se indico archivo y vino en la respuesta: if pdf and 'pdf' in ret: open(pdf, "wb").write(ret['pdf']) return True 
@inicializar_y_capturar_excepciones def ConsultarAjuste(self, pto_emision=None, nro_orden=None, nro_contrato=None, coe=None, pdf=None): "Consulta un ajuste de liquidación por No de orden o numero de contrato" if nro_contrato: ret = self.client.ajustePorContratoConsultar( auth={ 'token': self.Token, 'sign': self.Sign, 'cuit': self.Cuit, }, nroContrato=nro_contrato, ) ret = ret['ajusteContratoReturn'] elif coe is None or pdf is None: ret = self.client.ajusteXNroOrdenConsultar( auth={ 'token': self.Token, 'sign': self.Sign, 'cuit': self.Cuit, }, ptoEmision=pto_emision, nroOrden=nro_orden, pdf='S' if pdf else 'N', ) ret = ret['ajusteXNroOrdenConsReturn'] else: ret = self.client.ajusteXCoeConsultar( auth={ 'token': self.Token, 'sign': self.Sign, 'cuit': self.Cuit, }, coe=coe, pdf='S' if pdf else 'N', ) ret = ret['ajusteConsReturn'] self.__analizar_errores(ret) if 'ajusteUnificado' in ret: aut = ret['ajusteUnificado'] self.AnalizarAjuste(aut) # guardo el PDF si se indico archivo y vino en la respuesta: if pdf and 'pdf' in ret: open(pdf, "wb").write(ret['pdf']) return True @inicializar_y_capturar_excepciones def ConsultarUltNroOrden(self, pto_emision=1): "Consulta el último No de orden registrado" ret = self.client.liquidacionUltimoNroOrdenConsultar( auth={ 'token': self.Token, 'sign': self.Sign, 'cuit': self.Cuit, }, ptoEmision=pto_emision, ) ret = ret['liqUltNroOrdenReturn'] self.__analizar_errores(ret) self.NroOrden = ret['nroOrden'] return True @inicializar_y_capturar_excepciones def ConsultarLiquidacionSecundariaUltNroOrden(self, pto_emision=1): "Consulta el último No de orden registrado para LSG" ret = self.client.lsgConsultarUltimoNroOrden( auth={ 'token': self.Token, 'sign': self.Sign, 'cuit': self.Cuit, }, ptoEmision=pto_emision, ) ret = ret['liqUltNroOrdenReturn'] self.__analizar_errores(ret) self.NroOrden = ret['nroOrden'] return True @inicializar_y_capturar_excepciones def ConsultarCertificacionUltNroOrden(self, pto_emision=1): "Consulta el último No de orden 
registrado para CG" ret = self.client.cgConsultarUltimoNroOrden( auth={ 'token': self.Token, 'sign': self.Sign, 'cuit': self.Cuit, }, ptoEmision=pto_emision, ) ret = ret['liqUltNroOrdenReturn'] self.__analizar_errores(ret) self.NroOrden = ret['nroOrden'] return True @inicializar_y_capturar_excepciones def LeerDatosLiquidacion(self, pop=True): "Recorro los datos devueltos y devuelvo el primero si existe" if self.DatosLiquidacion: # extraigo el primer item if pop: datos_liq = self.DatosLiquidacion.pop(0) else: datos_liq = self.DatosLiquidacion[0] self.COE = str(datos_liq['coe']) self.Estado = unicode(datos_liq.get('estado', "")) return self.COE else: return "" @inicializar_y_capturar_excepciones def AnularLiquidacion(self, coe): "Anular liquidación activa" ret = self.client.liquidacionAnular( auth={ 'token': self.Token, 'sign': self.Sign, 'cuit': self.Cuit, }, coe=coe, ) ret = ret['anulacionReturn'] self.__analizar_errores(ret) self.Resultado = ret['resultado'] return self.COE @inicializar_y_capturar_excepciones def AnularLiquidacionSecundaria(self, coe): "Anular liquidación secundaria activa" ret = self.client.lsgAnular( auth={ 'token': self.Token, 'sign': self.Sign, 'cuit': self.Cuit, }, coe=coe, ) ret = ret['anulacionReturn'] self.__analizar_errores(ret) self.Resultado = ret['resultado'] return self.COE def ConsultarCampanias(self, sep="||"): ret = self.client.campaniasConsultar( auth={ 'token': self.Token, 'sign': self.Sign, 'cuit': self.Cuit, }, )['campaniaReturn'] self.__analizar_errores(ret) array = ret.get('campanias', []) return [("%s %%s %s %%s %s" % (sep, sep, sep)) % (it['codigoDescripcion']['codigo'], it['codigoDescripcion']['descripcion']) for it in array] def ConsultarTipoGrano(self, sep="||"): ret = self.client.tipoGranoConsultar( auth={ 'token': self.Token, 'sign': self.Sign, 'cuit': self.Cuit, }, )['tipoGranoReturn'] self.__analizar_errores(ret) array = ret.get('granos', []) if sep is None: return dict([(it['codigoDescripcion']['codigo'], 
it['codigoDescripcion']['descripcion']) for it in array]) else: return [("%s %%s %s %%s %s" % (sep, sep, sep)) % (it['codigoDescripcion']['codigo'], it['codigoDescripcion']['descripcion']) for it in array] def ConsultarCodigoGradoReferencia(self, sep="||"): "Consulta de Grados según Grano." ret = self.client.codigoGradoReferenciaConsultar( auth={ 'token': self.Token, 'sign': self.Sign, 'cuit': self.Cuit, }, )['gradoRefReturn'] self.__analizar_errores(ret) array = ret.get('gradosRef', []) if sep is None: return dict([(it['codigoDescripcion']['codigo'], it['codigoDescripcion']['descripcion']) for it in array]) else: return [("%s %%s %s %%s %s" % (sep, sep, sep)) % (it['codigoDescripcion']['codigo'], it['codigoDescripcion']['descripcion']) for it in array] def ConsultarGradoEntregadoXTipoGrano(self, cod_grano, sep="||"): "Consulta de Grado y Valor según Grano Entregado." ret = self.client.codigoGradoEntregadoXTipoGranoConsultar( auth={ 'token': self.Token, 'sign': self.Sign, 'cuit': self.Cuit, }, codGrano=cod_grano, )['gradoEntReturn'] self.__analizar_errores(ret) array = ret.get('gradoEnt', []) if sep is None: return dict([(it['gradoEnt']['codigoDescripcion']['codigo'], it['gradoEnt']['valor']) for it in array]) else: return [("%s %%s %s %%s %s %%s %s" % (sep, sep, sep, sep)) % (it['gradoEnt']['codigoDescripcion']['codigo'], it['gradoEnt']['codigoDescripcion']['descripcion'], it['gradoEnt']['valor'], ) for it in array] def ConsultarTipoCertificadoDeposito(self, sep="||"): "Consulta de tipos de Certificados de Depósito" ret = self.client.tipoCertificadoDepositoConsultar( auth={ 'token': self.Token, 'sign': self.Sign, 'cuit': self.Cuit, }, )['tipoCertDepReturn'] self.__analizar_errores(ret) array = ret.get('tiposCertDep', []) return [("%s %%s %s %%s %s" % (sep, sep, sep)) % (it['codigoDescripcion']['codigo'], it['codigoDescripcion']['descripcion']) for it in array] def ConsultarTipoDeduccion(self, sep="||"): "Consulta de tipos de Deducciones" ret = 
self.client.tipoDeduccionConsultar( auth={ 'token': self.Token, 'sign': self.Sign, 'cuit': self.Cuit, }, )['tipoDeduccionReturn'] self.__analizar_errores(ret) array = ret.get('tiposDeduccion', []) return [("%s %%s %s %%s %s" % (sep, sep, sep)) % (it['codigoDescripcion']['codigo'], it['codigoDescripcion']['descripcion']) for it in array] def ConsultarTipoRetencion(self, sep="||"): "Consulta de tipos de Retenciones." ret = self.client.tipoRetencionConsultar( auth={ 'token': self.Token, 'sign': self.Sign, 'cuit': self.Cuit, }, )['tipoRetencionReturn'] self.__analizar_errores(ret) array = ret.get('tiposRetencion', []) return [("%s %%s %s %%s %s" % (sep, sep, sep)) % (it['codigoDescripcion']['codigo'], it['codigoDescripcion']['descripcion']) for it in array] def ConsultarPuerto(self, sep="||"): "Consulta de Puertos habilitados" ret = self.client.puertoConsultar( auth={ 'token': self.Token, 'sign': self.Sign, 'cuit': self.Cuit, }, )['puertoReturn'] self.__analizar_errores(ret) array = ret.get('puertos', []) return [("%s %%s %s %%s %s" % (sep, sep, sep)) % (it['codigoDescripcion']['codigo'], it['codigoDescripcion']['descripcion']) for it in array] def ConsultarTipoActividad(self, sep="||"): "Consulta de Tipos de Actividad." ret = self.client.tipoActividadConsultar( auth={ 'token': self.Token, 'sign': self.Sign, 'cuit': self.Cuit, }, )['tipoActividadReturn'] self.__analizar_errores(ret) array = ret.get('tiposActividad', []) return [("%s %%s %s %%s %s" % (sep, sep, sep)) % (it['codigoDescripcion']['codigo'], it['codigoDescripcion']['descripcion']) for it in array] def ConsultarTipoActividadRepresentado(self, sep="||"): "Consulta de Tipos de Actividad inscripta en el RUOCA." 
try: ret = self.client.tipoActividadRepresentadoConsultar( auth={ 'token': self.Token, 'sign': self.Sign, 'cuit': self.Cuit, }, )['tipoActividadReturn'] self.__analizar_errores(ret) array = ret.get('tiposActividad', []) self.Excepcion = self.Traceback = "" return [("%s %%s %s %%s %s" % (sep, sep, sep)) % (it['codigoDescripcion']['codigo'], it['codigoDescripcion']['descripcion']) for it in array] except Exception: ex = utils.exception_info() self.Excepcion = ex['msg'] self.Traceback = ex['tb'] if sep: return ["ERROR"] def ConsultarProvincias(self, sep="||"): "Consulta las provincias habilitadas" ret = self.client.provinciasConsultar( auth={ 'token': self.Token, 'sign': self.Sign, 'cuit': self.Cuit, }, )['provinciasReturn'] self.__analizar_errores(ret) array = ret.get('provincias', []) if sep is None: return dict([(int(it['codigoDescripcion']['codigo']), it['codigoDescripcion']['descripcion']) for it in array]) else: return [("%s %%s %s %%s %s" % (sep, sep, sep)) % (it['codigoDescripcion']['codigo'], it['codigoDescripcion']['descripcion']) for it in array] def ConsultarLocalidadesPorProvincia(self, codigo_provincia, sep="||"): ret = self.client.localidadXProvinciaConsultar( auth={ 'token': self.Token, 'sign': self.Sign, 'cuit': self.Cuit, }, codProvincia=codigo_provincia, )['localidadesReturn'] self.__analizar_errores(ret) array = ret.get('localidades', []) if sep is None: return dict([(str(it['codigoDescripcion']['codigo']), it['codigoDescripcion']['descripcion']) for it in array]) else: return [("%s %%s %s %%s %s" % (sep, sep, sep)) % (it['codigoDescripcion']['codigo'], it['codigoDescripcion']['descripcion']) for it in array] def BuscarLocalidades(self, cod_prov, cod_localidad=None, consultar=True): "Devuelve la localidad o la consulta en AFIP (uso interno)" # si no se especifíca cod_localidad, es util para reconstruir la cache import wslpg_datos as datos if not str(cod_localidad) in datos.LOCALIDADES and consultar: d = 
self.ConsultarLocalidadesPorProvincia(cod_prov, sep=None) try: # actualizar el diccionario persistente (shelve) datos.LOCALIDADES.update(d) except Exception, e: print "EXCEPCION CAPTURADA", e # capturo errores por permisos (o por concurrencia) datos.LOCALIDADES = d return datos.LOCALIDADES.get(str(cod_localidad), "") def ConsultarTiposOperacion(self, sep="||"): "Consulta tipo de Operación por Actividad." ops = [] ret = self.client.tipoActividadConsultar( auth={ 'token': self.Token, 'sign': self.Sign, 'cuit': self.Cuit, }, )['tipoActividadReturn'] self.__analizar_errores(ret) for it_act in ret.get('tiposActividad', []): ret = self.client.tipoOperacionXActividadConsultar( auth={ 'token': self.Token, 'sign': self.Sign, 'cuit': self.Cuit, }, nroActLiquida=it_act['codigoDescripcion']['codigo'], )['tipoOperacionReturn'] self.__analizar_errores(ret) array = ret.get('tiposOperacion', []) if sep: ops.extend([("%s %%s %s %%s %s %%s %s" % (sep, sep, sep, sep)) % (it_act['codigoDescripcion']['codigo'], it['codigoDescripcion']['codigo'], it['codigoDescripcion']['descripcion']) for it in array]) else: ops.extend([(it_act['codigoDescripcion']['codigo'], it['codigoDescripcion']['codigo'], it['codigoDescripcion']['descripcion']) for it in array]) return ops # Funciones para generar PDF: def CargarFormatoPDF(self, archivo="liquidacion_form_c1116b_wslpg.csv"): "Cargo el formato de campos a generar desde una planilla CSV" # si no encuentro archivo, lo busco en el directorio predeterminado: if not os.path.exists(archivo): archivo = os.path.join(self.InstallDir, "plantillas", os.path.basename(archivo)) if DEBUG: print "abriendo archivo ", archivo # inicializo la lista de los elementos: self.elements = [] for lno, linea in enumerate(open(archivo.encode('latin1')).readlines()): if DEBUG: print "procesando linea ", lno, linea args = [] for i,v in enumerate(linea.split(";")): if not v.startswith("'"): v = v.replace(",",".") else: v = v#.decode('latin1') if v.strip()=='': v = None else: v = 
eval(v.strip()) args.append(v) # corrijo path relativo para las imágenes: if args[1] == 'I': if not os.path.exists(args[14]): args[14] = os.path.join(self.InstallDir, "plantillas", os.path.basename(args[14])) if DEBUG: print "NUEVO PATH:", args[14] self.AgregarCampoPDF(*args) self.AgregarCampoPDF("anulado", 'T', 150, 250, 0, 0, size=70, rotate=45, foreground=0x808080, priority=-1) if HOMO: self.AgregarCampoPDF("homo", 'T', 100, 250, 0, 0, size=70, rotate=45, foreground=0x808080, priority=-1) # cargo los elementos en la plantilla self.template.load_elements(self.elements) return True def AgregarCampoPDF(self, nombre, tipo, x1, y1, x2, y2, font="Arial", size=12, bold=False, italic=False, underline=False, foreground= 0x000000, background=0xFFFFFF, align="L", text="", priority=0, **kwargs): "Agrego un campo a la plantilla" # convierto colores de string (en hexadecimal) if isinstance(foreground, basestring): foreground = int(foreground, 16) if isinstance(background, basestring): background = int(background, 16) if isinstance(text, unicode): text = text.encode("latin1") field = { 'name': nombre, 'type': tipo, 'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2, 'font': font, 'size': size, 'bold': bold, 'italic': italic, 'underline': underline, 'foreground': foreground, 'background': background, 'align': align, 'text': text, 'priority': priority} field.update(kwargs) self.elements.append(field) return True def CrearPlantillaPDF(self, papel="A4", orientacion="portrait"): "Iniciar la creación del archivo PDF" # genero el renderizador con propiedades del PDF t = Template( format=papel, orientation=orientacion, title="F 1116 B/C %s" % (self.NroOrden), author="CUIT %s" % self.Cuit, subject="COE %s" % self.params_out.get('coe'), keywords="AFIP Liquidacion Electronica Primaria de Granos", creator='wslpg.py %s (http://www.PyAfipWs.com.ar)' % __version__,) self.template = t return True def AgregarDatoPDF(self, campo, valor, pagina='T'): "Agrego un dato a la factura (internamente)" # corrijo 
path relativo para las imágenes (compatibilidad hacia atrás): if campo == 'fondo' and valor.startswith(self.InstallDir): if not os.path.exists(valor): valor = os.path.join(self.InstallDir, "plantillas", os.path.basename(valor)) if DEBUG: print "NUEVO PATH:", valor self.datos[campo] = valor return True def ProcesarPlantillaPDF(self, num_copias=1, lineas_max=24, qty_pos='izq', clave=''): "Generar el PDF según la factura creada y plantilla cargada" try: f = self.template liq = self.params_out # actualizo los campos según la clave (ajuste debitos / creditos) if clave and clave in liq: liq = liq.copy() liq.update(liq[clave]) # unificar con AnalizarAjusteCredito/Debito if HOMO: self.AgregarDatoPDF("homo", u"HOMOLOGACIÓN") copias = {1: 'Original', 2: 'Duplicado', 3: 'Triplicado', 4: 'Cuadruplicado', 5: 'Quintuplicado'} # convierto el formato de intercambio para representar los valores: fmt_encabezado = dict([(v[0], v[1:]) for v in ENCABEZADO]) fmt_deduccion = dict([(v[0], v[1:]) for v in DEDUCCION]) fmt_retencion = dict([(v[0], v[1:]) for v in RETENCION]) def formatear(campo, valor, formato): "Convertir el valor a una cadena correctamente s/ formato ($ % ...)" if campo in formato and v is not None: fmt = formato[campo] if fmt[1] == N: if 'cuit' in campo: c = str(valor) if len(c) == 11: valor = "%s-%s-%s" % (c[0:2], c[2:10], c[10:]) else: valor = "" elif 'peso' in campo: valor = "%s Kg" % valor elif valor is not None and valor != "": valor = "%d" % int(valor) else: valor = "" elif fmt[1] == I: valor = ("%%0.%df" % fmt[2]) % valor if 'alic' in campo or 'comision' in campo: valor = valor + " %" elif 'factor' in campo or 'cont' in campo or 'cant' in campo: pass else: valor = "$ " + valor elif 'fecha' in campo: d = valor if isinstance(d, (datetime.date, datetime.datetime)): valor = d.strftime("%d/%m/%Y") else: valor = "%s/%s/%s" % (d[8:10], d[5:7], d[0:4]) return valor def buscar_localidad_provincia(cod_prov, cod_localidad): "obtener la descripción de la provincia/localidad 
(usar cache)" cod_prov = int(cod_prov) cod_localidad = str(cod_localidad) provincia = datos.PROVINCIAS[cod_prov] localidad = self.BuscarLocalidades(cod_prov, cod_localidad) return localidad, provincia # divido los datos adicionales (debe haber renglones 1 al 9): if liq.get('datos_adicionales') and f.has_key('datos_adicionales1'): d = liq.get('datos_adicionales') for i, ds in enumerate(f.split_multicell(d, 'datos_adicionales1')): liq['datos_adicionales%s' % (i + 1)] = ds for copia in range(1, num_copias+1): # completo campos y hojas f.add_page() f.set('copia', copias.get(copia, "Adicional %s" % copia)) f.set('anulado', {'AC': '', '': 'SIN ESTADO', 'AN': "ANULADO"}.get(liq['estado'], "ERROR")) try: cod_tipo_ajuste = int(liq["cod_tipo_ajuste"] or '0') except: cod_tipo_ajuste = None f.set('tipo_ajuste', {3: u'Liquidación de Débito', 4: u'Liquidación de Crédito', }.get(cod_tipo_ajuste, '')) # limpio datos del corredor si no corresponden: if liq.get('actua_corredor', 'N') == 'N': if liq.get('cuit_corredor', None) == 0: del liq['cuit_corredor'] # establezco campos según tabla encabezado: for k,v in liq.items(): v = formatear(k, v, fmt_encabezado) if isinstance(v, (basestring, int, long, float)): f.set(k, v) elif isinstance(v, decimal.Decimal): f.set(k, str(v)) elif isinstance(v, datetime.datetime): f.set(k, str(v)) import wslpg_datos as datos campania = int(liq.get('campania_ppal') or 0) f.set("campania_ppal", datos.CAMPANIAS.get(campania, campania)) f.set("tipo_operacion", datos.TIPOS_OP.get(int(liq.get('cod_tipo_operacion') or 0), "")) f.set("actividad", datos.ACTIVIDADES.get(int(liq.get('nro_act_comprador') or 0), "")) if 'cod_grano' in liq and liq['cod_grano']: cod_grano = int(liq['cod_grano']) else: cod_grano = int(self.datos.get('cod_grano') or 0) f.set("grano", datos.GRANOS.get(cod_grano, "")) cod_puerto =
codeparrot/github-code-clean
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for variable store.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import gc import threading import numpy from tensorflow.python.eager import context from tensorflow.python.eager import function from tensorflow.python.eager import wrap_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.layers import core as core_layers from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables as variables_lib from tensorflow.python.platform import test from tensorflow.python.util import compat from tensorflow.python.util import tf_inspect def run_inside_wrap_function_in_eager_mode(graph_function): """Decorator to execute the same graph code in eager and graph modes. 
In graph mode, we just execute the graph_function passed as argument. In eager mode, we wrap the function using wrap_function and then execute the wrapped result. Args: graph_function: python function containing graph code to be wrapped Returns: decorated function """ def wrap_and_execute(self): if context.executing_eagerly(): wrapped = wrap_function.wrap_function(graph_function, [self]) # use the wrapped graph function wrapped() else: # use the original function graph_function(self) return wrap_and_execute class VariableScopeTest(test.TestCase): def tearDown(self): gc.collect() # This will only contain uncollectable garbage, i.e. reference cycles # involving objects with __del__ defined. self.assertEqual(0, len(gc.garbage)) @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testGetVar(self): vs = variable_scope._get_default_variable_store() v = vs.get_variable("v", [1]) v1 = vs.get_variable("v", [1]) self.assertIs(v, v1) @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testResource(self): vs = variable_scope._get_default_variable_store() v1 = vs.get_variable("v", [1], use_resource=True) self.assertTrue(isinstance(v1, resource_variable_ops.ResourceVariable)) @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testNameExists(self): vs = variable_scope._get_default_variable_store() # No check by default, so we can both create and get existing names. v = vs.get_variable("v", [1]) v1 = vs.get_variable("v", [1]) self.assertIs(v, v1) # When reuse is False, we fail when variables are already there. vs.get_variable("w", [1], reuse=False) # That's ok. with self.assertRaises(ValueError): vs.get_variable("v", [1], reuse=False) # That fails. # When reuse is True, we fail when variables are new. vs.get_variable("v", [1], reuse=True) # That's ok. with self.assertRaises(ValueError): vs.get_variable("u", [1], reuse=True) # That fails. 
@test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testNamelessStore(self): vs = variable_scope._get_default_variable_store() vs.get_variable("v1", [2]) vs.get_variable("v2", [2]) expected_names = ["%s:0" % name for name in ["v1", "v2"]] self.assertEqual( set(expected_names), set(v.name for v in vs._vars.values())) # TODO(mihaimaruseac): Not converted to use wrap_function because of # TypeError: Expected tf.group() expected Tensor arguments not 'None' with # type '<type 'NoneType'>' @test_util.run_in_graph_and_eager_modes def testVarScopeInitializer(self): init = init_ops.constant_initializer(0.3) with variable_scope.variable_scope("tower0") as tower: with variable_scope.variable_scope("foo", initializer=init): v = variable_scope.get_variable("v", []) self.evaluate(variables_lib.variables_initializer([v])) self.assertAllClose(self.evaluate(v.value()), 0.3) with variable_scope.variable_scope(tower, initializer=init): w = variable_scope.get_variable("w", []) self.evaluate(variables_lib.variables_initializer([w])) self.assertAllClose(self.evaluate(w.value()), 0.3) @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testVarScopeConstraint(self): constraint = lambda x: 0. 
* x with variable_scope.variable_scope("tower1") as tower: with variable_scope.variable_scope("foo", constraint=constraint): v = variable_scope.get_variable("v", []) self.assertEqual(v.constraint, constraint) with variable_scope.variable_scope(tower, constraint=constraint): w = variable_scope.get_variable("w", []) self.assertEqual(w.constraint, constraint) @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testVarScopeNestingError(self): with variable_scope.variable_scope("aa"): scope = variable_scope.variable_scope("bb") scope.__enter__() with variable_scope.variable_scope("cc"): with self.assertRaises(RuntimeError): scope.__exit__(None, None, None) scope.__exit__(None, None, None) # TODO(mihaimaruseac): Not converted to use wrap_function because of # TypeError: Fetch argument <tf.Variable 'string:0' shape=() dtype=string> # has invalid type <class '...ResourceVariable'>, must be a string or Tensor. # (Can not convert a ResourceVariable into a Tensor or Operation.) 
@test_util.run_deprecated_v1 def testStringDefaultInitializer(self): with self.cached_session(): v = variable_scope.get_variable("string", shape=[], dtype=dtypes.string) variables_lib.global_variables_initializer().run() self.assertAllEqual(compat.as_bytes(self.evaluate(v)), b"") @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testVarScopeDType(self): with variable_scope.variable_scope("tower2") as tower: with variable_scope.variable_scope("foo", dtype=dtypes.float16): v = variable_scope.get_variable("v", []) self.assertEqual(v.dtype.base_dtype, dtypes.float16) with variable_scope.variable_scope(tower, dtype=dtypes.float16): w = variable_scope.get_variable("w", []) self.assertEqual(w.dtype.base_dtype, dtypes.float16) def testGetVariableInGraphNestedUnderEagerContext(self): with context.eager_mode(): @function.defun def f(): v = variable_scope.get_variable("should_be_resource", []) self.assertEqual(type(v), resource_variable_ops.ResourceVariable) f() def testEagerVariableStore(self): with context.eager_mode(): store = variable_scope.EagerVariableStore() with store.as_default(): v = variable_scope.get_variable("v", shape=(), trainable=True) w = variable_scope.get_variable("w", shape=(), trainable=False) self.assertTrue(v in store.variables()) self.assertTrue(w in store.variables()) self.assertTrue(v in store.trainable_variables()) self.assertFalse(w in store.trainable_variables()) self.assertFalse(v in store.non_trainable_variables()) self.assertTrue(w in store.non_trainable_variables()) # Test copying. 
new_store = store.copy() with new_store.as_default(): new_v = variable_scope.get_variable("v") new_w = variable_scope.get_variable("w") self.assertEqual(new_v.numpy(), v.numpy()) self.assertEqual(new_w.numpy(), w.numpy()) self.assertTrue(new_v in new_store.variables()) self.assertTrue(new_w in new_store.variables()) self.assertTrue(new_v in new_store.trainable_variables()) self.assertFalse(new_w in new_store.trainable_variables()) self.assertFalse(new_v in new_store.non_trainable_variables()) self.assertTrue(new_w in new_store.non_trainable_variables()) # Check that variables are separate instances. for v in store.variables(): v.assign(-1) for v in new_store.variables(): v.assign(1) for v in store.variables(): self.assertEqual(v.numpy(), -1) for v in new_store.variables(): self.assertEqual(v.numpy(), 1) def testEagerVariableStoreWithEagerDefun(self): with context.eager_mode(): @function.defun def f(): x = constant_op.constant([[2.0]]) d1 = core_layers.Dense( 1, name="my_dense", kernel_initializer=init_ops.ones_initializer()) _ = d1(x) # create variables self.assertEqual(len(d1.variables), 2) v1, v2 = d1.variables d2 = core_layers.Dense( 1, name="my_dense", kernel_initializer=init_ops.ones_initializer(), _reuse=True) _ = d2(x) self.assertEqual(len(d2.variables), 2) v3, v4 = d2.variables self.assertIs(v1, v3) self.assertIs(v2, v4) f() # TODO(mihaimaruseac): Not converted to use wrap_function because of # obtaining different results in the eager case compared to the graph one @test_util.run_in_graph_and_eager_modes def testEagerVariablesStoreAddsToCollections(self): store = variable_scope.EagerVariableStore() with store.as_default(): trainable = variable_scope.get_variable("v1", [], trainable=True) not_trainable = variable_scope.get_variable("v2", [], trainable=False) concat = variable_scope.get_variable( "v3", [], collections=[ops.GraphKeys.CONCATENATED_VARIABLES]) self.assertEqual( ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES), [trainable, not_trainable]) 
self.assertEqual( ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES), [trainable, concat]) self.assertEqual( ops.get_collection(ops.GraphKeys.CONCATENATED_VARIABLES), [concat]) def testEagerVariablesOutsideStoreNotAddedToCollections(self): with context.eager_mode(): variable_scope.get_variable("v1", [], trainable=True) variable_scope.get_variable("v2", [], trainable=False) self.assertFalse(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)) self.assertFalse(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)) # TODO(mihaimaruseac): Not converted to use wrap_function because of # TypeError: Expected tf.group() expected Tensor arguments not 'None' with # type '<type 'NoneType'>'. @test_util.run_in_graph_and_eager_modes def testInitFromNonTensorValue(self): v = variable_scope.get_variable("v4", initializer=4, dtype=dtypes.int32) self.evaluate(variables_lib.variables_initializer([v])) self.assertAllClose(self.evaluate(v.value()), 4) w = variable_scope.get_variable( "w4", initializer=numpy.array([1, 2, 3]), dtype=dtypes.int64) self.evaluate(variables_lib.variables_initializer([w])) self.assertAllClose(self.evaluate(w.value()), [1, 2, 3]) # A quirk to be revisited? 
error = ValueError if context.executing_eagerly() else TypeError with self.assertRaises(error): variable_scope.get_variable("x4", initializer={}) # TODO(mihaimaruseac): Not converted to use wrap_function because of # InvalidArgumentError=: You must feed a value for placeholder tensor # 'ReadVariableOp/resource' with dtype resource @test_util.run_in_graph_and_eager_modes def testInitFromNonInitializer(self): # Test various dtypes with zeros initializer as following: types = [ dtypes.int8, dtypes.uint8, dtypes.int16, dtypes.uint16, dtypes.int32, dtypes.int64, dtypes.bool ] # Use different variable_name to distinguish various dtypes for (i, dtype) in enumerate(types): x = variable_scope.get_variable( name="xx%d" % i, shape=(3, 4), dtype=dtype) y = variable_scope.get_variable( name="yy%d" % i, shape=(3, 4), dtype=dtype, initializer=init_ops.zeros_initializer(dtype=dtype)) self.evaluate(variables_lib.global_variables_initializer()) self.assertAllEqual(self.evaluate(x.value()), self.evaluate(y.value())) # TODO(mihaimaruseac): Not converted to use wrap_function because of # InvalidArgumentError: /job:moo/replica:0/task:0/device:CPU:0 unknown device. 
  @test_util.run_deprecated_v1
  def testVarScopeCachingDevice(self):
    """A scope's caching_device is inherited by child scopes until overridden.

    Checks: a child scope inherits the caching device; an empty string or an
    identity-device callable disables caching; set_caching_device() on a live
    scope takes effect; the setting does not leak back to the parent scope.
    """
    with self.cached_session():
      caching_device = "/job:moo"
      with variable_scope.variable_scope("tower"):
        with variable_scope.variable_scope(
            "caching", caching_device=caching_device):
          v = variable_scope.get_variable("v", [])
          self.assertTrue(v.value().device.startswith(caching_device))

          # Child scope inherits the parent's caching device.
          with variable_scope.variable_scope("child"):
            v2 = variable_scope.get_variable("v", [])
            self.assertTrue(v2.value().device.startswith(caching_device))

          # Empty-string caching_device disables caching.
          with variable_scope.variable_scope("not_cached", caching_device=""):
            v2_not_cached = variable_scope.get_variable("v", [])
            self.assertFalse(
                v2_not_cached.value().device.startswith(caching_device))

          # A callable returning the op's own device also disables caching.
          with variable_scope.variable_scope(
              "not_cached_identity_device",
              caching_device=lambda op: op.device):
            v2_identity_device = variable_scope.get_variable("v", [])
            self.assertFalse(
                v2_identity_device.value().device.startswith(caching_device))

          # Setting the caching device on an already-entered scope works too.
          with variable_scope.variable_scope("we_will_do_it_live") as vs_live:
            vs_live.set_caching_device("/job:live")
            v_live = variable_scope.get_variable("v", [])
            self.assertTrue(v_live.value().device.startswith("/job:live"))

        # Back in the outer "tower" scope: no caching device applies.
        v_tower = variable_scope.get_variable("v", [])
        self.assertFalse(v_tower.value().device.startswith(caching_device))

  # TODO(mihaimaruseac): Not converted to use wrap_function because of
  # AttributeError: Tensor.name is meaningless when eager execution is enabled.
  @test_util.run_in_graph_and_eager_modes
  def testVarScopeRegularizer(self):
    """A scope regularizer adds one loss per variable; no_regularizer disables it."""
    init = init_ops.constant_initializer(0.3)

    def regularizer1(v):
      return math_ops.reduce_mean(v) + 0.1

    def regularizer2(v):
      return math_ops.reduce_mean(v) + 0.2

    with variable_scope.variable_scope(
        "tower3", regularizer=regularizer1) as tower:
      with variable_scope.variable_scope("foo", initializer=init):
        # v is 0.3 (from init), so regularizer1 yields 0.3 + 0.1 = 0.4.
        v = variable_scope.get_variable("v", [])
        self.evaluate(variables_lib.variables_initializer([v]))
        losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
        self.assertEqual(1, len(losses))
        self.assertAllClose(self.evaluate(losses[0]), 0.4)

      with variable_scope.variable_scope(tower, initializer=init) as vs:
        # u uses the inherited regularizer1 (0.4); w uses regularizer2 (0.5).
        u = variable_scope.get_variable("u", [])
        vs.set_regularizer(regularizer2)
        w = variable_scope.get_variable("w", [])
        # Next 3 variable not regularized to test disabling regularization.
        x = variable_scope.get_variable(
            "x", [], regularizer=variable_scope.no_regularizer)
        with variable_scope.variable_scope(
            "baz", regularizer=variable_scope.no_regularizer):
          y = variable_scope.get_variable("y", [])
        vs.set_regularizer(variable_scope.no_regularizer)
        z = variable_scope.get_variable("z", [])
        # Check results.
        losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
        self.assertEqual(3, len(losses))
        self.evaluate(variables_lib.variables_initializer([u, w, x, y, z]))
        self.assertAllClose(self.evaluate(losses[0]), 0.4)
        self.assertAllClose(self.evaluate(losses[1]), 0.4)
        self.assertAllClose(self.evaluate(losses[2]), 0.5)

      with variable_scope.variable_scope("foo", reuse=True):
        # reuse=True is for now only supported when eager execution is disabled.
        if not context.executing_eagerly():
          v = variable_scope.get_variable("v",
                                          [])  # "v" is already there, reused
          losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
          self.assertEqual(3, len(losses))  # No new loss added.
  # TODO(mihaimaruseac): Not converted to use wrap_function because of
  # ValueError: Tensor-typed variable initializers must either be wrapped in an
  # init_scope or callable...
  @test_util.run_in_graph_and_eager_modes
  def testInitializeFromValue(self):
    """Constant-tensor initializers fix value, shape and dtype of a variable."""
    init = constant_op.constant(0.1)
    w = variable_scope.get_variable("v", initializer=init)
    self.evaluate(variables_lib.variables_initializer([w]))
    self.assertAllClose(self.evaluate(w.value()), 0.1)

    with self.assertRaisesRegexp(ValueError, "shape"):
      # We disallow explicit shape specification when initializer is constant.
      variable_scope.get_variable("u", [1], initializer=init)

    with variable_scope.variable_scope("foo", initializer=init):
      # Constant initializer can be passed through scopes if needed.
      v = variable_scope.get_variable("v")
      self.evaluate(variables_lib.variables_initializer([v]))
      self.assertAllClose(self.evaluate(v.value()), 0.1)

    # Check that non-float32 initializer creates a non-float32 variable.
    init = constant_op.constant(1, dtype=dtypes.int32)
    t = variable_scope.get_variable("t", initializer=init)
    self.assertEqual(t.dtype.base_dtype, dtypes.int32)

    # Raise error if `initializer` dtype and `dtype` are not identical.
    with self.assertRaisesRegexp(ValueError, "don't match"):
      variable_scope.get_variable("s", initializer=init, dtype=dtypes.float64)

  # TODO(mihaimaruseac): Not converted to use wrap_function because of
  # TypeError: Fetch argument <tf.Variable 'v0:0' shape=(1,) dtype=float32> has
  # invalid type <class '...ops.resource_variable_ops.ResourceVariable'>, must
  # be a string or Tensor. (Can not convert a ResourceVariable into a Tensor or
  # Operation.)
  @test_util.run_deprecated_v1
  def testControlDeps(self):
    """A control dep on v0's value must not force v0 to be initialized first."""
    with self.cached_session() as sess:
      v0 = variable_scope.get_variable(
          "v0", [1], initializer=init_ops.constant_initializer(0))
      with ops.control_dependencies([v0.value()]):
        v1 = variable_scope.get_variable(
            "v1", [1], initializer=init_ops.constant_initializer(1))
        add = v1 + v0
      # v0 should be uninitialized.
      with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
        self.evaluate(v0)
      # We should be able to initialize and run v1 without initializing
      # v0, even if the variable was created with a control dep on v0.
      self.evaluate(v1.initializer)
      self.assertEqual(1, self.evaluate(v1))
      # v0 should still be uninitialized.
      with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
        self.evaluate(v0)
      with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
        self.evaluate(add)
      # If we initialize v0 we should be able to run 'add'.
      self.evaluate(v0.initializer)
      self.evaluate(add)

  # TODO(mihaimaruseac): Not converted to use wrap_function because of
  # AssertionError: True is not false (last assertFalse)
  @test_util.run_deprecated_v1
  def testEnableResourceVariables(self):
    """enable/disable_resource_variables toggles the default VariableV1 type."""
    # Save and restore the module-level default so other tests are unaffected.
    old = variable_scope._DEFAULT_USE_RESOURCE
    try:
      variable_scope.enable_resource_variables()
      self.assertTrue(isinstance(variables_lib.VariableV1(1.0),
                                 resource_variable_ops.ResourceVariable))
      variable_scope.disable_resource_variables()
      self.assertFalse(isinstance(variables_lib.VariableV1(1.0),
                                  resource_variable_ops.ResourceVariable))
    finally:
      variable_scope._DEFAULT_USE_RESOURCE = old

  # TODO(mihaimaruseac): Not converted to use wrap_function because of
  # TypeError: Fetch argument None has invalid type <type 'NoneType'>
  @test_util.run_deprecated_v1
  def testControlFlow(self):
    """Variables created inside cond branches initialize independently of v0."""
    with self.cached_session() as sess:
      v0 = variable_scope.get_variable(
          "v0", [], initializer=init_ops.constant_initializer(0))
      var_dict = {}

      # Call get_variable in each of the cond clauses.
      def var_in_then_clause():
        v1 = variable_scope.get_variable(
            "v1", [1], initializer=init_ops.constant_initializer(1))
        var_dict["v1"] = v1
        return v1 + v0

      def var_in_else_clause():
        v2 = variable_scope.get_variable(
            "v2", [1], initializer=init_ops.constant_initializer(2))
        var_dict["v2"] = v2
        return v2 + v0

      add = control_flow_ops.cond(
          math_ops.less(v0, 10), var_in_then_clause, var_in_else_clause)
      v1 = var_dict["v1"]
      v2 = var_dict["v2"]
      # We should be able to initialize and run v1 and v2 without initializing
      # v0, even if the variable was created with a control dep on v0.
      self.evaluate(v1.initializer)
      self.assertEqual([1], self.evaluate(v1))
      self.evaluate(v2.initializer)
      self.assertEqual([2], self.evaluate(v2))
      # v0 should still be uninitialized.
      with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
        self.evaluate(v0)
      # We should not be able to run 'add' yet.
      with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
        self.evaluate(add)
      # If we initialize v0 we should be able to run 'add'.
      self.evaluate(v0.initializer)
      self.evaluate(add)

  # TODO(mihaimaruseac): Not converted to use wrap_function because of
  # TypeError: Expected tf.group() expected Tensor arguments not 'None' with
  # type '<type 'NoneType'>'.
  @test_util.run_in_graph_and_eager_modes
  def testGetVariableScope(self):
    # Test the get_variable_scope() function and setting properties of result.
    init = init_ops.constant_initializer(0.3)
    with variable_scope.variable_scope("bar"):
      new_init1 = variable_scope.get_variable_scope().initializer
      self.assertEqual(new_init1, None)
      # Check that we can set initializer like this.
      variable_scope.get_variable_scope().set_initializer(init)
      v = variable_scope.get_variable("v", [])
      self.evaluate(variables_lib.variables_initializer([v]))
      self.assertAllClose(self.evaluate(v.value()), 0.3)
      if not context.executing_eagerly():
        # Check that we can set reuse.
        variable_scope.get_variable_scope().reuse_variables()
        with self.assertRaises(ValueError):  # Fail, w does not exist yet.
          variable_scope.get_variable("w", [1])
    # Check that the set initializer goes away.
    new_init = variable_scope.get_variable_scope().initializer
    self.assertEqual(new_init, None)

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarScope(self):
    """Nesting and re-entering scopes produce the expected scope/name paths."""
    with variable_scope.variable_scope("tower4") as tower:
      self.assertEqual(tower.name, "tower4")
      with ops.name_scope("scope") as sc:
        self.assertEqual(sc, "tower4/scope/")

    with variable_scope.variable_scope("tower5"):
      with variable_scope.variable_scope("bar") as bar:
        self.assertEqual(bar.name, "tower5/bar")
        with ops.name_scope("scope") as sc:
          self.assertEqual(sc, "tower5/bar/scope/")

    with variable_scope.variable_scope("tower6"):
      # Re-entering an existing VariableScope object keeps its variable-scope
      # name but nests the name scope under the current one.
      with variable_scope.variable_scope(tower, reuse=True) as tower_shared:
        self.assertEqual(tower_shared.name, "tower4")
        with ops.name_scope("scope") as sc:
          self.assertEqual(sc, "tower6/tower4/scope/")

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarScopeNameScope(self):
    """Re-entering a variable scope uniquifies the attached name scope (_1, _2)."""
    with ops.name_scope("testVarScopeNameScope1"):
      with variable_scope.variable_scope("tower") as tower:
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "testVarScopeNameScope1/tower/scope2/")
      if not context.executing_eagerly():
        with variable_scope.variable_scope(
            tower):  # Re-entering acts like another "tower".
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "testVarScopeNameScope1/tower_1/scope2/")
        with variable_scope.variable_scope(
            "tower"):  # Re-entering by string acts the same.
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "testVarScopeNameScope1/tower_2/scope2/")

    with ops.name_scope("testVarScopeNameScope2"):
      with variable_scope.variable_scope("tower"):
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "testVarScopeNameScope2/tower/scope2/")
      if not context.executing_eagerly():
        with variable_scope.variable_scope(tower):
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "testVarScopeNameScope2/tower_1/scope2/")

    root_var_scope = variable_scope.get_variable_scope()
    with ops.name_scope("testVarScopeNameScope3"):
      with variable_scope.variable_scope(root_var_scope):
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "testVarScopeNameScope3/scope2/")

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarScopeOriginalNameScope(self):
    """original_name_scope is captured at creation and survives re-entry."""
    with self.cached_session():
      with ops.name_scope("scope1"):
        with variable_scope.variable_scope("tower") as tower:
          self.assertEqual(tower.original_name_scope, "scope1/tower/")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "scope1/tower/scope2/")
      with ops.name_scope("scope2"):
        with variable_scope.variable_scope(tower) as tower1:
          # Re-entering preserves original name scope.
          self.assertEqual(tower1.original_name_scope, "scope1/tower/")
          with ops.name_scope("foo") as sc2:
            self.assertEqual(sc2, "scope2/tower/foo/")
        # Test re-entering original name scope.
        with ops.name_scope(tower.original_name_scope):
          with ops.name_scope("bar") as sc3:
            self.assertEqual(sc3, "scope1/tower/bar/")
      with ops.name_scope("scope2"):
        with variable_scope.variable_scope(tower):
          with ops.name_scope(tower.original_name_scope):
            with ops.name_scope("bar") as sc3:
              self.assertEqual(sc3, "scope1/tower/bar_1/")

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarScopeObjectReuse(self):
    """reuse=True is inherited when re-entering a scope object; False is not sticky."""
    with self.cached_session():
      vs = None
      with variable_scope.variable_scope("jump", reuse=True) as scope:
        vs = scope

      with variable_scope.variable_scope(vs) as jump:
        self.assertTrue(jump.reuse)

      with variable_scope.variable_scope(vs, reuse=True) as jump_reuse:
        self.assertTrue(jump_reuse.reuse)

      with variable_scope.variable_scope(vs, reuse=False) as jump_no_reuse:
        self.assertTrue(jump_no_reuse.reuse)  # Inherited, cannot be undone.

      with variable_scope.variable_scope("jump", reuse=False) as scope:
        vs = scope

      with variable_scope.variable_scope(vs) as jump:
        self.assertFalse(jump.reuse)

      with variable_scope.variable_scope(vs, reuse=True) as jump_reuse:
        self.assertTrue(jump_reuse.reuse)

      with variable_scope.variable_scope(vs, reuse=False) as jump_no_reuse:
        self.assertFalse(jump_no_reuse.reuse)

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarScopeGetOrCreateReuse(self):
    """AUTO_REUSE creates the variable on first use and reuses it afterwards."""
    with self.cached_session():

      def test_value(value):
        x = constant_op.constant(value)
        with variable_scope.variable_scope(
            "testVarScopeGetOrCreateReuse_bar",
            reuse=variable_scope.AUTO_REUSE):
          _ = state_ops.assign(variable_scope.get_variable("var", []), x)
        with variable_scope.variable_scope(
            "testVarScopeGetOrCreateReuse_bar",
            reuse=variable_scope.AUTO_REUSE):
          _ = variable_scope.get_variable("var", [])
        self.assertEqual(value, self.evaluate(x))

      test_value(42.)  # Variable is created.
      test_value(13.)  # Variable is reused hereafter.
      test_value(17.)
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarOpScope(self):
    """variable_scope(name, default_name) picks name or a uniquified default."""
    with self.cached_session():
      with ops.name_scope("testVarOpScope1"):
        with variable_scope.variable_scope("tower", "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "tower/w:0")
          with ops.name_scope("testVarOpScope2") as sc2:
            self.assertEqual(sc2, "testVarOpScope1/tower/testVarOpScope2/")
        with variable_scope.variable_scope("tower", "default", []):
          # Same explicit name without reuse: creating "w" again must fail.
          with self.assertRaises(ValueError):
            variable_scope.get_variable("w", [])
          with ops.name_scope("testVarOpScope2") as sc2:
            self.assertEqual(sc2, "testVarOpScope1/tower_1/testVarOpScope2/")

      with ops.name_scope("testVarOpScope2"):
        # name=None falls back to default_name, uniquified on repeat use.
        with variable_scope.variable_scope(None, "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "default/w:0")
          with ops.name_scope("testVarOpScope2") as sc2:
            self.assertEqual(sc2, "testVarOpScope2/default/testVarOpScope2/")
        with variable_scope.variable_scope(None, "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "default_1/w:0")
          with ops.name_scope("testVarOpScope2") as sc2:
            self.assertEqual(sc2, "testVarOpScope2/default_1/testVarOpScope2/")

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarOpScopeUniqueNamesInterleavedSubstringScopes(self):
    """Uniquification counts per default_name, even when names share a prefix."""
    with self.cached_session():
      with variable_scope.variable_scope(None, "defaultScope1"):
        with variable_scope.variable_scope(None, "layer"):
          self.assertEqual(
              variable_scope.get_variable("w", []).name,
              "defaultScope1/layer/w:0")
      with variable_scope.variable_scope(None, "defaultScope1"):
        with variable_scope.variable_scope(None, "layer"):
          self.assertEqual(
              variable_scope.get_variable("w", []).name,
              "defaultScope1_1/layer/w:0")
      # "defaultScope" is a substring of "defaultScope1" but counts separately.
      with variable_scope.variable_scope(None, "defaultScope"):
        with variable_scope.variable_scope(None, "layer"):
          self.assertEqual(
              variable_scope.get_variable("w", []).name,
              "defaultScope/layer/w:0")
      with variable_scope.variable_scope(None, "defaultScope1"):
        with variable_scope.variable_scope(None, "layer"):
          self.assertEqual(
              variable_scope.get_variable("w", []).name,
              "defaultScope1_2/layer/w:0")

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarOpScopeUniqueNamesWithJump(self):
    """Re-entering an outer scope mid-sequence does not reset uniquification."""
    with self.cached_session():
      with variable_scope.variable_scope("default") as default:
        with variable_scope.variable_scope(None, "layer"):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "default/layer/w:0")
        with variable_scope.variable_scope(None, "layer"):
          self.assertEqual(
              variable_scope.get_variable("w", []).name,
              "default/layer_1/w:0")
        with variable_scope.variable_scope(default):
          pass
        # No matter the jump in the middle, unique numbering continues.
        with variable_scope.variable_scope(None, "layer"):
          self.assertEqual(
              variable_scope.get_variable("w", []).name,
              "default/layer_2/w:0")

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarOpScopeReuse(self):
    """Reuse through an outer scope resolves the same variables, new name scopes."""
    with self.cached_session():
      with variable_scope.variable_scope("outer") as outer:
        with variable_scope.variable_scope("tower", "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/tower/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/tower/scope2/")
        with variable_scope.variable_scope(None, "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/default/scope2/")

      with variable_scope.variable_scope(outer, reuse=True) as outer:
        with variable_scope.variable_scope("tower", "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/tower/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_1/tower/scope2/")
        with variable_scope.variable_scope(None, "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_1/default/scope2/")

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarScopeGetVar(self):
    """get_variable creation/reuse semantics across sibling and nested scopes."""
    # NOTE: this method continues in the next chunk of the file.
    with self.cached_session():
      with variable_scope.variable_scope("root"):
        with variable_scope.variable_scope("towerA") as tower_a:
          va = variable_scope.get_variable("v", [1])
          self.assertEqual(va.name, "root/towerA/v:0")

        with variable_scope.variable_scope(tower_a, reuse=True):
          va2 = variable_scope.get_variable("v", [1])
          self.assertIs(va2, va)

        with variable_scope.variable_scope("towerB"):
          vb = variable_scope.get_variable("v", [1])
          self.assertEqual(vb.name, "root/towerB/v:0")

        # Creating an existing variable without reuse is an error.
        with self.assertRaises(ValueError):
          with variable_scope.variable_scope("towerA"):
            va2 = variable_scope.get_variable("v", [1])

        with variable_scope.variable_scope("towerA", reuse=True):
          va2 = variable_scope.get_variable("v", [1])
          self.assertIs(va2, va)

        with variable_scope.variable_scope("foo"):
          with variable_scope.variable_scope("bar"):
            v = variable_scope.get_variable("v", [1])
            self.assertEqual(v.name, "root/foo/bar/v:0")
            with variable_scope.variable_scope(tower_a, reuse=True):
              va3 = variable_scope.get_variable("v", [1])
              self.assertIs(va, va3)

        # Reuse fails for a variable that does not exist in the scope.
        with self.assertRaises(ValueError):
          with variable_scope.variable_scope(tower_a, reuse=True):
            with variable_scope.variable_scope("baz"):
              variable_scope.get_variable("v", [1])

        with self.assertRaises(ValueError) as exc:
          with variable_scope.variable_scope(tower_a, reuse=True):
            variable_scope.get_variable("v", [2])  # Different shape.
        # (Continuation of testVarScopeGetVar from the previous chunk.)
        # NOTE(review): `assertIn("shape", str(exc.exception))` would be the
        # idiomatic form of the next assertion.
        self.assertEqual("shape" in str(exc.exception), True)

        # Reuse with a mismatching dtype must also fail.
        with self.assertRaises(ValueError) as exc:
          with variable_scope.variable_scope(tower_a, reuse=True):
            variable_scope.get_variable("v", [1], dtype=dtypes.int32)
        self.assertEqual("dtype" in str(exc.exception), True)

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarScopeOuterScope(self):
    """Re-entering an outer scope reuses its variable names, uniquifies name scopes."""
    with self.cached_session():
      with variable_scope.variable_scope("outer") as outer:
        pass
      with variable_scope.variable_scope(outer):
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "outer/w:0")
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_1/scope2/")
        with variable_scope.variable_scope("default"):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_1/default/scope2/")

      with variable_scope.variable_scope(outer, reuse=True):
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "outer/w:0")
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_2/scope2/")
        with variable_scope.variable_scope("default", reuse=True):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_2/default/scope2/")

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarScopeNestedOuterScope(self):
    """Re-entering a scope from inside itself nests name scopes under it."""
    with self.cached_session():
      with variable_scope.variable_scope("outer") as outer:
        with variable_scope.variable_scope(outer):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/outer/scope2/")
          with variable_scope.variable_scope("default"):
            self.assertEqual(
                variable_scope.get_variable("w", []).name, "outer/default/w:0")
            with ops.name_scope("scope2") as sc2:
              self.assertEqual(sc2, "outer/default/scope2/")

        with variable_scope.variable_scope(outer, reuse=True):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/outer_1/scope2/")
          with variable_scope.variable_scope("default", reuse=True):
            self.assertEqual(
                variable_scope.get_variable("w", []).name, "outer/default/w:0")
            with ops.name_scope("scope2") as sc2:
              self.assertEqual(sc2, "outer/default_1/scope2/")

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarOpScopeReuseParam(self):
    """reuse can be passed per-scope or enabled via reuse_variables()."""
    with self.cached_session():
      with variable_scope.variable_scope("outer") as outer:
        with variable_scope.variable_scope("tower", "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/tower/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/tower/scope2/")
        with variable_scope.variable_scope(None, "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/default/scope2/")

      with variable_scope.variable_scope(outer) as outer:
        with variable_scope.variable_scope("tower", "default", reuse=True):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/tower/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_1/tower/scope2/")
        outer.reuse_variables()
        with variable_scope.variable_scope(None, "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_1/default/scope2/")

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarOpScopeReuseError(self):
    """reuse=True with name=None (default_name only) is rejected."""
    with self.cached_session():
      with self.assertRaises(ValueError):
        with variable_scope.variable_scope(None, "default", reuse=True):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/tower/w:0")

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarOpScopeOuterScope(self):
    """Passing a scope object with a default_name re-enters the scope."""
    with self.cached_session():
      with variable_scope.variable_scope("outer") as outer:
        pass
      with variable_scope.variable_scope(outer, "default", []):
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "outer/w:0")
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_1/scope2/")
        with variable_scope.variable_scope(None, "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_1/default/scope2/")

      with variable_scope.variable_scope(outer, "default", reuse=True):
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "outer/w:0")
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_2/scope2/")
        outer.reuse_variables()
        with variable_scope.variable_scope(None, "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_2/default/scope2/")

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarOpScopeNestedOuterScope(self):
    """Nested re-entry with default_name; continues in the next chunk."""
    with self.cached_session():
      with variable_scope.variable_scope("outer") as outer:
        with variable_scope.variable_scope(outer, "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/outer/scope2/")
          with variable_scope.variable_scope(None, "default", []):
            self.assertEqual(
                variable_scope.get_variable("w", []).name, "outer/default/w:0")
            with ops.name_scope("scope2") as sc2:
              self.assertEqual(sc2, "outer/default/scope2/")

      with variable_scope.variable_scope(outer, "default", reuse=True):
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "outer/w:0")
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_1/scope2/")
        with variable_scope.variable_scope(None, "default", []):
          self.assertEqual(
              variable_scope.get_variable("w",
                                          []).name, "outer/default/w:0")
          # (Continuation of testVarOpScopeNestedOuterScope from the previous
          # chunk.)
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_1/default/scope2/")

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testBasicWhenAuxiliaryNameScopeIsFalse(self):
    """auxiliary_name_scope=False keeps ops in the caller's name scope."""
    with self.cached_session():
      with variable_scope.variable_scope(
          "scope", auxiliary_name_scope=False) as scope:
        self.assertEqual(scope.original_name_scope, "")
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "scope/w:0")
        # Ops created here are NOT placed under a "scope/" name scope.
        self.assertEqual(constant_op.constant([], name="c").name, "c:0")
      with variable_scope.variable_scope(scope, auxiliary_name_scope=False):
        self.assertEqual(scope.original_name_scope, "")
        self.assertEqual(
            variable_scope.get_variable("w1", []).name, "scope/w1:0")
        self.assertEqual(constant_op.constant([], name="c1").name, "c1:0")
      # Recheck: new name scope is NOT created before
      with ops.name_scope("scope"):
        self.assertEqual(constant_op.constant([], name="c").name, "scope/c:0")

      with variable_scope.variable_scope("outer"):
        with variable_scope.variable_scope(
            "inner", auxiliary_name_scope=False) as inner:
          self.assertEqual(inner.original_name_scope, "outer/")
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/inner/w:0")
          self.assertEqual(
              constant_op.constant([], name="c").name, "outer/c:0")
        with variable_scope.variable_scope(
            inner, auxiliary_name_scope=False) as inner1:
          self.assertEqual(inner1.original_name_scope, "outer/")
          self.assertEqual(
              variable_scope.get_variable("w1", []).name, "outer/inner/w1:0")
          self.assertEqual(
              constant_op.constant([], name="c1").name, "outer/c1:0")
        # Recheck: new name scope is NOT created before
        with ops.name_scope("inner"):
          self.assertEqual(
              constant_op.constant([], name="c").name, "outer/inner/c:0")

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testCreatedByDefaultNameWhenAuxiliaryNameScopeIsFalse(self):
    """auxiliary_name_scope=False also applies to default_name-created scopes."""
    with self.cached_session():
      with variable_scope.variable_scope(
          None, default_name="default", auxiliary_name_scope=False) as scope:
        self.assertEqual(scope.original_name_scope, "")
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "default/w:0")
        self.assertEqual(constant_op.constant([], name="c").name, "c:0")
      # Recheck: new name scope is NOT created before
      with ops.name_scope("default"):
        self.assertEqual(
            constant_op.constant([], name="c").name, "default/c:0")

      with variable_scope.variable_scope("outer"):
        with variable_scope.variable_scope(
            None, default_name="default",
            auxiliary_name_scope=False) as inner:
          self.assertEqual(inner.original_name_scope, "outer/")
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          self.assertEqual(
              constant_op.constant([], name="c").name, "outer/c:0")
        # Recheck: new name scope is NOT created before
        with ops.name_scope("default"):
          self.assertEqual(
              constant_op.constant([], name="c").name, "outer/default/c:0")

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testReenterRootScopeWhenAuxiliaryNameScopeIsFalse(self):
    """Re-entering the root scope with auxiliary_name_scope=False."""
    with self.cached_session():
      root_scope = variable_scope.get_variable_scope()
      with variable_scope.variable_scope(
          root_scope, auxiliary_name_scope=False) as scope:
        self.assertEqual(scope.original_name_scope, "")
        self.assertEqual(variable_scope.get_variable("w", []).name, "w:0")
        self.assertEqual(constant_op.constant([], name="c").name, "c:0")

      with variable_scope.variable_scope("outer"):
        with variable_scope.variable_scope(
            root_scope, auxiliary_name_scope=False) as inner:
          self.assertEqual(inner.original_name_scope, "")
          # Variables go to the root scope; ops stay under "outer/".
          self.assertEqual(variable_scope.get_variable("w1", []).name, "w1:0")
          self.assertEqual(
              constant_op.constant([], name="c1").name, "outer/c1:0")

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testAuxiliaryNameScopeIsInvalid(self):
    """Non-boolean auxiliary_name_scope values raise TypeError."""
    with self.cached_session():
      with self.assertRaisesRegexp(TypeError, "auxiliary_name_scope"):
        with variable_scope.variable_scope(
            None, default_name="scope", auxiliary_name_scope="invalid"):
          pass

      with self.assertRaisesRegexp(TypeError, "auxiliary_name_scope"):
        with variable_scope.variable_scope(
            "scope", auxiliary_name_scope="invalid"):
          pass

      with variable_scope.variable_scope("scope") as scope:
        pass
      with self.assertRaisesRegexp(TypeError, "auxiliary_name_scope"):
        with variable_scope.variable_scope(
            scope, auxiliary_name_scope="invalid"):
          pass

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testReuseScopeWithoutNameScopeCollision(self):
    """Reusing a scope with auxiliary_name_scope=False avoids name collisions."""
    # Github issue: #13429
    with self.cached_session():
      with variable_scope.variable_scope("outer"):
        with variable_scope.variable_scope("inner") as inner:
          pass

      with variable_scope.variable_scope(
          inner, auxiliary_name_scope=False) as scope:
        with ops.name_scope(scope.original_name_scope):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/inner/w:0")
          self.assertEqual(
              constant_op.constant([], name="c").name, "outer/inner/c:0")
      with ops.name_scope("inner"):
        self.assertEqual(
            constant_op.constant([], name="c").name, "inner/c:0")

      with variable_scope.variable_scope("another"):
        with variable_scope.variable_scope(
            inner, auxiliary_name_scope=False) as scope1:
          with ops.name_scope(scope1.original_name_scope):
            self.assertEqual(
                variable_scope.get_variable("w1", []).name, "outer/inner/w1:0")
            self.assertEqual(
                constant_op.constant([], name="c1").name, "outer/inner/c1:0")
        with ops.name_scope("inner"):
          self.assertEqual(
              constant_op.constant([], name="c").name, "another/inner/c:0")

  # TODO(mihaimaruseac): Not converted to use wrap_function because of
  # obtaining different results in the eager case compared to the graph one
  # (different assertions failing after wrapping, in both execution modes)
  @test_util.run_in_graph_and_eager_modes
  def testGetLocalVar(self):
    # Check that local variable respects naming.
    # (Body of testGetLocalVar, whose `def` precedes this chunk.)
    with variable_scope.variable_scope("outer") as outer:
      with variable_scope.variable_scope(outer, "default", []):
        local_var = variable_scope.get_local_variable(
            "w", [], collections=["foo"])
        self.assertEqual(local_var.name, "outer/w:0")

    if not context.executing_eagerly():
      # Since variable is local, it should be in the local variable collection
      # but not the trainable collection.
      self.assertIn(local_var,
                    ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES))
      self.assertIn(local_var, ops.get_collection("foo"))
      self.assertNotIn(local_var,
                       ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))

      # Check that local variable respects `reuse`.
      with variable_scope.variable_scope(outer, "default", reuse=True):
        self.assertEqual(
            variable_scope.get_local_variable("w", []).name, "outer/w:0")

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testSignatureGetVarVsGetLocalVar(self):
    """get_{local,}variable() must take the same list of args."""
    arg_names = tf_inspect.getargspec(variable_scope.get_variable)[0]
    local_arg_names = tf_inspect.getargspec(
        variable_scope.get_local_variable)[0]
    self.assertEqual(arg_names, local_arg_names)

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testGetVarWithDevice(self):
    """A device function sees variable creation ops with their name and dtype."""
    g = ops.Graph()
    varname_type = []

    def device_func(op):
      # Record (name, dtype) for every variable-creating op type.
      if op.type in ["Variable", "VariableV2", "VarHandleOp"]:
        varname_type.append((op.name, op.get_attr("dtype")))
      return "/device:GPU:0"

    with g.as_default():
      with ops.device(device_func):
        _ = variable_scope.get_variable("x", (100, 200))
        _ = variable_scope.get_variable(
            "y", dtype=dtypes.int64, initializer=numpy.arange(73))
    self.assertEqual(varname_type[0], ("x", dtypes.float32))
    self.assertEqual(varname_type[1], ("y", dtypes.int64))

  # TODO(mihaimaruseac): Not converted to use wrap_function because of
  # obtaining different results in the eager case compared to the graph one
  @test_util.run_deprecated_v1
  def testGetCollection(self):
    """Scope.get_collection filters global/trainable collections by scope name."""
    with self.cached_session():
      _ = variable_scope.get_variable("testGetCollection_a", [])
      _ = variable_scope.get_variable(
          "testGetCollection_b", [], trainable=False)
      with variable_scope.variable_scope("testGetCollection_foo_") as scope1:
        _ = variable_scope.get_variable("testGetCollection_a", [])
        _ = variable_scope.get_variable(
            "testGetCollection_b", [], trainable=False)
        self.assertEqual([
            v.name
            for v in scope1.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
        ], ["testGetCollection_foo_/testGetCollection_a:0"])
        self.assertEqual([
            v.name
            for v in scope1.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
        ], [
            "testGetCollection_foo_/testGetCollection_a:0",
            "testGetCollection_foo_/testGetCollection_b:0"
        ])
      with variable_scope.variable_scope("testGetCollection_foo") as scope2:
        _ = variable_scope.get_variable("testGetCollection_a", [])
        _ = variable_scope.get_variable(
            "testGetCollection_b", [], trainable=False)
        self.assertEqual([
            v.name
            for v in scope2.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
        ], ["testGetCollection_foo/testGetCollection_a:0"])
        self.assertEqual([
            v.name
            for v in scope2.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
        ], [
            "testGetCollection_foo/testGetCollection_a:0",
            "testGetCollection_foo/testGetCollection_b:0"
        ])
      # The root scope sees everything.
      scope = variable_scope.get_variable_scope()
      self.assertEqual([
          v.name for v in scope.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
      ], [
          "testGetCollection_a:0", "testGetCollection_b:0",
          "testGetCollection_foo_/testGetCollection_a:0",
          "testGetCollection_foo_/testGetCollection_b:0",
          "testGetCollection_foo/testGetCollection_a:0",
          "testGetCollection_foo/testGetCollection_b:0"
      ])
      self.assertEqual([
          v.name
          for v in scope.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
      ], [
          "testGetCollection_a:0",
          "testGetCollection_foo_/testGetCollection_a:0",
          "testGetCollection_foo/testGetCollection_a:0"
      ])

  # TODO(mihaimaruseac): Not converted to use wrap_function because of
  # obtaining different results in the eager case compared to the graph one
  @test_util.run_deprecated_v1
  def testGetTrainableVariablesWithGetVariable(self):
    """trainable_variables() reflects trainable= and synchronization= settings."""
    with self.cached_session():
      _ = variable_scope.get_variable("testGetTrainableVariables_a", [])
      with variable_scope.variable_scope(
          "testGetTrainableVariables_foo") as scope:
        _ = variable_scope.get_variable("testGetTrainableVariables_b", [])
        _ = variable_scope.get_variable(
            "testGetTrainableVariables_c", [], trainable=False)

        # sync `ON_READ` sets trainable=False
        _ = variable_scope.get_variable(
            "testGetTrainableVariables_d", [],
            synchronization=variable_scope.VariableSynchronization.ON_READ)
        self.assertEqual(
            [v.name for v in scope.trainable_variables()],
            ["testGetTrainableVariables_foo/testGetTrainableVariables_b:0"])

        # An explicit trainable=True overrides the ON_READ default.
        _ = variable_scope.get_variable(
            "testGetTrainableVariables_e", [],
            synchronization=variable_scope.VariableSynchronization.ON_READ,
            trainable=True)
        self.assertEqual([v.name for v in scope.trainable_variables()], [
            "testGetTrainableVariables_foo/testGetTrainableVariables_b:0",
            "testGetTrainableVariables_foo/testGetTrainableVariables_e:0",
        ])

        # All other sync values sets trainable=True
        _ = variable_scope.get_variable(
            "testGetTrainableVariables_f", [],
            synchronization=variable_scope.VariableSynchronization.ON_WRITE)
        self.assertEqual([v.name for v in scope.trainable_variables()], [
            "testGetTrainableVariables_foo/testGetTrainableVariables_b:0",
            "testGetTrainableVariables_foo/testGetTrainableVariables_e:0",
            "testGetTrainableVariables_foo/testGetTrainableVariables_f:0",
        ])

  # TODO(mihaimaruseac): Not converted to use wrap_function because of
  # obtaining different results in the eager case compared to the graph one
  @test_util.run_deprecated_v1
  def testGetTrainableVariablesWithVariable(self):
    """Same check via variable_scope.variable(); continues in the next chunk."""
    with self.cached_session():
      _ = variable_scope.variable(1.0, name="testGetTrainableVariables_a")
      with variable_scope.variable_scope(
          "testGetTrainableVariables_foo") as scope:
        _ = variable_scope.variable(1.0, name="testGetTrainableVariables_b")
        _ = variable_scope.variable(
            1.0,
            name="testGetTrainableVariables_c",
trainable=False) # sync `ON_READ` sets trainable=False _ = variable_scope.variable( 1.0, name="testGetTrainableVariables_d", synchronization=variable_scope.VariableSynchronization.ON_READ) self.assertEqual( [v.name for v in scope.trainable_variables()], ["testGetTrainableVariables_foo/testGetTrainableVariables_b:0"]) _ = variable_scope.variable( 1.0, name="testGetTrainableVariables_e", synchronization=variable_scope.VariableSynchronization.ON_READ, trainable=True) self.assertEqual([v.name for v in scope.trainable_variables()], [ "testGetTrainableVariables_foo/testGetTrainableVariables_b:0", "testGetTrainableVariables_foo/testGetTrainableVariables_e:0", ]) # All other sync values sets trainable=True _ = variable_scope.variable( 1.0, name="testGetTrainableVariables_f", synchronization=variable_scope.VariableSynchronization.ON_WRITE) self.assertEqual([v.name for v in scope.trainable_variables()], [ "testGetTrainableVariables_foo/testGetTrainableVariables_b:0", "testGetTrainableVariables_foo/testGetTrainableVariables_e:0", "testGetTrainableVariables_foo/testGetTrainableVariables_f:0", ]) # TODO(mihaimaruseac): Not converted to use wrap_function because of # obtaining different results in the eager case compared to the graph one @test_util.run_deprecated_v1 def testGetGlobalVariables(self): with self.cached_session(): _ = variable_scope.get_variable("testGetGlobalVariables_a", []) with variable_scope.variable_scope("testGetGlobalVariables_foo") as scope: _ = variable_scope.get_variable("testGetGlobalVariables_b", []) self.assertEqual( [v.name for v in scope.global_variables()], ["testGetGlobalVariables_foo/" "testGetGlobalVariables_b:0"]) # TODO(mihaimaruseac): Not converted to use wrap_function because of # obtaining different results in the eager case compared to the graph one @test_util.run_deprecated_v1 def testGetLocalVariables(self): with self.cached_session(): _ = variable_scope.get_variable( "a", [], collections=[ops.GraphKeys.LOCAL_VARIABLES]) with 
variable_scope.variable_scope("foo") as scope: _ = variable_scope.get_variable( "b", [], collections=[ops.GraphKeys.LOCAL_VARIABLES]) _ = variable_scope.get_variable("c", []) self.assertEqual([v.name for v in scope.local_variables()], ["foo/b:0"]) @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testGetVariableWithRefDtype(self): v = variable_scope.get_variable("v", shape=[3, 4], dtype=dtypes.float32) # Ensure it is possible to do get_variable with a _ref dtype passed in. _ = variable_scope.get_variable("w", shape=[5, 6], dtype=v.dtype) @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testGetVariableWithInitializerWhichTakesNoArgs(self): v = variable_scope.get_variable("foo", initializer=lambda: [2]) self.assertEqual(v.name, "foo:0") @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testGetVariableWithInitializerWhichTakesOptionalArgs(self): v = variable_scope.get_variable("foo", initializer=lambda x=True: [2]) self.assertEqual(v.name, "foo:0") @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testGetVariableWithInitializerWhichTakesUnprovidedArgsAndNoShape(self): with self.assertRaisesRegexp( ValueError, "The initializer passed is not valid. 
It should be a callable with no " "arguments and the shape should not be provided or an instance of " "`tf.keras.initializers.*' and `shape` should be fully defined."): variable_scope.get_variable("foo", initializer=lambda x: [2]) @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testTwoGraphs(self): def f(): g1 = ops.Graph() g2 = ops.Graph() with g1.as_default(): with g2.as_default(): with variable_scope.variable_scope("_"): pass self.assertRaisesRegexp(ValueError, "'_' is not a valid scope name", f) def axis0_into1_partitioner(shape=None, **unused_kwargs): part = [1] * len(shape) return part def axis0_into2_partitioner(shape=None, **unused_kwargs): part = [1] * len(shape) part[0] = 2 return part def axis0_into3_partitioner(shape=None, **unused_kwargs): part = [1] * len(shape) part[0] = 3 return part class VariableScopeWithPartitioningTest(test.TestCase): # TODO(mihaimaruseac): Not converted to use wrap_function because of # obtaining different results in the eager case compared to the graph one @test_util.run_deprecated_v1 def testResultNameMatchesRequested(self): with variable_scope.variable_scope( "scope0", partitioner=axis0_into2_partitioner): v = variable_scope.get_variable("name0", shape=(3, 1, 1)) self.assertEqual(v.name, "scope0/name0") v_concat = v.as_tensor() self.assertEqual(v_concat.name, "scope0/name0:0") variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES) self.assertIn("scope0/name0/part_0:0", [x.name for x in variables]) self.assertIn("scope0/name0/part_1:0", [x.name for x in variables]) self.assertNotIn("scope0/name0/part_2:0", [x.name for x in variables]) @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testBreaksIfPartitioningChanges(self): with variable_scope.variable_scope( "scope0", partitioner=axis0_into2_partitioner): variable_scope.get_variable("name0", shape=(3, 1, 1)) with variable_scope.variable_scope( "scope0", partitioner=axis0_into3_partitioner, 
reuse=True): with self.assertRaisesRegexp( ValueError, "Trying to reuse partitioned variable .* but specified partitions " ".* and found partitions .*"): variable_scope.get_variable("name0", shape=(3, 1, 1)) with variable_scope.variable_scope( "scope0", partitioner=axis0_into1_partitioner, reuse=True): with self.assertRaisesRegexp( ValueError, "Trying to reuse partitioned variable .* but specified partitions " ".* and found partitions .*"): variable_scope.get_variable("name0", shape=(3, 1, 1)) @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testReturnsExistingConcatenatedValueIfReuse(self): with variable_scope.variable_scope( "scope0", partitioner=axis0_into2_partitioner): v_concat = variable_scope.get_variable("name0", shape=(3, 1, 1)) variable_scope.get_variable_scope().reuse_variables() v_concat_2 = variable_scope.get_variable("name0", shape=(3, 1, 1)) self.assertEqual(v_concat, v_concat_2) @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testAllowsReuseWithoutPartitioner(self): with variable_scope.variable_scope( "scope0", partitioner=axis0_into2_partitioner): v = variable_scope.get_variable("name0", shape=(3, 1, 1)) with variable_scope.variable_scope("scope0", reuse=True): v_reused = variable_scope.get_variable("name0") self.assertIs(v, v_reused) def testNoReuseInEagerByDefault(self): with context.eager_mode(): with variable_scope.variable_scope( "scope0", partitioner=axis0_into2_partitioner): v1 = variable_scope.get_variable("name0", shape=(3, 1, 1)) v2 = variable_scope.get_variable("name0", shape=(3, 1, 1)) self.assertIsNot(v1, v2) @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testPropagatePartitionerOnReopening(self): with variable_scope.variable_scope( "scope0", partitioner=axis0_into2_partitioner) as vs: self.assertEqual(axis0_into2_partitioner, vs.partitioner) with variable_scope.variable_scope(vs) as vs1: self.assertEqual(axis0_into2_partitioner, 
vs1.partitioner) # TODO(mihaimaruseac): Not converted to use wrap_function because of # obtaining different results in the eager case compared to the graph one @test_util.run_deprecated_v1 def testScalarIgnoresPartitioner(self): with variable_scope.variable_scope( "scope0", partitioner=axis0_into2_partitioner): v = variable_scope.get_variable("name0", shape=()) self.assertEqual(v.name, "scope0/name0:0") variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES) self.assertIn("scope0/name0:0", [x.name for x in variables]) def _testPartitionConcatenatesAlongCorrectAxis(self, use_resource): def _part_axis_0(**unused_kwargs): return (2, 1, 1) def _part_axis_1(**unused_kwargs): return (1, 2, 1) with variable_scope.variable_scope("root", use_resource=use_resource): v0 = variable_scope.get_variable( "n0", shape=(2, 2, 2), partitioner=_part_axis_0) v1 = variable_scope.get_variable( "n1", shape=(2, 2, 2), partitioner=_part_axis_1) self.assertEqual(v0.get_shape(), (2, 2, 2)) self.assertEqual(v1.get_shape(), (2, 2, 2)) n0_0 = list(v0)[0] n0_1 = list(v0)[1] self.assertEqual(n0_0.get_shape(), (1, 2, 2)) self.assertEqual(n0_1.get_shape(), (1, 2, 2)) n1_0 = list(v1)[0] n1_1 = list(v1)[1] self.assertEqual(n1_0.get_shape(), (2, 1, 2)) self.assertEqual(n1_1.get_shape(), (2, 1, 2)) @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testPartitionConcatenatesAlongCorrectAxis(self): self._testPartitionConcatenatesAlongCorrectAxis(use_resource=False) @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testPartitionConcatenatesAlongCorrectAxisResource(self): self._testPartitionConcatenatesAlongCorrectAxis(use_resource=True) def testPartitionConcatenatesAlongCorrectAxisResourceInEager(self): with context.eager_mode(): self._testPartitionConcatenatesAlongCorrectAxis(use_resource=True) class VariableScopeWithCustomGetterTest(test.TestCase): @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def 
testNonCallableGetterFails(self): with self.assertRaisesRegexp(ValueError, r"custom_getter .* not callable:"): with variable_scope.variable_scope("scope0", custom_getter=3): variable_scope.get_variable("name0") with self.assertRaisesRegexp(ValueError, r"custom_getter .* not callable:"): variable_scope.get_variable("name0", custom_getter=3) @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testNoSideEffectsWithIdentityCustomGetter(self): called = [0] def custom_getter(getter, *args, **kwargs): called[0] += 1 return getter(*args, **kwargs) with variable_scope.variable_scope( "scope", custom_getter=custom_getter) as scope: v = variable_scope.get_variable("v", [1]) with variable_scope.variable_scope(scope, reuse=True): v2 = variable_scope.get_variable("v", [1]) with variable_scope.variable_scope("new_scope") as new_scope: v3 = variable_scope.get_variable("v3", [1]) with variable_scope.variable_scope( new_scope, reuse=True, custom_getter=custom_getter): v4 = variable_scope.get_variable("v3", [1]) self.assertIs(v, v2) self.assertIs(v3, v4) self.assertEqual(3, called[0]) # skipped one in the first new_scope @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testSynchronizationAndAggregationWithCustomGetter(self): called = [0] synchronization = variable_scope.VariableSynchronization.AUTO aggregation = variable_scope.VariableAggregation.NONE def custom_getter(getter, *args, **kwargs): called[0] += 1 # Verify synchronization and aggregation kwargs are as expected. 
self.assertEqual(kwargs["synchronization"], synchronization) self.assertEqual(kwargs["aggregation"], aggregation) return getter(*args, **kwargs) with variable_scope.variable_scope("scope", custom_getter=custom_getter): variable_scope.get_variable("v", [1]) self.assertEqual(1, called[0]) with variable_scope.variable_scope("scope", custom_getter=custom_getter): synchronization = variable_scope.VariableSynchronization.ON_READ aggregation = variable_scope.VariableAggregation.MEAN variable_scope.get_variable( "v1", [1], synchronization=synchronization, aggregation=aggregation) self.assertEqual(2, called[0]) @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testCustomGetterWithReuse(self): # Custom getter can choose to behave differently on reused variables. def custom_getter(getter, *args, **kwargs): var = getter(*args, **kwargs) if kwargs["reuse"]: # This can be used, e.g., for changing the caching device if needed. return array_ops.identity(var, name="reused") else: return array_ops.identity(var, name="not_reused") with variable_scope.variable_scope( "scope", custom_getter=custom_getter) as scope: v = variable_scope.get_variable("v", [1]) with variable_scope.variable_scope(scope, reuse=True): v2 = variable_scope.get_variable("v", [1]) self.assertEqual(v.name, "not_reused:0") self.assertEqual(v2.name, "reused:0") # TODO(mihaimaruseac): Not converted to use wrap_function because of # ValueError: Fetch argument <tf.Tensor 'custom_getter/add:0' shape=(1, 2, 3) # dtype=float32> cannot be interpreted as a Tensor. (Tensor # Tensor("custom_getter/add:0", shape=(1, 2, 3), dtype=float32) is not an # element of this graph.) 
@test_util.run_deprecated_v1 def testGetterThatCreatesTwoVariablesAndSumsThem(self): def custom_getter(getter, name, *args, **kwargs): g_0 = getter("%s/0" % name, *args, **kwargs) g_1 = getter("%s/1" % name, *args, **kwargs) with ops.name_scope("custom_getter"): return g_0 + g_1 with variable_scope.variable_scope("scope", custom_getter=custom_getter): v = variable_scope.get_variable("v", [1, 2, 3]) self.assertEqual([1, 2, 3], v.get_shape()) true_vars = variables_lib.trainable_variables() self.assertEqual(2, len(true_vars)) self.assertEqual("scope/v/0:0", true_vars[0].name) self.assertEqual("scope/v/1:0", true_vars[1].name) self.assertEqual("custom_getter/add:0", v.name) with self.cached_session() as sess: variables_lib.global_variables_initializer().run() np_vars, np_v = self.evaluate([true_vars, v]) self.assertAllClose(np_v, sum(np_vars)) # TODO(mihaimaruseac): Not converted to use wrap_function because of # ValueError: Fetch argument <tf.Tensor 'sum_getter_2/add:0' shape=(1, 2, 3) # dtype=float32> cannot be interpreted as a Tensor. (Tensor # Tensor("sum_getter_2/add:0", shape=(1, 2, 3), dtype=float32) is not an # element of this graph.) 
@test_util.run_deprecated_v1 def testNestedCustomGetters(self): def sum_getter(getter, name, *args, **kwargs): g_0 = getter("%s/sum_0" % name, *args, **kwargs) g_1 = getter("%s/sum_1" % name, *args, **kwargs) with ops.name_scope("sum_getter"): return g_0 + g_1 def prod_getter(getter, name, *args, **kwargs): g_0 = getter("%s/prod_0" % name, *args, **kwargs) g_1 = getter("%s/prod_1" % name, *args, **kwargs) with ops.name_scope("prod_getter"): return g_0 * g_1 with variable_scope.variable_scope("prod_scope", custom_getter=prod_getter): with variable_scope.variable_scope("sum_scope", custom_getter=sum_getter): with variable_scope.variable_scope( "inner_sum_scope", custom_getter=sum_getter): # take sums of sums of products v = variable_scope.get_variable("v", [1, 2, 3]) self.assertEqual([1, 2, 3], v.get_shape()) true_vars = variables_lib.trainable_variables() self.assertEqual(8, len(true_vars)) template = ( "prod_scope/sum_scope/inner_sum_scope/v/sum_%d/sum_%d/prod_%d:0") self.assertEqual(template % (0, 0, 0), true_vars[0].name) self.assertEqual(template % (0, 0, 1), true_vars[1].name) self.assertEqual(template % (0, 1, 0), true_vars[2].name) self.assertEqual(template % (0, 1, 1), true_vars[3].name) self.assertEqual(template % (1, 0, 0), true_vars[4].name) self.assertEqual(template % (1, 0, 1), true_vars[5].name) self.assertEqual(template % (1, 1, 0), true_vars[6].name) self.assertEqual(template % (1, 1, 1), true_vars[7].name) with self.cached_session() as sess: variables_lib.global_variables_initializer().run() np_vars, np_v = self.evaluate([true_vars, v]) # take products of sums of products self.assertAllClose( np_v, (((np_vars[0] * np_vars[1]) + (np_vars[2] * np_vars[3])) + ( (np_vars[4] * np_vars[5]) + (np_vars[6] * np_vars[7])))) @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testVariableCreator(self): variable_names = [] def creator_a(next_creator, **kwargs): variable_names.append(kwargs.get("name", "")) return 
next_creator(**kwargs) def creator_b(next_creator, **kwargs): kwargs["name"] = "forced_name" return next_creator(**kwargs) with variable_scope.variable_creator_scope(creator_a): with variable_scope.variable_creator_scope(creator_b): variable_scope.variable(1.0, name="one_name") self.assertEqual(variable_names[0], "forced_name") called = [False] def creater_c(next_creator, **kwargs): called[0] = True self.assertEqual(kwargs["synchronization"], variable_scope.VariableSynchronization.ON_WRITE) self.assertEqual(kwargs["aggregation"], variable_scope.VariableAggregation.MEAN) return next_creator(**kwargs) with variable_scope.variable_creator_scope(creater_c): variable_scope.get_variable( "v", [], synchronization=variable_scope.VariableSynchronization.ON_WRITE, aggregation=variable_scope.VariableAggregation.MEAN) self.assertTrue(called[0]) @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testVariableCreatorNestingError(self): def creator(next_creator, **kwargs): return next_creator(**kwargs) # Save the state so we can clean up at the end. graph = ops.get_default_graph() old_creator_stack = graph._variable_creator_stack try: scope = variable_scope.variable_creator_scope(creator) scope.__enter__() with variable_scope.variable_creator_scope(creator): with self.assertRaises(RuntimeError): scope.__exit__(None, None, None) finally: graph._variable_creator_stack = old_creator_stack class PartitionInfoTest(test.TestCase): @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testConstructorChecks(self): # Invalid arg types. 
with self.assertRaises(TypeError): variable_scope._PartitionInfo(full_shape=None, var_offset=[0, 1]) with self.assertRaises(TypeError): variable_scope._PartitionInfo(full_shape=[0, 1], var_offset=None) with self.assertRaises(TypeError): variable_scope._PartitionInfo(full_shape="foo", var_offset=[0, 1]) with self.assertRaises(TypeError): variable_scope._PartitionInfo(full_shape=[0, 1], var_offset="foo") # full_shape and var_offset must have same length. with self.assertRaises(ValueError): variable_scope._PartitionInfo(full_shape=[0, 1], var_offset=[0]) # Offset must always be less than shape. with self.assertRaises(ValueError): variable_scope._PartitionInfo(full_shape=[1, 1], var_offset=[0, 1]) @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testSingleOffset(self): partition_info = variable_scope._PartitionInfo( full_shape=[9, 3], var_offset=[4, 0]) self.assertEqual(4, partition_info.single_offset([1, 3])) # Tests when the variable isn't partitioned at all. partition_info = variable_scope._PartitionInfo( full_shape=[9, 3], var_offset=[0, 0]) self.assertEqual(0, partition_info.single_offset([9, 3])) @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testSingleSliceDim(self): partition_info = variable_scope._PartitionInfo( full_shape=[9, 3], var_offset=[4, 0]) # Invalid shape. with self.assertRaises(TypeError): partition_info.single_slice_dim(None) # Rank of shape differs from full_shape. with self.assertRaises(ValueError): partition_info.single_slice_dim([1, 2, 3]) # Shape is too large given var_offset (4+6 > 9). with self.assertRaises(ValueError): partition_info.single_slice_dim([6, 3]) # Multiple possible slice dim from shape. 
with self.assertRaises(ValueError): partition_info.single_slice_dim([1, 1]) partition_info = variable_scope._PartitionInfo( full_shape=[9, 3], var_offset=[0, 0]) self.assertEqual(1, partition_info.single_slice_dim([9, 2])) partition_info = variable_scope._PartitionInfo( full_shape=[9, 3], var_offset=[4, 0]) self.assertEqual(0, partition_info.single_slice_dim([2, 3])) class VariableScopeMultithreadedTest(test.TestCase): @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testTwoThreadsDisjointScopeEntry(self): def thread_fn(i, graph): with graph.as_default(): with variable_scope.variable_scope("foo"): if i == 0: v = variable_scope.get_variable("v", []) self.assertEquals("foo/v:0", v.name) else: # Any thread after the first one should fail to create variable # with the same name. with self.assertRaises(ValueError): variable_scope.get_variable("v", []) graph = ops.get_default_graph() threads = [ threading.Thread(target=thread_fn, args=( i, graph, )) for i in range(2) ] threads[0].start() # Allow thread 0 to finish before starting thread 1. threads[0].join() threads[1].start() threads[1].join() @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testTwoThreadsNestedScopeEntry(self): def thread_fn(i, graph, run_event, pause_event): with graph.as_default(): with variable_scope.variable_scope("foo"): if i == 0: v = variable_scope.get_variable("v", []) self.assertEquals("foo/v:0", v.name) else: # Any thread after the first one should fail to create variable # with the same name. with self.assertRaises(ValueError): variable_scope.get_variable("v", []) pause_event.set() run_event.wait() graph = ops.get_default_graph() run_events = [threading.Event() for _ in range(2)] pause_events = [threading.Event() for _ in range(2)] threads = [ threading.Thread( target=thread_fn, args=(i, graph, run_events[i], pause_events[i])) for i in range(2) ] # Start first thread. 
threads[0].start() pause_events[0].wait() # Start next thread once the first thread has paused. threads[1].start() pause_events[1].wait() # Resume both threads. run_events[0].set() run_events[1].set() threads[0].join() threads[1].join() @test_util.run_in_graph_and_eager_modes @run_inside_wrap_function_in_eager_mode def testReenterMainScope(self): def thread_fn(graph, main_thread_scope): with graph.as_default(): # Variable created with main scope will have prefix "main". with variable_scope.variable_scope(main_thread_scope): with variable_scope.variable_scope("foo"): v = variable_scope.get_variable("v", []) self.assertEquals("main/foo/v:0", v.name) # Variable created outside main scope will not have prefix "main". with variable_scope.variable_scope("bar"): v = variable_scope.get_variable("v", []) self.assertEquals("bar/v:0", v.name) graph = ops.get_default_graph() with variable_scope.variable_scope("main") as main_thread_scope: thread = threading.Thread( target=thread_fn, args=(graph, main_thread_scope)) thread.start() thread.join() if __name__ == "__main__": test.main()
codeparrot/github-code-clean
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from datetime import date, datetime from dateutil import relativedelta import json import time from openerp.osv import fields, osv from openerp.tools.float_utils import float_compare, float_round from openerp.tools.translate import _ from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT from openerp.exceptions import Warning from openerp import SUPERUSER_ID, api import openerp.addons.decimal_precision as dp from openerp.addons.procurement import procurement import logging _logger = logging.getLogger(__name__) #---------------------------------------------------------- # Incoterms #---------------------------------------------------------- class stock_incoterms(osv.osv): _name = "stock.incoterms" _description = "Incoterms" _columns = { 'name': fields.char('Name', required=True, help="Incoterms are series of sales terms. 
They are used to divide transaction costs and responsibilities between buyer and seller and reflect state-of-the-art transportation practices."), 'code': fields.char('Code', size=3, required=True, help="Incoterm Standard Code"), 'active': fields.boolean('Active', help="By unchecking the active field, you may hide an INCOTERM you will not use."), } _defaults = { 'active': True, } #---------------------------------------------------------- # Stock Location #---------------------------------------------------------- class stock_location(osv.osv): _name = "stock.location" _description = "Inventory Locations" _parent_name = "location_id" _parent_store = True _parent_order = 'name' _order = 'parent_left' _rec_name = 'complete_name' def _location_owner(self, cr, uid, location, context=None): ''' Return the company owning the location if any ''' return location and (location.usage == 'internal') and location.company_id or False def _complete_name(self, cr, uid, ids, name, args, context=None): """ Forms complete name of location from parent location to child location. 
@return: Dictionary of values """ res = {} for m in self.browse(cr, uid, ids, context=context): res[m.id] = m.name parent = m.location_id while parent: res[m.id] = parent.name + ' / ' + res[m.id] parent = parent.location_id return res def _get_sublocations(self, cr, uid, ids, context=None): """ return all sublocations of the given stock locations (included) """ if context is None: context = {} context_with_inactive = context.copy() context_with_inactive['active_test'] = False return self.search(cr, uid, [('id', 'child_of', ids)], context=context_with_inactive) def _name_get(self, cr, uid, location, context=None): name = location.name while location.location_id and location.usage != 'view': location = location.location_id name = location.name + '/' + name return name def name_get(self, cr, uid, ids, context=None): res = [] for location in self.browse(cr, uid, ids, context=context): res.append((location.id, self._name_get(cr, uid, location, context=context))) return res _columns = { 'name': fields.char('Location Name', required=True, translate=True), 'active': fields.boolean('Active', help="By unchecking the active field, you may hide a location without deleting it."), 'usage': fields.selection([ ('supplier', 'Supplier Location'), ('view', 'View'), ('internal', 'Internal Location'), ('customer', 'Customer Location'), ('inventory', 'Inventory'), ('procurement', 'Procurement'), ('production', 'Production'), ('transit', 'Transit Location')], 'Location Type', required=True, help="""* Supplier Location: Virtual location representing the source location for products coming from your suppliers \n* View: Virtual location used to create a hierarchical structures for your warehouse, aggregating its child locations ; can't directly contain products \n* Internal Location: Physical locations inside your own warehouses, \n* Customer Location: Virtual location representing the destination location for products sent to your customers \n* Inventory: Virtual location serving as 
counterpart for inventory operations used to correct stock levels (Physical inventories) \n* Procurement: Virtual location serving as temporary counterpart for procurement operations when the source (supplier or production) is not known yet. This location should be empty when the procurement scheduler has finished running. \n* Production: Virtual counterpart location for production operations: this location consumes the raw material and produces finished products \n* Transit Location: Counterpart location that should be used in inter-companies or inter-warehouses operations """, select=True), 'complete_name': fields.function(_complete_name, type='char', string="Location Name", store={'stock.location': (_get_sublocations, ['name', 'location_id', 'active'], 10)}), 'location_id': fields.many2one('stock.location', 'Parent Location', select=True, ondelete='cascade'), 'child_ids': fields.one2many('stock.location', 'location_id', 'Contains'), 'partner_id': fields.many2one('res.partner', 'Owner', help="Owner of the location if not internal"), 'comment': fields.text('Additional Information'), 'posx': fields.integer('Corridor (X)', help="Optional localization details, for information purpose only"), 'posy': fields.integer('Shelves (Y)', help="Optional localization details, for information purpose only"), 'posz': fields.integer('Height (Z)', help="Optional localization details, for information purpose only"), 'parent_left': fields.integer('Left Parent', select=1), 'parent_right': fields.integer('Right Parent', select=1), 'company_id': fields.many2one('res.company', 'Company', select=1, help='Let this field empty if this location is shared between companies'), 'scrap_location': fields.boolean('Is a Scrap Location?', help='Check this box to allow using this location to put scrapped/damaged goods.'), 'removal_strategy_id': fields.many2one('product.removal', 'Removal Strategy', help="Defines the default method used for suggesting the exact location (shelf) where to take the 
products from, which lot etc. for this location. This method can be enforced at the product category level, and a fallback is made on the parent locations if none is set here."), 'putaway_strategy_id': fields.many2one('product.putaway', 'Put Away Strategy', help="Defines the default method used for suggesting the exact location (shelf) where to store the products. This method can be enforced at the product category level, and a fallback is made on the parent locations if none is set here."), 'loc_barcode': fields.char('Location Barcode'), } _defaults = { 'active': True, 'usage': 'internal', 'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.location', context=c), 'posx': 0, 'posy': 0, 'posz': 0, 'scrap_location': False, } _sql_constraints = [('loc_barcode_company_uniq', 'unique (loc_barcode,company_id)', 'The barcode for a location must be unique per company !')] def create(self, cr, uid, default, context=None): if not default.get('loc_barcode', False): default.update({'loc_barcode': default.get('complete_name', False)}) return super(stock_location, self).create(cr, uid, default, context=context) def get_putaway_strategy(self, cr, uid, location, product, context=None): ''' Returns the location where the product has to be put, if any compliant putaway strategy is found. Otherwise returns None.''' putaway_obj = self.pool.get('product.putaway') loc = location while loc: if loc.putaway_strategy_id: res = putaway_obj.putaway_apply(cr, uid, loc.putaway_strategy_id, product, context=context) if res: return res loc = loc.location_id def _default_removal_strategy(self, cr, uid, context=None): return 'fifo' def get_removal_strategy(self, cr, uid, location, product, context=None): ''' Returns the removal strategy to consider for the given product and location. 
:param location: browse record (stock.location) :param product: browse record (product.product) :rtype: char ''' if product.categ_id.removal_strategy_id: return product.categ_id.removal_strategy_id.method loc = location while loc: if loc.removal_strategy_id: return loc.removal_strategy_id.method loc = loc.location_id return self._default_removal_strategy(cr, uid, context=context) def get_warehouse(self, cr, uid, location, context=None): """ Returns warehouse id of warehouse that contains location :param location: browse record (stock.location) """ wh_obj = self.pool.get("stock.warehouse") whs = wh_obj.search(cr, uid, [('view_location_id.parent_left', '<=', location.parent_left), ('view_location_id.parent_right', '>=', location.parent_left)], context=context) return whs and whs[0] or False #---------------------------------------------------------- # Routes #---------------------------------------------------------- class stock_location_route(osv.osv): _name = 'stock.location.route' _description = "Inventory Routes" _order = 'sequence' _columns = { 'name': fields.char('Route Name', required=True, translate=True), 'sequence': fields.integer('Sequence'), 'pull_ids': fields.one2many('procurement.rule', 'route_id', 'Pull Rules', copy=True), 'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the route without removing it."), 'push_ids': fields.one2many('stock.location.path', 'route_id', 'Push Rules', copy=True), 'product_selectable': fields.boolean('Applicable on Product'), 'product_categ_selectable': fields.boolean('Applicable on Product Category'), 'warehouse_selectable': fields.boolean('Applicable on Warehouse'), 'supplied_wh_id': fields.many2one('stock.warehouse', 'Supplied Warehouse'), 'supplier_wh_id': fields.many2one('stock.warehouse', 'Supplier Warehouse'), 'company_id': fields.many2one('res.company', 'Company', select=1, help='Let this field empty if this route is shared between all companies'), } _defaults = 
{ 'sequence': lambda self, cr, uid, ctx: 0, 'active': True, 'product_selectable': True, 'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.location.route', context=c), } def write(self, cr, uid, ids, vals, context=None): '''when a route is deactivated, deactivate also its pull and push rules''' if isinstance(ids, (int, long)): ids = [ids] res = super(stock_location_route, self).write(cr, uid, ids, vals, context=context) if 'active' in vals: push_ids = [] pull_ids = [] for route in self.browse(cr, uid, ids, context=context): if route.push_ids: push_ids += [r.id for r in route.push_ids if r.active != vals['active']] if route.pull_ids: pull_ids += [r.id for r in route.pull_ids if r.active != vals['active']] if push_ids: self.pool.get('stock.location.path').write(cr, uid, push_ids, {'active': vals['active']}, context=context) if pull_ids: self.pool.get('procurement.rule').write(cr, uid, pull_ids, {'active': vals['active']}, context=context) return res #---------------------------------------------------------- # Quants #---------------------------------------------------------- class stock_quant(osv.osv): """ Quants are the smallest unit of stock physical instances """ _name = "stock.quant" _description = "Quants" def _get_quant_name(self, cr, uid, ids, name, args, context=None): """ Forms complete name of location from parent location to child location. 
@return: Dictionary of values """ res = {} for q in self.browse(cr, uid, ids, context=context): res[q.id] = q.product_id.code or '' if q.lot_id: res[q.id] = q.lot_id.name res[q.id] += ': ' + str(q.qty) + q.product_id.uom_id.name return res def _calc_inventory_value(self, cr, uid, ids, name, attr, context=None): context = dict(context or {}) res = {} uid_company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id for quant in self.browse(cr, uid, ids, context=context): context.pop('force_company', None) if quant.company_id.id != uid_company_id: #if the company of the quant is different than the current user company, force the company in the context #then re-do a browse to read the property fields for the good company. context['force_company'] = quant.company_id.id quant = self.browse(cr, uid, quant.id, context=context) res[quant.id] = self._get_inventory_value(cr, uid, quant, context=context) return res def _get_inventory_value(self, cr, uid, quant, context=None): return quant.product_id.standard_price * quant.qty _columns = { 'name': fields.function(_get_quant_name, type='char', string='Identifier'), 'product_id': fields.many2one('product.product', 'Product', required=True, ondelete="restrict", readonly=True, select=True), 'location_id': fields.many2one('stock.location', 'Location', required=True, ondelete="restrict", readonly=True, select=True, auto_join=True), 'qty': fields.float('Quantity', required=True, help="Quantity of products in this quant, in the default unit of measure of the product", readonly=True, select=True), 'package_id': fields.many2one('stock.quant.package', string='Package', help="The package containing this quant", readonly=True, select=True), 'packaging_type_id': fields.related('package_id', 'packaging_id', type='many2one', relation='product.packaging', string='Type of packaging', readonly=True, store=True), 'reservation_id': fields.many2one('stock.move', 'Reserved for Move', help="The move the quant is reserved 
    def init(self, cr):
        """Called at module install/update: ensure the composite index used
        by quant availability searches exists.
        """
        # CREATE INDEX has no IF NOT EXISTS on the PostgreSQL versions this
        # targets, so probe the catalog first to keep init() re-runnable.
        cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('stock_quant_product_location_index',))
        if not cr.fetchone():
            cr.execute('CREATE INDEX stock_quant_product_location_index ON stock_quant (product_id, location_id, company_id, qty, in_date, reservation_id)')
    def quants_reserve(self, cr, uid, quants, move, link=False, context=None):
        '''Reserve the given quants for the given move and, if enough
        quantity ends up reserved, advance the move's state.

        :param quants: list of tuple(quant browse record or None, qty to
            reserve). Tuples whose first element is None are skipped (they
            stand for quantity that could not be found). Negative quants
            must not be passed in.
        :param move: browse record (stock.move)
        :param link: browse record (stock.move.operation.link); accepted for
            API compatibility but unused by this implementation
        '''
        toreserve = []
        reserved_availability = move.reserved_availability
        #split quants if needed: trim each quant down to the requested qty so
        #only that quantity gets reserved
        for quant, qty in quants:
            if qty <= 0.0 or (quant and quant.qty <= 0.0):
                raise osv.except_osv(_('Error!'), _('You can not reserve a negative quantity or a negative quant.'))
            if not quant:
                continue
            self._quant_split(cr, uid, quant, qty, context=context)
            toreserve.append(quant.id)
            # NOTE(review): quant.qty is read after _quant_split; this relies
            # on the browse record reflecting the post-split quantity — verify
            # cache invalidation before changing anything here.
            reserved_availability += quant.qty
        #reserve quants
        if toreserve:
            self.write(cr, SUPERUSER_ID, toreserve, {'reservation_id': move.id}, context=context)
            #if move has a picking_id, write on that picking that pack_operation might have changed and need to be recomputed
            if move.picking_id:
                self.pool.get('stock.picking').write(cr, uid, [move.picking_id.id], {'recompute_pack_op': True}, context=context)
        #check if the move's state needs to be set as 'assigned'
        rounding = move.product_id.uom_id.rounding
        if float_compare(reserved_availability, move.product_qty, precision_rounding=rounding) == 0 and move.state in ('confirmed', 'waiting'):
            self.pool.get('stock.move').write(cr, uid, [move.id], {'state': 'assigned'}, context=context)
        elif float_compare(reserved_availability, 0, precision_rounding=rounding) > 0 and not move.partially_available:
            self.pool.get('stock.move').write(cr, uid, [move.id], {'partially_available': True}, context=context)
:param quants: list of tuple(browse record(stock.quant) or None, quantity to move) :param move: browse record (stock.move) :param location_to: browse record (stock.location) depicting where the quants have to be moved :param location_from: optional browse record (stock.location) explaining where the quant has to be taken (may differ from the move source location in case a removal strategy applied). This parameter is only used to pass to _quant_create if a negative quant must be created :param lot_id: ID of the lot that must be set on the quants to move :param owner_id: ID of the partner that must own the quants to move :param src_package_id: ID of the package that contains the quants to move :param dest_package_id: ID of the package that must be set on the moved quant """ quants_reconcile = [] to_move_quants = [] self._check_location(cr, uid, location_to, context=context) for quant, qty in quants: if not quant: #If quant is None, we will create a quant to move (and potentially a negative counterpart too) quant = self._quant_create(cr, uid, qty, move, lot_id=lot_id, owner_id=owner_id, src_package_id=src_package_id, dest_package_id=dest_package_id, force_location_from=location_from, force_location_to=location_to, context=context) else: self._quant_split(cr, uid, quant, qty, context=context) to_move_quants.append(quant) quants_reconcile.append(quant) if to_move_quants: to_recompute_move_ids = [x.reservation_id.id for x in to_move_quants if x.reservation_id and x.reservation_id.id != move.id] self.move_quants_write(cr, uid, to_move_quants, move, location_to, dest_package_id, context=context) self.pool.get('stock.move').recalculate_move_state(cr, uid, to_recompute_move_ids, context=context) if location_to.usage == 'internal': # Do manual search for quant to avoid full table scan (order by id) cr.execute(""" SELECT 0 FROM stock_quant, stock_location WHERE product_id = %s AND stock_location.id = stock_quant.location_id AND ((stock_location.parent_left >= %s AND 
stock_location.parent_left < %s) OR stock_location.id = %s) AND qty < 0.0 LIMIT 1 """, (move.product_id.id, location_to.parent_left, location_to.parent_right, location_to.id)) if cr.fetchone(): for quant in quants_reconcile: self._quant_reconcile_negative(cr, uid, quant, move, context=context) def move_quants_write(self, cr, uid, quants, move, location_dest_id, dest_package_id, context=None): context=context or {} vals = {'location_id': location_dest_id.id, 'history_ids': [(4, move.id)], 'reservation_id': False} if not context.get('entire_pack'): vals.update({'package_id': dest_package_id}) self.write(cr, SUPERUSER_ID, [q.id for q in quants], vals, context=context) def quants_get_prefered_domain(self, cr, uid, location, product, qty, domain=None, prefered_domain_list=[], restrict_lot_id=False, restrict_partner_id=False, context=None): ''' This function tries to find quants in the given location for the given domain, by trying to first limit the choice on the quants that match the first item of prefered_domain_list as well. But if the qty requested is not reached it tries to find the remaining quantity by looping on the prefered_domain_list (tries with the second item and so on). Make sure the quants aren't found twice => all the domains of prefered_domain_list should be orthogonal ''' if domain is None: domain = [] quants = [(None, qty)] #don't look for quants in location that are of type production, supplier or inventory. 
if location.usage in ['inventory', 'production', 'supplier']: return quants res_qty = qty if not prefered_domain_list: return self.quants_get(cr, uid, location, product, qty, domain=domain, restrict_lot_id=restrict_lot_id, restrict_partner_id=restrict_partner_id, context=context) for prefered_domain in prefered_domain_list: res_qty_cmp = float_compare(res_qty, 0, precision_rounding=product.uom_id.rounding) if res_qty_cmp > 0: #try to replace the last tuple (None, res_qty) with something that wasn't chosen at first because of the prefered order quants.pop() tmp_quants = self.quants_get(cr, uid, location, product, res_qty, domain=domain + prefered_domain, restrict_lot_id=restrict_lot_id, restrict_partner_id=restrict_partner_id, context=context) for quant in tmp_quants: if quant[0]: res_qty -= quant[1] quants += tmp_quants return quants def quants_get(self, cr, uid, location, product, qty, domain=None, restrict_lot_id=False, restrict_partner_id=False, context=None): """ Use the removal strategies of product to search for the correct quants If you inherit, put the super at the end of your method. 
:location: browse record of the parent location where the quants have to be found :product: browse record of the product to find :qty in UoM of product """ result = [] domain = domain or [('qty', '>', 0.0)] if restrict_partner_id: domain += [('owner_id', '=', restrict_partner_id)] if restrict_lot_id: domain += [('lot_id', '=', restrict_lot_id)] if location: removal_strategy = self.pool.get('stock.location').get_removal_strategy(cr, uid, location, product, context=context) result += self.apply_removal_strategy(cr, uid, location, product, qty, domain, removal_strategy, context=context) return result def apply_removal_strategy(self, cr, uid, location, product, quantity, domain, removal_strategy, context=None): if removal_strategy == 'fifo': order = 'in_date, id' return self._quants_get_order(cr, uid, location, product, quantity, domain, order, context=context) elif removal_strategy == 'lifo': order = 'in_date desc, id desc' return self._quants_get_order(cr, uid, location, product, quantity, domain, order, context=context) raise osv.except_osv(_('Error!'), _('Removal strategy %s not implemented.' % (removal_strategy,))) def _quant_create(self, cr, uid, qty, move, lot_id=False, owner_id=False, src_package_id=False, dest_package_id=False, force_location_from=False, force_location_to=False, context=None): '''Create a quant in the destination location and create a negative quant in the source location if it's an internal location. 
''' if context is None: context = {} price_unit = self.pool.get('stock.move').get_price_unit(cr, uid, move, context=context) location = force_location_to or move.location_dest_id rounding = move.product_id.uom_id.rounding vals = { 'product_id': move.product_id.id, 'location_id': location.id, 'qty': float_round(qty, precision_rounding=rounding), 'cost': price_unit, 'history_ids': [(4, move.id)], 'in_date': datetime.now().strftime(DEFAULT_SERVER_DATETIME_FORMAT), 'company_id': move.company_id.id, 'lot_id': lot_id, 'owner_id': owner_id, 'package_id': dest_package_id, } if move.location_id.usage == 'internal': #if we were trying to move something from an internal location and reach here (quant creation), #it means that a negative quant has to be created as well. negative_vals = vals.copy() negative_vals['location_id'] = force_location_from and force_location_from.id or move.location_id.id negative_vals['qty'] = float_round(-qty, precision_rounding=rounding) negative_vals['cost'] = price_unit negative_vals['negative_move_id'] = move.id negative_vals['package_id'] = src_package_id negative_quant_id = self.create(cr, SUPERUSER_ID, negative_vals, context=context) vals.update({'propagated_from_id': negative_quant_id}) #create the quant as superuser, because we want to restrict the creation of quant manually: we should always use this method to create quants quant_id = self.create(cr, SUPERUSER_ID, vals, context=context) return self.browse(cr, uid, quant_id, context=context) def _quant_split(self, cr, uid, quant, qty, context=None): context = context or {} rounding = quant.product_id.uom_id.rounding if float_compare(abs(quant.qty), abs(qty), precision_rounding=rounding) <= 0: # if quant <= qty in abs, take it entirely return False qty_round = float_round(qty, precision_rounding=rounding) new_qty_round = float_round(quant.qty - qty, precision_rounding=rounding) # Fetch the history_ids manually as it will not do a join with the stock moves then (=> a lot faster) 
cr.execute("""SELECT move_id FROM stock_quant_move_rel WHERE quant_id = %s""", (quant.id,)) res = cr.fetchall() new_quant = self.copy(cr, SUPERUSER_ID, quant.id, default={'qty': new_qty_round, 'history_ids': [(4, x[0]) for x in res]}, context=context) self.write(cr, SUPERUSER_ID, quant.id, {'qty': qty_round}, context=context) return self.browse(cr, uid, new_quant, context=context) def _get_latest_move(self, cr, uid, quant, context=None): move = False for m in quant.history_ids: if not move or m.date > move.date: move = m return move @api.cr_uid_ids_context def _quants_merge(self, cr, uid, solved_quant_ids, solving_quant, context=None): path = [] for move in solving_quant.history_ids: path.append((4, move.id)) self.write(cr, SUPERUSER_ID, solved_quant_ids, {'history_ids': path}, context=context) def _quant_reconcile_negative(self, cr, uid, quant, move, context=None): """ When new quant arrive in a location, try to reconcile it with negative quants. If it's possible, apply the cost of the new quant to the conter-part of the negative quant. 
""" solving_quant = quant dom = [('qty', '<', 0)] if quant.lot_id: dom += [('lot_id', '=', quant.lot_id.id)] dom += [('owner_id', '=', quant.owner_id.id)] dom += [('package_id', '=', quant.package_id.id)] dom += [('id', '!=', quant.propagated_from_id.id)] quants = self.quants_get(cr, uid, quant.location_id, quant.product_id, quant.qty, dom, context=context) product_uom_rounding = quant.product_id.uom_id.rounding for quant_neg, qty in quants: if not quant_neg or not solving_quant: continue to_solve_quant_ids = self.search(cr, uid, [('propagated_from_id', '=', quant_neg.id)], context=context) if not to_solve_quant_ids: continue solving_qty = qty solved_quant_ids = [] for to_solve_quant in self.browse(cr, uid, to_solve_quant_ids, context=context): if float_compare(solving_qty, 0, precision_rounding=product_uom_rounding) <= 0: continue solved_quant_ids.append(to_solve_quant.id) self._quant_split(cr, uid, to_solve_quant, min(solving_qty, to_solve_quant.qty), context=context) solving_qty -= min(solving_qty, to_solve_quant.qty) remaining_solving_quant = self._quant_split(cr, uid, solving_quant, qty, context=context) remaining_neg_quant = self._quant_split(cr, uid, quant_neg, -qty, context=context) #if the reconciliation was not complete, we need to link together the remaining parts if remaining_neg_quant: remaining_to_solve_quant_ids = self.search(cr, uid, [('propagated_from_id', '=', quant_neg.id), ('id', 'not in', solved_quant_ids)], context=context) if remaining_to_solve_quant_ids: self.write(cr, SUPERUSER_ID, remaining_to_solve_quant_ids, {'propagated_from_id': remaining_neg_quant.id}, context=context) if solving_quant.propagated_from_id and solved_quant_ids: self.write(cr, SUPERUSER_ID, solved_quant_ids, {'propagated_from_id': solving_quant.propagated_from_id.id}, context=context) #delete the reconciled quants, as it is replaced by the solved quants self.unlink(cr, SUPERUSER_ID, [quant_neg.id], context=context) if solved_quant_ids: #price update + accounting entries 
adjustments self._price_update(cr, uid, solved_quant_ids, solving_quant.cost, context=context) #merge history (and cost?) self._quants_merge(cr, uid, solved_quant_ids, solving_quant, context=context) self.unlink(cr, SUPERUSER_ID, [solving_quant.id], context=context) solving_quant = remaining_solving_quant def _price_update(self, cr, uid, ids, newprice, context=None): self.write(cr, SUPERUSER_ID, ids, {'cost': newprice}, context=context) def quants_unreserve(self, cr, uid, move, context=None): related_quants = [x.id for x in move.reserved_quant_ids] if related_quants: #if move has a picking_id, write on that picking that pack_operation might have changed and need to be recomputed if move.picking_id: self.pool.get('stock.picking').write(cr, uid, [move.picking_id.id], {'recompute_pack_op': True}, context=context) if move.partially_available: self.pool.get("stock.move").write(cr, uid, [move.id], {'partially_available': False}, context=context) self.write(cr, SUPERUSER_ID, related_quants, {'reservation_id': False}, context=context) def _quants_get_order(self, cr, uid, location, product, quantity, domain=[], orderby='in_date', context=None): ''' Implementation of removal strategies If it can not reserve, it will return a tuple (None, qty) ''' if context is None: context = {} domain += location and [('location_id', 'child_of', location.id)] or [] domain += [('product_id', '=', product.id)] if context.get('force_company'): domain += [('company_id', '=', context.get('force_company'))] else: domain += [('company_id', '=', self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id)] res = [] offset = 0 while float_compare(quantity, 0, precision_rounding=product.uom_id.rounding) > 0: quants = self.search(cr, uid, domain, order=orderby, limit=10, offset=offset, context=context) if not quants: res.append((None, quantity)) break for quant in self.browse(cr, uid, quants, context=context): rounding = product.uom_id.rounding if float_compare(quantity, 
abs(quant.qty), precision_rounding=rounding) >= 0: res += [(quant, abs(quant.qty))] quantity -= abs(quant.qty) elif float_compare(quantity, 0.0, precision_rounding=rounding) != 0: res += [(quant, quantity)] quantity = 0 break offset += 10 return res def _check_location(self, cr, uid, location, context=None): if location.usage == 'view': raise osv.except_osv(_('Error'), _('You cannot move to a location of type view %s.') % (location.name)) return True #---------------------------------------------------------- # Stock Picking #---------------------------------------------------------- class stock_picking(osv.osv): _name = "stock.picking" _inherit = ['mail.thread'] _description = "Picking List" _order = "priority desc, date asc, id desc" def _set_min_date(self, cr, uid, id, field, value, arg, context=None): move_obj = self.pool.get("stock.move") if value: move_ids = [move.id for move in self.browse(cr, uid, id, context=context).move_lines] move_obj.write(cr, uid, move_ids, {'date_expected': value}, context=context) def _set_priority(self, cr, uid, id, field, value, arg, context=None): move_obj = self.pool.get("stock.move") if value: move_ids = [move.id for move in self.browse(cr, uid, id, context=context).move_lines] move_obj.write(cr, uid, move_ids, {'priority': value}, context=context) def get_min_max_date(self, cr, uid, ids, field_name, arg, context=None): """ Finds minimum and maximum dates for picking. 
@return: Dictionary of values
        """
        res = {}
        for id in ids:
            res[id] = {'min_date': False, 'max_date': False, 'priority': '1'}
        if not ids:
            return res
        # single aggregated SQL query instead of browsing every move
        cr.execute("""select picking_id, min(date_expected), max(date_expected), max(priority)
                from stock_move where picking_id IN %s group by picking_id""", (tuple(ids),))
        for pick, dt1, dt2, prio in cr.fetchall():
            res[pick]['min_date'] = dt1
            res[pick]['max_date'] = dt2
            res[pick]['priority'] = prio
        return res

    def create(self, cr, user, vals, context=None):
        # assign the next number of the picking type's sequence when no explicit name is given
        context = context or {}
        if ('name' not in vals) or (vals.get('name') in ('/', False)):
            ptype_id = vals.get('picking_type_id', context.get('default_picking_type_id', False))
            sequence_id = self.pool.get('stock.picking.type').browse(cr, user, ptype_id, context=context).sequence_id.id
            vals['name'] = self.pool.get('ir.sequence').get_id(cr, user, sequence_id, 'id', context=context)
        return super(stock_picking, self).create(cr, user, vals, context)

    def _state_get(self, cr, uid, ids, field_name, arg, context=None):
        '''The state of a picking depends on the state of its related stock.move
            draft: the picking has no line or any one of the lines is draft
            done, draft, cancel: all lines are done / draft / cancel
            confirmed, waiting, assigned, partially_available depends on move_type (all at once or partial)
        '''
        res = {}
        for pick in self.browse(cr, uid, ids, context=context):
            if (not pick.move_lines) or any([x.state == 'draft' for x in pick.move_lines]):
                res[pick.id] = 'draft'
                continue
            if all([x.state == 'cancel' for x in pick.move_lines]):
                res[pick.id] = 'cancel'
                continue
            if all([x.state in ('cancel', 'done') for x in pick.move_lines]):
                res[pick.id] = 'done'
                continue
            # rank the remaining move states; min/max below pick the weakest/strongest
            order = {'confirmed': 0, 'waiting': 1, 'assigned': 2}
            order_inv = {0: 'confirmed', 1: 'waiting', 2: 'assigned'}
            lst = [order[x.state] for x in pick.move_lines if x.state not in ('cancel', 'done')]
            if pick.move_type == 'one':
                res[pick.id] = order_inv[min(lst)]
            else:
                #we are in the case of partial delivery, so if all move are 
assigned, picking #should be assign too, else if one of the move is assigned, or partially available, picking should be #in partially available state, otherwise, picking is in waiting or confirmed state res[pick.id] = order_inv[max(lst)] if not all(x == 2 for x in lst): if any(x == 2 for x in lst): res[pick.id] = 'partially_available' else: #if all moves aren't assigned, check if we have one product partially available for move in pick.move_lines: if move.partially_available: res[pick.id] = 'partially_available' break return res def _get_pickings(self, cr, uid, ids, context=None): res = set() for move in self.browse(cr, uid, ids, context=context): if move.picking_id: res.add(move.picking_id.id) return list(res) def _get_pickings_dates_priority(self, cr, uid, ids, context=None): res = set() for move in self.browse(cr, uid, ids, context=context): if move.picking_id and (not (move.picking_id.min_date < move.date_expected < move.picking_id.max_date) or move.priority > move.picking_id.priority): res.add(move.picking_id.id) return list(res) def _get_pack_operation_exist(self, cr, uid, ids, field_name, arg, context=None): res = {} for pick in self.browse(cr, uid, ids, context=context): res[pick.id] = False if pick.pack_operation_ids: res[pick.id] = True return res def _get_quant_reserved_exist(self, cr, uid, ids, field_name, arg, context=None): res = {} for pick in self.browse(cr, uid, ids, context=context): res[pick.id] = False for move in pick.move_lines: if move.reserved_quant_ids: res[pick.id] = True continue return res def check_group_lot(self, cr, uid, context=None): """ This function will return true if we have the setting to use lots activated. """ return self.pool.get('res.users').has_group(cr, uid, 'stock.group_production_lot') def check_group_pack(self, cr, uid, context=None): """ This function will return true if we have the setting to use package activated. 
""" return self.pool.get('res.users').has_group(cr, uid, 'stock.group_tracking_lot') def action_assign_owner(self, cr, uid, ids, context=None): for picking in self.browse(cr, uid, ids, context=context): packop_ids = [op.id for op in picking.pack_operation_ids] self.pool.get('stock.pack.operation').write(cr, uid, packop_ids, {'owner_id': picking.owner_id.id}, context=context) _columns = { 'name': fields.char('Reference', select=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, copy=False), 'origin': fields.char('Source Document', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, help="Reference of the document", select=True), 'backorder_id': fields.many2one('stock.picking', 'Back Order of', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, help="If this shipment was split, then this field links to the shipment which contains the already processed part.", select=True, copy=False), 'note': fields.text('Notes'), 'move_type': fields.selection([('direct', 'Partial'), ('one', 'All at once')], 'Delivery Method', required=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, help="It specifies goods to be deliver partially or all at once"), 'state': fields.function(_state_get, type="selection", copy=False, store={ 'stock.picking': (lambda self, cr, uid, ids, ctx: ids, ['move_type'], 20), 'stock.move': (_get_pickings, ['state', 'picking_id', 'partially_available'], 20)}, selection=[ ('draft', 'Draft'), ('cancel', 'Cancelled'), ('waiting', 'Waiting Another Operation'), ('confirmed', 'Waiting Availability'), ('partially_available', 'Partially Available'), ('assigned', 'Ready to Transfer'), ('done', 'Transferred'), ], string='Status', readonly=True, select=True, track_visibility='onchange', help=""" * Draft: not confirmed yet and will not be scheduled until confirmed\n * Waiting Another Operation: waiting for another move to proceed before it becomes automatically available (e.g. 
in Make-To-Order flows)\n * Waiting Availability: still waiting for the availability of products\n * Partially Available: some products are available and reserved\n * Ready to Transfer: products reserved, simply waiting for confirmation.\n * Transferred: has been processed, can't be modified or cancelled anymore\n * Cancelled: has been cancelled, can't be confirmed anymore""" ), 'priority': fields.function(get_min_max_date, multi="min_max_date", fnct_inv=_set_priority, type='selection', selection=procurement.PROCUREMENT_PRIORITIES, string='Priority', store={'stock.move': (_get_pickings_dates_priority, ['priority', 'picking_id'], 20)}, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, select=1, help="Priority for this picking. Setting manually a value here would set it as priority for all the moves", track_visibility='onchange', required=True), 'min_date': fields.function(get_min_max_date, multi="min_max_date", fnct_inv=_set_min_date, store={'stock.move': (_get_pickings_dates_priority, ['date_expected', 'picking_id'], 20)}, type='datetime', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, string='Scheduled Date', select=1, help="Scheduled time for the first part of the shipment to be processed. Setting manually a value here would set it as expected date for all the stock moves.", track_visibility='onchange'), 'max_date': fields.function(get_min_max_date, multi="min_max_date", store={'stock.move': (_get_pickings_dates_priority, ['date_expected', 'picking_id'], 20)}, type='datetime', string='Max. 
Expected Date', select=2, help="Scheduled time for the last part of the shipment to be processed"), 'date': fields.datetime('Creation Date', help="Creation Date, usually the time of the order", select=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, track_visibility='onchange'), 'date_done': fields.datetime('Date of Transfer', help="Date of Completion", states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, copy=False), 'move_lines': fields.one2many('stock.move', 'picking_id', 'Internal Moves', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, copy=True), 'quant_reserved_exist': fields.function(_get_quant_reserved_exist, type='boolean', string='Quant already reserved ?', help='technical field used to know if there is already at least one quant reserved on moves of a given picking'), 'partner_id': fields.many2one('res.partner', 'Partner', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}), 'company_id': fields.many2one('res.company', 'Company', required=True, select=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}), 'pack_operation_ids': fields.one2many('stock.pack.operation', 'picking_id', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, string='Related Packing Operations'), 'pack_operation_exist': fields.function(_get_pack_operation_exist, type='boolean', string='Pack Operation Exists?', help='technical field for attrs in view'), 'picking_type_id': fields.many2one('stock.picking.type', 'Picking Type', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, required=True), 'picking_type_code': fields.related('picking_type_id', 'code', type='char', string='Picking Type Code', help="Technical field used to display the correct label on print button in the picking view"), 'owner_id': fields.many2one('res.partner', 'Owner', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, help="Default Owner"), # Used 
to search on pickings
        'product_id': fields.related('move_lines', 'product_id', type='many2one', relation='product.product', string='Product'),
        'recompute_pack_op': fields.boolean('Recompute pack operation?', help='True if reserved quants changed, which mean we might need to recompute the package operations', copy=False),
        'location_id': fields.related('move_lines', 'location_id', type='many2one', relation='stock.location', string='Location', readonly=True),
        'location_dest_id': fields.related('move_lines', 'location_dest_id', type='many2one', relation='stock.location', string='Destination Location', readonly=True),
        'group_id': fields.related('move_lines', 'group_id', type='many2one', relation='procurement.group', string='Procurement Group', readonly=True,
              store={
                  'stock.picking': (lambda self, cr, uid, ids, ctx: ids, ['move_lines'], 10),
                  'stock.move': (_get_pickings, ['group_id', 'picking_id'], 10),
              }),
    }

    _defaults = {
        'name': '/',
        'state': 'draft',
        'move_type': 'direct',
        'priority': '1',  # normal
        'date': fields.datetime.now,
        'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.picking', context=c),
        'recompute_pack_op': True,
    }
    _sql_constraints = [
        ('name_uniq', 'unique(name, company_id)', 'Reference must be unique per company!'),
    ]

    def do_print_picking(self, cr, uid, ids, context=None):
        '''This function prints the picking list'''
        context = dict(context or {}, active_ids=ids)
        return self.pool.get("report").get_action(cr, uid, ids, 'stock.report_picking', context=context)

    def action_confirm(self, cr, uid, ids, context=None):
        # confirm all draft moves of the pickings; pickings whose source location is
        # supplier/inventory/production are force-assigned right away (no reservation possible there)
        todo = []
        todo_force_assign = []
        for picking in self.browse(cr, uid, ids, context=context):
            if picking.location_id.usage in ('supplier', 'inventory', 'production'):
                todo_force_assign.append(picking.id)
            for r in picking.move_lines:
                if r.state == 'draft':
                    todo.append(r.id)
        if len(todo):
            self.pool.get('stock.move').action_confirm(cr, uid, todo, context=context)

        if todo_force_assign: 
self.force_assign(cr, uid, todo_force_assign, context=context)
        return True

    def action_assign(self, cr, uid, ids, context=None):
        """ Check availability of picking moves.
        This has the effect of changing the state and reserve quants on available moves, and may
        also impact the state of the picking as it is computed based on move's states.
        @return: True
        """
        for pick in self.browse(cr, uid, ids, context=context):
            if pick.state == 'draft':
                self.action_confirm(cr, uid, [pick.id], context=context)
            #skip the moves that don't need to be checked
            move_ids = [x.id for x in pick.move_lines if x.state not in ('draft', 'cancel', 'done')]
            if not move_ids:
                raise osv.except_osv(_('Warning!'), _('Nothing to check the availability for.'))
            self.pool.get('stock.move').action_assign(cr, uid, move_ids, context=context)
        return True

    def force_assign(self, cr, uid, ids, context=None):
        """ Changes state of picking to available if moves are confirmed or waiting.
        @return: True
        """
        for pick in self.browse(cr, uid, ids, context=context):
            move_ids = [x.id for x in pick.move_lines if x.state in ['confirmed', 'waiting']]
            self.pool.get('stock.move').force_assign(cr, uid, move_ids, context=context)
        #pack_operation might have changed and need to be recomputed
        self.write(cr, uid, ids, {'recompute_pack_op': True}, context=context)
        return True

    def action_cancel(self, cr, uid, ids, context=None):
        # cancel all moves of the pickings; the picking state follows via _state_get
        for pick in self.browse(cr, uid, ids, context=context):
            ids2 = [move.id for move in pick.move_lines]
            self.pool.get('stock.move').action_cancel(cr, uid, ids2, context)
        return True

    def action_done(self, cr, uid, ids, context=None):
        """Changes picking state to done by processing the Stock Moves of the Picking

        Normally that happens when the button "Done" is pressed on a Picking view.
@return: True
        """
        for pick in self.browse(cr, uid, ids, context=context):
            todo = []
            for move in pick.move_lines:
                if move.state == 'draft':
                    # confirming a draft move may return additional move ids
                    todo.extend(self.pool.get('stock.move').action_confirm(cr, uid, [move.id], context=context))
                elif move.state in ('assigned', 'confirmed'):
                    todo.append(move.id)
            if len(todo):
                self.pool.get('stock.move').action_done(cr, uid, todo, context=context)
        return True

    def unlink(self, cr, uid, ids, context=None):
        #on picking deletion, cancel its move then unlink them too
        move_obj = self.pool.get('stock.move')
        context = context or {}
        for pick in self.browse(cr, uid, ids, context=context):
            move_ids = [move.id for move in pick.move_lines]
            move_obj.action_cancel(cr, uid, move_ids, context=context)
            move_obj.unlink(cr, uid, move_ids, context=context)
        return super(stock_picking, self).unlink(cr, uid, ids, context=context)

    def write(self, cr, uid, ids, vals, context=None):
        if vals.get('move_lines') and not vals.get('pack_operation_ids'):
            # pack operations are directly dependant of move lines, it needs to be recomputed
            pack_operation_obj = self.pool['stock.pack.operation']
            existing_package_ids = pack_operation_obj.search(cr, uid, [('picking_id', 'in', ids)], context=context)
            if existing_package_ids:
                pack_operation_obj.unlink(cr, uid, existing_package_ids, context)
        res = super(stock_picking, self).write(cr, uid, ids, vals, context=context)
        #if we changed the move lines or the pack operations, we need to recompute the remaining quantities of both
        if 'move_lines' in vals or 'pack_operation_ids' in vals:
            self.do_recompute_remaining_quantities(cr, uid, ids, context=context)
        return res

    def _create_backorder(self, cr, uid, picking, backorder_moves=[], context=None):
        """ Move all non-done lines into a new backorder picking. If the key 'do_only_split' is given in the context, then move all lines not in context.get('split', []) instead of all non-done lines.
""" if not backorder_moves: backorder_moves = picking.move_lines backorder_move_ids = [x.id for x in backorder_moves if x.state not in ('done', 'cancel')] if 'do_only_split' in context and context['do_only_split']: backorder_move_ids = [x.id for x in backorder_moves if x.id not in context.get('split', [])] if backorder_move_ids: backorder_id = self.copy(cr, uid, picking.id, { 'name': '/', 'move_lines': [], 'pack_operation_ids': [], 'backorder_id': picking.id, }) backorder = self.browse(cr, uid, backorder_id, context=context) self.message_post(cr, uid, picking.id, body=_("Back order <em>%s</em> <b>created</b>.") % (backorder.name), context=context) move_obj = self.pool.get("stock.move") move_obj.write(cr, uid, backorder_move_ids, {'picking_id': backorder_id}, context=context) if not picking.date_done: self.write(cr, uid, [picking.id], {'date_done': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)}, context=context) self.action_confirm(cr, uid, [backorder_id], context=context) return backorder_id return False @api.cr_uid_ids_context def recheck_availability(self, cr, uid, picking_ids, context=None): self.action_assign(cr, uid, picking_ids, context=context) self.do_prepare_partial(cr, uid, picking_ids, context=context) def _get_top_level_packages(self, cr, uid, quants_suggested_locations, context=None): """This method searches for the higher level packages that can be moved as a single operation, given a list of quants to move and their suggested destination, and returns the list of matching packages. 
""" # Try to find as much as possible top-level packages that can be moved pack_obj = self.pool.get("stock.quant.package") quant_obj = self.pool.get("stock.quant") top_lvl_packages = set() quants_to_compare = quants_suggested_locations.keys() for pack in list(set([x.package_id for x in quants_suggested_locations.keys() if x and x.package_id])): loop = True test_pack = pack good_pack = False pack_destination = False while loop: pack_quants = pack_obj.get_content(cr, uid, [test_pack.id], context=context) all_in = True for quant in quant_obj.browse(cr, uid, pack_quants, context=context): # If the quant is not in the quants to compare and not in the common location if not quant in quants_to_compare: all_in = False break else: #if putaway strat apply, the destination location of each quant may be different (and thus the package should not be taken as a single operation) if not pack_destination: pack_destination = quants_suggested_locations[quant] elif pack_destination != quants_suggested_locations[quant]: all_in = False break if all_in: good_pack = test_pack if test_pack.parent_id: test_pack = test_pack.parent_id else: #stop the loop when there's no parent package anymore loop = False else: #stop the loop when the package test_pack is not totally reserved for moves of this picking #(some quants may be reserved for other picking or not reserved at all) loop = False if good_pack: top_lvl_packages.add(good_pack) return list(top_lvl_packages) def _prepare_pack_ops(self, cr, uid, picking, quants, forced_qties, context=None): """ returns a list of dict, ready to be used in create() of stock.pack.operation. :param picking: browse record (stock.picking) :param quants: browse record list (stock.quant). 
List of quants associated to the picking
        :param forced_qties: dictionary showing for each product (keys) its corresponding quantity (value) that is not covered by the quants associated to the picking
        """
        def _picking_putaway_apply(product):
            # resolve (and memoize) the putaway location for this product
            location = False
            # Search putaway strategy
            if product_putaway_strats.get(product.id):
                location = product_putaway_strats[product.id]
            else:
                location = self.pool.get('stock.location').get_putaway_strategy(cr, uid, picking.location_dest_id, product, context=context)
                product_putaway_strats[product.id] = location
            return location or picking.location_dest_id.id

        # If we encounter an UoM that is smaller than the default UoM or the one already chosen, use the new one instead.
        product_uom = {}  # Determines UoM used in pack operations
        location_dest_id = None
        location_id = None
        for move in [x for x in picking.move_lines if x.state not in ('done', 'cancel')]:
            if not product_uom.get(move.product_id.id):
                product_uom[move.product_id.id] = move.product_id.uom_id
            if move.product_uom.id != move.product_id.uom_id.id and move.product_uom.factor > product_uom[move.product_id.id].factor:
                product_uom[move.product_id.id] = move.product_uom
            if not move.scrapped:
                # all non-scrapped moves must share one source and one destination location
                if location_dest_id and move.location_dest_id.id != location_dest_id:
                    raise Warning(_('The destination location must be the same for all the moves of the picking.'))
                location_dest_id = move.location_dest_id.id
                if location_id and move.location_id.id != location_id:
                    raise Warning(_('The source location must be the same for all the moves of the picking.'))
                location_id = move.location_id.id

        pack_obj = self.pool.get("stock.quant.package")
        quant_obj = self.pool.get("stock.quant")
        vals = []
        qtys_grouped = {}
        #for each quant of the picking, find the suggested location
        quants_suggested_locations = {}
        product_putaway_strats = {}
        for quant in quants:
            if quant.qty <= 0:
                continue
            suggested_location_id = _picking_putaway_apply(quant.product_id)
            quants_suggested_locations[quant] = suggested_location_id

        #find the 
packages we can movei as a whole top_lvl_packages = self._get_top_level_packages(cr, uid, quants_suggested_locations, context=context) # and then create pack operations for the top-level packages found for pack in top_lvl_packages: pack_quant_ids = pack_obj.get_content(cr, uid, [pack.id], context=context) pack_quants = quant_obj.browse(cr, uid, pack_quant_ids, context=context) vals.append({ 'picking_id': picking.id, 'package_id': pack.id, 'product_qty': 1.0, 'location_id': pack.location_id.id, 'location_dest_id': quants_suggested_locations[pack_quants[0]], 'owner_id': pack.owner_id.id, }) #remove the quants inside the package so that they are excluded from the rest of the computation for quant in pack_quants: del quants_suggested_locations[quant] # Go through all remaining reserved quants and group by product, package, lot, owner, source location and dest location for quant, dest_location_id in quants_suggested_locations.items(): key = (quant.product_id.id, quant.package_id.id, quant.lot_id.id, quant.owner_id.id, quant.location_id.id, dest_location_id) if qtys_grouped.get(key): qtys_grouped[key] += quant.qty else: qtys_grouped[key] = quant.qty # Do the same for the forced quantities (in cases of force_assign or incomming shipment for example) for product, qty in forced_qties.items(): if qty <= 0: continue suggested_location_id = _picking_putaway_apply(product) key = (product.id, False, False, picking.owner_id.id, picking.location_id.id, suggested_location_id) if qtys_grouped.get(key): qtys_grouped[key] += qty else: qtys_grouped[key] = qty # Create the necessary operations for the grouped quants and remaining qtys uom_obj = self.pool.get('product.uom') prevals = {} for key, qty in qtys_grouped.items(): product = self.pool.get("product.product").browse(cr, uid, key[0], context=context) uom_id = product.uom_id.id qty_uom = qty if product_uom.get(key[0]): uom_id = product_uom[key[0]].id qty_uom = uom_obj._compute_qty(cr, uid, product.uom_id.id, qty, uom_id) val_dict = 
{ 'picking_id': picking.id, 'product_qty': qty_uom, 'product_id': key[0], 'package_id': key[1], 'lot_id': key[2], 'owner_id': key[3], 'location_id': key[4], 'location_dest_id': key[5], 'product_uom_id': uom_id, } if key[0] in prevals: prevals[key[0]].append(val_dict) else: prevals[key[0]] = [val_dict] # prevals var holds the operations in order to create them in the same order than the picking stock moves if possible processed_products = set() for move in [x for x in picking.move_lines if x.state not in ('done', 'cancel')]: if move.product_id.id not in processed_products: vals += prevals.get(move.product_id.id, []) processed_products.add(move.product_id.id) return vals @api.cr_uid_ids_context def open_barcode_interface(self, cr, uid, picking_ids, context=None): final_url="/barcode/web/#action=stock.ui&picking_id="+str(picking_ids[0]) return {'type': 'ir.actions.act_url', 'url':final_url, 'target': 'self',} @api.cr_uid_ids_context def do_partial_open_barcode(self, cr, uid, picking_ids, context=None): self.do_prepare_partial(cr, uid, picking_ids, context=context) return self.open_barcode_interface(cr, uid, picking_ids, context=context) @api.cr_uid_ids_context def do_prepare_partial(self, cr, uid, picking_ids, context=None): context = context or {} pack_operation_obj = self.pool.get('stock.pack.operation') #used to avoid recomputing the remaining quantities at each new pack operation created ctx = context.copy() ctx['no_recompute'] = True #get list of existing operations and delete them existing_package_ids = pack_operation_obj.search(cr, uid, [('picking_id', 'in', picking_ids)], context=context) if existing_package_ids: pack_operation_obj.unlink(cr, uid, existing_package_ids, context) for picking in self.browse(cr, uid, picking_ids, context=context): forced_qties = {} # Quantity remaining after calculating reserved quants picking_quants = [] #Calculate packages, reserved quants, qtys of this picking's moves for move in picking.move_lines: if move.state not in 
('assigned', 'confirmed', 'waiting'): continue move_quants = move.reserved_quant_ids picking_quants += move_quants forced_qty = (move.state == 'assigned') and move.product_qty - sum([x.qty for x in move_quants]) or 0 #if we used force_assign() on the move, or if the move is incoming, forced_qty > 0 if float_compare(forced_qty, 0, precision_rounding=move.product_id.uom_id.rounding) > 0: if forced_qties.get(move.product_id): forced_qties[move.product_id] += forced_qty else: forced_qties[move.product_id] = forced_qty for vals in self._prepare_pack_ops(cr, uid, picking, picking_quants, forced_qties, context=context): pack_operation_obj.create(cr, uid, vals, context=ctx) #recompute the remaining quantities all at once self.do_recompute_remaining_quantities(cr, uid, picking_ids, context=context) self.write(cr, uid, picking_ids, {'recompute_pack_op': False}, context=context) @api.cr_uid_ids_context def do_unreserve(self, cr, uid, picking_ids, context=None): """ Will remove all quants for picking in picking_ids """ moves_to_unreserve = [] pack_line_to_unreserve = [] for picking in self.browse(cr, uid, picking_ids, context=context): moves_to_unreserve += [m.id for m in picking.move_lines if m.state not in ('done', 'cancel')] pack_line_to_unreserve += [p.id for p in picking.pack_operation_ids] if moves_to_unreserve: if pack_line_to_unreserve: self.pool.get('stock.pack.operation').unlink(cr, uid, pack_line_to_unreserve, context=context) self.pool.get('stock.move').do_unreserve(cr, uid, moves_to_unreserve, context=context) def recompute_remaining_qty(self, cr, uid, picking, context=None): def _create_link_for_index(operation_id, index, product_id, qty_to_assign, quant_id=False): move_dict = prod2move_ids[product_id][index] qty_on_link = min(move_dict['remaining_qty'], qty_to_assign) self.pool.get('stock.move.operation.link').create(cr, uid, {'move_id': move_dict['move'].id, 'operation_id': operation_id, 'qty': qty_on_link, 'reserved_quant_id': quant_id}, context=context) if 
move_dict['remaining_qty'] == qty_on_link: prod2move_ids[product_id].pop(index) else: move_dict['remaining_qty'] -= qty_on_link return qty_on_link def _create_link_for_quant(operation_id, quant, qty): """create a link for given operation and reserved move of given quant, for the max quantity possible, and returns this quantity""" if not quant.reservation_id.id: return _create_link_for_product(operation_id, quant.product_id.id, qty) qty_on_link = 0 for i in range(0, len(prod2move_ids[quant.product_id.id])): if prod2move_ids[quant.product_id.id][i]['move'].id != quant.reservation_id.id: continue qty_on_link = _create_link_for_index(operation_id, i, quant.product_id.id, qty, quant_id=quant.id) break return qty_on_link def _create_link_for_product(operation_id, product_id, qty): '''method that creates the link between a given operation and move(s) of given product, for the given quantity. Returns True if it was possible to create links for the requested quantity (False if there was not enough quantity on stock moves)''' qty_to_assign = qty prod_obj = self.pool.get("product.product") product = prod_obj.browse(cr, uid, product_id) rounding = product.uom_id.rounding qtyassign_cmp = float_compare(qty_to_assign, 0.0, precision_rounding=rounding) if prod2move_ids.get(product_id): while prod2move_ids[product_id] and qtyassign_cmp > 0: qty_on_link = _create_link_for_index(operation_id, 0, product_id, qty_to_assign, quant_id=False) qty_to_assign -= qty_on_link qtyassign_cmp = float_compare(qty_to_assign, 0.0, precision_rounding=rounding) return qtyassign_cmp == 0 uom_obj = self.pool.get('product.uom') package_obj = self.pool.get('stock.quant.package') quant_obj = self.pool.get('stock.quant') link_obj = self.pool.get('stock.move.operation.link') quants_in_package_done = set() prod2move_ids = {} still_to_do = [] #make a dictionary giving for each product, the moves and related quantity that can be used in operation links for move in [x for x in picking.move_lines if x.state not 
in ('done', 'cancel')]: if not prod2move_ids.get(move.product_id.id): prod2move_ids[move.product_id.id] = [{'move': move, 'remaining_qty': move.product_qty}] else: prod2move_ids[move.product_id.id].append({'move': move, 'remaining_qty': move.product_qty}) need_rereserve = False #sort the operations in order to give higher priority to those with a package, then a serial number operations = picking.pack_operation_ids operations = sorted(operations, key=lambda x: ((x.package_id and not x.product_id) and -4 or 0) + (x.package_id and -2 or 0) + (x.lot_id and -1 or 0)) #delete existing operations to start again from scratch links = link_obj.search(cr, uid, [('operation_id', 'in', [x.id for x in operations])], context=context) if links: link_obj.unlink(cr, uid, links, context=context) #1) first, try to create links when quants can be identified without any doubt for ops in operations: #for each operation, create the links with the stock move by seeking on the matching reserved quants, #and deffer the operation if there is some ambiguity on the move to select if ops.package_id and not ops.product_id: #entire package quant_ids = package_obj.get_content(cr, uid, [ops.package_id.id], context=context) for quant in quant_obj.browse(cr, uid, quant_ids, context=context): remaining_qty_on_quant = quant.qty if quant.reservation_id: #avoid quants being counted twice quants_in_package_done.add(quant.id) qty_on_link = _create_link_for_quant(ops.id, quant, quant.qty) remaining_qty_on_quant -= qty_on_link if remaining_qty_on_quant: still_to_do.append((ops, quant.product_id.id, remaining_qty_on_quant)) need_rereserve = True elif ops.product_id.id: #Check moves with same product qty_to_assign = uom_obj._compute_qty_obj(cr, uid, ops.product_uom_id, ops.product_qty, ops.product_id.uom_id, context=context) for move_dict in prod2move_ids.get(ops.product_id.id, []): move = move_dict['move'] for quant in move.reserved_quant_ids: if not qty_to_assign > 0: break if quant.id in 
quants_in_package_done: continue #check if the quant is matching the operation details if ops.package_id: flag = quant.package_id and bool(package_obj.search(cr, uid, [('id', 'child_of', [ops.package_id.id])], context=context)) or False else: flag = not quant.package_id.id flag = flag and ((ops.lot_id and ops.lot_id.id == quant.lot_id.id) or not ops.lot_id) flag = flag and (ops.owner_id.id == quant.owner_id.id) if flag: max_qty_on_link = min(quant.qty, qty_to_assign) qty_on_link = _create_link_for_quant(ops.id, quant, max_qty_on_link) qty_to_assign -= qty_on_link qty_assign_cmp = float_compare(qty_to_assign, 0, precision_rounding=ops.product_id.uom_id.rounding) if qty_assign_cmp > 0: #qty reserved is less than qty put in operations. We need to create a link but it's deferred after we processed #all the quants (because they leave no choice on their related move and needs to be processed with higher priority) still_to_do += [(ops, ops.product_id.id, qty_to_assign)] need_rereserve = True #2) then, process the remaining part all_op_processed = True for ops, product_id, remaining_qty in still_to_do: all_op_processed = _create_link_for_product(ops.id, product_id, remaining_qty) and all_op_processed return (need_rereserve, all_op_processed) def picking_recompute_remaining_quantities(self, cr, uid, picking, context=None): need_rereserve = False all_op_processed = True if picking.pack_operation_ids: need_rereserve, all_op_processed = self.recompute_remaining_qty(cr, uid, picking, context=context) return need_rereserve, all_op_processed @api.cr_uid_ids_context def do_recompute_remaining_quantities(self, cr, uid, picking_ids, context=None): for picking in self.browse(cr, uid, picking_ids, context=context): if picking.pack_operation_ids: self.recompute_remaining_qty(cr, uid, picking, context=context) def _prepare_values_extra_move(self, cr, uid, op, product, remaining_qty, context=None): """ Creates an extra move when there is no corresponding original move to be copied """ 
        # Resolve the UoM to use for the extra move: default to the product's
        # own UoM, but keep the operation's UoM when it is *smaller* (higher
        # factor) so no precision is lost in the conversion.
        uom_obj = self.pool.get("product.uom")
        uom_id = product.uom_id.id
        qty = remaining_qty
        if op.product_id and op.product_uom_id and op.product_uom_id.id != product.uom_id.id:
            if op.product_uom_id.factor > product.uom_id.factor: #If the pack operation's is a smaller unit
                uom_id = op.product_uom_id.id
                #HALF-UP rounding as only rounding errors will be because of propagation of error from default UoM
                qty = uom_obj._compute_qty_obj(cr, uid, product.uom_id, remaining_qty, op.product_uom_id, rounding_method='HALF-UP')
        picking = op.picking_id
        # Prefix the move name with the product's internal reference when set,
        # e.g. "[CODE] Product Name".
        ref = product.default_code
        name = '[' + ref + ']' + ' ' + product.name if ref else product.name
        res = {
            'picking_id': picking.id,
            'location_id': picking.location_id.id,
            'location_dest_id': picking.location_dest_id.id,
            'product_id': product.id,
            'product_uom': uom_id,
            'product_uom_qty': qty,
            'name': _('Extra Move: ') + name,
            'state': 'draft',
            # NOTE(review): a browse record is passed here instead of
            # op.owner_id.id — the ORM coerces it, but every other many2one
            # value in this dict uses .id; verify this is intentional.
            'restrict_partner_id': op.owner_id,
            'group_id': picking.group_id.id,
        }
        return res

    def _create_extra_moves(self, cr, uid, picking, context=None):
        '''This function creates move lines on a picking, at the time of do_transfer, based on
        unexpected product transfers (or exceeding quantities) found in the pack operations.
''' move_obj = self.pool.get('stock.move') operation_obj = self.pool.get('stock.pack.operation') moves = [] for op in picking.pack_operation_ids: for product_id, remaining_qty in operation_obj._get_remaining_prod_quantities(cr, uid, op, context=context).items(): product = self.pool.get('product.product').browse(cr, uid, product_id, context=context) if float_compare(remaining_qty, 0, precision_rounding=product.uom_id.rounding) > 0: vals = self._prepare_values_extra_move(cr, uid, op, product, remaining_qty, context=context) moves.append(move_obj.create(cr, uid, vals, context=context)) if moves: move_obj.action_confirm(cr, uid, moves, context=context) return moves def rereserve_pick(self, cr, uid, ids, context=None): """ This can be used to provide a button that rereserves taking into account the existing pack operations """ for pick in self.browse(cr, uid, ids, context=context): self.rereserve_quants(cr, uid, pick, move_ids = [x.id for x in pick.move_lines], context=context) def rereserve_quants(self, cr, uid, picking, move_ids=[], context=None): """ Unreserve quants then try to reassign quants.""" stock_move_obj = self.pool.get('stock.move') if not move_ids: self.do_unreserve(cr, uid, [picking.id], context=context) self.action_assign(cr, uid, [picking.id], context=context) else: stock_move_obj.do_unreserve(cr, uid, move_ids, context=context) stock_move_obj.action_assign(cr, uid, move_ids, context=context) @api.cr_uid_ids_context def do_enter_transfer_details(self, cr, uid, picking, context=None): if not context: context = {} context.update({ 'active_model': self._name, 'active_ids': picking, 'active_id': len(picking) and picking[0] or False }) created_id = self.pool['stock.transfer_details'].create(cr, uid, {'picking_id': len(picking) and picking[0] or False}, context) return self.pool['stock.transfer_details'].wizard_view(cr, uid, created_id, context) @api.cr_uid_ids_context def do_transfer(self, cr, uid, picking_ids, context=None): """ If no pack operation, we do 
        simple action_done of the picking.
        Otherwise, do the pack operations:
        recompute remaining quantities, create extra moves for unexpected
        products, split partially-processed moves, rereserve if needed, then
        mark the processed moves done (or only split, per context flag).
        """
        if not context:
            context = {}
        stock_move_obj = self.pool.get('stock.move')
        for picking in self.browse(cr, uid, picking_ids, context=context):
            if not picking.pack_operation_ids:
                # No operations recorded: process the whole picking directly.
                self.action_done(cr, uid, [picking.id], context=context)
                continue
            else:
                need_rereserve, all_op_processed = self.picking_recompute_remaining_quantities(cr, uid, picking, context=context)
                #create extra moves in the picking (unexpected product moves coming from pack operations)
                todo_move_ids = []
                if not all_op_processed:
                    todo_move_ids += self._create_extra_moves(cr, uid, picking, context=context)
                #split move lines if needed
                toassign_move_ids = []
                for move in picking.move_lines:
                    remaining_qty = move.remaining_qty
                    if move.state in ('done', 'cancel'):
                        #ignore stock moves cancelled or already done
                        continue
                    elif move.state == 'draft':
                        toassign_move_ids.append(move.id)
                    if float_compare(remaining_qty, 0, precision_rounding = move.product_id.uom_id.rounding) == 0:
                        # Fully matched by operations: process entirely.
                        if move.state in ('draft', 'assigned', 'confirmed'):
                            todo_move_ids.append(move.id)
                    elif float_compare(remaining_qty,0, precision_rounding = move.product_id.uom_id.rounding) > 0 and \
                                float_compare(remaining_qty, move.product_qty, precision_rounding = move.product_id.uom_id.rounding) < 0:
                        # Partially matched: split off the unprocessed part,
                        # process the original, keep the remainder assigned.
                        new_move = stock_move_obj.split(cr, uid, move, remaining_qty, context=context)
                        todo_move_ids.append(move.id)
                        #Assign move as it was assigned before
                        toassign_move_ids.append(new_move)
                if need_rereserve or not all_op_processed:
                    # Incoming-type locations never hold quants, so skip.
                    if not picking.location_id.usage in ("supplier", "production", "inventory"):
                        self.rereserve_quants(cr, uid, picking, move_ids=todo_move_ids, context=context)
                    self.do_recompute_remaining_quantities(cr, uid, [picking.id], context=context)
                if todo_move_ids and not context.get('do_only_split'):
                    self.pool.get('stock.move').action_done(cr, uid, todo_move_ids, context=context)
                elif context.get('do_only_split'):
                    context = dict(context, split=todo_move_ids)
            self._create_backorder(cr, uid, picking, context=context)
            if toassign_move_ids:
                stock_move_obj.action_assign(cr, uid, toassign_move_ids, context=context)
        return True

    @api.cr_uid_ids_context
    def do_split(self, cr, uid, picking_ids, context=None):
        """ just split the picking (create a backorder) without making it 'done' """
        if context is None:
            context = {}
        ctx = context.copy()
        # do_transfer short-circuits on this flag: moves are split and a
        # backorder is created, but nothing is marked done.
        ctx['do_only_split'] = True
        return self.do_transfer(cr, uid, picking_ids, context=ctx)

    def get_next_picking_for_ui(self, cr, uid, context=None):
        """ returns the next pickings to process. Used in the barcode scanner UI"""
        if context is None:
            context = {}
        domain = [('state', 'in', ('assigned', 'partially_available'))]
        if context.get('default_picking_type_id'):
            domain.append(('picking_type_id', '=', context['default_picking_type_id']))
        return self.search(cr, uid, domain, context=context)

    def action_done_from_ui(self, cr, uid, picking_id, context=None):
        """ called when button 'done' is pushed in the barcode scanner UI """
        #write qty_done into field product_qty for every package_operation before doing the transfer
        pack_op_obj = self.pool.get('stock.pack.operation')
        for operation in self.browse(cr, uid, picking_id, context=context).pack_operation_ids:
            # no_recompute avoids retriggering the remaining-qty computation
            # for each individual write.
            pack_op_obj.write(cr, uid, operation.id, {'product_qty': operation.qty_done}, context=dict(context, no_recompute=True))
        self.do_transfer(cr, uid, [picking_id], context=context)
        #return id of next picking to work on
        return self.get_next_picking_for_ui(cr, uid, context=context)

    @api.cr_uid_ids_context
    def action_pack(self, cr, uid, picking_ids, operation_filter_ids=None, context=None):
        """ Create a package with the current pack_operation_ids of the picking that aren't yet in a pack.
        Used in the barcode scanner UI and the normal interface as well.
        operation_filter_ids is used by barcode scanner interface to specify a subset of operation to pack"""
        # NOTE(review): `== None` should idiomatically be `is None`.
        if operation_filter_ids == None:
            operation_filter_ids = []
        stock_operation_obj = self.pool.get('stock.pack.operation')
        package_obj = self.pool.get('stock.quant.package')
        stock_move_obj = self.pool.get('stock.move')
        package_id = False
        for picking_id in picking_ids:
            operation_search_domain = [('picking_id', '=', picking_id), ('result_package_id', '=', False)]
            if operation_filter_ids != []:
                operation_search_domain.append(('id', 'in', operation_filter_ids))
            operation_ids = stock_operation_obj.search(cr, uid, operation_search_domain, context=context)
            pack_operation_ids = []
            if operation_ids:
                for operation in stock_operation_obj.browse(cr, uid, operation_ids, context=context):
                    #If we haven't done all qty in operation, we have to split into 2 operation
                    op = operation
                    if (operation.qty_done < operation.product_qty):
                        # New operation carries the done quantity; the original
                        # keeps the rest with qty_done reset to 0.
                        new_operation = stock_operation_obj.copy(cr, uid, operation.id, {'product_qty': operation.qty_done,'qty_done': operation.qty_done}, context=context)
                        stock_operation_obj.write(cr, uid, operation.id, {'product_qty': operation.product_qty - operation.qty_done,'qty_done': 0}, context=context)
                        op = stock_operation_obj.browse(cr, uid, new_operation, context=context)
                    pack_operation_ids.append(op.id)
                    if op.product_id and op.location_id and op.location_dest_id:
                        stock_move_obj.check_tracking_product(cr, uid, op.product_id, op.lot_id.id, op.location_id, op.location_dest_id, context=context)
                package_id = package_obj.create(cr, uid, {}, context=context)
                stock_operation_obj.write(cr, uid, pack_operation_ids, {'result_package_id': package_id}, context=context)
        # Returns the last created package id (False if nothing was packed).
        return package_id

    def process_product_id_from_ui(self, cr, uid, picking_id, product_id, op_id, increment=True, context=None):
        # Thin wrapper: bump (or decrement) the qty_done of one specific
        # operation matching the given product.
        return self.pool.get('stock.pack.operation')._search_and_increment(cr, uid, picking_id, [('product_id', '=', product_id),('id', '=', op_id)], increment=increment, context=context)

    def process_barcode_from_ui(self, cr, uid, picking_id, barcode_str, visible_op_ids, context=None):
        '''This function is called each time there barcode scanner reads an input'''
        # Resolution order: location > product > lot > package.
        lot_obj = self.pool.get('stock.production.lot')
        package_obj = self.pool.get('stock.quant.package')
        product_obj = self.pool.get('product.product')
        stock_operation_obj = self.pool.get('stock.pack.operation')
        stock_location_obj = self.pool.get('stock.location')
        answer = {'filter_loc': False, 'operation_id': False}
        #check if the barcode correspond to a location
        matching_location_ids = stock_location_obj.search(cr, uid, [('loc_barcode', '=', barcode_str)], context=context)
        if matching_location_ids:
            #if we have a location, return immediatly with the location name
            # NOTE(review): context=None is passed here instead of the caller's
            # context — looks unintentional; confirm.
            location = stock_location_obj.browse(cr, uid, matching_location_ids[0], context=None)
            answer['filter_loc'] = stock_location_obj._name_get(cr, uid, location, context=None)
            answer['filter_loc_id'] = matching_location_ids[0]
            return answer
        #check if the barcode correspond to a product
        matching_product_ids = product_obj.search(cr, uid, ['|', ('ean13', '=', barcode_str), ('default_code', '=', barcode_str)], context=context)
        if matching_product_ids:
            op_id = stock_operation_obj._search_and_increment(cr, uid, picking_id, [('product_id', '=', matching_product_ids[0])], filter_visible=True, visible_op_ids=visible_op_ids, increment=True, context=context)
            answer['operation_id'] = op_id
            return answer
        #check if the barcode correspond to a lot
        matching_lot_ids = lot_obj.search(cr, uid, [('name', '=', barcode_str)], context=context)
        if matching_lot_ids:
            lot = lot_obj.browse(cr, uid, matching_lot_ids[0], context=context)
            op_id = stock_operation_obj._search_and_increment(cr, uid, picking_id, [('product_id', '=', lot.product_id.id), ('lot_id', '=', lot.id)], filter_visible=True, visible_op_ids=visible_op_ids, increment=True, context=context)
            answer['operation_id'] = op_id
            return answer
        #check if the barcode correspond to a package
        matching_package_ids = package_obj.search(cr, uid, [('name', '=', barcode_str)], context=context)
        if matching_package_ids:
            op_id = stock_operation_obj._search_and_increment(cr, uid, picking_id, [('package_id', '=', matching_package_ids[0])], filter_visible=True, visible_op_ids=visible_op_ids, increment=True, context=context)
            answer['operation_id'] = op_id
            return answer
        # Nothing matched: return the default (all-False) answer.
        return answer


class stock_production_lot(osv.osv):
    # Lot / serial number tracking for products.
    _name = 'stock.production.lot'
    _inherit = ['mail.thread']
    _description = 'Lot/Serial'
    _columns = {
        'name': fields.char('Serial Number', required=True, help="Unique Serial Number"),
        'ref': fields.char('Internal Reference', help="Internal reference number in case it differs from the manufacturer's serial number"),
        'product_id': fields.many2one('product.product', 'Product', required=True, domain=[('type', '<>', 'service')]),
        'quant_ids': fields.one2many('stock.quant', 'lot_id', 'Quants', readonly=True),
        'create_date': fields.datetime('Creation Date'),
    }
    _defaults = {
        # Old-API lambda signature: (self, cr, uid, context).
        'name': lambda x, y, z, c: x.pool.get('ir.sequence').get(y, z, 'stock.lot.serial'),
        'product_id': lambda x, y, z, c: c.get('product_id', False),
    }
    _sql_constraints = [
        ('name_ref_uniq', 'unique (name, ref, product_id)', 'The combination of serial number, internal reference and product must be unique !'),
    ]

    def action_traceability(self, cr, uid, ids, context=None):
        """ It traces the information of lots
        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param ids: List of IDs selected
        @param context: A standard dictionary
        @return: A dictionary of values
        """
        quant_obj = self.pool.get("stock.quant")
        quants = quant_obj.search(cr, uid, [('lot_id', 'in', ids)], context=context)
        moves = set()
        for quant in quant_obj.browse(cr, uid, quants, context=context):
            # Collect every move that ever touched a quant of these lots.
            moves |= {move.id for move in quant.history_ids}
        if moves:
            return {
                'domain': "[('id','in',[" + ','.join(map(str, list(moves))) + "])]",
                'name': _('Traceability'),
                'view_mode': 'tree,form',
                'view_type': 'form',
                'context': {'tree_view_ref': 'stock.view_move_tree'},
                'res_model': 'stock.move',
                'type': 'ir.actions.act_window',
            }
        return False

# ----------------------------------------------------
# Move
# ----------------------------------------------------

class stock_move(osv.osv):
    _name = "stock.move"
    _description = "Stock Move"
    _order = 'date_expected desc, id'
    _log_create = False

    def get_price_unit(self, cr, uid, move, context=None):
        """ Returns the unit price to store on the quant """
        return move.price_unit or move.product_id.standard_price

    def name_get(self, cr, uid, ids, context=None):
        # Display name: "[code]: source > destination", prefixed by the
        # picking origin when available.
        res = []
        for line in self.browse(cr, uid, ids, context=context):
            name = line.location_id.name + ' > ' + line.location_dest_id.name
            if line.product_id.code:
                name = line.product_id.code + ': ' + name
            if line.picking_id.origin:
                name = line.picking_id.origin + '/ ' + name
            res.append((line.id, name))
        return res

    def _quantity_normalize(self, cr, uid, ids, name, args, context=None):
        # Functional getter for product_qty: quantity expressed in the
        # product's default UoM.
        uom_obj = self.pool.get('product.uom')
        res = {}
        for m in self.browse(cr, uid, ids, context=context):
            res[m.id] = uom_obj._compute_qty_obj(cr, uid, m.product_uom, m.product_uom_qty, m.product_id.uom_id, context=context)
        return res

    def _get_remaining_qty(self, cr, uid, ids, field_name, args, context=None):
        # Quantity not yet matched by linked pack operations.
        # NOTE(review): uom_obj is assigned but never used in this method.
        uom_obj = self.pool.get('product.uom')
        res = {}
        for move in self.browse(cr, uid, ids, context=context):
            qty = move.product_qty
            for record in move.linked_move_operation_ids:
                qty -= record.qty
            # Keeping in product default UoM
            res[move.id] = float_round(qty, precision_rounding=move.product_id.uom_id.rounding)
        return res

    def _get_lot_ids(self, cr, uid, ids, field_name, args, context=None):
        # Lots of the moved quants (done moves) or reserved quants (others).
        res = dict.fromkeys(ids, False)
        for move in self.browse(cr, uid, ids, context=context):
            if move.state == 'done':
                res[move.id] = [q.lot_id.id for q in move.quant_ids if q.lot_id]
            else:
                res[move.id] = [q.lot_id.id for q in move.reserved_quant_ids if q.lot_id]
        return res

    def _get_product_availability(self, cr, uid, ids, field_name, args, context=None):
        # Unreserved stock of the product in (and below) the source location,
        # capped at the move quantity.
        quant_obj = self.pool.get('stock.quant')
        res = dict.fromkeys(ids, False)
        for move in self.browse(cr, uid, ids, context=context):
            if move.state == 'done':
                res[move.id] = move.product_qty
            else:
                sublocation_ids = self.pool.get('stock.location').search(cr, uid, [('id', 'child_of', [move.location_id.id])], context=context)
                quant_ids = quant_obj.search(cr, uid, [('location_id', 'in', sublocation_ids), ('product_id', '=', move.product_id.id), ('reservation_id', '=', False)], context=context)
                availability = 0
                for quant in quant_obj.browse(cr, uid, quant_ids, context=context):
                    availability += quant.qty
                res[move.id] = min(move.product_qty, availability)
        return res

    def _get_string_qty_information(self, cr, uid, ids, field_name, args, context=None):
        """ Human-readable availability summary, e.g. "5.0 Unit(s) (reserved)". """
        settings_obj = self.pool.get('stock.config.settings')
        uom_obj = self.pool.get('product.uom')
        res = dict.fromkeys(ids, '')
        precision = self.pool['decimal.precision'].precision_get(cr, uid, 'Product Unit of Measure')
        for move in self.browse(cr, uid, ids, context=context):
            if move.state in ('draft', 'done', 'cancel') or move.location_id.usage != 'internal':
                res[move.id] = ''  # 'not applicable' or 'n/a' could work too
                continue
            total_available = min(move.product_qty, move.reserved_availability + move.availability)
            total_available = uom_obj._compute_qty_obj(cr, uid, move.product_id.uom_id, total_available, move.product_uom, round=False, context=context)
            total_available = float_round(total_available, precision_digits=precision)
            info = str(total_available)
            #look in the settings if we need to display the UoM name or not
            config_ids = settings_obj.search(cr, uid, [], limit=1, order='id DESC', context=context)
            if config_ids:
                stock_settings = settings_obj.browse(cr, uid, config_ids[0], context=context)
                if stock_settings.group_uom:
                    info += ' ' + move.product_uom.name
            if move.reserved_availability:
                if move.reserved_availability != total_available:
                    #some of the available quantity is assigned and some are available but not reserved
                    reserved_available = uom_obj._compute_qty_obj(cr, uid, move.product_id.uom_id, move.reserved_availability, move.product_uom, round=False, context=context)
                    reserved_available = float_round(reserved_available, precision_digits=precision)
                    info += _(' (%s reserved)') % str(reserved_available)
                else:
                    #all available quantity is assigned
                    info += _(' (reserved)')
            res[move.id] = info
        return res

    def _get_reserved_availability(self, cr, uid, ids, field_name, args, context=None):
        # Sum of quantities of quants currently reserved for each move.
        res = dict.fromkeys(ids, 0)
        for move in self.browse(cr, uid, ids, context=context):
            res[move.id] = sum([quant.qty for quant in move.reserved_quant_ids])
        return res

    def _get_move(self, cr, uid, ids, context=None):
        # store trigger: quant ids -> ids of the moves reserving them.
        res = set()
        for quant in self.browse(cr, uid, ids, context=context):
            if quant.reservation_id:
                res.add(quant.reservation_id.id)
        return list(res)

    def _get_move_ids(self, cr, uid, ids, context=None):
        # store trigger: picking ids -> ids of their move lines.
        res = []
        for picking in self.browse(cr, uid, ids, context=context):
            res += [x.id for x in picking.move_lines]
        return res

    def _get_moves_from_prod(self, cr, uid, ids, context=None):
        # store trigger: product ids -> ids of moves referencing them.
        if ids:
            return self.pool.get('stock.move').search(cr, uid, [('product_id', 'in', ids)], context=context)
        return []

    def _set_product_qty(self, cr, uid, id, field, value, arg, context=None):
        """ The meaning of product_qty field changed lately and is now a functional field computing the quantity
        in the default product UoM.
        This code has been added to raise an error if a write is made given a value for `product_qty`,
        where the same write should set the `product_uom_qty` field instead, in order to detect errors.
        """
        raise osv.except_osv(_('Programming Error!'), _('The requested operation cannot be processed because of a programming error setting the `product_qty` field instead of the `product_uom_qty`.'))

    # Field declarations (old-API). product_qty is derived from
    # product_uom_qty; writes must always target product_uom_qty.
    _columns = {
        'name': fields.char('Description', required=True, select=True),
        'priority': fields.selection(procurement.PROCUREMENT_PRIORITIES, 'Priority'),
        'create_date': fields.datetime('Creation Date', readonly=True, select=True),
        'date': fields.datetime('Date', required=True, select=True, help="Move date: scheduled date until move is done, then date of actual move processing", states={'done': [('readonly', True)]}),
        'date_expected': fields.datetime('Expected Date', states={'done': [('readonly', True)]}, required=True, select=True, help="Scheduled date for the processing of this move"),
        'product_id': fields.many2one('product.product', 'Product', required=True, select=True, domain=[('type', '<>', 'service')], states={'done': [('readonly', True)]}),
        # Stored functional field; recomputed whenever product / uom / qty change.
        'product_qty': fields.function(_quantity_normalize, fnct_inv=_set_product_qty, type='float', digits=0, store={
            _name: (lambda self, cr, uid, ids, c={}: ids, ['product_id', 'product_uom', 'product_uom_qty'], 10),
        }, string='Quantity', help='Quantity in the default UoM of the product'),
        'product_uom_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'),
            required=True, states={'done': [('readonly', True)]},
            help="This is the quantity of products from an inventory "
                 "point of view. For moves in the state 'done', this is the "
                 "quantity of products that were actually moved. For other "
                 "moves, this is the quantity of product that is planned to "
                 "be moved. Lowering this quantity does not generate a "
                 "backorder. Changing this quantity on assigned moves affects "
                 "the product reservation, and should be done with care."
        ),
        'product_uom': fields.many2one('product.uom', 'Unit of Measure', required=True, states={'done': [('readonly', True)]}),
        'product_uos_qty': fields.float('Quantity (UOS)', digits_compute=dp.get_precision('Product UoS'), states={'done': [('readonly', True)]}),
        'product_uos': fields.many2one('product.uom', 'Product UOS', states={'done': [('readonly', True)]}),
        'product_tmpl_id': fields.related('product_id', 'product_tmpl_id', type='many2one', relation='product.template', string='Product Template'),
        'product_packaging': fields.many2one('product.packaging', 'Prefered Packaging', help="It specifies attributes of packaging like type, quantity of packaging,etc."),
        'location_id': fields.many2one('stock.location', 'Source Location', required=True, select=True, auto_join=True, states={'done': [('readonly', True)]}, help="Sets a location if you produce at a fixed location. This can be a partner location if you subcontract the manufacturing operations."),
        'location_dest_id': fields.many2one('stock.location', 'Destination Location', required=True, states={'done': [('readonly', True)]}, select=True, auto_join=True, help="Location where the system will stock the finished products."),
        'partner_id': fields.many2one('res.partner', 'Destination Address ', states={'done': [('readonly', True)]}, help="Optional address where goods are to be delivered, specifically used for allotment"),
        'move_dest_id': fields.many2one('stock.move', 'Destination Move', help="Optional: next stock move when chaining them", select=True, copy=False),
        'move_orig_ids': fields.one2many('stock.move', 'move_dest_id', 'Original Move', help="Optional: previous stock move when chaining them", select=True),
        'picking_id': fields.many2one('stock.picking', 'Reference', select=True, states={'done': [('readonly', True)]}),
        'note': fields.text('Notes'),
        'state': fields.selection([('draft', 'New'),
                                   ('cancel', 'Cancelled'),
                                   ('waiting', 'Waiting Another Move'),
                                   ('confirmed', 'Waiting Availability'),
                                   ('assigned', 'Available'),
                                   ('done', 'Done'),
                                   ], 'Status', readonly=True, select=True, copy=False,
                 help= "* New: When the stock move is created and not yet confirmed.\n"\
                       "* Waiting Another Move: This state can be seen when a move is waiting for another one, for example in a chained flow.\n"\
                       "* Waiting Availability: This state is reached when the procurement resolution is not straight forward. It may need the scheduler to run, a component to me manufactured...\n"\
                       "* Available: When products are reserved, it is set to \'Available\'.\n"\
                       "* Done: When the shipment is processed, the state is \'Done\'."),
        'partially_available': fields.boolean('Partially Available', readonly=True, help="Checks if the move has some stock reserved", copy=False),
        'price_unit': fields.float('Unit Price', help="Technical field used to record the product cost set by the user during a picking confirmation (when costing method used is 'average price' or 'real'). Value given in company currency and in product uom."),  # as it's a technical field, we intentionally don't provide the digits attribute
        'company_id': fields.many2one('res.company', 'Company', required=True, select=True),
        'split_from': fields.many2one('stock.move', string="Move Split From", help="Technical field used to track the origin of a split move, which can be useful in case of debug", copy=False),
        'backorder_id': fields.related('picking_id', 'backorder_id', type='many2one', relation="stock.picking", string="Back Order of", select=True),
        'origin': fields.char("Source"),
        'procure_method': fields.selection([('make_to_stock', 'Default: Take From Stock'), ('make_to_order', 'Advanced: Apply Procurement Rules')], 'Supply Method', required=True, help="""By default, the system will take from the stock in the source location and passively wait for availability. The other possibility allows you to directly create a procurement on the source location (and thus ignore its current stock) to gather products. If we want to chain moves and have this one to wait for the previous, this second option should be chosen."""),
        # used for colors in tree views:
        'scrapped': fields.related('location_dest_id', 'scrap_location', type='boolean', relation='stock.location', string='Scrapped', readonly=True),
        'quant_ids': fields.many2many('stock.quant', 'stock_quant_move_rel', 'move_id', 'quant_id', 'Moved Quants', copy=False),
        'reserved_quant_ids': fields.one2many('stock.quant', 'reservation_id', 'Reserved quants'),
        'linked_move_operation_ids': fields.one2many('stock.move.operation.link', 'move_id', string='Linked Operations', readonly=True, help='Operations that impact this move for the computation of the remaining quantities'),
        'remaining_qty': fields.function(_get_remaining_qty, type='float', string='Remaining Quantity', digits=0,
                                         states={'done': [('readonly', True)]}, help="Remaining Quantity in default UoM according to operations matched with this move"),
        'procurement_id': fields.many2one('procurement.order', 'Procurement'),
        'group_id': fields.many2one('procurement.group', 'Procurement Group'),
        'rule_id': fields.many2one('procurement.rule', 'Procurement Rule', help='The pull rule that created this stock move'),
        'push_rule_id': fields.many2one('stock.location.path', 'Push Rule', help='The push rule that created this stock move'),
        'propagate': fields.boolean('Propagate cancel and split', help='If checked, when this move is cancelled, cancel the linked move too'),
        'picking_type_id': fields.many2one('stock.picking.type', 'Picking Type'),
        'inventory_id': fields.many2one('stock.inventory', 'Inventory'),
        'lot_ids': fields.function(_get_lot_ids, type='many2many', relation='stock.production.lot', string='Lots'),
        'origin_returned_move_id': fields.many2one('stock.move', 'Origin return move', help='move that created the return move', copy=False),
        'returned_move_ids': fields.one2many('stock.move', 'origin_returned_move_id', 'All returned moves', help='Optional: all returned moves created from this move'),
        'reserved_availability': fields.function(_get_reserved_availability, type='float', string='Quantity Reserved', readonly=True, help='Quantity that has already been reserved for this move'),
        'availability': fields.function(_get_product_availability, type='float', string='Quantity Available', readonly=True, help='Quantity in stock that can still be reserved for this move'),
        'string_availability_info': fields.function(_get_string_qty_information, type='text', string='Availability', readonly=True, help='Show various information on stock availability for this move'),
        'restrict_lot_id': fields.many2one('stock.production.lot', 'Lot', help="Technical field used to depict a restriction on the lot of quants to consider when marking this move as 'done'"),
        'restrict_partner_id': fields.many2one('res.partner', 'Owner ', help="Technical field used to depict a restriction on the ownership of quants to consider when marking this move as 'done'"),
        'route_ids': fields.many2many('stock.location.route', 'stock_location_route_move', 'move_id', 'route_id', 'Destination route', help="Preferred route to be followed by the procurement order"),
        'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', help="Technical field depicting the warehouse to consider for the route selection on the next procurement (if any)."),
    }

    def _default_location_destination(self, cr, uid, context=None):
        # Default destination: taken from the picking type in context, if any.
        context = context or {}
        if context.get('default_picking_type_id', False):
            pick_type = self.pool.get('stock.picking.type').browse(cr, uid, context['default_picking_type_id'], context=context)
            return pick_type.default_location_dest_id and pick_type.default_location_dest_id.id or False
        return False

    def _default_location_source(self, cr, uid, context=None):
        # Default source: taken from the picking type in context, if any.
        context = context or {}
        if context.get('default_picking_type_id', False):
            pick_type = self.pool.get('stock.picking.type').browse(cr, uid, context['default_picking_type_id'], context=context)
            return pick_type.default_location_src_id and pick_type.default_location_src_id.id or False
        return False

    def _default_destination_address(self, cr, uid, context=None):
        return False

    def _default_group_id(self, cr, uid, context=None):
        # Inherit the procurement group of the picking in context, if any.
        context = context or {}
        if context.get('default_picking_id', False):
            picking = self.pool.get('stock.picking').browse(cr, uid, context['default_picking_id'], context=context)
            return picking.group_id.id
        return False

    _defaults = {
        'location_id': _default_location_source,
        'location_dest_id': _default_location_destination,
        'partner_id': _default_destination_address,
        'state': 'draft',
        'priority': '1',
        'product_uom_qty': 1.0,
        'scrapped': False,
        'date': fields.datetime.now,
        'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.move', context=c),
        'date_expected': fields.datetime.now,
        'procure_method': 'make_to_stock',
        'propagate': True,
        'partially_available': False,
        'group_id': _default_group_id,
    }

    def _check_uom(self, cr, uid, ids, context=None):
        # Constraint: move UoM must share the product UoM's category.
        for move in self.browse(cr, uid, ids, context=context):
            if move.product_id.uom_id.category_id.id != move.product_uom.category_id.id:
                return False
        return True

    _constraints = [
        (_check_uom, 'You try to move a product using a UoM that is not compatible with the UoM of the product moved. Please use an UoM in the same UoM category.', ['product_uom']),
    ]

    def init(self, cr):
        # Create a composite index for the frequent availability lookups,
        # if it does not exist yet (init is re-run on module update).
        cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('stock_move_product_location_index',))
        if not cr.fetchone():
            cr.execute('CREATE INDEX stock_move_product_location_index ON stock_move (product_id, location_id, location_dest_id, company_id, state)')

    @api.cr_uid_ids_context
    def do_unreserve(self, cr, uid, move_ids, context=None):
        """ Release the reserved quants of the moves and reset their state. """
        quant_obj = self.pool.get("stock.quant")
        for move in self.browse(cr, uid, move_ids, context=context):
            if move.state in ('done', 'cancel'):
                raise osv.except_osv(_('Operation Forbidden!'), _('Cannot unreserve a done move'))
            quant_obj.quants_unreserve(cr, uid, move, context=context)
            # A move with ancestors goes back to 'waiting', otherwise 'confirmed'.
            if self.find_move_ancestors(cr, uid, move, context=context):
                self.write(cr, uid, [move.id], {'state': 'waiting'}, context=context)
            else:
                self.write(cr, uid, [move.id], {'state': 'confirmed'}, context=context)

    def _prepare_procurement_from_move(self, cr, uid, move, context=None):
        """ Build the values of a procurement.order generated for this move. """
        origin = (move.group_id and (move.group_id.name + ":") or "") + (move.rule_id and move.rule_id.name or move.origin or move.picking_id.name or "/")
        group_id = move.group_id and move.group_id.id or False
        if move.rule_id:
            # The rule may force a fixed procurement group, or none at all.
            if move.rule_id.group_propagation_option == 'fixed' and move.rule_id.group_id:
                group_id = move.rule_id.group_id.id
            elif move.rule_id.group_propagation_option == 'none':
                group_id = False
        return {
            'name': move.rule_id and move.rule_id.name or "/",
            'origin': origin,
            'company_id': move.company_id and move.company_id.id or False,
            'date_planned': move.date,
            'product_id': move.product_id.id,
            'product_qty': move.product_uom_qty,
            'product_uom': move.product_uom.id,
            'product_uos_qty': (move.product_uos and move.product_uos_qty) or move.product_uom_qty,
            'product_uos': (move.product_uos and move.product_uos.id) or move.product_uom.id,
            'location_id': move.location_id.id,
            'move_dest_id': move.id,
            'group_id': group_id,
            'route_ids': [(4, x.id) for x in move.route_ids],
            'warehouse_id':
                            move.warehouse_id.id or (move.picking_type_id and move.picking_type_id.warehouse_id.id or False),
            'priority': move.priority,
        }

    def _push_apply(self, cr, uid, moves, context=None):
        """ Apply the first matching push rule (stock.location.path) to each move.
        Rule lookup priority: product/category routes, then warehouse routes,
        then generic (route-less) rules.
        """
        push_obj = self.pool.get("stock.location.path")
        for move in moves:
            #1) if the move is already chained, there is no need to check push rules
            #2) if the move is a returned move, we don't want to check push rules, as returning a returned move is the only decent way
            #   to receive goods without triggering the push rules again (which would duplicate chained operations)
            if not move.move_dest_id and not move.origin_returned_move_id:
                domain = [('location_from_id', '=', move.location_dest_id.id)]
                #priority goes to the route defined on the product and product category
                route_ids = [x.id for x in move.product_id.route_ids + move.product_id.categ_id.total_route_ids]
                rules = push_obj.search(cr, uid, domain + [('route_id', 'in', route_ids)], order='route_sequence, sequence', context=context)
                if not rules:
                    #then we search on the warehouse if a rule can apply
                    wh_route_ids = []
                    if move.warehouse_id:
                        wh_route_ids = [x.id for x in move.warehouse_id.route_ids]
                    elif move.picking_type_id and move.picking_type_id.warehouse_id:
                        wh_route_ids = [x.id for x in move.picking_type_id.warehouse_id.route_ids]
                    if wh_route_ids:
                        rules = push_obj.search(cr, uid, domain + [('route_id', 'in', wh_route_ids)], order='route_sequence, sequence', context=context)
                    if not rules:
                        #if no specialized push rule has been found yet, we try to find a general one (without route)
                        rules = push_obj.search(cr, uid, domain + [('route_id', '=', False)], order='sequence', context=context)
                if rules:
                    # Only the first (highest-priority) rule is applied.
                    rule = push_obj.browse(cr, uid, rules[0], context=context)
                    push_obj._apply(cr, uid, rule, move, context=context)
        return True

    def _create_procurement(self, cr, uid, move, context=None):
        """ This will create a procurement order """
        return self.pool.get("procurement.order").create(cr, uid, self._prepare_procurement_from_move(cr, uid, move, context=context),
context=context) def _create_procurements(self, cr, uid, moves, context=None): res = [] for move in moves: res.append(self._create_procurement(cr, uid, move, context=context)) return res def write(self, cr, uid, ids, vals, context=None): if context is None: context = {} if isinstance(ids, (int, long)): ids = [ids] # Check that we do not modify a stock.move which is done frozen_fields = set(['product_qty', 'product_uom', 'product_uos_qty', 'product_uos', 'location_id', 'location_dest_id', 'product_id']) for move in self.browse(cr, uid, ids, context=context): if move.state == 'done': if frozen_fields.intersection(vals): raise osv.except_osv(_('Operation Forbidden!'), _('Quantities, Units of Measure, Products and Locations cannot be modified on stock moves that have already been processed (except by the Administrator).')) propagated_changes_dict = {} #propagation of quantity change if vals.get('product_uom_qty'): propagated_changes_dict['product_uom_qty'] = vals['product_uom_qty'] if vals.get('product_uom_id'): propagated_changes_dict['product_uom_id'] = vals['product_uom_id'] #propagation of expected date: propagated_date_field = False if vals.get('date_expected'): #propagate any manual change of the expected date propagated_date_field = 'date_expected' elif (vals.get('state', '') == 'done' and vals.get('date')): #propagate also any delta observed when setting the move as done propagated_date_field = 'date' if not context.get('do_not_propagate', False) and (propagated_date_field or propagated_changes_dict): #any propagation is (maybe) needed for move in self.browse(cr, uid, ids, context=context): if move.move_dest_id and move.propagate: if 'date_expected' in propagated_changes_dict: propagated_changes_dict.pop('date_expected') if propagated_date_field: current_date = datetime.strptime(move.date_expected, DEFAULT_SERVER_DATETIME_FORMAT) new_date = datetime.strptime(vals.get(propagated_date_field), DEFAULT_SERVER_DATETIME_FORMAT) delta = new_date - current_date if 
abs(delta.days) >= move.company_id.propagation_minimum_delta: old_move_date = datetime.strptime(move.move_dest_id.date_expected, DEFAULT_SERVER_DATETIME_FORMAT) new_move_date = (old_move_date + relativedelta.relativedelta(days=delta.days or 0)).strftime(DEFAULT_SERVER_DATETIME_FORMAT) propagated_changes_dict['date_expected'] = new_move_date #For pushed moves as well as for pulled moves, propagate by recursive call of write(). #Note that, for pulled moves we intentionally don't propagate on the procurement. if propagated_changes_dict: self.write(cr, uid, [move.move_dest_id.id], propagated_changes_dict, context=context) return super(stock_move, self).write(cr, uid, ids, vals, context=context) def onchange_quantity(self, cr, uid, ids, product_id, product_qty, product_uom, product_uos): """ On change of product quantity finds UoM and UoS quantities @param product_id: Product id @param product_qty: Changed Quantity of product @param product_uom: Unit of measure of product @param product_uos: Unit of sale of product @return: Dictionary of values """ result = { 'product_uos_qty': 0.00 } warning = {} if (not product_id) or (product_qty <= 0.0): result['product_qty'] = 0.0 return {'value': result} product_obj = self.pool.get('product.product') uos_coeff = product_obj.read(cr, uid, product_id, ['uos_coeff']) # Warn if the quantity was decreased if ids: for move in self.read(cr, uid, ids, ['product_qty']): if product_qty < move['product_qty']: warning.update({ 'title': _('Information'), 'message': _("By changing this quantity here, you accept the " "new quantity as complete: Odoo will not " "automatically generate a back order.")}) break if product_uos and product_uom and (product_uom != product_uos): precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Product UoS') result['product_uos_qty'] = float_round(product_qty * uos_coeff['uos_coeff'], precision_digits=precision) else: result['product_uos_qty'] = product_qty return {'value': result, 'warning': 
warning} def onchange_uos_quantity(self, cr, uid, ids, product_id, product_uos_qty, product_uos, product_uom): """ On change of product quantity finds UoM and UoS quantities @param product_id: Product id @param product_uos_qty: Changed UoS Quantity of product @param product_uom: Unit of measure of product @param product_uos: Unit of sale of product @return: Dictionary of values """ result = { 'product_uom_qty': 0.00 } if (not product_id) or (product_uos_qty <= 0.0): result['product_uos_qty'] = 0.0 return {'value': result} product_obj = self.pool.get('product.product') uos_coeff = product_obj.read(cr, uid, product_id, ['uos_coeff']) # No warning if the quantity was decreased to avoid double warnings: # The clients should call onchange_quantity too anyway if product_uos and product_uom and (product_uom != product_uos): precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Product Unit of Measure') result['product_uom_qty'] = float_round(product_uos_qty / uos_coeff['uos_coeff'], precision_digits=precision) else: result['product_uom_qty'] = product_uos_qty return {'value': result} def onchange_product_id(self, cr, uid, ids, prod_id=False, loc_id=False, loc_dest_id=False, partner_id=False): """ On change of product id, if finds UoM, UoS, quantity and UoS quantity. 
@param prod_id: Changed Product id @param loc_id: Source location id @param loc_dest_id: Destination location id @param partner_id: Address id of partner @return: Dictionary of values """ if not prod_id: return {} user = self.pool.get('res.users').browse(cr, uid, uid) lang = user and user.lang or False if partner_id: addr_rec = self.pool.get('res.partner').browse(cr, uid, partner_id) if addr_rec: lang = addr_rec and addr_rec.lang or False ctx = {'lang': lang} product = self.pool.get('product.product').browse(cr, uid, [prod_id], context=ctx)[0] uos_id = product.uos_id and product.uos_id.id or False result = { 'name': product.partner_ref, 'product_uom': product.uom_id.id, 'product_uos': uos_id, 'product_uom_qty': 1.00, 'product_uos_qty': self.pool.get('stock.move').onchange_quantity(cr, uid, ids, prod_id, 1.00, product.uom_id.id, uos_id)['value']['product_uos_qty'], } if loc_id: result['location_id'] = loc_id if loc_dest_id: result['location_dest_id'] = loc_dest_id return {'value': result} def _prepare_picking_assign(self, cr, uid, move, context=None): """ Prepares a new picking for this move as it could not be assigned to another picking. This method is designed to be inherited. """ values = { 'origin': move.origin, 'company_id': move.company_id and move.company_id.id or False, 'move_type': move.group_id and move.group_id.move_type or 'direct', 'partner_id': move.partner_id.id or False, 'picking_type_id': move.picking_type_id and move.picking_type_id.id or False, } return values @api.cr_uid_ids_context def _picking_assign(self, cr, uid, move_ids, procurement_group, location_from, location_to, context=None): """Assign a picking on the given move_ids, which is a list of move supposed to share the same procurement_group, location_from and location_to (and company). Those attributes are also given as parameters. 
""" pick_obj = self.pool.get("stock.picking") # Use a SQL query as doing with the ORM will split it in different queries with id IN (,,) # In the next version, the locations on the picking should be stored again. query = """ SELECT stock_picking.id FROM stock_picking, stock_move WHERE stock_picking.state in ('draft', 'confirmed', 'waiting') AND stock_move.picking_id = stock_picking.id AND stock_move.location_id = %s AND stock_move.location_dest_id = %s AND """ params = (location_from, location_to) if not procurement_group: query += "stock_picking.group_id IS NULL LIMIT 1" else: query += "stock_picking.group_id = %s LIMIT 1" params += (procurement_group,) cr.execute(query, params) [pick] = cr.fetchone() or [None] if not pick: move = self.browse(cr, uid, move_ids, context=context)[0] values = self._prepare_picking_assign(cr, uid, move, context=context) pick = pick_obj.create(cr, uid, values, context=context) return self.write(cr, uid, move_ids, {'picking_id': pick}, context=context) def onchange_date(self, cr, uid, ids, date, date_expected, context=None): """ On change of Scheduled Date gives a Move date. @param date_expected: Scheduled Date @param date: Move Date @return: Move Date """ if not date_expected: date_expected = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT) return {'value': {'date': date_expected}} def attribute_price(self, cr, uid, move, context=None): """ Attribute price to move, important in inter-company moves or receipts with only one partner """ if not move.price_unit: price = move.product_id.standard_price self.write(cr, uid, [move.id], {'price_unit': price}) def action_confirm(self, cr, uid, ids, context=None): """ Confirms stock move or put it in waiting if it's linked to another move. @return: List of ids. 
""" if not context: context = {} if isinstance(ids, (int, long)): ids = [ids] states = { 'confirmed': [], 'waiting': [] } to_assign = {} for move in self.browse(cr, uid, ids, context=context): self.attribute_price(cr, uid, move, context=context) state = 'confirmed' #if the move is preceeded, then it's waiting (if preceeding move is done, then action_assign has been called already and its state is already available) if move.move_orig_ids: state = 'waiting' #if the move is split and some of the ancestor was preceeded, then it's waiting as well elif move.split_from: move2 = move.split_from while move2 and state != 'waiting': if move2.move_orig_ids: state = 'waiting' move2 = move2.split_from states[state].append(move.id) if not move.picking_id and move.picking_type_id: key = (move.group_id.id, move.location_id.id, move.location_dest_id.id) if key not in to_assign: to_assign[key] = [] to_assign[key].append(move.id) moves = [move for move in self.browse(cr, uid, states['confirmed'], context=context) if move.procure_method == 'make_to_order'] self._create_procurements(cr, uid, moves, context=context) for move in moves: states['waiting'].append(move.id) states['confirmed'].remove(move.id) for state, write_ids in states.items(): if len(write_ids): self.write(cr, uid, write_ids, {'state': state}) #assign picking in batch for all confirmed move that share the same details for key, move_ids in to_assign.items(): procurement_group, location_from, location_to = key self._picking_assign(cr, uid, move_ids, procurement_group, location_from, location_to, context=context) moves = self.browse(cr, uid, ids, context=context) self._push_apply(cr, uid, moves, context=context) return ids def force_assign(self, cr, uid, ids, context=None): """ Changes the state to assigned. 
@return: True """ return self.write(cr, uid, ids, {'state': 'assigned'}, context=context) def check_tracking_product(self, cr, uid, product, lot_id, location, location_dest, context=None): check = False if product.track_all and not location_dest.usage == 'inventory': check = True elif product.track_incoming and location.usage in ('supplier', 'transit', 'inventory') and location_dest.usage == 'internal': check = True elif product.track_outgoing and location_dest.usage in ('customer', 'transit') and location.usage == 'internal': check = True if check and not lot_id: raise osv.except_osv(_('Warning!'), _('You must assign a serial number for the product %s') % (product.name)) def check_tracking(self, cr, uid, move, lot_id, context=None): """ Checks if serial number is assigned to stock move or not and raise an error if it had to. """ self.check_tracking_product(cr, uid, move.product_id, lot_id, move.location_id, move.location_dest_id, context=context) def action_assign(self, cr, uid, ids, context=None): """ Checks the product type and accordingly writes the state. 
""" context = context or {} quant_obj = self.pool.get("stock.quant") to_assign_moves = set() main_domain = {} todo_moves = [] operations = set() for move in self.browse(cr, uid, ids, context=context): if move.state not in ('confirmed', 'waiting', 'assigned'): continue if move.location_id.usage in ('supplier', 'inventory', 'production'): to_assign_moves.add(move.id) #in case the move is returned, we want to try to find quants before forcing the assignment if not move.origin_returned_move_id: continue if move.product_id.type == 'consu': to_assign_moves.add(move.id) continue else: todo_moves.append(move) #we always keep the quants already assigned and try to find the remaining quantity on quants not assigned only main_domain[move.id] = [('reservation_id', '=', False), ('qty', '>', 0)] #if the move is preceeded, restrict the choice of quants in the ones moved previously in original move ancestors = self.find_move_ancestors(cr, uid, move, context=context) if move.state == 'waiting' and not ancestors: #if the waiting move hasn't yet any ancestor (PO/MO not confirmed yet), don't find any quant available in stock main_domain[move.id] += [('id', '=', False)] elif ancestors: main_domain[move.id] += [('history_ids', 'in', ancestors)] #if the move is returned from another, restrict the choice of quants to the ones that follow the returned move if move.origin_returned_move_id: main_domain[move.id] += [('history_ids', 'in', move.origin_returned_move_id.id)] for link in move.linked_move_operation_ids: operations.add(link.operation_id) # Check all ops and sort them: we want to process first the packages, then operations with lot then the rest operations = list(operations) operations.sort(key=lambda x: ((x.package_id and not x.product_id) and -4 or 0) + (x.package_id and -2 or 0) + (x.lot_id and -1 or 0)) for ops in operations: #first try to find quants based on specific domains given by linked operations for record in ops.linked_move_operation_ids: move = record.move_id if move.id 
in main_domain: domain = main_domain[move.id] + self.pool.get('stock.move.operation.link').get_specific_domain(cr, uid, record, context=context) qty = record.qty if qty: quants = quant_obj.quants_get_prefered_domain(cr, uid, ops.location_id, move.product_id, qty, domain=domain, prefered_domain_list=[], restrict_lot_id=move.restrict_lot_id.id, restrict_partner_id=move.restrict_partner_id.id, context=context) quant_obj.quants_reserve(cr, uid, quants, move, record, context=context) for move in todo_moves: if move.linked_move_operation_ids: continue #then if the move isn't totally assigned, try to find quants without any specific domain if move.state != 'assigned': qty_already_assigned = move.reserved_availability qty = move.product_qty - qty_already_assigned quants = quant_obj.quants_get_prefered_domain(cr, uid, move.location_id, move.product_id, qty, domain=main_domain[move.id], prefered_domain_list=[], restrict_lot_id=move.restrict_lot_id.id, restrict_partner_id=move.restrict_partner_id.id, context=context) quant_obj.quants_reserve(cr, uid, quants, move, context=context) #force assignation of consumable products and incoming from supplier/inventory/production if to_assign_moves: self.force_assign(cr, uid, list(to_assign_moves), context=context) def action_cancel(self, cr, uid, ids, context=None): """ Cancels the moves and if all moves are cancelled it cancels the picking. 
@return: True """ procurement_obj = self.pool.get('procurement.order') context = context or {} procs_to_check = [] for move in self.browse(cr, uid, ids, context=context): if move.state == 'done': raise osv.except_osv(_('Operation Forbidden!'), _('You cannot cancel a stock move that has been set to \'Done\'.')) if move.reserved_quant_ids: self.pool.get("stock.quant").quants_unreserve(cr, uid, move, context=context) if context.get('cancel_procurement'): if move.propagate: procurement_ids = procurement_obj.search(cr, uid, [('move_dest_id', '=', move.id)], context=context) procurement_obj.cancel(cr, uid, procurement_ids, context=context) else: if move.move_dest_id: if move.propagate: self.action_cancel(cr, uid, [move.move_dest_id.id], context=context) elif move.move_dest_id.state == 'waiting': #If waiting, the chain will be broken and we are not sure if we can still wait for it (=> could take from stock instead) self.write(cr, uid, [move.move_dest_id.id], {'state': 'confirmed'}, context=context) if move.procurement_id: # Does the same as procurement check, only eliminating a refresh procs_to_check.append(move.procurement_id.id) res = self.write(cr, uid, ids, {'state': 'cancel', 'move_dest_id': False}, context=context) if procs_to_check: procurement_obj.check(cr, uid, procs_to_check, context=context) return res def _check_package_from_moves(self, cr, uid, ids, context=None): pack_obj = self.pool.get("stock.quant.package") packs = set() for move in self.browse(cr, uid, ids, context=context): packs |= set([q.package_id for q in move.quant_ids if q.package_id and q.qty > 0]) return pack_obj._check_location_constraint(cr, uid, list(packs), context=context) def find_move_ancestors(self, cr, uid, move, context=None): '''Find the first level ancestors of given move ''' ancestors = [] move2 = move while move2: ancestors += [x.id for x in move2.move_orig_ids] #loop on the split_from to find the ancestor of split moves only if the move has not direct ancestor (priority goes to 
them) move2 = not move2.move_orig_ids and move2.split_from or False return ancestors @api.cr_uid_ids_context def recalculate_move_state(self, cr, uid, move_ids, context=None): '''Recompute the state of moves given because their reserved quants were used to fulfill another operation''' for move in self.browse(cr, uid, move_ids, context=context): vals = {} reserved_quant_ids = move.reserved_quant_ids if len(reserved_quant_ids) > 0 and not move.partially_available: vals['partially_available'] = True if len(reserved_quant_ids) == 0 and move.partially_available: vals['partially_available'] = False if move.state == 'assigned': if self.find_move_ancestors(cr, uid, move, context=context): vals['state'] = 'waiting' else: vals['state'] = 'confirmed' if vals: self.write(cr, uid, [move.id], vals, context=context) def action_done(self, cr, uid, ids, context=None): """ Process completely the moves given as ids and if all moves are done, it will finish the picking. """ context = context or {} picking_obj = self.pool.get("stock.picking") quant_obj = self.pool.get("stock.quant") todo = [move.id for move in self.browse(cr, uid, ids, context=context) if move.state == "draft"] if todo: ids = self.action_confirm(cr, uid, todo, context=context) pickings = set() procurement_ids = set() #Search operations that are linked to the moves operations = set() move_qty = {} for move in self.browse(cr, uid, ids, context=context): move_qty[move.id] = move.product_qty for link in move.linked_move_operation_ids: operations.add(link.operation_id) #Sort operations according to entire packages first, then package + lot, package only, lot only operations = list(operations) operations.sort(key=lambda x: ((x.package_id and not x.product_id) and -4 or 0) + (x.package_id and -2 or 0) + (x.lot_id and -1 or 0)) for ops in operations: if ops.picking_id: pickings.add(ops.picking_id.id) main_domain = [('qty', '>', 0)] for record in ops.linked_move_operation_ids: move = record.move_id self.check_tracking(cr, uid, 
move, not ops.product_id and ops.package_id.id or ops.lot_id.id, context=context) prefered_domain = [('reservation_id', '=', move.id)] fallback_domain = [('reservation_id', '=', False)] fallback_domain2 = ['&', ('reservation_id', '!=', move.id), ('reservation_id', '!=', False)] prefered_domain_list = [prefered_domain] + [fallback_domain] + [fallback_domain2] dom = main_domain + self.pool.get('stock.move.operation.link').get_specific_domain(cr, uid, record, context=context) quants = quant_obj.quants_get_prefered_domain(cr, uid, ops.location_id, move.product_id, record.qty, domain=dom, prefered_domain_list=prefered_domain_list, restrict_lot_id=move.restrict_lot_id.id, restrict_partner_id=move.restrict_partner_id.id, context=context) if ops.product_id: #If a product is given, the result is always put immediately in the result package (if it is False, they are without package) quant_dest_package_id = ops.result_package_id.id ctx = context else: # When a pack is moved entirely, the quants should not be written anything for the destination package quant_dest_package_id = False ctx = context.copy() ctx['entire_pack'] = True quant_obj.quants_move(cr, uid, quants, move, ops.location_dest_id, location_from=ops.location_id, lot_id=ops.lot_id.id, owner_id=ops.owner_id.id, src_package_id=ops.package_id.id, dest_package_id=quant_dest_package_id, context=ctx) # Handle pack in pack if not ops.product_id and ops.package_id and ops.result_package_id.id != ops.package_id.parent_id.id: self.pool.get('stock.quant.package').write(cr, SUPERUSER_ID, [ops.package_id.id], {'parent_id': ops.result_package_id.id}, context=context) if not move_qty.get(move.id): raise osv.except_osv(_("Error"), _("The roundings of your Unit of Measures %s on the move vs. %s on the product don't allow to do these operations or you are not transferring the picking at once. 
") % (move.product_uom.name, move.product_id.uom_id.name)) move_qty[move.id] -= record.qty #Check for remaining qtys and unreserve/check move_dest_id in move_dest_ids = set() for move in self.browse(cr, uid, ids, context=context): move_qty_cmp = float_compare(move_qty[move.id], 0, precision_rounding=move.product_id.uom_id.rounding) if move_qty_cmp > 0: # (=In case no pack operations in picking) main_domain = [('qty', '>', 0)] prefered_domain = [('reservation_id', '=', move.id)] fallback_domain = [('reservation_id', '=', False)] fallback_domain2 = ['&', ('reservation_id', '!=', move.id), ('reservation_id', '!=', False)] prefered_domain_list = [prefered_domain] + [fallback_domain]
codeparrot/github-code-clean
## @file # Routines for generating AutoGen.h and AutoGen.c # # Copyright (c) 2007 - 2012, Intel Corporation. All rights reserved.<BR> # This program and the accompanying materials # are licensed and made available under the terms and conditions of the BSD License # which accompanies this distribution. The full text of the license may be found at # http://opensource.org/licenses/bsd-license.php # # THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, # WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. # ## Import Modules # import string from Common import EdkLogger from Common.BuildToolError import * from Common.DataType import * from Common.Misc import * from Common.String import StringToArray from StrGather import * ## PCD type string gItemTypeStringDatabase = { TAB_PCDS_FEATURE_FLAG : 'FixedAtBuild', TAB_PCDS_FIXED_AT_BUILD : 'FixedAtBuild', TAB_PCDS_PATCHABLE_IN_MODULE: 'BinaryPatch', TAB_PCDS_DYNAMIC : '', TAB_PCDS_DYNAMIC_DEFAULT : '', TAB_PCDS_DYNAMIC_VPD : '', TAB_PCDS_DYNAMIC_HII : '', TAB_PCDS_DYNAMIC_EX : '', TAB_PCDS_DYNAMIC_EX_DEFAULT : '', TAB_PCDS_DYNAMIC_EX_VPD : '', TAB_PCDS_DYNAMIC_EX_HII : '', } ## Dynamic PCD types gDynamicPcd = [TAB_PCDS_DYNAMIC, TAB_PCDS_DYNAMIC_DEFAULT, TAB_PCDS_DYNAMIC_VPD, TAB_PCDS_DYNAMIC_HII] ## Dynamic-ex PCD types gDynamicExPcd = [TAB_PCDS_DYNAMIC_EX, TAB_PCDS_DYNAMIC_EX_DEFAULT, TAB_PCDS_DYNAMIC_EX_VPD, TAB_PCDS_DYNAMIC_EX_HII] ## Datum size gDatumSizeStringDatabase = {'UINT8':'8','UINT16':'16','UINT32':'32','UINT64':'64','BOOLEAN':'BOOLEAN','VOID*':'8'} gDatumSizeStringDatabaseH = {'UINT8':'8','UINT16':'16','UINT32':'32','UINT64':'64','BOOLEAN':'BOOL','VOID*':'PTR'} gDatumSizeStringDatabaseLib = {'UINT8':'8','UINT16':'16','UINT32':'32','UINT64':'64','BOOLEAN':'Bool','VOID*':'Ptr'} ## Mapping between PCD driver type and EFI phase gPcdPhaseMap = { "PEI_PCD_DRIVER" : "PEI", "DXE_PCD_DRIVER" : "DXE" } gPcdDatabaseCommonAutoGenH = """ // // The following definition will be 
generated by build tool // // // Common definitions // typedef UINT8 SKU_ID; #define PCD_TYPE_SHIFT 28 #define PCD_TYPE_DATA (0x0U << PCD_TYPE_SHIFT) #define PCD_TYPE_HII (0x8U << PCD_TYPE_SHIFT) #define PCD_TYPE_VPD (0x4U << PCD_TYPE_SHIFT) #define PCD_TYPE_SKU_ENABLED (0x2U << PCD_TYPE_SHIFT) #define PCD_TYPE_STRING (0x1U << PCD_TYPE_SHIFT) #define PCD_TYPE_ALL_SET (PCD_TYPE_DATA | PCD_TYPE_HII | PCD_TYPE_VPD | PCD_TYPE_SKU_ENABLED | PCD_TYPE_STRING) #define PCD_DATUM_TYPE_SHIFT 24 #define PCD_DATUM_TYPE_POINTER (0x0U << PCD_DATUM_TYPE_SHIFT) #define PCD_DATUM_TYPE_UINT8 (0x1U << PCD_DATUM_TYPE_SHIFT) #define PCD_DATUM_TYPE_UINT16 (0x2U << PCD_DATUM_TYPE_SHIFT) #define PCD_DATUM_TYPE_UINT32 (0x4U << PCD_DATUM_TYPE_SHIFT) #define PCD_DATUM_TYPE_UINT64 (0x8U << PCD_DATUM_TYPE_SHIFT) #define PCD_DATUM_TYPE_ALL_SET (PCD_DATUM_TYPE_POINTER | \\ PCD_DATUM_TYPE_UINT8 | \\ PCD_DATUM_TYPE_UINT16 | \\ PCD_DATUM_TYPE_UINT32 | \\ PCD_DATUM_TYPE_UINT64) #define PCD_DATABASE_OFFSET_MASK (~(PCD_TYPE_ALL_SET | PCD_DATUM_TYPE_ALL_SET)) typedef struct { UINT32 ExTokenNumber; UINT16 LocalTokenNumber; // PCD Number of this particular platform build UINT16 ExGuidIndex; // Index of GuidTable } DYNAMICEX_MAPPING; typedef struct { UINT32 SkuDataStartOffset; //We have to use offsetof MACRO as we don't know padding done by compiler UINT32 SkuIdTableOffset; //Offset from the PCD_DB } SKU_HEAD; typedef struct { UINT32 StringIndex; // Offset in String Table in units of UINT32. UINT32 DefaultValueOffset; // Offset of the Default Value UINT16 GuidTableIndex; // Offset in Guid Table in units of GUID. 
UINT16 Offset; // Offset in Variable } VARIABLE_HEAD; typedef struct { UINT32 Offset; } VPD_HEAD; typedef UINT32 STRING_HEAD; typedef UINT16 SIZE_INFO; #define offsetof(s,m) (UINT32) (UINTN) &(((s *)0)->m) """ gPcdDatabaseEpilogueAutoGenH = """ typedef struct { PEI_PCD_DATABASE PeiDb; DXE_PCD_DATABASE DxeDb; } PCD_DATABASE; #define PCD_TOTAL_TOKEN_NUMBER (PEI_LOCAL_TOKEN_NUMBER + DXE_LOCAL_TOKEN_NUMBER) """ gPcdDatabaseAutoGenH = TemplateString(""" #define ${PHASE}_GUID_TABLE_SIZE ${GUID_TABLE_SIZE} #define ${PHASE}_STRING_TABLE_SIZE ${STRING_TABLE_SIZE} #define ${PHASE}_SKUID_TABLE_SIZE ${SKUID_TABLE_SIZE} #define ${PHASE}_LOCAL_TOKEN_NUMBER_TABLE_SIZE ${LOCAL_TOKEN_NUMBER_TABLE_SIZE} #define ${PHASE}_LOCAL_TOKEN_NUMBER ${LOCAL_TOKEN_NUMBER} #define ${PHASE}_EXMAPPING_TABLE_SIZE ${EXMAPPING_TABLE_SIZE} #define ${PHASE}_EX_TOKEN_NUMBER ${EX_TOKEN_NUMBER} #define ${PHASE}_SIZE_TABLE_SIZE ${SIZE_TABLE_SIZE} #define ${PHASE}_GUID_TABLE_EMPTY ${GUID_TABLE_EMPTY} #define ${PHASE}_STRING_TABLE_EMPTY ${STRING_TABLE_EMPTY} #define ${PHASE}_SKUID_TABLE_EMPTY ${SKUID_TABLE_EMPTY} #define ${PHASE}_DATABASE_EMPTY ${DATABASE_EMPTY} #define ${PHASE}_EXMAP_TABLE_EMPTY ${EXMAP_TABLE_EMPTY} typedef struct { ${BEGIN} UINT64 ${INIT_CNAME_DECL_UINT64}_${INIT_GUID_DECL_UINT64}[${INIT_NUMSKUS_DECL_UINT64}]; ${END} ${BEGIN} UINT64 ${VARDEF_CNAME_UINT64}_${VARDEF_GUID_UINT64}_VariableDefault_${VARDEF_SKUID_UINT64}; ${END} ${BEGIN} UINT32 ${INIT_CNAME_DECL_UINT32}_${INIT_GUID_DECL_UINT32}[${INIT_NUMSKUS_DECL_UINT32}]; ${END} ${BEGIN} UINT32 ${VARDEF_CNAME_UINT32}_${VARDEF_GUID_UINT32}_VariableDefault_${VARDEF_SKUID_UINT32}; ${END} ${BEGIN} VPD_HEAD ${VPD_HEAD_CNAME_DECL}_${VPD_HEAD_GUID_DECL}[${VPD_HEAD_NUMSKUS_DECL}]; ${END} DYNAMICEX_MAPPING ExMapTable[${PHASE}_EXMAPPING_TABLE_SIZE]; UINT32 LocalTokenNumberTable[${PHASE}_LOCAL_TOKEN_NUMBER_TABLE_SIZE]; GUID GuidTable[${PHASE}_GUID_TABLE_SIZE]; ${BEGIN} STRING_HEAD 
${STRING_HEAD_CNAME_DECL}_${STRING_HEAD_GUID_DECL}[${STRING_HEAD_NUMSKUS_DECL}]; ${END} ${BEGIN} VARIABLE_HEAD ${VARIABLE_HEAD_CNAME_DECL}_${VARIABLE_HEAD_GUID_DECL}_Variable_Header[${VARIABLE_HEAD_NUMSKUS_DECL}]; ${END} ${BEGIN} UINT8 StringTable${STRING_TABLE_INDEX}[${STRING_TABLE_LENGTH}]; /* ${STRING_TABLE_CNAME}_${STRING_TABLE_GUID} */ ${END} SIZE_INFO SizeTable[${PHASE}_SIZE_TABLE_SIZE]; ${BEGIN} UINT16 ${INIT_CNAME_DECL_UINT16}_${INIT_GUID_DECL_UINT16}[${INIT_NUMSKUS_DECL_UINT16}]; ${END} ${BEGIN} UINT16 ${VARDEF_CNAME_UINT16}_${VARDEF_GUID_UINT16}_VariableDefault_${VARDEF_SKUID_UINT16}; ${END} ${BEGIN} UINT8 ${INIT_CNAME_DECL_UINT8}_${INIT_GUID_DECL_UINT8}[${INIT_NUMSKUS_DECL_UINT8}]; ${END} ${BEGIN} UINT8 ${VARDEF_CNAME_UINT8}_${VARDEF_GUID_UINT8}_VariableDefault_${VARDEF_SKUID_UINT8}; ${END} ${BEGIN} BOOLEAN ${INIT_CNAME_DECL_BOOLEAN}_${INIT_GUID_DECL_BOOLEAN}[${INIT_NUMSKUS_DECL_BOOLEAN}]; ${END} ${BEGIN} BOOLEAN ${VARDEF_CNAME_BOOLEAN}_${VARDEF_GUID_BOOLEAN}_VariableDefault_${VARDEF_SKUID_BOOLEAN}; ${END} UINT8 SkuIdTable[${PHASE}_SKUID_TABLE_SIZE]; ${SYSTEM_SKU_ID} } ${PHASE}_PCD_DATABASE_INIT; typedef struct { ${PCD_DATABASE_UNINIT_EMPTY} ${BEGIN} UINT64 ${UNINIT_CNAME_DECL_UINT64}_${UNINIT_GUID_DECL_UINT64}[${UNINIT_NUMSKUS_DECL_UINT64}]; ${END} ${BEGIN} UINT32 ${UNINIT_CNAME_DECL_UINT32}_${UNINIT_GUID_DECL_UINT32}[${UNINIT_NUMSKUS_DECL_UINT32}]; ${END} ${BEGIN} UINT16 ${UNINIT_CNAME_DECL_UINT16}_${UNINIT_GUID_DECL_UINT16}[${UNINIT_NUMSKUS_DECL_UINT16}]; ${END} ${BEGIN} UINT8 ${UNINIT_CNAME_DECL_UINT8}_${UNINIT_GUID_DECL_UINT8}[${UNINIT_NUMSKUS_DECL_UINT8}]; ${END} ${BEGIN} BOOLEAN ${UNINIT_CNAME_DECL_BOOLEAN}_${UNINIT_GUID_DECL_BOOLEAN}[${UNINIT_NUMSKUS_DECL_BOOLEAN}]; ${END} } ${PHASE}_PCD_DATABASE_UNINIT; #define PCD_${PHASE}_SERVICE_DRIVER_VERSION 2 typedef struct { ${PHASE}_PCD_DATABASE_INIT Init; ${PHASE}_PCD_DATABASE_UNINIT Uninit; } ${PHASE}_PCD_DATABASE; #define ${PHASE}_NEX_TOKEN_NUMBER (${PHASE}_LOCAL_TOKEN_NUMBER - 
${PHASE}_EX_TOKEN_NUMBER)
""")

## C template for a PCD database that contains no dynamic(-ex) PCD at all:
#  every table degenerates to a single zero entry so the structure still compiles.
gEmptyPcdDatabaseAutoGenC = TemplateString("""
${PHASE}_PCD_DATABASE_INIT g${PHASE}PcdDbInit = {
  /* ExMapTable */
  {
    {0, 0, 0}
  },
  /* LocalTokenNumberTable */
  {
    0
  },
  /* GuidTable */
  {
    {0x00000000, 0x0000, 0x0000, {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}
  },
  /* StringTable */
  { 0 },
  /* SizeTable */
  {
    0, 0
  },
  /* SkuIdTable */
  { 0 },
  ${SYSTEM_SKU_ID_VALUE}
};
""")

## C template for the initialized portion of the ${PHASE} (PEI/DXE) PCD database.
#  Each ${BEGIN}...${END} section is repeated once per PCD (or per SKU entry)
#  when the template is substituted by CreatePcdDatabasePhaseSpecificAutoGen.
gPcdDatabaseAutoGenC = TemplateString("""
${PHASE}_PCD_DATABASE_INIT g${PHASE}PcdDbInit = {
${BEGIN}  { ${INIT_VALUE_UINT64} }, /* ${INIT_CNAME_DECL_UINT64}_${INIT_GUID_DECL_UINT64}[${INIT_NUMSKUS_DECL_UINT64}] */
${END}
${BEGIN}  ${VARDEF_VALUE_UINT64}, /* ${VARDEF_CNAME_UINT64}_${VARDEF_GUID_UINT64}_VariableDefault_${VARDEF_SKUID_UINT64} */
${END}
${BEGIN}  { ${INIT_VALUE_UINT32} }, /* ${INIT_CNAME_DECL_UINT32}_${INIT_GUID_DECL_UINT32}[${INIT_NUMSKUS_DECL_UINT32}] */
${END}
${BEGIN}  ${VARDEF_VALUE_UINT32}, /* ${VARDEF_CNAME_UINT32}_${VARDEF_GUID_UINT32}_VariableDefault_${VARDEF_SKUID_UINT32} */
${END}
  /* VPD */
${BEGIN}  { ${VPD_HEAD_VALUE} }, /* ${VPD_HEAD_CNAME_DECL}_${VPD_HEAD_GUID_DECL}[${VPD_HEAD_NUMSKUS_DECL}] */
${END}
  /* ExMapTable */
  {
${BEGIN}    { ${EXMAPPING_TABLE_EXTOKEN}, ${EXMAPPING_TABLE_LOCAL_TOKEN}, ${EXMAPPING_TABLE_GUID_INDEX} },
${END}
  },
  /* LocalTokenNumberTable */
  {
${BEGIN}    offsetof(${PHASE}_PCD_DATABASE, ${TOKEN_INIT}.${TOKEN_CNAME}_${TOKEN_GUID}${VARDEF_HEADER}) | ${TOKEN_TYPE},
${END}
  },
  /* GuidTable */
  {
${BEGIN}    ${GUID_STRUCTURE},
${END}
  },
${BEGIN}  { ${STRING_HEAD_VALUE} }, /* ${STRING_HEAD_CNAME_DECL}_${STRING_HEAD_GUID_DECL}[${STRING_HEAD_NUMSKUS_DECL}] */
${END}
${BEGIN}  /* ${VARIABLE_HEAD_CNAME_DECL}_${VARIABLE_HEAD_GUID_DECL}_Variable_Header[${VARIABLE_HEAD_NUMSKUS_DECL}] */
  {
    ${VARIABLE_HEAD_VALUE}
  },
${END}
  /* StringTable */
${BEGIN}  ${STRING_TABLE_VALUE}, /* ${STRING_TABLE_CNAME}_${STRING_TABLE_GUID} */
${END}
  /* SizeTable */
  {
${BEGIN}    ${SIZE_TABLE_MAXIMUM_LENGTH}, ${SIZE_TABLE_CURRENT_LENGTH}, /* ${SIZE_TABLE_CNAME}_${SIZE_TABLE_GUID} */
${END}
  },
${BEGIN}  { ${INIT_VALUE_UINT16} }, /* ${INIT_CNAME_DECL_UINT16}_${INIT_GUID_DECL_UINT16}[${INIT_NUMSKUS_DECL_UINT16}] */
${END}
${BEGIN}  ${VARDEF_VALUE_UINT16}, /* ${VARDEF_CNAME_UINT16}_${VARDEF_GUID_UINT16}_VariableDefault_${VARDEF_SKUID_UINT16} */
${END}
${BEGIN}  { ${INIT_VALUE_UINT8} }, /* ${INIT_CNAME_DECL_UINT8}_${INIT_GUID_DECL_UINT8}[${INIT_NUMSKUS_DECL_UINT8}] */
${END}
${BEGIN}  ${VARDEF_VALUE_UINT8}, /* ${VARDEF_CNAME_UINT8}_${VARDEF_GUID_UINT8}_VariableDefault_${VARDEF_SKUID_UINT8} */
${END}
${BEGIN}  { ${INIT_VALUE_BOOLEAN} }, /* ${INIT_CNAME_DECL_BOOLEAN}_${INIT_GUID_DECL_BOOLEAN}[${INIT_NUMSKUS_DECL_BOOLEAN}] */
${END}
${BEGIN}  ${VARDEF_VALUE_BOOLEAN}, /* ${VARDEF_CNAME_BOOLEAN}_${VARDEF_GUID_BOOLEAN}_VariableDefault_${VARDEF_SKUID_BOOLEAN} */
${END}
  /* SkuIdTable */
  { ${BEGIN}${SKUID_VALUE}, ${END} },
  ${SYSTEM_SKU_ID_VALUE}
};
""")

## AutoGen File Header Templates
gAutoGenHeaderString = TemplateString("""\
/**
  DO NOT EDIT
  FILE auto-generated Module name:
    ${FileName}
  Abstract:       Auto-generated ${FileName} for building module or library.
**/
""")

gAutoGenHPrologueString = TemplateString("""
#ifndef _${File}_${Guid}
#define _${File}_${Guid}

""")

## Wrap the generated declarations in extern "C" for C++ translation units.
gAutoGenHCppPrologueString = """\
#ifdef __cplusplus
extern "C" {
#endif

"""

gAutoGenHEpilogueString = """

#ifdef __cplusplus
}
#endif

#endif
"""

## PEI Core Entry Point Templates
gPeiCoreEntryPointPrototype = TemplateString("""
${BEGIN}
VOID
EFIAPI
${Function} (
  IN CONST  EFI_SEC_PEI_HAND_OFF    *SecCoreData,
  IN CONST  EFI_PEI_PPI_DESCRIPTOR  *PpiList,
  IN VOID                           *Context
  );
${END}
""")

gPeiCoreEntryPointString = TemplateString("""
${BEGIN}
VOID
EFIAPI
ProcessModuleEntryPointList (
  IN CONST  EFI_SEC_PEI_HAND_OFF    *SecCoreData,
  IN CONST  EFI_PEI_PPI_DESCRIPTOR  *PpiList,
  IN VOID                           *Context
  )
{
  ${Function} (SecCoreData, PpiList, Context);
}
${END}
""")

## DXE Core Entry Point Templates
gDxeCoreEntryPointPrototype = TemplateString("""
${BEGIN}
VOID
EFIAPI
${Function} (
  IN VOID  *HobStart
  );
${END}
""")

gDxeCoreEntryPointString = TemplateString("""
${BEGIN}
VOID
EFIAPI
ProcessModuleEntryPointList (
  IN VOID  *HobStart
  )
{
  ${Function} (HobStart);
}
${END}
""")

## PEIM Entry Point Templates
gPeimEntryPointPrototype = TemplateString("""
${BEGIN}
EFI_STATUS
EFIAPI
${Function} (
  IN       EFI_PEI_FILE_HANDLE  FileHandle,
  IN CONST EFI_PEI_SERVICES     **PeiServices
  );
${END}
""")

## Indexed by the number of module entry points:
#  [0] no entry point, [1] exactly one, [2] more than one (combined status).
gPeimEntryPointString = [
TemplateString("""
GLOBAL_REMOVE_IF_UNREFERENCED const UINT32 _gPeimRevision = ${PiSpecVersion};

EFI_STATUS
EFIAPI
ProcessModuleEntryPointList (
  IN       EFI_PEI_FILE_HANDLE  FileHandle,
  IN CONST EFI_PEI_SERVICES     **PeiServices
  )
{
  return EFI_SUCCESS;
}
"""),
TemplateString("""
GLOBAL_REMOVE_IF_UNREFERENCED const UINT32 _gPeimRevision = ${PiSpecVersion};
${BEGIN}
EFI_STATUS
EFIAPI
ProcessModuleEntryPointList (
  IN       EFI_PEI_FILE_HANDLE  FileHandle,
  IN CONST EFI_PEI_SERVICES     **PeiServices
  )
{
  return ${Function} (FileHandle, PeiServices);
}
${END}
"""),
TemplateString("""
GLOBAL_REMOVE_IF_UNREFERENCED const UINT32 _gPeimRevision = ${PiSpecVersion};

EFI_STATUS
EFIAPI
ProcessModuleEntryPointList (
  IN       EFI_PEI_FILE_HANDLE  FileHandle,
  IN CONST EFI_PEI_SERVICES     **PeiServices
  )
{
  EFI_STATUS  Status;
  EFI_STATUS  CombinedStatus;

  CombinedStatus = EFI_LOAD_ERROR;
${BEGIN}
  Status = ${Function} (FileHandle, PeiServices);
  if (!EFI_ERROR (Status) || EFI_ERROR (CombinedStatus)) {
    CombinedStatus = Status;
  }
${END}
  return CombinedStatus;
}
""")
]

## SMM_CORE Entry Point Templates
gSmmCoreEntryPointPrototype = TemplateString("""
${BEGIN}
EFI_STATUS
EFIAPI
${Function} (
  IN EFI_HANDLE        ImageHandle,
  IN EFI_SYSTEM_TABLE  *SystemTable
  );
${END}
""")

gSmmCoreEntryPointString = TemplateString("""
${BEGIN}
const UINT32 _gUefiDriverRevision = ${UefiSpecVersion};
const UINT32 _gDxeRevision = ${PiSpecVersion};

EFI_STATUS
EFIAPI
ProcessModuleEntryPointList (
  IN EFI_HANDLE        ImageHandle,
  IN EFI_SYSTEM_TABLE  *SystemTable
  )
{
  return ${Function} (ImageHandle, SystemTable);
}
${END}
""")

## DXE SMM Entry Point Templates
gDxeSmmEntryPointPrototype = TemplateString("""
${BEGIN}
EFI_STATUS
EFIAPI
${Function} (
  IN EFI_HANDLE        ImageHandle,
  IN EFI_SYSTEM_TABLE  *SystemTable
  );
${END}
""")

## [0]: no entry point; [1]: one or more entry points, each run under
#  SetJump/LongJump so ExitDriver() can unwind back to the dispatcher.
gDxeSmmEntryPointString = [
TemplateString("""
const UINT32 _gUefiDriverRevision = ${UefiSpecVersion};
const UINT32 _gDxeRevision = ${PiSpecVersion};

EFI_STATUS
EFIAPI
ProcessModuleEntryPointList (
  IN EFI_HANDLE        ImageHandle,
  IN EFI_SYSTEM_TABLE  *SystemTable
  )
{
  return EFI_SUCCESS;
}
"""),
TemplateString("""
const UINT32 _gUefiDriverRevision = ${UefiSpecVersion};
const UINT32 _gDxeRevision = ${PiSpecVersion};

static BASE_LIBRARY_JUMP_BUFFER  mJumpContext;
static EFI_STATUS  mDriverEntryPointStatus;

VOID
EFIAPI
ExitDriver (
  IN EFI_STATUS  Status
  )
{
  if (!EFI_ERROR (Status) || EFI_ERROR (mDriverEntryPointStatus)) {
    mDriverEntryPointStatus = Status;
  }
  LongJump (&mJumpContext, (UINTN)-1);
  ASSERT (FALSE);
}

EFI_STATUS
EFIAPI
ProcessModuleEntryPointList (
  IN EFI_HANDLE        ImageHandle,
  IN EFI_SYSTEM_TABLE  *SystemTable
  )
{
  mDriverEntryPointStatus = EFI_LOAD_ERROR;
${BEGIN}
  if (SetJump (&mJumpContext) == 0) {
    ExitDriver (${Function} (ImageHandle, SystemTable));
    ASSERT (FALSE);
  }
${END}
  return mDriverEntryPointStatus;
}
""")
]

## UEFI Driver Entry Point Templates
gUefiDriverEntryPointPrototype = TemplateString("""
${BEGIN}
EFI_STATUS
EFIAPI
${Function} (
  IN EFI_HANDLE        ImageHandle,
  IN EFI_SYSTEM_TABLE  *SystemTable
  );
${END}
""")

## Indexed by the number of driver entry points: [0] none, [1] one, [2] many.
gUefiDriverEntryPointString = [
TemplateString("""
const UINT32 _gUefiDriverRevision = ${UefiSpecVersion};
const UINT32 _gDxeRevision = ${PiSpecVersion};

EFI_STATUS
EFIAPI
ProcessModuleEntryPointList (
  IN EFI_HANDLE        ImageHandle,
  IN EFI_SYSTEM_TABLE  *SystemTable
  )
{
  return EFI_SUCCESS;
}
"""),
TemplateString("""
const UINT32 _gUefiDriverRevision = ${UefiSpecVersion};
const UINT32 _gDxeRevision = ${PiSpecVersion};
${BEGIN}
EFI_STATUS
EFIAPI
ProcessModuleEntryPointList (
  IN EFI_HANDLE        ImageHandle,
  IN EFI_SYSTEM_TABLE  *SystemTable
  )
{
  return ${Function} (ImageHandle, SystemTable);
}
${END}
VOID
EFIAPI
ExitDriver (
  IN EFI_STATUS  Status
  )
{
  if (EFI_ERROR (Status)) {
    ProcessLibraryDestructorList (gImageHandle, gST);
  }
  gBS->Exit (gImageHandle, Status, 0, NULL);
}
"""),
TemplateString("""
const UINT32 _gUefiDriverRevision = ${UefiSpecVersion};
const UINT32 _gDxeRevision = ${PiSpecVersion};

static BASE_LIBRARY_JUMP_BUFFER  mJumpContext;
static EFI_STATUS  mDriverEntryPointStatus;

EFI_STATUS
EFIAPI
ProcessModuleEntryPointList (
  IN EFI_HANDLE        ImageHandle,
  IN EFI_SYSTEM_TABLE  *SystemTable
  )
{
  mDriverEntryPointStatus = EFI_LOAD_ERROR;
${BEGIN}
  if (SetJump (&mJumpContext) == 0) {
    ExitDriver (${Function} (ImageHandle, SystemTable));
    ASSERT (FALSE);
  }
${END}
  return mDriverEntryPointStatus;
}

VOID
EFIAPI
ExitDriver (
  IN EFI_STATUS  Status
  )
{
  if (!EFI_ERROR (Status) || EFI_ERROR (mDriverEntryPointStatus)) {
    mDriverEntryPointStatus = Status;
  }
  LongJump (&mJumpContext, (UINTN)-1);
  ASSERT (FALSE);
}
""")
]

## UEFI Application Entry Point Templates
gUefiApplicationEntryPointPrototype = TemplateString("""
${BEGIN}
EFI_STATUS
EFIAPI
${Function} (
  IN EFI_HANDLE        ImageHandle,
  IN EFI_SYSTEM_TABLE  *SystemTable
  );
${END}
""")

## Indexed by the number of application entry points: [0] none, [1] one, [2] many.
gUefiApplicationEntryPointString = [
TemplateString("""
const UINT32 _gUefiDriverRevision = ${UefiSpecVersion};

EFI_STATUS
EFIAPI
ProcessModuleEntryPointList (
  IN EFI_HANDLE        ImageHandle,
  IN EFI_SYSTEM_TABLE  *SystemTable
  )
{
  return EFI_SUCCESS;
}
"""),
TemplateString("""
const UINT32 _gUefiDriverRevision = ${UefiSpecVersion};
${BEGIN}
EFI_STATUS
EFIAPI
ProcessModuleEntryPointList (
  IN EFI_HANDLE        ImageHandle,
  IN EFI_SYSTEM_TABLE  *SystemTable
  )
{
  return ${Function} (ImageHandle, SystemTable);
}
${END}
VOID
EFIAPI
ExitDriver (
  IN EFI_STATUS  Status
  )
{
  if (EFI_ERROR (Status)) {
    ProcessLibraryDestructorList (gImageHandle, gST);
  }
  gBS->Exit (gImageHandle, Status, 0, NULL);
}
"""),
# NOTE(review): in this variant the static variables are emitted AFTER the
# function that references them — looks like a C forward-reference problem;
# TODO confirm against the upstream BaseTools source before changing.
TemplateString("""
const UINT32 _gUefiDriverRevision = ${UefiSpecVersion};

EFI_STATUS
EFIAPI
ProcessModuleEntryPointList (
  IN EFI_HANDLE        ImageHandle,
  IN EFI_SYSTEM_TABLE  *SystemTable
  )
{
${BEGIN}
  if (SetJump (&mJumpContext) == 0) {
    ExitDriver (${Function} (ImageHandle, SystemTable));
    ASSERT (FALSE);
  }
${END}
  return mDriverEntryPointStatus;
}

static BASE_LIBRARY_JUMP_BUFFER  mJumpContext;
static EFI_STATUS  mDriverEntryPointStatus = EFI_LOAD_ERROR;

VOID
EFIAPI
ExitDriver (
  IN EFI_STATUS  Status
  )
{
  if (!EFI_ERROR (Status) || EFI_ERROR (mDriverEntryPointStatus)) {
    mDriverEntryPointStatus = Status;
  }
  LongJump (&mJumpContext, (UINTN)-1);
  ASSERT (FALSE);
}
""")
]

## UEFI Unload Image Templates
gUefiUnloadImagePrototype = TemplateString("""
${BEGIN}
EFI_STATUS
EFIAPI
${Function} (
  IN EFI_HANDLE  ImageHandle
  );
${END}
""")

## Indexed by the number of unload handlers: [0] none, [1] one, [2] many
#  (first failure is kept, remaining handlers still run best-effort).
gUefiUnloadImageString = [
TemplateString("""
GLOBAL_REMOVE_IF_UNREFERENCED const UINT8 _gDriverUnloadImageCount = ${Count};

EFI_STATUS
EFIAPI
ProcessModuleUnloadList (
  IN EFI_HANDLE  ImageHandle
  )
{
  return EFI_SUCCESS;
}
"""),
TemplateString("""
GLOBAL_REMOVE_IF_UNREFERENCED const UINT8 _gDriverUnloadImageCount = ${Count};
${BEGIN}
EFI_STATUS
EFIAPI
ProcessModuleUnloadList (
  IN EFI_HANDLE  ImageHandle
  )
{
  return ${Function} (ImageHandle);
}
${END}
"""),
TemplateString("""
GLOBAL_REMOVE_IF_UNREFERENCED const UINT8 _gDriverUnloadImageCount = ${Count};

EFI_STATUS
EFIAPI
ProcessModuleUnloadList (
  IN EFI_HANDLE  ImageHandle
  )
{
  EFI_STATUS  Status;

  Status = EFI_SUCCESS;
${BEGIN}
  if (EFI_ERROR (Status)) {
    ${Function} (ImageHandle);
  } else {
    Status = ${Function} (ImageHandle);
  }
${END}
  return Status;
}
""")
]

## Library constructor/destructor prototypes, keyed by module "class"
#  (BASE has no boot-services context, PEI gets PEI services, DXE gets the
#  image handle and system table).
gLibraryStructorPrototype = {
'BASE'  : TemplateString("""${BEGIN}
RETURN_STATUS
EFIAPI
${Function} (
  VOID
  );${END}
"""),

'PEI'   : TemplateString("""${BEGIN}
EFI_STATUS
EFIAPI
${Function} (
  IN       EFI_PEI_FILE_HANDLE  FileHandle,
  IN CONST EFI_PEI_SERVICES     **PeiServices
  );${END}
"""),

'DXE'   : TemplateString("""${BEGIN}
EFI_STATUS
EFIAPI
${Function} (
  IN EFI_HANDLE        ImageHandle,
  IN EFI_SYSTEM_TABLE  *SystemTable
  );${END}
"""),
}

## Per-class call sites for each constructor/destructor invocation.
gLibraryStructorCall = {
'BASE'  : TemplateString("""${BEGIN}
  Status = ${Function} ();
  ASSERT_EFI_ERROR (Status);${END}
"""),

'PEI'   : TemplateString("""${BEGIN}
  Status = ${Function} (FileHandle, PeiServices);
  ASSERT_EFI_ERROR (Status);${END}
"""),

'DXE'   : TemplateString("""${BEGIN}
  Status = ${Function} (ImageHandle, SystemTable);
  ASSERT_EFI_ERROR (Status);${END}
"""),
}

## Library Constructor and Destructor Templates
gLibraryString = {
'BASE'  : TemplateString("""
${BEGIN}${FunctionPrototype}${END}

VOID
EFIAPI
ProcessLibrary${Type}List (
  VOID
  )
{
${BEGIN}  EFI_STATUS  Status;
${FunctionCall}${END}
}
"""),

'PEI'   : TemplateString("""
${BEGIN}${FunctionPrototype}${END}

VOID
EFIAPI
ProcessLibrary${Type}List (
  IN       EFI_PEI_FILE_HANDLE  FileHandle,
  IN CONST EFI_PEI_SERVICES     **PeiServices
  )
{
${BEGIN}  EFI_STATUS  Status;
${FunctionCall}${END}
}
"""),

'DXE'   : TemplateString("""
${BEGIN}${FunctionPrototype}${END}

VOID
EFIAPI
ProcessLibrary${Type}List (
  IN EFI_HANDLE        ImageHandle,
  IN EFI_SYSTEM_TABLE  *SystemTable
  )
{
${BEGIN}  EFI_STATUS  Status;
${FunctionCall}${END}
}
"""),
}

gBasicHeaderFile = "Base.h"

## Standard C headers each module type's generated AutoGen.h must include.
gModuleTypeHeaderFile = {
    "BASE"              :   [gBasicHeaderFile],
    "SEC"               :   ["PiPei.h", "Library/DebugLib.h"],
    "PEI_CORE"          :   ["PiPei.h",
"Library/DebugLib.h", "Library/PeiCoreEntryPoint.h"],
    "PEIM"              :   ["PiPei.h", "Library/DebugLib.h", "Library/PeimEntryPoint.h"],
    "DXE_CORE"          :   ["PiDxe.h", "Library/DebugLib.h", "Library/DxeCoreEntryPoint.h"],
    "DXE_DRIVER"        :   ["PiDxe.h", "Library/BaseLib.h", "Library/DebugLib.h", "Library/UefiBootServicesTableLib.h", "Library/UefiDriverEntryPoint.h"],
    "DXE_SMM_DRIVER"    :   ["PiDxe.h", "Library/BaseLib.h", "Library/DebugLib.h", "Library/UefiBootServicesTableLib.h", "Library/UefiDriverEntryPoint.h"],
    "DXE_RUNTIME_DRIVER":   ["PiDxe.h", "Library/BaseLib.h", "Library/DebugLib.h", "Library/UefiBootServicesTableLib.h", "Library/UefiDriverEntryPoint.h"],
    "DXE_SAL_DRIVER"    :   ["PiDxe.h", "Library/BaseLib.h", "Library/DebugLib.h", "Library/UefiBootServicesTableLib.h", "Library/UefiDriverEntryPoint.h"],
    "UEFI_DRIVER"       :   ["Uefi.h", "Library/BaseLib.h", "Library/DebugLib.h", "Library/UefiBootServicesTableLib.h", "Library/UefiDriverEntryPoint.h"],
    "UEFI_APPLICATION"  :   ["Uefi.h", "Library/BaseLib.h", "Library/DebugLib.h", "Library/UefiBootServicesTableLib.h", "Library/UefiApplicationEntryPoint.h"],
    "SMM_CORE"          :   ["PiDxe.h", "Library/BaseLib.h", "Library/DebugLib.h", "Library/UefiDriverEntryPoint.h"],
    "USER_DEFINED"      :   [gBasicHeaderFile]
}

## Create code for module PCDs
#
#   @param      Info        The ModuleAutoGen object
#   @param      AutoGenC    The TemplateString object for C code
#   @param      AutoGenH    The TemplateString object for header file
#   @param      Pcd         The PCD object
#
def CreateModulePcdCode(Info, AutoGenC, AutoGenH, Pcd):
    """Emit the _PCD_TOKEN_*, _PCD_GET_MODE_*, _PCD_SET_MODE_* macros (and, for
    static/patchable PCDs, the backing C variable) for one PCD of a module.
    Dynamic(-ex) PCDs are routed through LibPcdGet*/LibPcdSet*; fixed/patchable
    PCDs get a concrete variable definition in AutoGenC."""
    TokenSpaceGuidValue = Pcd.TokenSpaceGuidValue #Info.GuidList[Pcd.TokenSpaceGuidCName]
    PcdTokenNumber = Info.PlatformInfo.PcdTokenNumber
    #
    # Write PCDs
    #
    PcdTokenName = '_PCD_TOKEN_' + Pcd.TokenCName
    if Pcd.Type in gDynamicExPcd:
        # DynamicEx PCDs use the token value declared in the DEC file.
        TokenNumber = int(Pcd.TokenValue, 0)
    else:
        if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName) not in PcdTokenNumber:
            EdkLogger.error("build", AUTOGEN_ERROR,
                            "No generated token number for %s.%s\n" % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName),
                            ExtraData="[%s]" % str(Info))
        TokenNumber = PcdTokenNumber[Pcd.TokenCName, Pcd.TokenSpaceGuidCName]
    AutoGenH.Append('\n#define %s %dU\n' % (PcdTokenName, TokenNumber))

    EdkLogger.debug(EdkLogger.DEBUG_3, "Creating code for " + Pcd.TokenCName + "." + Pcd.TokenSpaceGuidCName)
    if Pcd.Type not in gItemTypeStringDatabase:
        EdkLogger.error("build", AUTOGEN_ERROR,
                        "Unknown PCD type [%s] of PCD %s.%s" % (Pcd.Type, Pcd.TokenSpaceGuidCName, Pcd.TokenCName),
                        ExtraData="[%s]" % str(Info))
    if Pcd.DatumType not in gDatumSizeStringDatabase:
        EdkLogger.error("build", AUTOGEN_ERROR,
                        "Unknown datum type [%s] of PCD %s.%s" % (Pcd.DatumType, Pcd.TokenSpaceGuidCName, Pcd.TokenCName),
                        ExtraData="[%s]" % str(Info))

    DatumSize = gDatumSizeStringDatabase[Pcd.DatumType]
    DatumSizeLib = gDatumSizeStringDatabaseLib[Pcd.DatumType]
    GetModeName = '_PCD_GET_MODE_' + gDatumSizeStringDatabaseH[Pcd.DatumType] + '_' + Pcd.TokenCName
    SetModeName = '_PCD_SET_MODE_' + gDatumSizeStringDatabaseH[Pcd.DatumType] + '_' + Pcd.TokenCName

    if Pcd.Type in gDynamicExPcd:
        AutoGenH.Append('#define %s LibPcdGetEx%s(&%s, %s)\n' % (GetModeName, DatumSizeLib, Pcd.TokenSpaceGuidCName, PcdTokenName))
        if Pcd.DatumType == 'VOID*':
            AutoGenH.Append('#define %s(SizeOfBuffer, Buffer) LibPcdSetEx%s(&%s, %s, (SizeOfBuffer), (Buffer))\n' % (SetModeName, DatumSizeLib, Pcd.TokenSpaceGuidCName, PcdTokenName))
        else:
            AutoGenH.Append('#define %s(Value) LibPcdSetEx%s(&%s, %s, (Value))\n' % (SetModeName, DatumSizeLib, Pcd.TokenSpaceGuidCName, PcdTokenName))
    elif Pcd.Type in gDynamicPcd:
        AutoGenH.Append('#define %s LibPcdGet%s(%s)\n' % (GetModeName, DatumSizeLib, PcdTokenName))
        if Pcd.DatumType == 'VOID*':
            AutoGenH.Append('#define %s(SizeOfBuffer, Buffer) LibPcdSet%s(%s, (SizeOfBuffer), (Buffer))\n' %(SetModeName, DatumSizeLib, PcdTokenName))
        else:
            AutoGenH.Append('#define %s(Value) LibPcdSet%s(%s, (Value))\n' % (SetModeName, DatumSizeLib, PcdTokenName))
    else:
        # Static branch: FixedAtBuild / FeatureFlag / PatchableInModule all get
        # a concrete C variable; patchable ones drop 'const' so they can be patched.
        PcdVariableName = '_gPcd_' + gItemTypeStringDatabase[Pcd.Type] + '_' + Pcd.TokenCName
        Const = 'const'
        if Pcd.Type == TAB_PCDS_PATCHABLE_IN_MODULE:
            Const = ''
        Type = ''
        Array = ''
        Value = Pcd.DefaultValue
        Unicode = False
        ValueNumber = 0

        if Pcd.DatumType == 'BOOLEAN':
            # Normalize TRUE/FALSE spellings to 1U/0U for the C initializer.
            BoolValue = Value.upper()
            if BoolValue == 'TRUE' or BoolValue == '1':
                Value = '1U'
            elif BoolValue == 'FALSE' or BoolValue == '0':
                Value = '0U'

        if Pcd.DatumType in ['UINT64', 'UINT32', 'UINT16', 'UINT8']:
            try:
                if Value.upper().startswith('0X'):
                    ValueNumber = int (Value, 16)
                else:
                    ValueNumber = int (Value)
            except:
                # NOTE(review): bare except also hides unexpected errors, not
                # just malformed numbers — deliberate best-effort kept as-is.
                EdkLogger.error("build", AUTOGEN_ERROR,
                                "PCD value is not valid dec or hex number for datum type [%s] of PCD %s.%s" % (Pcd.DatumType, Pcd.TokenSpaceGuidCName, Pcd.TokenCName),
                                ExtraData="[%s]" % str(Info))
            # Range-check per datum type, then append the C suffix (ULL / U)
            # expected by the compiler to avoid warnings.
            if Pcd.DatumType == 'UINT64':
                if ValueNumber < 0:
                    EdkLogger.error("build", AUTOGEN_ERROR,
                                    "PCD can't be set to negative value for datum type [%s] of PCD %s.%s" % (Pcd.DatumType, Pcd.TokenSpaceGuidCName, Pcd.TokenCName),
                                    ExtraData="[%s]" % str(Info))
                elif ValueNumber >= 0x10000000000000000:
                    EdkLogger.error("build", AUTOGEN_ERROR,
                                    "Too large PCD value for datum type [%s] of PCD %s.%s" % (Pcd.DatumType, Pcd.TokenSpaceGuidCName, Pcd.TokenCName),
                                    ExtraData="[%s]" % str(Info))
                if not Value.endswith('ULL'):
                    Value += 'ULL'
            elif Pcd.DatumType == 'UINT32':
                if ValueNumber < 0:
                    EdkLogger.error("build", AUTOGEN_ERROR,
                                    "PCD can't be set to negative value for datum type [%s] of PCD %s.%s" % (Pcd.DatumType, Pcd.TokenSpaceGuidCName, Pcd.TokenCName),
                                    ExtraData="[%s]" % str(Info))
                elif ValueNumber >= 0x100000000:
                    EdkLogger.error("build", AUTOGEN_ERROR,
                                    "Too large PCD value for datum type [%s] of PCD %s.%s" % (Pcd.DatumType, Pcd.TokenSpaceGuidCName, Pcd.TokenCName),
                                    ExtraData="[%s]" % str(Info))
                if not Value.endswith('U'):
                    Value += 'U'
            elif Pcd.DatumType == 'UINT16':
                if ValueNumber < 0:
                    EdkLogger.error("build", AUTOGEN_ERROR,
                                    "PCD can't be set to negative value for datum type [%s] of PCD %s.%s" % (Pcd.DatumType, Pcd.TokenSpaceGuidCName, Pcd.TokenCName),
                                    ExtraData="[%s]" % str(Info))
                elif ValueNumber >= 0x10000:
                    EdkLogger.error("build", AUTOGEN_ERROR,
                                    "Too large PCD value for datum type [%s] of PCD %s.%s" % (Pcd.DatumType, Pcd.TokenSpaceGuidCName, Pcd.TokenCName),
                                    ExtraData="[%s]" % str(Info))
                if not Value.endswith('U'):
                    Value += 'U'
            elif Pcd.DatumType == 'UINT8':
                if ValueNumber < 0:
                    EdkLogger.error("build", AUTOGEN_ERROR,
                                    "PCD can't be set to negative value for datum type [%s] of PCD %s.%s" % (Pcd.DatumType, Pcd.TokenSpaceGuidCName, Pcd.TokenCName),
                                    ExtraData="[%s]" % str(Info))
                elif ValueNumber >= 0x100:
                    EdkLogger.error("build", AUTOGEN_ERROR,
                                    "Too large PCD value for datum type [%s] of PCD %s.%s" % (Pcd.DatumType, Pcd.TokenSpaceGuidCName, Pcd.TokenCName),
                                    ExtraData="[%s]" % str(Info))
                if not Value.endswith('U'):
                    Value += 'U'
        if Pcd.DatumType == 'VOID*':
            if Pcd.MaxDatumSize == None or Pcd.MaxDatumSize == '':
                EdkLogger.error("build", AUTOGEN_ERROR,
                                "Unknown [MaxDatumSize] of PCD [%s.%s]" % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName),
                                ExtraData="[%s]" % str(Info))

            ArraySize = int(Pcd.MaxDatumSize, 0)
            if Value[0] == '{':
                Type = '(VOID *)'
            else:
                # String default: convert to a C byte/UINT16-array initializer.
                if Value[0] == 'L':
                    Unicode = True
                Value = Value.lstrip('L')   #.strip('"')
                Value = eval(Value)         # translate escape character
                NewValue = '{'
                for Index in range(0,len(Value)):
                    if Unicode:
                        NewValue = NewValue + str(ord(Value[Index]) % 0x10000) + ', '
                    else:
                        NewValue = NewValue + str(ord(Value[Index]) % 0x100) + ', '
                if Unicode:
                    # MaxDatumSize is in bytes; a Unicode array is UINT16 elements.
                    ArraySize = ArraySize / 2;
                if ArraySize < (len(Value) + 1):
                    EdkLogger.error("build", AUTOGEN_ERROR,
                                    "The maximum size of VOID* type PCD '%s.%s' is less than its actual size occupied." % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName),
                                    ExtraData="[%s]" % str(Info))
                Value = NewValue + '0 }'
            Array = '[%d]' % ArraySize
        #
        # skip casting for fixed at build since it breaks ARM assembly.
        # Long term we need PCD macros that work in assembly
        #
        elif Pcd.Type != TAB_PCDS_FIXED_AT_BUILD:
            Value = "((%s)%s)" % (Pcd.DatumType, Value)
        if Pcd.Type == TAB_PCDS_PATCHABLE_IN_MODULE:
            PcdValueName = '_PCD_PATCHABLE_VALUE_' + Pcd.TokenCName
        else:
            PcdValueName = '_PCD_VALUE_' + Pcd.TokenCName
        if Pcd.DatumType == 'VOID*':
            #
            # For unicode, UINT16 array will be generated, so the alignment of unicode is guaranteed.
            #
            if Unicode:
                AutoGenH.Append('#define _PCD_PATCHABLE_%s_SIZE %s\n' % (Pcd.TokenCName, Pcd.MaxDatumSize))
                AutoGenH.Append('#define %s %s%s\n' %(PcdValueName, Type, PcdVariableName))
                AutoGenC.Append('GLOBAL_REMOVE_IF_UNREFERENCED %s UINT16 %s%s = %s;\n' % (Const, PcdVariableName, Array, Value))
                AutoGenH.Append('extern %s UINT16 %s%s;\n' %(Const, PcdVariableName, Array))
                AutoGenH.Append('#define %s %s%s\n' %(GetModeName, Type, PcdVariableName))
            else:
                AutoGenH.Append('#define _PCD_PATCHABLE_%s_SIZE %s\n' % (Pcd.TokenCName, Pcd.MaxDatumSize))
                AutoGenH.Append('#define %s %s%s\n' %(PcdValueName, Type, PcdVariableName))
                AutoGenC.Append('GLOBAL_REMOVE_IF_UNREFERENCED %s UINT8 %s%s = %s;\n' % (Const, PcdVariableName, Array, Value))
                AutoGenH.Append('extern %s UINT8 %s%s;\n' %(Const, PcdVariableName, Array))
                AutoGenH.Append('#define %s %s%s\n' %(GetModeName, Type, PcdVariableName))
        elif Pcd.Type == TAB_PCDS_PATCHABLE_IN_MODULE:
            AutoGenH.Append('#define %s %s\n' %(PcdValueName, Value))
            AutoGenC.Append('volatile %s %s %s = %s;\n' %(Const, Pcd.DatumType, PcdVariableName, PcdValueName))
            AutoGenH.Append('extern volatile %s %s %s%s;\n' % (Const, Pcd.DatumType, PcdVariableName, Array))
            AutoGenH.Append('#define %s %s%s\n' % (GetModeName, Type, PcdVariableName))
        else:
            AutoGenH.Append('#define %s %s\n' %(PcdValueName, Value))
            AutoGenC.Append('GLOBAL_REMOVE_IF_UNREFERENCED %s %s %s = %s;\n' %(Const, Pcd.DatumType, PcdVariableName, PcdValueName))
            AutoGenH.Append('extern %s %s %s%s;\n' % (Const, Pcd.DatumType, PcdVariableName, Array))
            AutoGenH.Append('#define %s %s%s\n' % (GetModeName, Type, PcdVariableName))
        if Pcd.Type == TAB_PCDS_PATCHABLE_IN_MODULE:
            if Pcd.DatumType == 'VOID*':
                AutoGenH.Append('#define %s(SizeOfBuffer, Buffer) LibPatchPcdSetPtr(_gPcd_BinaryPatch_%s, (UINTN)_PCD_PATCHABLE_%s_SIZE, (SizeOfBuffer), (Buffer))\n' % (SetModeName, Pcd.TokenCName, Pcd.TokenCName))
            else:
                AutoGenH.Append('#define %s(Value) (%s = (Value))\n' % (SetModeName, PcdVariableName))
        else:
            AutoGenH.Append('//#define %s ASSERT(FALSE) // It is not allowed to set value for a FIXED_AT_BUILD PCD\n' % SetModeName)

## Create code for library module PCDs
#
#   @param      Info        The ModuleAutoGen object
#   @param      AutoGenC    The TemplateString object for C code
#   @param      AutoGenH    The TemplateString object for header file
#   @param      Pcd         The PCD object
#
def CreateLibraryPcdCode(Info, AutoGenC, AutoGenH, Pcd):
    """Emit PCD access macros for a library module.  Unlike the module variant,
    libraries never define the backing variable: fixed/patchable PCDs are
    declared 'extern' and resolved by the consuming driver's AutoGen.c."""
    PcdTokenNumber = Info.PlatformInfo.PcdTokenNumber
    TokenSpaceGuidCName = Pcd.TokenSpaceGuidCName
    TokenCName = Pcd.TokenCName
    TokenSpaceGuidValue = Pcd.TokenSpaceGuidValue #Info.GuidList[TokenSpaceGuidCName]
    if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName) not in PcdTokenNumber:
        EdkLogger.error("build", AUTOGEN_ERROR,
                        "No generated token number for %s.%s\n" % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName),
                        ExtraData="[%s]" % str(Info))
    TokenNumber = PcdTokenNumber[TokenCName, TokenSpaceGuidCName]

    # If PCD is DynamicEx, then use TokenNumber declared in DEC file
    if Pcd.Type in gDynamicExPcd:
        TokenNumber = int(Pcd.TokenValue, 0)

    if Pcd.Type not in gItemTypeStringDatabase:
        EdkLogger.error("build", AUTOGEN_ERROR,
                        "Unknown PCD type [%s] of PCD %s.%s" % (Pcd.Type, Pcd.TokenSpaceGuidCName, Pcd.TokenCName),
                        ExtraData="[%s]" % str(Info))
    if Pcd.DatumType not in gDatumSizeStringDatabase:
        EdkLogger.error("build", AUTOGEN_ERROR,
                        "Unknown datum type [%s] of PCD %s.%s" % (Pcd.DatumType, Pcd.TokenSpaceGuidCName, Pcd.TokenCName),
                        ExtraData="[%s]" % str(Info))

    DatumType = Pcd.DatumType
    DatumSize = gDatumSizeStringDatabaseH[DatumType]
    DatumSizeLib = gDatumSizeStringDatabaseLib[DatumType]
    GetModeName = '_PCD_GET_MODE_' + DatumSize + '_' + TokenCName
    SetModeName = '_PCD_SET_MODE_' + DatumSize + '_' + TokenCName

    Type = ''
    Array = ''
    if Pcd.DatumType == 'VOID*':
        Type = '(VOID *)'
        Array = '[]'

    AutoGenH.Append('#define _PCD_TOKEN_%s %dU\n' % (TokenCName, TokenNumber))

    PcdItemType = Pcd.Type
    #if PcdItemType in gDynamicPcd:
    #    PcdItemType = TAB_PCDS_FIXED_AT_BUILD
    #    if (TokenCName, TokenSpaceGuidCName) in Info.PlatformInfo.Platform.Pcds:
    #        PcdItemType = Info.PlatformInfo.Platform.Pcds[TokenCName, TokenSpaceGuidCName].Type
    if PcdItemType in gDynamicExPcd:
        PcdTokenName = '_PCD_TOKEN_' + TokenCName
        AutoGenH.Append('#define %s LibPcdGetEx%s(&%s, %s)\n' % (GetModeName, DatumSizeLib, TokenSpaceGuidCName, PcdTokenName))
        if DatumType == 'VOID*':
            AutoGenH.Append('#define %s(SizeOfBuffer, Buffer) LibPcdSetEx%s(&%s, %s, (SizeOfBuffer), (Buffer))\n' % (SetModeName,DatumSizeLib, TokenSpaceGuidCName, PcdTokenName))
        else:
            AutoGenH.Append('#define %s(Value) LibPcdSetEx%s(&%s, %s, (Value))\n' % (SetModeName, DatumSizeLib, TokenSpaceGuidCName, PcdTokenName))
    if PcdItemType in gDynamicPcd:
        PcdTokenName = '_PCD_TOKEN_' + TokenCName
        AutoGenH.Append('#define %s LibPcdGet%s(%s)\n' % (GetModeName, DatumSizeLib, PcdTokenName))
        if DatumType == 'VOID*':
            AutoGenH.Append('#define %s(SizeOfBuffer, Buffer) LibPcdSet%s(%s, (SizeOfBuffer), (Buffer))\n' %(SetModeName, DatumSizeLib, PcdTokenName))
        else:
            AutoGenH.Append('#define %s(Value) LibPcdSet%s(%s, (Value))\n' % (SetModeName, DatumSizeLib, PcdTokenName))
    if PcdItemType == TAB_PCDS_PATCHABLE_IN_MODULE:
        PcdVariableName = '_gPcd_' + gItemTypeStringDatabase[TAB_PCDS_PATCHABLE_IN_MODULE] + '_' + TokenCName
        AutoGenH.Append('extern %s _gPcd_BinaryPatch_%s%s;\n' %(DatumType, TokenCName, Array) )
        AutoGenH.Append('#define %s %s_gPcd_BinaryPatch_%s\n' %(GetModeName, Type, TokenCName))
        AutoGenH.Append('#define %s(Value) (%s = (Value))\n' % (SetModeName, PcdVariableName))
    if PcdItemType == TAB_PCDS_FIXED_AT_BUILD or PcdItemType == TAB_PCDS_FEATURE_FLAG:
        AutoGenH.Append('extern const %s _gPcd_FixedAtBuild_%s%s;\n' %(DatumType, TokenCName, Array))
        #AutoGenH.Append('#define _PCD_VALUE_%s _gPcd_FixedAtBuild_%s\n' %(TokenCName, TokenCName))
        AutoGenH.Append('#define %s %s_gPcd_FixedAtBuild_%s\n' %(GetModeName, Type, TokenCName))
        AutoGenH.Append('//#define %s ASSERT(FALSE) // It is not allowed to set value for a FIXED_AT_BUILD PCD\n' % SetModeName)

## Create code for PCD database in DXE or PEI phase
#
#   @param      Platform    The platform object
#   @retval     tuple       Two TemplateString objects for C code and header file,
#                           respectively
#
def CreatePcdDatabasePhaseSpecificAutoGen (Platform, Phase):
    """Build the substitution dictionary for the g*PcdDatabaseAutoGen* templates
    for one phase ('PEI' or 'DXE') from Platform.DynamicPcdList.
    NOTE(review): this function continues past the end of this chunk; only the
    visible portion is annotated here."""
    AutoGenC = TemplateString()
    AutoGenH = TemplateString()

    # Defaults describe an empty database; entries are overwritten/extended as
    # dynamic PCDs are discovered below.
    Dict = {
        'PHASE'                         : Phase,
        'GUID_TABLE_SIZE'               : '1U',
        'STRING_TABLE_SIZE'             : '1U',
        'SKUID_TABLE_SIZE'              : '1U',
        'LOCAL_TOKEN_NUMBER_TABLE_SIZE' : '1U',
        'LOCAL_TOKEN_NUMBER'            : '0U',
        'EXMAPPING_TABLE_SIZE'          : '1U',
        'EX_TOKEN_NUMBER'               : '0U',
        'SIZE_TABLE_SIZE'               : '2U',
        'GUID_TABLE_EMPTY'              : 'TRUE',
        'STRING_TABLE_EMPTY'            : 'TRUE',
        'SKUID_TABLE_EMPTY'             : 'TRUE',
        'DATABASE_EMPTY'                : 'TRUE',
        'EXMAP_TABLE_EMPTY'             : 'TRUE',
        'PCD_DATABASE_UNINIT_EMPTY'     : ' UINT8 dummy; /* PCD_DATABASE_UNINIT is emptry */',
        'SYSTEM_SKU_ID'                 : ' SKU_ID SystemSkuId;',
        'SYSTEM_SKU_ID_VALUE'           : '0U'
    }

    for DatumType in ['UINT64','UINT32','UINT16','UINT8','BOOLEAN', "VOID*"]:
        Dict['VARDEF_CNAME_' + DatumType] = []
        Dict['VARDEF_GUID_' + DatumType] = []
        Dict['VARDEF_SKUID_' + DatumType] = []
        Dict['VARDEF_VALUE_' + DatumType] = []
        for Init in ['INIT','UNINIT']:
            Dict[Init+'_CNAME_DECL_' + DatumType] = []
            Dict[Init+'_GUID_DECL_' + DatumType] = []
            Dict[Init+'_NUMSKUS_DECL_' + DatumType] = []
            Dict[Init+'_VALUE_' + DatumType] = []
    for Type in ['STRING_HEAD','VPD_HEAD','VARIABLE_HEAD']:
        Dict[Type + '_CNAME_DECL'] = []
        Dict[Type + '_GUID_DECL'] = []
        Dict[Type + '_NUMSKUS_DECL'] = []
        Dict[Type + '_VALUE'] = []

    Dict['STRING_TABLE_INDEX'] = []
    Dict['STRING_TABLE_LENGTH'] = []
    Dict['STRING_TABLE_CNAME'] = []
    Dict['STRING_TABLE_GUID'] = []
    Dict['STRING_TABLE_VALUE'] = []

    Dict['SIZE_TABLE_CNAME'] = []
    Dict['SIZE_TABLE_GUID'] = []
    Dict['SIZE_TABLE_CURRENT_LENGTH'] = []
    Dict['SIZE_TABLE_MAXIMUM_LENGTH'] = []

    Dict['EXMAPPING_TABLE_EXTOKEN'] = []
    Dict['EXMAPPING_TABLE_LOCAL_TOKEN'] = []
    Dict['EXMAPPING_TABLE_GUID_INDEX'] = []

    Dict['GUID_STRUCTURE'] = []

    Dict['SKUID_VALUE'] = []

    Dict['VARDEF_HEADER'] = []
    if Phase == 'DXE':
        # The SystemSkuId field lives only in the PEI database structure.
        Dict['SYSTEM_SKU_ID'] = ''
        Dict['SYSTEM_SKU_ID_VALUE'] = ''

    StringTableIndex = 0
    StringTableSize = 0
    NumberOfLocalTokens = 0
    NumberOfPeiLocalTokens = 0
    NumberOfDxeLocalTokens = 0
    NumberOfExTokens = 0
    NumberOfSizeItems = 0
    GuidList = []

    # First pass: classify every dynamic PCD of this phase and fill the
    # per-type value/string/VPD/HII tables.
    for Pcd in Platform.DynamicPcdList:
        CName = Pcd.TokenCName
        TokenSpaceGuidCName = Pcd.TokenSpaceGuidCName

        EdkLogger.debug(EdkLogger.DEBUG_3, "PCD: %s %s (%s : %s)" % (CName, TokenSpaceGuidCName, Pcd.Phase, Phase))

        if Pcd.DatumType not in gDatumSizeStringDatabase:
            EdkLogger.error("build", AUTOGEN_ERROR,
                            "Unknown datum type [%s] of PCD %s.%s" % (Pcd.DatumType, Pcd.TokenSpaceGuidCName, Pcd.TokenCName),
                            ExtraData="[%s]" % str(Platform))

        if Pcd.Phase == 'PEI':
            NumberOfPeiLocalTokens += 1
        if Pcd.Phase == 'DXE':
            NumberOfDxeLocalTokens += 1
        if Pcd.Phase != Phase:
            continue

        #
        # TODO: need GetGuidValue() definition
        #
        TokenSpaceGuidStructure = Pcd.TokenSpaceGuidValue
        TokenSpaceGuid = GuidStructureStringToGuidValueName(TokenSpaceGuidStructure)
        if Pcd.Type in gDynamicExPcd:
            if TokenSpaceGuid not in GuidList:
                GuidList += [TokenSpaceGuid]
                Dict['GUID_STRUCTURE'].append(TokenSpaceGuidStructure)
            NumberOfExTokens += 1

        ValueList = []
        StringHeadOffsetList = []
        VpdHeadOffsetList = []
        VariableHeadValueList = []
        Pcd.InitString = 'UNINIT'

        if Pcd.DatumType == 'VOID*':
            if Pcd.Type not in ["DynamicVpd", "DynamicExVpd"]:
                Pcd.TokenTypeList = ['PCD_TYPE_STRING']
            else:
                Pcd.TokenTypeList = []
        elif Pcd.DatumType == 'BOOLEAN':
            # BOOLEAN is stored as a UINT8 in the database.
            Pcd.TokenTypeList = ['PCD_DATUM_TYPE_UINT8']
        else:
            Pcd.TokenTypeList = ['PCD_DATUM_TYPE_' + Pcd.DatumType]

        if len(Pcd.SkuInfoList) > 1:
            Pcd.TokenTypeList += ['PCD_TYPE_SKU_ENABLED']

        for SkuName in Pcd.SkuInfoList:
            Sku = Pcd.SkuInfoList[SkuName]
            SkuId = Sku.SkuId
            if SkuId == None or SkuId == '':
                continue

            if (SkuId + 'U') not in Dict['SKUID_VALUE']:
                Dict['SKUID_VALUE'].append(SkuId + 'U')

            SkuIdIndex = Dict['SKUID_VALUE'].index(SkuId + 'U')
            if len(Sku.VariableName) > 0:
                # HII-backed PCD: variable name goes into the string table,
                # variable GUID into the GUID table.
                Pcd.TokenTypeList += ['PCD_TYPE_HII']
                Pcd.InitString = 'INIT'
                VariableNameStructure = StringToArray(Sku.VariableName)
                if VariableNameStructure not in Dict['STRING_TABLE_VALUE']:
                    Dict['STRING_TABLE_CNAME'].append(CName)
                    Dict['STRING_TABLE_GUID'].append(TokenSpaceGuid)
                    if StringTableIndex == 0:
                        Dict['STRING_TABLE_INDEX'].append('')
                    else:
                        Dict['STRING_TABLE_INDEX'].append('_%d' % StringTableIndex)
                    # len - 3 + 1: strip L"..." decoration, add NUL; *2 for UCS-2.
                    Dict['STRING_TABLE_LENGTH'].append((len(Sku.VariableName) - 3 + 1) * 2)
                    Dict['STRING_TABLE_VALUE'].append(VariableNameStructure)
                    StringTableIndex += 1
                    StringTableSize += (len(Sku.VariableName) - 3 + 1) * 2

                VariableHeadStringIndex = 0
                for Index in range(Dict['STRING_TABLE_VALUE'].index(VariableNameStructure)):
                    VariableHeadStringIndex += Dict['STRING_TABLE_LENGTH'][Index]

                VariableGuidStructure = Sku.VariableGuidValue
                VariableGuid = GuidStructureStringToGuidValueName(VariableGuidStructure)
                if VariableGuid not in GuidList:
                    GuidList += [VariableGuid]
                    Dict['GUID_STRUCTURE'].append(VariableGuidStructure)
                VariableHeadGuidIndex = GuidList.index(VariableGuid)

                if "PCD_TYPE_STRING" in Pcd.TokenTypeList:
                    VariableHeadValueList.append('%dU, offsetof(%s_PCD_DATABASE, Init.%s_%s), %dU, %sU' %
                                                 (VariableHeadStringIndex, Phase, CName, TokenSpaceGuid,
                                                  VariableHeadGuidIndex, Sku.VariableOffset))
                else:
                    VariableHeadValueList.append('%dU, offsetof(%s_PCD_DATABASE, Init.%s_%s_VariableDefault_%s), %dU, %sU' %
                                                 (VariableHeadStringIndex, Phase, CName, TokenSpaceGuid, SkuIdIndex,
                                                  VariableHeadGuidIndex, Sku.VariableOffset))
                Dict['VARDEF_CNAME_'+Pcd.DatumType].append(CName)
                Dict['VARDEF_GUID_'+Pcd.DatumType].append(TokenSpaceGuid)
                Dict['VARDEF_SKUID_'+Pcd.DatumType].append(SkuIdIndex)
                if "PCD_TYPE_STRING" in Pcd.TokenTypeList:
                    Dict['VARDEF_VALUE_' + Pcd.DatumType].append("%s_%s[%d]" % (Pcd.TokenCName, TokenSpaceGuid, SkuIdIndex))
                else:
                    #
                    # ULL (for UINT64) or U(other integer type) should be append to avoid
                    # warning under linux building environment.
                    #
                    if Pcd.DatumType == "UINT64":
                        Dict['VARDEF_VALUE_'+Pcd.DatumType].append(Sku.HiiDefaultValue + "ULL")
                    elif Pcd.DatumType in ("UINT32", "UINT16", "UINT8"):
                        Dict['VARDEF_VALUE_'+Pcd.DatumType].append(Sku.HiiDefaultValue + "U")
                    elif Pcd.DatumType == "BOOLEAN":
                        if Sku.HiiDefaultValue in ["1", "0"]:
                            Dict['VARDEF_VALUE_'+Pcd.DatumType].append(Sku.HiiDefaultValue + "U")
                    else:
                        Dict['VARDEF_VALUE_'+Pcd.DatumType].append(Sku.HiiDefaultValue)
            elif Sku.VpdOffset != '':
                # VPD-backed PCD: only the VPD offset is recorded.
                Pcd.TokenTypeList += ['PCD_TYPE_VPD']
                Pcd.InitString = 'INIT'
                VpdHeadOffsetList.append(str(Sku.VpdOffset) + 'U')
                continue

            if Pcd.DatumType == 'VOID*':
                Pcd.TokenTypeList += ['PCD_TYPE_STRING']
                Pcd.InitString = 'INIT'
                if Sku.HiiDefaultValue != '' and Sku.DefaultValue == '':
                    Sku.DefaultValue = Sku.HiiDefaultValue
                if Sku.DefaultValue != '':
                    NumberOfSizeItems += 1
                    Dict['STRING_TABLE_CNAME'].append(CName)
                    Dict['STRING_TABLE_GUID'].append(TokenSpaceGuid)

                    if StringTableIndex == 0:
                        Dict['STRING_TABLE_INDEX'].append('')
                    else:
                        Dict['STRING_TABLE_INDEX'].append('_%d' % StringTableIndex)

                    # Size depends on default form: L"..." (UCS-2), "..." (ASCII),
                    # or {..} (raw byte list).
                    if Sku.DefaultValue[0] == 'L':
                        Size = (len(Sku.DefaultValue) - 3 + 1) * 2
                        Dict['STRING_TABLE_VALUE'].append(StringToArray(Sku.DefaultValue))
                    elif Sku.DefaultValue[0] == '"':
                        Size = len(Sku.DefaultValue) - 2 + 1
                        Dict['STRING_TABLE_VALUE'].append(StringToArray(Sku.DefaultValue))
                    elif Sku.DefaultValue[0] == '{':
                        Size = len(Sku.DefaultValue.replace(',',' ').split())
                        Dict['STRING_TABLE_VALUE'].append(Sku.DefaultValue)

                    StringHeadOffsetList.append(str(StringTableSize) + 'U')
                    Dict['SIZE_TABLE_CNAME'].append(CName)
                    Dict['SIZE_TABLE_GUID'].append(TokenSpaceGuid)
                    Dict['SIZE_TABLE_CURRENT_LENGTH'].append(str(Size) + 'U')
                    Dict['SIZE_TABLE_MAXIMUM_LENGTH'].append(str(Pcd.MaxDatumSize) + 'U')
                    if Pcd.MaxDatumSize != '':
                        MaxDatumSize = int(Pcd.MaxDatumSize, 0)
                        if MaxDatumSize < Size:
                            EdkLogger.error("build", AUTOGEN_ERROR,
                                            "The maximum size of VOID* type PCD '%s.%s' is less than its actual size occupied." % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName),
                                            ExtraData="[%s]" % str(Platform))
                        Size = MaxDatumSize
                    Dict['STRING_TABLE_LENGTH'].append(Size)
                    StringTableIndex += 1
                    StringTableSize += (Size)
            else:
                if "PCD_TYPE_HII" not in Pcd.TokenTypeList:
                    Pcd.TokenTypeList += ['PCD_TYPE_DATA']
                    if Sku.DefaultValue == 'TRUE':
                        Pcd.InitString = 'INIT'
                    else:
                        try:
                            if int(Sku.DefaultValue, 0) != 0:
                                Pcd.InitString = 'INIT'
                        except:
                            # Non-numeric default: deliberately left UNINIT.
                            pass

                #
                # For UNIT64 type PCD's value, ULL should be append to avoid
                # warning under linux building environment.
                #
                if Pcd.DatumType == "UINT64":
                    ValueList.append(Sku.DefaultValue + "ULL")
                elif Pcd.DatumType in ("UINT32", "UINT16", "UINT8"):
                    ValueList.append(Sku.DefaultValue + "U")
                elif Pcd.DatumType == "BOOLEAN":
                    if Sku.DefaultValue in ["1", "0"]:
                        ValueList.append(Sku.DefaultValue + "U")
                else:
                    ValueList.append(Sku.DefaultValue)

        Pcd.TokenTypeList = list(set(Pcd.TokenTypeList))

        if 'PCD_TYPE_HII' in Pcd.TokenTypeList:
            Dict['VARIABLE_HEAD_CNAME_DECL'].append(CName)
            Dict['VARIABLE_HEAD_GUID_DECL'].append(TokenSpaceGuid)
            Dict['VARIABLE_HEAD_NUMSKUS_DECL'].append(len(Pcd.SkuInfoList))
            Dict['VARIABLE_HEAD_VALUE'].append('{ %s }\n' % ' },\n { '.join(VariableHeadValueList))
            Dict['VARDEF_HEADER'].append('_Variable_Header')
        else:
            Dict['VARDEF_HEADER'].append('')
        if 'PCD_TYPE_VPD' in Pcd.TokenTypeList:
            Dict['VPD_HEAD_CNAME_DECL'].append(CName)
            Dict['VPD_HEAD_GUID_DECL'].append(TokenSpaceGuid)
            Dict['VPD_HEAD_NUMSKUS_DECL'].append(len(Pcd.SkuInfoList))
            Dict['VPD_HEAD_VALUE'].append('{ %s }' % ' }, { '.join(VpdHeadOffsetList))
        if 'PCD_TYPE_STRING' in Pcd.TokenTypeList:
            Dict['STRING_HEAD_CNAME_DECL'].append(CName)
            Dict['STRING_HEAD_GUID_DECL'].append(TokenSpaceGuid)
            Dict['STRING_HEAD_NUMSKUS_DECL'].append(len(Pcd.SkuInfoList))
            Dict['STRING_HEAD_VALUE'].append(', '.join(StringHeadOffsetList))
        if 'PCD_TYPE_DATA' in Pcd.TokenTypeList:
            Dict[Pcd.InitString+'_CNAME_DECL_'+Pcd.DatumType].append(CName)
            Dict[Pcd.InitString+'_GUID_DECL_'+Pcd.DatumType].append(TokenSpaceGuid)
            Dict[Pcd.InitString+'_NUMSKUS_DECL_'+Pcd.DatumType].append(len(Pcd.SkuInfoList))
            if Pcd.InitString == 'UNINIT':
                Dict['PCD_DATABASE_UNINIT_EMPTY'] = ''
            else:
                Dict[Pcd.InitString+'_VALUE_'+Pcd.DatumType].append(', '.join(ValueList))

    if Phase == 'PEI':
        NumberOfLocalTokens = NumberOfPeiLocalTokens
    if Phase == 'DXE':
        NumberOfLocalTokens = NumberOfDxeLocalTokens

    Dict['TOKEN_INIT'] = ['' for x in range(NumberOfLocalTokens)]
    Dict['TOKEN_CNAME'] = ['' for x in range(NumberOfLocalTokens)]
    Dict['TOKEN_GUID'] = ['' for x in range(NumberOfLocalTokens)]
    Dict['TOKEN_TYPE'] = ['' for x in range(NumberOfLocalTokens)]

    # Second pass: place each PCD at its generated token slot and build the
    # EX mapping table for DynamicEx PCDs.
    for Pcd in Platform.DynamicPcdList:
        CName = Pcd.TokenCName
        TokenSpaceGuidCName = Pcd.TokenSpaceGuidCName
        if Pcd.Phase != Phase:
            continue

        TokenSpaceGuid = GuidStructureStringToGuidValueName(Pcd.TokenSpaceGuidValue) #(Platform.PackageList, TokenSpaceGuidCName))
        # Token numbers are 1-based; DXE-phase slots start after all PEI tokens.
        GeneratedTokenNumber = Platform.PcdTokenNumber[CName, TokenSpaceGuidCName] - 1
        if Phase == 'DXE':
            GeneratedTokenNumber -= NumberOfPeiLocalTokens

        EdkLogger.debug(EdkLogger.DEBUG_1, "PCD = %s.%s" % (CName, TokenSpaceGuidCName))
        EdkLogger.debug(EdkLogger.DEBUG_1, "phase = %s" % Phase)
        EdkLogger.debug(EdkLogger.DEBUG_1, "GeneratedTokenNumber = %s" % str(GeneratedTokenNumber))

        Dict['TOKEN_INIT'][GeneratedTokenNumber] = 'Init'
        if Pcd.InitString == 'UNINIT':
            Dict['TOKEN_INIT'][GeneratedTokenNumber] = 'Uninit'
        Dict['TOKEN_CNAME'][GeneratedTokenNumber] = CName
        Dict['TOKEN_GUID'][GeneratedTokenNumber] = TokenSpaceGuid
        Dict['TOKEN_TYPE'][GeneratedTokenNumber] = ' | '.join(Pcd.TokenTypeList)
        Pcd.TokenTypeList = list(set(Pcd.TokenTypeList))

        #
        # Update VARDEF_HEADER
        #
        if 'PCD_TYPE_HII' in Pcd.TokenTypeList:
            Dict['VARDEF_HEADER'][GeneratedTokenNumber] = '_Variable_Header'
        else:
            Dict['VARDEF_HEADER'][GeneratedTokenNumber] = ''

        if Pcd.Type in gDynamicExPcd:
            Dict['EXMAPPING_TABLE_EXTOKEN'].append(str(Pcd.TokenValue) + 'U')

            if Phase == 'DXE':
                GeneratedTokenNumber += NumberOfPeiLocalTokens
            #
            # Per, PCD architecture specification, PCD Token Number is 1 based and 0 is defined as invalid token number.
            # For each EX type PCD, a PCD Token Number is assigned. When the
            # PCD Driver/PEIM map EX_GUID and EX_TOKEN_NUMBER to the PCD Token Number,
            # the non-EX Protocol/PPI interface can be called to get/set the value. This assumption is made by
            # Pcd Driver/PEIM in MdeModulePkg.
            # Therefore, 1 is added to GeneratedTokenNumber to generate a PCD Token Number before being inserted
            # to the EXMAPPING_TABLE.
            #
            Dict['EXMAPPING_TABLE_LOCAL_TOKEN'].append(str(GeneratedTokenNumber + 1) + 'U')
            Dict['EXMAPPING_TABLE_GUID_INDEX'].append(str(GuidList.index(TokenSpaceGuid)) + 'U')

    if GuidList != []:
        Dict['GUID_TABLE_EMPTY'] = 'FALSE'
        Dict['GUID_TABLE_SIZE'] = str(len(GuidList)) + 'U'
    else:
        Dict['GUID_STRUCTURE'] = [GuidStringToGuidStructureString('00000000-0000-0000-0000-000000000000')]

    if StringTableIndex == 0:
        # Keep the C struct non-empty even when no strings exist.
        Dict['STRING_TABLE_INDEX'].append('')
        Dict['STRING_TABLE_LENGTH'].append(1)
        Dict['STRING_TABLE_CNAME'].append('')
        Dict['STRING_TABLE_GUID'].append('')
        Dict['STRING_TABLE_VALUE'].append('{ 0 }')
    else:
        Dict['STRING_TABLE_EMPTY'] = 'FALSE'
        Dict['STRING_TABLE_SIZE'] = str(StringTableSize) + 'U'

    if Dict['SIZE_TABLE_CNAME'] == []:
        Dict['SIZE_TABLE_CNAME'].append('')
        Dict['SIZE_TABLE_GUID'].append('')
        Dict['SIZE_TABLE_CURRENT_LENGTH'].append('0U')
        Dict['SIZE_TABLE_MAXIMUM_LENGTH'].append('0U')

    if NumberOfLocalTokens != 0:
        Dict['DATABASE_EMPTY'] = 'FALSE'
        Dict['LOCAL_TOKEN_NUMBER_TABLE_SIZE'] = NumberOfLocalTokens
        Dict['LOCAL_TOKEN_NUMBER'] = NumberOfLocalTokens

    if NumberOfExTokens != 0:
        Dict['EXMAP_TABLE_EMPTY'] = 'FALSE'
        Dict['EXMAPPING_TABLE_SIZE'] = str(NumberOfExTokens) + 'U'
        # --- continuation of CreatePcdDatabasePhaseSpecificAutoGen (its `def` line is
        # --- above this chunk; indentation below reconstructed from the control flow) ---
        Dict['EX_TOKEN_NUMBER'] = str(NumberOfExTokens) + 'U'
    else:
        # No DynamicEx token present: emit one all-zero row so the generated
        # EX-mapping C arrays are never zero-length.
        Dict['EXMAPPING_TABLE_EXTOKEN'].append('0U')
        Dict['EXMAPPING_TABLE_LOCAL_TOKEN'].append('0U')
        Dict['EXMAPPING_TABLE_GUID_INDEX'].append('0U')

    if NumberOfSizeItems != 0:
        # Each VOID* PCD contributes two rows: current length and maximum length.
        Dict['SIZE_TABLE_SIZE'] = str(NumberOfSizeItems * 2) + 'U'

    AutoGenH.Append(gPcdDatabaseAutoGenH.Replace(Dict))
    if NumberOfLocalTokens == 0:
        AutoGenC.Append(gEmptyPcdDatabaseAutoGenC.Replace(Dict))
    else:
        #
        # Update Size Table to the right order, it should be same with LocalTokenNumberTable
        #
        SizeCNameTempList = []
        SizeGuidTempList = []
        SizeCurLenTempList = []
        SizeMaxLenTempList = []
        # A single (empty, empty) placeholder row means there is nothing real to reorder.
        ReOrderFlag = True
        if len(Dict['SIZE_TABLE_CNAME']) == 1:
            if not (Dict['SIZE_TABLE_CNAME'][0] and Dict['SIZE_TABLE_GUID'][0]):
                ReOrderFlag = False
        if ReOrderFlag:
            # Walk the token table in order and pull matching size rows into temp
            # lists so the size table ends up in LocalTokenNumberTable order.
            for Count in range(len(Dict['TOKEN_CNAME'])):
                for Count1 in range(len(Dict['SIZE_TABLE_CNAME'])):
                    if Dict['TOKEN_CNAME'][Count] == Dict['SIZE_TABLE_CNAME'][Count1] and \
                        Dict['TOKEN_GUID'][Count] == Dict['SIZE_TABLE_GUID'][Count1]:
                        SizeCNameTempList.append(Dict['SIZE_TABLE_CNAME'][Count1])
                        SizeGuidTempList.append(Dict['SIZE_TABLE_GUID'][Count1])
                        SizeCurLenTempList.append(Dict['SIZE_TABLE_CURRENT_LENGTH'][Count1])
                        SizeMaxLenTempList.append(Dict['SIZE_TABLE_MAXIMUM_LENGTH'][Count1])

            for Count in range(len(Dict['SIZE_TABLE_CNAME'])):
                Dict['SIZE_TABLE_CNAME'][Count] = SizeCNameTempList[Count]
                Dict['SIZE_TABLE_GUID'][Count] = SizeGuidTempList[Count]
                Dict['SIZE_TABLE_CURRENT_LENGTH'][Count] = SizeCurLenTempList[Count]
                Dict['SIZE_TABLE_MAXIMUM_LENGTH'][Count] = SizeMaxLenTempList[Count]

        AutoGenC.Append(gPcdDatabaseAutoGenC.Replace(Dict))

    return AutoGenH, AutoGenC

## Create code for PCD database
#
#  Emits the PCD database declarations/definitions for a module that is a PCD
#  driver (PEI PCD PEIM or DXE PCD driver).  The PEI phase database is always
#  generated first because the DXE database is laid out relative to it.
#
#   @param      Info        The ModuleAutoGen object
#   @param      AutoGenC    The TemplateString object for C code
#   @param      AutoGenH    The TemplateString object for header file
#
def CreatePcdDatabaseCode (Info, AutoGenC, AutoGenH):
    if Info.PcdIsDriver == "":
        return
    if Info.PcdIsDriver not in gPcdPhaseMap:
        EdkLogger.error("build", AUTOGEN_ERROR, "Not supported PcdIsDriver type:%s" % Info.PcdIsDriver,
                        ExtraData="[%s]" % str(Info))

    AutoGenH.Append(gPcdDatabaseCommonAutoGenH)
    # PEI database is generated unconditionally; its header goes into AutoGen.h
    # even for the DXE driver (the DXE database references PEI token counts).
    AdditionalAutoGenH, AdditionalAutoGenC = CreatePcdDatabasePhaseSpecificAutoGen (Info.PlatformInfo, 'PEI')
    AutoGenH.Append(AdditionalAutoGenH.String)

    Phase = gPcdPhaseMap[Info.PcdIsDriver]
    if Phase == 'PEI':
        AutoGenC.Append(AdditionalAutoGenC.String)

    if Phase == 'DXE':
        AdditionalAutoGenH, AdditionalAutoGenC = CreatePcdDatabasePhaseSpecificAutoGen (Info.PlatformInfo, Phase)
        AutoGenH.Append(AdditionalAutoGenH.String)
        AutoGenC.Append(AdditionalAutoGenC.String)

    AutoGenH.Append(gPcdDatabaseEpilogueAutoGenH)

## Create code for library constructor
#
#  Collects the constructors of all dependent libraries (or of the library
#  itself when generating for a library) and emits the prototypes and the
#  phase-appropriate ProcessLibraryConstructorList() body.
#
#   @param      Info        The ModuleAutoGen object
#   @param      AutoGenC    The TemplateString object for C code
#   @param      AutoGenH    The TemplateString object for header file
#
def CreateLibraryConstructorCode(Info, AutoGenC, AutoGenH):
    #
    # Library Constructors
    #
    ConstructorPrototypeString = TemplateString()
    ConstructorCallingString = TemplateString()
    if Info.IsLibrary:
        DependentLibraryList = [Info.Module]
    else:
        DependentLibraryList = Info.DependentLibraryList
    for Lib in DependentLibraryList:
        if len(Lib.ConstructorList) <= 0:
            continue
        Dict = {'Function':Lib.ConstructorList}
        # Prototype/call template is selected by the library's execution phase.
        if Lib.ModuleType in ['BASE', 'SEC']:
            ConstructorPrototypeString.Append(gLibraryStructorPrototype['BASE'].Replace(Dict))
            ConstructorCallingString.Append(gLibraryStructorCall['BASE'].Replace(Dict))
        elif Lib.ModuleType in ['PEI_CORE','PEIM']:
            ConstructorPrototypeString.Append(gLibraryStructorPrototype['PEI'].Replace(Dict))
            ConstructorCallingString.Append(gLibraryStructorCall['PEI'].Replace(Dict))
        elif Lib.ModuleType in ['DXE_CORE','DXE_DRIVER','DXE_SMM_DRIVER','DXE_RUNTIME_DRIVER',
                                'DXE_SAL_DRIVER','UEFI_DRIVER','UEFI_APPLICATION','SMM_CORE']:
            ConstructorPrototypeString.Append(gLibraryStructorPrototype['DXE'].Replace(Dict))
            ConstructorCallingString.Append(gLibraryStructorCall['DXE'].Replace(Dict))

    if str(ConstructorPrototypeString) == '':
        ConstructorPrototypeList = []
    else:
        ConstructorPrototypeList = [str(ConstructorPrototypeString)]
    if str(ConstructorCallingString) == '':
        ConstructorCallingList = []
    else:
        ConstructorCallingList = [str(ConstructorCallingString)]

    Dict = {
        'Type'              :   'Constructor',
        'FunctionPrototype' :   ConstructorPrototypeList,
        'FunctionCall'      :   ConstructorCallingList
    }
    if Info.IsLibrary:
        # Libraries only get the extern prototypes in AutoGen.h; the calling
        # list is emitted by the consuming driver.
        AutoGenH.Append("${BEGIN}${FunctionPrototype}${END}", Dict)
    else:
        if Info.ModuleType in ['BASE', 'SEC']:
            AutoGenC.Append(gLibraryString['BASE'].Replace(Dict))
        elif Info.ModuleType in ['PEI_CORE','PEIM']:
            AutoGenC.Append(gLibraryString['PEI'].Replace(Dict))
        elif Info.ModuleType in ['DXE_CORE','DXE_DRIVER','DXE_SMM_DRIVER','DXE_RUNTIME_DRIVER',
                                 'DXE_SAL_DRIVER','UEFI_DRIVER','UEFI_APPLICATION','SMM_CORE']:
            AutoGenC.Append(gLibraryString['DXE'].Replace(Dict))

## Create code for library destructor
#
#  Same as the constructor generation but iterating the dependent library list
#  in REVERSE order, so destructors run opposite to construction order.
#
#   @param      Info        The ModuleAutoGen object
#   @param      AutoGenC    The TemplateString object for C code
#   @param      AutoGenH    The TemplateString object for header file
#
def CreateLibraryDestructorCode(Info, AutoGenC, AutoGenH):
    #
    # Library Destructors
    #
    DestructorPrototypeString = TemplateString()
    DestructorCallingString = TemplateString()
    if Info.IsLibrary:
        DependentLibraryList = [Info.Module]
    else:
        DependentLibraryList = Info.DependentLibraryList
    for Index in range(len(DependentLibraryList)-1, -1, -1):
        Lib = DependentLibraryList[Index]
        if len(Lib.DestructorList) <= 0:
            continue
        Dict = {'Function':Lib.DestructorList}
        if Lib.ModuleType in ['BASE', 'SEC']:
            DestructorPrototypeString.Append(gLibraryStructorPrototype['BASE'].Replace(Dict))
            DestructorCallingString.Append(gLibraryStructorCall['BASE'].Replace(Dict))
        elif Lib.ModuleType in ['PEI_CORE','PEIM']:
            DestructorPrototypeString.Append(gLibraryStructorPrototype['PEI'].Replace(Dict))
            DestructorCallingString.Append(gLibraryStructorCall['PEI'].Replace(Dict))
        elif Lib.ModuleType in ['DXE_CORE','DXE_DRIVER','DXE_SMM_DRIVER','DXE_RUNTIME_DRIVER',
                                'DXE_SAL_DRIVER','UEFI_DRIVER','UEFI_APPLICATION', 'SMM_CORE']:
            DestructorPrototypeString.Append(gLibraryStructorPrototype['DXE'].Replace(Dict))
            DestructorCallingString.Append(gLibraryStructorCall['DXE'].Replace(Dict))

    if str(DestructorPrototypeString) == '':
        DestructorPrototypeList = []
    else:
        DestructorPrototypeList = [str(DestructorPrototypeString)]
    if str(DestructorCallingString) == '':
        DestructorCallingList = []
    else:
        DestructorCallingList = [str(DestructorCallingString)]

    Dict = {
        'Type'              :   'Destructor',
        'FunctionPrototype' :   DestructorPrototypeList,
        'FunctionCall'      :   DestructorCallingList
    }
    if Info.IsLibrary:
        AutoGenH.Append("${BEGIN}${FunctionPrototype}${END}", Dict)
    else:
        if Info.ModuleType in ['BASE', 'SEC']:
            AutoGenC.Append(gLibraryString['BASE'].Replace(Dict))
        elif Info.ModuleType in ['PEI_CORE','PEIM']:
            AutoGenC.Append(gLibraryString['PEI'].Replace(Dict))
        elif Info.ModuleType in ['DXE_CORE','DXE_DRIVER','DXE_SMM_DRIVER','DXE_RUNTIME_DRIVER',
                                 'DXE_SAL_DRIVER','UEFI_DRIVER','UEFI_APPLICATION','SMM_CORE']:
            AutoGenC.Append(gLibraryString['DXE'].Replace(Dict))

## Create code for ModuleEntryPoint
#
#  Emits the _ModuleEntryPoint wrapper matching the module type.  Core modules
#  (PEI_CORE/DXE_CORE/SMM_CORE) must declare exactly one entry point; driver and
#  application types select a template indexed by the number of entry points.
#
#   @param      Info        The ModuleAutoGen object
#   @param      AutoGenC    The TemplateString object for C code
#   @param      AutoGenH    The TemplateString object for header file
#
def CreateModuleEntryPointCode(Info, AutoGenC, AutoGenH):
    if Info.IsLibrary or Info.ModuleType in ['USER_DEFINED', 'SEC']:
        return
    #
    # Module Entry Points
    #
    NumEntryPoints = len(Info.Module.ModuleEntryPointList)
    if 'PI_SPECIFICATION_VERSION' in Info.Module.Specification:
        PiSpecVersion = Info.Module.Specification['PI_SPECIFICATION_VERSION']
    else:
        PiSpecVersion = '0x00000000'
    if 'UEFI_SPECIFICATION_VERSION' in Info.Module.Specification:
        UefiSpecVersion = Info.Module.Specification['UEFI_SPECIFICATION_VERSION']
    else:
        UefiSpecVersion = '0x00000000'
    Dict = {
        'Function'       :   Info.Module.ModuleEntryPointList,
        'PiSpecVersion'  :   PiSpecVersion + 'U',
        'UefiSpecVersion':   UefiSpecVersion + 'U'
    }

    if Info.ModuleType in ['PEI_CORE', 'DXE_CORE', 'SMM_CORE']:
        # Only enforce the single-entry-point rule when the module actually has
        # sources (binary modules have no entry point list to validate).
        if Info.SourceFileList <> None and Info.SourceFileList <> []:
            if NumEntryPoints != 1:
                EdkLogger.error(
                    "build",
                    AUTOGEN_ERROR,
                    '%s must have exactly one entry point' % Info.ModuleType,
                    File=str(Info),
                    ExtraData= ", ".join(Info.Module.ModuleEntryPointList)
                    )
        if Info.ModuleType == 'PEI_CORE':
            AutoGenC.Append(gPeiCoreEntryPointString.Replace(Dict))
            AutoGenH.Append(gPeiCoreEntryPointPrototype.Replace(Dict))
        elif Info.ModuleType == 'DXE_CORE':
            AutoGenC.Append(gDxeCoreEntryPointString.Replace(Dict))
            AutoGenH.Append(gDxeCoreEntryPointPrototype.Replace(Dict))
        elif Info.ModuleType == 'SMM_CORE':
            AutoGenC.Append(gSmmCoreEntryPointString.Replace(Dict))
            AutoGenH.Append(gSmmCoreEntryPointPrototype.Replace(Dict))
    elif Info.ModuleType == 'PEIM':
        # Template index 0/1 handles zero/one entry point; index 2 is the
        # multi-entry-point variant.
        if NumEntryPoints < 2:
            AutoGenC.Append(gPeimEntryPointString[NumEntryPoints].Replace(Dict))
        else:
            AutoGenC.Append(gPeimEntryPointString[2].Replace(Dict))
        AutoGenH.Append(gPeimEntryPointPrototype.Replace(Dict))
    elif Info.ModuleType in ['DXE_RUNTIME_DRIVER','DXE_DRIVER','DXE_SAL_DRIVER','UEFI_DRIVER']:
        if NumEntryPoints < 2:
            AutoGenC.Append(gUefiDriverEntryPointString[NumEntryPoints].Replace(Dict))
        else:
            AutoGenC.Append(gUefiDriverEntryPointString[2].Replace(Dict))
        AutoGenH.Append(gUefiDriverEntryPointPrototype.Replace(Dict))
    elif Info.ModuleType == 'DXE_SMM_DRIVER':
        if NumEntryPoints == 0:
            AutoGenC.Append(gDxeSmmEntryPointString[0].Replace(Dict))
        else:
            AutoGenC.Append(gDxeSmmEntryPointString[1].Replace(Dict))
        AutoGenH.Append(gDxeSmmEntryPointPrototype.Replace(Dict))
    elif Info.ModuleType == 'UEFI_APPLICATION':
        if NumEntryPoints < 2:
            AutoGenC.Append(gUefiApplicationEntryPointString[NumEntryPoints].Replace(Dict))
        else:
            AutoGenC.Append(gUefiApplicationEntryPointString[2].Replace(Dict))
        AutoGenH.Append(gUefiApplicationEntryPointPrototype.Replace(Dict))

## Create code for ModuleUnloadImage
#
#   @param      Info        The ModuleAutoGen object
#   @param      AutoGenC    The TemplateString object for C code
#   @param      AutoGenH    The TemplateString object for header file
#
def CreateModuleUnloadImageCode(Info, AutoGenC, AutoGenH):
    if Info.IsLibrary or Info.ModuleType in ['USER_DEFINED', 'SEC']:
        return
    #
    # Unload Image Handlers
    #
    NumUnloadImage = len(Info.Module.ModuleUnloadImageList)
    Dict = {'Count':str(NumUnloadImage) + 'U', 'Function':Info.Module.ModuleUnloadImageList}
    # Template index 0/1 covers zero/one handler; index 2 is the multi-handler form.
    if NumUnloadImage < 2:
        AutoGenC.Append(gUefiUnloadImageString[NumUnloadImage].Replace(Dict))
    else:
        AutoGenC.Append(gUefiUnloadImageString[2].Replace(Dict))
    AutoGenH.Append(gUefiUnloadImagePrototype.Replace(Dict))

## Create code for GUID
#
#  Emits one GLOBAL_REMOVE_IF_UNREFERENCED GUID definition per entry in
#  Info.GuidList into AutoGen.c.  Skipped entirely for libraries.
#
#   @param      Info        The ModuleAutoGen object
#   @param      AutoGenC    The TemplateString object for C code
#   @param      AutoGenH    The TemplateString object for header file
#
def CreateGuidDefinitionCode(Info, AutoGenC, AutoGenH):
    if Info.IsLibrary:
        return

    # USER_DEFINED/BASE modules do not pull in UEFI type headers, so plain GUID is used.
    if Info.ModuleType in ["USER_DEFINED", "BASE"]:
        GuidType = "GUID"
    else:
        GuidType = "EFI_GUID"

    if Info.GuidList:
        AutoGenC.Append("\n// Guids\n")
    #
    # GUIDs
    #
    for Key in Info.GuidList:
        AutoGenC.Append('GLOBAL_REMOVE_IF_UNREFERENCED %s %s = %s;\n' % (GuidType, Key, Info.GuidList[Key]))

## Create code for protocol
#
#   @param      Info        The ModuleAutoGen object
#   @param      AutoGenC    The TemplateString object for C code
#   @param      AutoGenH    The TemplateString object for header file
#
def CreateProtocolDefinitionCode(Info, AutoGenC, AutoGenH):
    if Info.IsLibrary:
        return

    if Info.ModuleType in ["USER_DEFINED", "BASE"]:
        GuidType = "GUID"
    else:
        GuidType = "EFI_GUID"

    if Info.ProtocolList:
        AutoGenC.Append("\n// Protocols\n")
    #
    # Protocol GUIDs
    #
    for Key in Info.ProtocolList:
        AutoGenC.Append('GLOBAL_REMOVE_IF_UNREFERENCED %s %s = %s;\n' % (GuidType, Key, Info.ProtocolList[Key]))

## Create code for PPI
#
#   @param      Info        The ModuleAutoGen object
#   @param      AutoGenC    The TemplateString object for C code
#   @param      AutoGenH    The TemplateString object for header file
#
def CreatePpiDefinitionCode(Info, AutoGenC, AutoGenH):
    if Info.IsLibrary:
        return

    if Info.ModuleType in ["USER_DEFINED", "BASE"]:
        GuidType = "GUID"
    else:
        GuidType = "EFI_GUID"

    if Info.PpiList:
        AutoGenC.Append("\n// PPIs\n")
    #
    # PPI GUIDs
    #
    for Key in Info.PpiList:
        AutoGenC.Append('GLOBAL_REMOVE_IF_UNREFERENCED %s %s = %s;\n' % (GuidType, Key, Info.PpiList[Key]))

## Create code for PCD
#
#  Emits PCD accessors for the module's own PCDs and, for drivers, also for the
#  PCDs used by its linked libraries, then generates the PCD database when the
#  module is a PCD driver.
#
#   @param      Info        The ModuleAutoGen object
#   @param      AutoGenC    The TemplateString object for C code
#   @param      AutoGenH    The TemplateString object for header file
#
def CreatePcdCode(Info, AutoGenC, AutoGenH):
    # Collect Token Space GUIDs used by DynamicEx PCDs
    TokenSpaceList = []
    for Pcd in Info.ModulePcdList:
        if Pcd.Type in gDynamicExPcd and Pcd.TokenSpaceGuidCName not in TokenSpaceList:
            TokenSpaceList += [Pcd.TokenSpaceGuidCName]

    # Add extern declarations to AutoGen.h if one or more Token Space GUIDs were found
    if TokenSpaceList <> []:
        AutoGenH.Append("\n// Definition of PCD Token Space GUIDs used in this module\n\n")
        if Info.ModuleType in ["USER_DEFINED", "BASE"]:
            GuidType = "GUID"
        else:
            GuidType = "EFI_GUID"
        for Item in TokenSpaceList:
            AutoGenH.Append('extern %s %s;\n' % (GuidType, Item))

    if Info.IsLibrary:
        if Info.ModulePcdList:
            AutoGenH.Append("\n// PCD definitions\n")
        for Pcd in Info.ModulePcdList:
            CreateLibraryPcdCode(Info, AutoGenC, AutoGenH, Pcd)
    else:
        if Info.ModulePcdList:
            AutoGenH.Append("\n// Definition of PCDs used in this module\n")
            AutoGenC.Append("\n// Definition of PCDs used in this module\n")
        for Pcd in Info.ModulePcdList:
            CreateModulePcdCode(Info, AutoGenC, AutoGenH, Pcd)
        if Info.LibraryPcdList:
            AutoGenH.Append("\n// Definition of PCDs used in libraries is in AutoGen.c\n")
            AutoGenC.Append("\n// Definition of PCDs used in libraries\n")
        for Pcd in Info.LibraryPcdList:
            # NOTE(review): AutoGenC is passed as BOTH the C and the header
            # target here — appears deliberate (library PCD definitions are
            # routed into AutoGen.c per the banner above), but confirm against
            # CreateModulePcdCode's parameter order.
            CreateModulePcdCode(Info, AutoGenC, AutoGenC, Pcd)
    CreatePcdDatabaseCode(Info, AutoGenC, AutoGenH)

## Create code for unicode string definition
#
#   @param      Info             The ModuleAutoGen object
#   @param      AutoGenC         The TemplateString object for C code
#   @param      AutoGenH         The TemplateString object for header file
#   @param      UniGenCFlag      UniString is generated into AutoGen C file when it is set to True
#   @param      UniGenBinBuffer  Buffer to store uni string package data
#
def CreateUnicodeStringCode(Info, AutoGenC, AutoGenH, UniGenCFlag, UniGenBinBuffer):
    # All path handling below is relative to the workspace; restore the caller's
    # working directory at the end.
    WorkingDir = os.getcwd()
    os.chdir(Info.WorkspaceDir)

    IncList = [Info.MetaFile.Dir]
    # Get all files under [Sources] section in inf file for EDK-II module
    EDK2Module = True
    SrcList = [F for F in Info.SourceFileList]
    if Info.AutoGenVersion < 0x00010005:
        EDK2Module = False
        # Get all files under the module directory for EDK-I module
        Cwd = os.getcwd()
        os.chdir(Info.MetaFile.Dir)
        for Root, Dirs, Files in os.walk("."):
            # Skip version-control metadata directories.
            if 'CVS' in Dirs:
                Dirs.remove('CVS')
            if '.svn' in Dirs:
                Dirs.remove('.svn')
            for File in Files:
                File = PathClass(os.path.join(Root, File), Info.MetaFile.Dir)
                if File in SrcList:
                    continue
                SrcList.append(File)
        os.chdir(Cwd)

    if 'BUILD' in Info.BuildOption and Info.BuildOption['BUILD']['FLAGS'].find('-c') > -1:
        CompatibleMode = True
    else:
        CompatibleMode = False

    #
    # -s is a temporary option dedicated for building .UNI files with ISO 639-2 language codes of EDK Shell in EDK2
    #
    if 'BUILD' in Info.BuildOption and Info.BuildOption['BUILD']['FLAGS'].find('-s') > -1:
        if CompatibleMode:
            EdkLogger.error("build", AUTOGEN_ERROR,
                            "-c and -s build options should be used exclusively",
                            ExtraData="[%s]" % str(Info))
        ShellMode = True
    else:
        ShellMode = False

    # RFC4646 is only for EDKII modules and ISO639-2 for EDK modules
    if EDK2Module:
        FilterInfo = [EDK2Module] + [Info.PlatformInfo.Platform.RFCLanguages]
    else:
        FilterInfo = [EDK2Module] + [Info.PlatformInfo.Platform.ISOLanguages]
    Header, Code = GetStringFiles(Info.UnicodeFileList, SrcList, IncList, Info.IncludePathList, ['.uni', '.inf'], Info.Name, CompatibleMode, ShellMode, UniGenCFlag, UniGenBinBuffer, FilterInfo)
    if CompatibleMode or UniGenCFlag:
        AutoGenC.Append("\n//\n//Unicode String Pack Definition\n//\n")
        AutoGenC.Append(Code)
        AutoGenC.Append("\n")
    AutoGenH.Append("\n//\n//Unicode String ID\n//\n")
    AutoGenH.Append(Header)
    if CompatibleMode or UniGenCFlag:
        AutoGenH.Append("\n#define STRING_ARRAY_NAME %sStrings\n" % Info.Name)
    os.chdir(WorkingDir)

## Create common code
#
#  Writes the standard prologues of AutoGen.h and AutoGen.c: file headers,
#  include guards, module-type includes, and the caller-id GUID.
#
#   @param      Info        The ModuleAutoGen object
#   @param      AutoGenC    The TemplateString object for C code
#   @param      AutoGenH    The TemplateString object for header file
#
def CreateHeaderCode(Info, AutoGenC, AutoGenH):
    # file header
    AutoGenH.Append(gAutoGenHeaderString.Replace({'FileName':'AutoGen.h'}))
    # header file Prologue
    AutoGenH.Append(gAutoGenHPrologueString.Replace({'File':'AUTOGENH','Guid':Info.Guid.replace('-','_')}))
    AutoGenH.Append(gAutoGenHCppPrologueString)
    # AutoGenVersion >= 0x00010005 identifies an EDK-II module.
    if Info.AutoGenVersion >= 0x00010005:
        # header files includes
        AutoGenH.Append("#include <%s>\n" % gBasicHeaderFile)
        if Info.ModuleType in gModuleTypeHeaderFile \
           and gModuleTypeHeaderFile[Info.ModuleType][0] != gBasicHeaderFile:
            AutoGenH.Append("#include <%s>\n" % gModuleTypeHeaderFile[Info.ModuleType][0])
        #
        # if either PcdLib in [LibraryClasses] sections or there exist Pcd section, add PcdLib.h
        # As if modules only uses FixedPcd, then PcdLib is not needed in [LibraryClasses] section.
        #
        if 'PcdLib' in Info.Module.LibraryClasses or Info.Module.Pcds:
            AutoGenH.Append("#include <Library/PcdLib.h>\n")

        AutoGenH.Append('\nextern GUID gEfiCallerIdGuid;\n\n')

        if Info.IsLibrary:
            return

        AutoGenH.Append("#define EFI_CALLER_ID_GUID \\\n %s\n" % GuidStringToGuidStructureString(Info.Guid))

    # NOTE(review): indentation of the block above reconstructed from a
    # whitespace-mangled source; the EFI_CALLER_ID_GUID define is taken to be
    # inside the EDK-II branch — confirm against the upstream file.
    if Info.IsLibrary:
        return
    # C file header
    AutoGenC.Append(gAutoGenHeaderString.Replace({'FileName':'AutoGen.c'}))
    if Info.AutoGenVersion >= 0x00010005:
        # C file header files includes
        if Info.ModuleType in gModuleTypeHeaderFile:
            for Inc in gModuleTypeHeaderFile[Info.ModuleType]:
                AutoGenC.Append("#include <%s>\n" % Inc)
        else:
            AutoGenC.Append("#include <%s>\n" % gBasicHeaderFile)

        #
        # Publish the CallerId Guid
        #
        AutoGenC.Append('\nGLOBAL_REMOVE_IF_UNREFERENCED GUID gEfiCallerIdGuid = %s;\n' % GuidStringToGuidStructureString(Info.Guid))

## Create common code for header file
#
#   @param      Info        The ModuleAutoGen object
#   @param      AutoGenC    The TemplateString object for C code
#   @param      AutoGenH    The TemplateString object for header file
#
def CreateFooterCode(Info, AutoGenC, AutoGenH):
    AutoGenH.Append(gAutoGenHEpilogueString)

## Create code for a module
#
#  Top-level driver for AutoGen file generation: emits headers, then (for
#  EDK-II modules) all definition sections, then unicode strings and footer.
#
#   @param      Info             The ModuleAutoGen object
#   @param      AutoGenC         The TemplateString object for C code
#   @param      AutoGenH         The TemplateString object for header file
#   @param      StringH          The TemplateString object for the string-definition header
#   @param      UniGenCFlag      UniString is generated into AutoGen C file when it is set to True
#   @param      UniGenBinBuffer  Buffer to store uni string package data
#
def CreateCode(Info, AutoGenC, AutoGenH, StringH, UniGenCFlag, UniGenBinBuffer):
    CreateHeaderCode(Info, AutoGenC, AutoGenH)

    if Info.AutoGenVersion >= 0x00010005:
        CreateGuidDefinitionCode(Info, AutoGenC, AutoGenH)
        CreateProtocolDefinitionCode(Info, AutoGenC, AutoGenH)
        CreatePpiDefinitionCode(Info, AutoGenC, AutoGenH)
        CreatePcdCode(Info, AutoGenC, AutoGenH)
        CreateLibraryConstructorCode(Info, AutoGenC, AutoGenH)
        CreateLibraryDestructorCode(Info, AutoGenC, AutoGenH)
        CreateModuleEntryPointCode(Info, AutoGenC, AutoGenH)
        CreateModuleUnloadImageCode(Info, AutoGenC, AutoGenH)

    if Info.UnicodeFileList:
        FileName = "%sStrDefs.h" % Info.Name
        StringH.Append(gAutoGenHeaderString.Replace({'FileName':FileName}))
        StringH.Append(gAutoGenHPrologueString.Replace({'File':'STRDEFS', 'Guid':Info.Guid.replace('-','_')}))
        CreateUnicodeStringCode(Info, AutoGenC, StringH, UniGenCFlag, UniGenBinBuffer)
        StringH.Append("\n#endif\n")
        AutoGenH.Append('#include "%s"\n' % FileName)

    CreateFooterCode(Info, AutoGenC, AutoGenH)

    # no generation of AutoGen.c for Edk modules without unicode file
    if Info.AutoGenVersion < 0x00010005 and len(Info.UnicodeFileList) == 0:
        AutoGenC.String = ''

## Create the code file
#
#   @param      FilePath     The path of code file
#   @param      Content      The content of code file
#   @param      IsBinaryFile The flag indicating if the file is binary file or not
#
#   @retval     True        If file content is changed or file doesn't exist
#   @retval     False       If the file exists and the content is not changed
#
def Generate(FilePath, Content, IsBinaryFile):
    return SaveFileOnChange(FilePath, Content, IsBinaryFile)
codeparrot/github-code-clean
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """code generator for GL/GLES extension wrangler.""" import optparse import os import collections import re import sys GL_FUNCTIONS = [ { 'return_type': 'void', 'names': ['glActiveTexture'], 'arguments': 'GLenum texture', }, { 'return_type': 'void', 'names': ['glAttachShader'], 'arguments': 'GLuint program, GLuint shader', }, { 'return_type': 'void', 'names': ['glBeginQuery'], 'arguments': 'GLenum target, GLuint id', }, { 'return_type': 'void', 'names': ['glBeginQueryARB', 'glBeginQueryEXT'], 'arguments': 'GLenum target, GLuint id', }, { 'return_type': 'void', 'names': ['glBindAttribLocation'], 'arguments': 'GLuint program, GLuint index, const char* name', }, { 'return_type': 'void', 'names': ['glBindBuffer'], 'arguments': 'GLenum target, GLuint buffer', }, { 'return_type': 'void', 'names': ['glBindFragDataLocation'], 'arguments': 'GLuint program, GLuint colorNumber, const char* name', }, { 'return_type': 'void', 'names': ['glBindFragDataLocationIndexed'], 'arguments': 'GLuint program, GLuint colorNumber, GLuint index, const char* name', }, { 'return_type': 'void', 'names': ['glBindFramebufferEXT', 'glBindFramebuffer'], 'arguments': 'GLenum target, GLuint framebuffer', }, { 'return_type': 'void', 'names': ['glBindRenderbufferEXT', 'glBindRenderbuffer'], 'arguments': 'GLenum target, GLuint renderbuffer', }, { 'return_type': 'void', 'names': ['glBindTexture'], 'arguments': 'GLenum target, GLuint texture', }, { 'return_type': 'void', 'names': ['glBlendColor'], 'arguments': 'GLclampf red, GLclampf green, GLclampf blue, GLclampf alpha', }, { 'return_type': 'void', 'names': ['glBlendEquation'], 'arguments': ' GLenum mode ', }, { 'return_type': 'void', 'names': ['glBlendEquationSeparate'], 'arguments': 'GLenum modeRGB, GLenum modeAlpha', }, { 'return_type': 'void', 'names': 
['glBlendFunc'], 'arguments': 'GLenum sfactor, GLenum dfactor', }, { 'return_type': 'void', 'names': ['glBlendFuncSeparate'], 'arguments': 'GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha', }, { 'return_type': 'void', 'names': ['glBlitFramebuffer'], 'arguments': 'GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, ' 'GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, ' 'GLbitfield mask, GLenum filter', }, { 'return_type': 'void', 'names': ['glBlitFramebufferEXT', 'glBlitFramebuffer'], 'arguments': 'GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, ' 'GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, ' 'GLbitfield mask, GLenum filter', }, { 'return_type': 'void', 'names': ['glBlitFramebufferANGLE', 'glBlitFramebuffer'], 'arguments': 'GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, ' 'GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, ' 'GLbitfield mask, GLenum filter', }, { 'return_type': 'void', 'names': ['glBufferData'], 'arguments': 'GLenum target, GLsizei size, const void* data, GLenum usage', }, { 'return_type': 'void', 'names': ['glBufferSubData'], 'arguments': 'GLenum target, GLint offset, GLsizei size, const void* data', }, { 'return_type': 'GLenum', 'names': ['glCheckFramebufferStatusEXT', 'glCheckFramebufferStatus'], 'arguments': 'GLenum target', 'logging_code': """ GL_SERVICE_LOG("GL_RESULT: " << GLES2Util::GetStringEnum(result)); """, }, { 'return_type': 'void', 'names': ['glClear'], 'arguments': 'GLbitfield mask', }, { 'return_type': 'void', 'names': ['glClearColor'], 'arguments': 'GLclampf red, GLclampf green, GLclampf blue, GLclampf alpha', }, { 'return_type': 'void', 'names': ['glClearDepth'], 'arguments': 'GLclampd depth', }, { 'return_type': 'void', 'names': ['glClearDepthf'], 'arguments': 'GLclampf depth', }, { 'return_type': 'void', 'names': ['glClearStencil'], 'arguments': 'GLint s', }, { 'return_type': 'void', 'names': ['glColorMask'], 'arguments': 'GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha', }, { 
'return_type': 'void', 'names': ['glCompileShader'], 'arguments': 'GLuint shader', }, { 'return_type': 'void', 'names': ['glCompressedTexImage2D'], 'arguments': 'GLenum target, GLint level, GLenum internalformat, GLsizei width, ' 'GLsizei height, GLint border, GLsizei imageSize, const void* data', }, { 'return_type': 'void', 'names': ['glCompressedTexSubImage2D'], 'arguments': 'GLenum target, GLint level, GLint xoffset, GLint yoffset, ' 'GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, ' 'const void* data', }, { 'return_type': 'void', 'names': ['glCopyTexImage2D'], 'arguments': 'GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, ' 'GLsizei width, GLsizei height, GLint border', }, { 'return_type': 'void', 'names': ['glCopyTexSubImage2D'], 'arguments': 'GLenum target, GLint level, GLint xoffset, ' 'GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height', }, { 'return_type': 'GLuint', 'names': ['glCreateProgram'], 'arguments': 'void', }, { 'return_type': 'GLuint', 'names': ['glCreateShader'], 'arguments': 'GLenum type', }, { 'return_type': 'void', 'names': ['glCullFace'], 'arguments': 'GLenum mode', }, { 'return_type': 'void', 'names': ['glDeleteBuffersARB', 'glDeleteBuffers'], 'arguments': 'GLsizei n, const GLuint* buffers', }, { 'return_type': 'void', 'names': ['glDeleteFramebuffersEXT', 'glDeleteFramebuffers'], 'arguments': 'GLsizei n, const GLuint* framebuffers', }, { 'return_type': 'void', 'names': ['glDeleteProgram'], 'arguments': 'GLuint program', }, { 'return_type': 'void', 'names': ['glDeleteQueries'], 'arguments': 'GLsizei n, const GLuint* ids', }, { 'return_type': 'void', 'names': ['glDeleteQueriesARB', 'glDeleteQueriesEXT'], 'arguments': 'GLsizei n, const GLuint* ids', }, { 'return_type': 'void', 'names': ['glDeleteRenderbuffersEXT', 'glDeleteRenderbuffers'], 'arguments': 'GLsizei n, const GLuint* renderbuffers', }, { 'return_type': 'void', 'names': ['glDeleteShader'], 'arguments': 'GLuint shader', }, { 
'return_type': 'void', 'names': ['glDeleteTextures'], 'arguments': 'GLsizei n, const GLuint* textures', }, { 'return_type': 'void', 'names': ['glDepthFunc'], 'arguments': 'GLenum func', }, { 'return_type': 'void', 'names': ['glDepthMask'], 'arguments': 'GLboolean flag', }, { 'return_type': 'void', 'names': ['glDepthRange'], 'arguments': 'GLclampd zNear, GLclampd zFar', }, { 'return_type': 'void', 'names': ['glDepthRangef'], 'arguments': 'GLclampf zNear, GLclampf zFar', }, { 'return_type': 'void', 'names': ['glDetachShader'], 'arguments': 'GLuint program, GLuint shader', }, { 'return_type': 'void', 'names': ['glDisable'], 'arguments': 'GLenum cap', }, { 'return_type': 'void', 'names': ['glDisableVertexAttribArray'], 'arguments': 'GLuint index', }, { 'return_type': 'void', 'names': ['glDrawArrays'], 'arguments': 'GLenum mode, GLint first, GLsizei count', }, { 'return_type': 'void', 'names': ['glDrawBuffer'], 'arguments': 'GLenum mode', }, { 'return_type': 'void', 'names': ['glDrawBuffersARB', 'glDrawBuffersEXT'], 'arguments': 'GLsizei n, const GLenum* bufs', }, { 'return_type': 'void', 'names': ['glDrawElements'], 'arguments': 'GLenum mode, GLsizei count, GLenum type, const void* indices', }, { 'return_type': 'void', 'names': ['glEGLImageTargetTexture2DOES'], 'arguments': 'GLenum target, GLeglImageOES image', }, { 'return_type': 'void', 'names': ['glEGLImageTargetRenderbufferStorageOES'], 'arguments': 'GLenum target, GLeglImageOES image', }, { 'return_type': 'void', 'names': ['glEnable'], 'arguments': 'GLenum cap', }, { 'return_type': 'void', 'names': ['glEnableVertexAttribArray'], 'arguments': 'GLuint index', }, { 'return_type': 'void', 'names': ['glEndQuery'], 'arguments': 'GLenum target', }, { 'return_type': 'void', 'names': ['glEndQueryARB', 'glEndQueryEXT'], 'arguments': 'GLenum target', }, { 'return_type': 'void', 'names': ['glFinish'], 'arguments': 'void', }, { 'return_type': 'void', 'names': ['glFlush'], 'arguments': 'void', }, { 'return_type': 'void', 
'names': ['glFramebufferRenderbufferEXT', 'glFramebufferRenderbuffer'], 'arguments': \ 'GLenum target, GLenum attachment, GLenum renderbuffertarget, ' 'GLuint renderbuffer', }, { 'return_type': 'void', 'names': ['glFramebufferTexture2DEXT', 'glFramebufferTexture2D'], 'arguments': 'GLenum target, GLenum attachment, GLenum textarget, GLuint texture, ' 'GLint level', }, { 'return_type': 'void', 'names': ['glFramebufferTexture2DMultisampleEXT'], 'arguments': 'GLenum target, GLenum attachment, GLenum textarget, GLuint texture, ' 'GLint level, GLsizei samples', }, { 'return_type': 'void', 'names': ['glFramebufferTexture2DMultisampleIMG'], 'arguments': 'GLenum target, GLenum attachment, GLenum textarget, GLuint texture, ' 'GLint level, GLsizei samples', }, { 'return_type': 'void', 'names': ['glFrontFace'], 'arguments': 'GLenum mode', }, { 'return_type': 'void', 'names': ['glGenBuffersARB', 'glGenBuffers'], 'arguments': 'GLsizei n, GLuint* buffers', }, { 'return_type': 'void', 'names': ['glGenQueries'], 'arguments': 'GLsizei n, GLuint* ids', }, { 'return_type': 'void', 'names': ['glGenQueriesARB', 'glGenQueriesEXT'], 'arguments': 'GLsizei n, GLuint* ids', }, { 'return_type': 'void', 'names': ['glGenerateMipmapEXT', 'glGenerateMipmap'], 'arguments': 'GLenum target', }, { 'return_type': 'void', 'names': ['glGenFramebuffersEXT', 'glGenFramebuffers'], 'arguments': 'GLsizei n, GLuint* framebuffers', }, { 'return_type': 'void', 'names': ['glGenRenderbuffersEXT', 'glGenRenderbuffers'], 'arguments': 'GLsizei n, GLuint* renderbuffers', }, { 'return_type': 'void', 'names': ['glGenTextures'], 'arguments': 'GLsizei n, GLuint* textures', }, { 'return_type': 'void', 'names': ['glGetActiveAttrib'], 'arguments': 'GLuint program, GLuint index, GLsizei bufsize, GLsizei* length, ' 'GLint* size, GLenum* type, char* name', }, { 'return_type': 'void', 'names': ['glGetActiveUniform'], 'arguments': 'GLuint program, GLuint index, GLsizei bufsize, GLsizei* length, ' 'GLint* size, GLenum* type, 
char* name', }, { 'return_type': 'void', 'names': ['glGetAttachedShaders'], 'arguments': 'GLuint program, GLsizei maxcount, GLsizei* count, GLuint* shaders', }, { 'return_type': 'GLint', 'names': ['glGetAttribLocation'], 'arguments': 'GLuint program, const char* name', }, { 'return_type': 'void', 'names': ['glGetBooleanv'], 'arguments': 'GLenum pname, GLboolean* params', }, { 'return_type': 'void', 'names': ['glGetBufferParameteriv'], 'arguments': 'GLenum target, GLenum pname, GLint* params', }, { 'return_type': 'GLenum', 'names': ['glGetError'], 'arguments': 'void', 'logging_code': """ GL_SERVICE_LOG("GL_RESULT: " << GLES2Util::GetStringError(result)); """, }, { 'return_type': 'void', 'names': ['glGetFloatv'], 'arguments': 'GLenum pname, GLfloat* params', }, { 'return_type': 'void', 'names': ['glGetFramebufferAttachmentParameterivEXT', 'glGetFramebufferAttachmentParameteriv'], 'arguments': 'GLenum target, ' 'GLenum attachment, GLenum pname, GLint* params', }, { 'return_type': 'GLenum', 'names': ['glGetGraphicsResetStatusARB', 'glGetGraphicsResetStatusEXT'], 'arguments': 'void', }, { 'return_type': 'void', 'names': ['glGetIntegerv'], 'arguments': 'GLenum pname, GLint* params', }, { 'return_type': 'void', 'names': ['glGetProgramBinary', 'glGetProgramBinaryOES'], 'arguments': 'GLuint program, GLsizei bufSize, GLsizei* length, ' 'GLenum* binaryFormat, GLvoid* binary', 'other_extensions': ['ARB_get_program_binary', 'OES_get_program_binary'] }, { 'return_type': 'void', 'names': ['glGetProgramiv'], 'arguments': 'GLuint program, GLenum pname, GLint* params', }, { 'return_type': 'void', 'names': ['glGetProgramInfoLog'], 'arguments': 'GLuint program, GLsizei bufsize, GLsizei* length, char* infolog', }, { 'return_type': 'void', 'names': ['glGetQueryiv'], 'arguments': 'GLenum target, GLenum pname, GLint* params', }, { 'return_type': 'void', 'names': ['glGetQueryivARB', 'glGetQueryivEXT'], 'arguments': 'GLenum target, GLenum pname, GLint* params', }, { 'return_type': 'void', 
'names': ['glGetQueryObjecti64v'], 'arguments': 'GLuint id, GLenum pname, GLint64* params', }, { 'return_type': 'void', 'names': ['glGetQueryObjectiv'], 'arguments': 'GLuint id, GLenum pname, GLint* params', }, { 'return_type': 'void', 'names': ['glGetQueryObjectui64v'], 'arguments': 'GLuint id, GLenum pname, GLuint64* params', }, { 'return_type': 'void', 'names': ['glGetQueryObjectuiv'], 'arguments': 'GLuint id, GLenum pname, GLuint* params', }, { 'return_type': 'void', 'names': ['glGetQueryObjectuivARB', 'glGetQueryObjectuivEXT'], 'arguments': 'GLuint id, GLenum pname, GLuint* params', }, { 'return_type': 'void', 'names': ['glGetRenderbufferParameterivEXT', 'glGetRenderbufferParameteriv'], 'arguments': 'GLenum target, GLenum pname, GLint* params', }, { 'return_type': 'void', 'names': ['glGetShaderiv'], 'arguments': 'GLuint shader, GLenum pname, GLint* params', }, { 'return_type': 'void', 'names': ['glGetShaderInfoLog'], 'arguments': 'GLuint shader, GLsizei bufsize, GLsizei* length, char* infolog', }, { 'return_type': 'void', 'names': ['glGetShaderPrecisionFormat'], 'arguments': 'GLenum shadertype, GLenum precisiontype, ' 'GLint* range, GLint* precision', }, { 'return_type': 'void', 'names': ['glGetShaderSource'], 'arguments': 'GLuint shader, GLsizei bufsize, GLsizei* length, char* source', }, { 'return_type': 'const GLubyte*', 'names': ['glGetString'], 'arguments': 'GLenum name', }, { 'return_type': 'void', 'names': ['glGetTexLevelParameterfv'], 'arguments': 'GLenum target, GLint level, GLenum pname, GLfloat* params', }, { 'return_type': 'void', 'names': ['glGetTexLevelParameteriv'], 'arguments': 'GLenum target, GLint level, GLenum pname, GLint* params', }, { 'return_type': 'void', 'names': ['glGetTexParameterfv'], 'arguments': 'GLenum target, GLenum pname, GLfloat* params', }, { 'return_type': 'void', 'names': ['glGetTexParameteriv'], 'arguments': 'GLenum target, GLenum pname, GLint* params', }, { 'return_type': 'void', 'names': 
['glGetTranslatedShaderSourceANGLE'], 'arguments': 'GLuint shader, GLsizei bufsize, GLsizei* length, char* source', }, { 'return_type': 'void', 'names': ['glGetUniformfv'], 'arguments': 'GLuint program, GLint location, GLfloat* params', }, { 'return_type': 'void', 'names': ['glGetUniformiv'], 'arguments': 'GLuint program, GLint location, GLint* params', }, { 'return_type': 'GLint', 'names': ['glGetUniformLocation'], 'arguments': 'GLuint program, const char* name', }, { 'return_type': 'void', 'names': ['glGetVertexAttribfv'], 'arguments': 'GLuint index, GLenum pname, GLfloat* params', }, { 'return_type': 'void', 'names': ['glGetVertexAttribiv'], 'arguments': 'GLuint index, GLenum pname, GLint* params', }, { 'return_type': 'void', 'names': ['glGetVertexAttribPointerv'], 'arguments': 'GLuint index, GLenum pname, void** pointer', }, { 'return_type': 'void', 'names': ['glHint'], 'arguments': 'GLenum target, GLenum mode', }, { 'return_type': 'GLboolean', 'names': ['glIsBuffer'], 'arguments': 'GLuint buffer', }, { 'return_type': 'GLboolean', 'names': ['glIsEnabled'], 'arguments': 'GLenum cap', }, { 'return_type': 'GLboolean', 'names': ['glIsFramebufferEXT', 'glIsFramebuffer'], 'arguments': 'GLuint framebuffer', }, { 'return_type': 'GLboolean', 'names': ['glIsProgram'], 'arguments': 'GLuint program', }, { 'return_type': 'GLboolean', 'names': ['glIsQueryARB', 'glIsQueryEXT'], 'arguments': 'GLuint query', }, { 'return_type': 'GLboolean', 'names': ['glIsRenderbufferEXT', 'glIsRenderbuffer'], 'arguments': 'GLuint renderbuffer', }, { 'return_type': 'GLboolean', 'names': ['glIsShader'], 'arguments': 'GLuint shader', }, { 'return_type': 'GLboolean', 'names': ['glIsTexture'], 'arguments': 'GLuint texture', }, { 'return_type': 'void', 'names': ['glLineWidth'], 'arguments': 'GLfloat width', }, { 'return_type': 'void', 'names': ['glLinkProgram'], 'arguments': 'GLuint program', }, { 'return_type': 'void*', 'names': ['glMapBuffer', 'glMapBufferOES'], 'arguments': 'GLenum target, GLenum 
access', }, { 'return_type': 'void*', 'names': ['glMapBufferRange'], 'arguments': 'GLenum target, GLintptr offset, GLsizeiptr length, GLenum access', }, { 'return_type': 'void', 'names': ['glFlushMappedBufferRange'], 'arguments': 'GLenum target, GLintptr offset, GLsizeiptr length', }, { 'return_type': 'void', 'names': ['glPixelStorei'], 'arguments': 'GLenum pname, GLint param', }, { 'return_type': 'void', 'names': ['glPointParameteri'], 'arguments': 'GLenum pname, GLint param', }, { 'return_type': 'void', 'names': ['glPolygonOffset'], 'arguments': 'GLfloat factor, GLfloat units', }, { 'return_type': 'void', 'names': ['glProgramBinary', 'glProgramBinaryOES'], 'arguments': 'GLuint program, GLenum binaryFormat, ' 'const GLvoid* binary, GLsizei length', 'other_extensions': ['ARB_get_program_binary', 'OES_get_program_binary'] }, { 'return_type': 'void', 'names': ['glProgramParameteri'], 'arguments': 'GLuint program, GLenum pname, GLint value', 'other_extensions': ['ARB_get_program_binary'] }, { 'return_type': 'void', 'names': ['glQueryCounter'], 'arguments': 'GLuint id, GLenum target', }, { 'return_type': 'void', 'names': ['glReadBuffer'], 'arguments': 'GLenum src', }, { 'return_type': 'void', 'names': ['glReadPixels'], 'arguments': 'GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, ' 'GLenum type, void* pixels', }, { 'return_type': 'void', 'names': ['glReleaseShaderCompiler'], 'arguments': 'void', }, { 'return_type': 'void', 'names': ['glRenderbufferStorageMultisample'], 'arguments': 'GLenum target, GLsizei samples, GLenum internalformat, ' 'GLsizei width, GLsizei height', }, { 'return_type': 'void', 'names': ['glRenderbufferStorageMultisampleEXT', 'glRenderbufferStorageMultisample'], 'arguments': 'GLenum target, GLsizei samples, GLenum internalformat, ' 'GLsizei width, GLsizei height', }, { 'return_type': 'void', 'names': ['glRenderbufferStorageMultisampleANGLE', 'glRenderbufferStorageMultisample'], 'arguments': 'GLenum target, GLsizei samples, GLenum 
internalformat, ' 'GLsizei width, GLsizei height', }, { 'return_type': 'void', 'names': ['glRenderbufferStorageMultisampleIMG'], 'arguments': 'GLenum target, GLsizei samples, GLenum internalformat, ' 'GLsizei width, GLsizei height', }, { 'return_type': 'void', 'names': ['glRenderbufferStorageEXT', 'glRenderbufferStorage'], 'arguments': 'GLenum target, GLenum internalformat, GLsizei width, GLsizei height', }, { 'return_type': 'void', 'names': ['glSampleCoverage'], 'arguments': 'GLclampf value, GLboolean invert', }, { 'return_type': 'void', 'names': ['glScissor'], 'arguments': 'GLint x, GLint y, GLsizei width, GLsizei height', }, { 'return_type': 'void', 'names': ['glShaderBinary'], 'arguments': 'GLsizei n, const GLuint* shaders, GLenum binaryformat, ' 'const void* binary, GLsizei length', }, { 'return_type': 'void', 'names': ['glShaderSource'], 'arguments': 'GLuint shader, GLsizei count, const char* const* str, const GLint* length', 'logging_code': """ GL_SERVICE_LOG_CODE_BLOCK({ for (GLsizei ii = 0; ii < count; ++ii) { if (str[ii]) { if (length && length[ii] >= 0) { std::string source(str[ii], length[ii]); GL_SERVICE_LOG(" " << ii << ": ---\\n" << source << "\\n---"); } else { GL_SERVICE_LOG(" " << ii << ": ---\\n" << str[ii] << "\\n---"); } } else { GL_SERVICE_LOG(" " << ii << ": NULL"); } } }); """, }, { 'return_type': 'void', 'names': ['glStencilFunc'], 'arguments': 'GLenum func, GLint ref, GLuint mask', }, { 'return_type': 'void', 'names': ['glStencilFuncSeparate'], 'arguments': 'GLenum face, GLenum func, GLint ref, GLuint mask', }, { 'return_type': 'void', 'names': ['glStencilMask'], 'arguments': 'GLuint mask', }, { 'return_type': 'void', 'names': ['glStencilMaskSeparate'], 'arguments': 'GLenum face, GLuint mask', }, { 'return_type': 'void', 'names': ['glStencilOp'], 'arguments': 'GLenum fail, GLenum zfail, GLenum zpass', }, { 'return_type': 'void', 'names': ['glStencilOpSeparate'], 'arguments': 'GLenum face, GLenum fail, GLenum zfail, GLenum zpass', }, { 
'return_type': 'void', 'names': ['glTexImage2D'], 'arguments': 'GLenum target, GLint level, GLint internalformat, GLsizei width, ' 'GLsizei height, GLint border, GLenum format, GLenum type, ' 'const void* pixels', }, { 'return_type': 'void', 'names': ['glTexParameterf'], 'arguments': 'GLenum target, GLenum pname, GLfloat param', }, { 'return_type': 'void', 'names': ['glTexParameterfv'], 'arguments': 'GLenum target, GLenum pname, const GLfloat* params', }, { 'return_type': 'void', 'names': ['glTexParameteri'], 'arguments': 'GLenum target, GLenum pname, GLint param', }, { 'return_type': 'void', 'names': ['glTexParameteriv'], 'arguments': 'GLenum target, GLenum pname, const GLint* params', }, { 'return_type': 'void', 'names': ['glTexStorage2DEXT'], 'arguments': 'GLenum target, GLsizei levels, GLenum internalformat, ' 'GLsizei width, GLsizei height', }, { 'return_type': 'void', 'names': ['glTexSubImage2D'], 'arguments': 'GLenum target, GLint level, GLint xoffset, GLint yoffset, ' 'GLsizei width, GLsizei height, GLenum format, GLenum type, ' 'const void* pixels', }, { 'return_type': 'void', 'names': ['glUniform1f'], 'arguments': 'GLint location, GLfloat x', }, { 'return_type': 'void', 'names': ['glUniform1fv'], 'arguments': 'GLint location, GLsizei count, const GLfloat* v', }, { 'return_type': 'void', 'names': ['glUniform1i'], 'arguments': 'GLint location, GLint x', }, { 'return_type': 'void', 'names': ['glUniform1iv'], 'arguments': 'GLint location, GLsizei count, const GLint* v', }, { 'return_type': 'void', 'names': ['glUniform2f'], 'arguments': 'GLint location, GLfloat x, GLfloat y', }, { 'return_type': 'void', 'names': ['glUniform2fv'], 'arguments': 'GLint location, GLsizei count, const GLfloat* v', }, { 'return_type': 'void', 'names': ['glUniform2i'], 'arguments': 'GLint location, GLint x, GLint y', }, { 'return_type': 'void', 'names': ['glUniform2iv'], 'arguments': 'GLint location, GLsizei count, const GLint* v', }, { 'return_type': 'void', 'names': 
['glUniform3f'], 'arguments': 'GLint location, GLfloat x, GLfloat y, GLfloat z', }, { 'return_type': 'void', 'names': ['glUniform3fv'], 'arguments': 'GLint location, GLsizei count, const GLfloat* v', }, { 'return_type': 'void', 'names': ['glUniform3i'], 'arguments': 'GLint location, GLint x, GLint y, GLint z', }, { 'return_type': 'void', 'names': ['glUniform3iv'], 'arguments': 'GLint location, GLsizei count, const GLint* v', }, { 'return_type': 'void', 'names': ['glUniform4f'], 'arguments': 'GLint location, GLfloat x, GLfloat y, GLfloat z, GLfloat w', }, { 'return_type': 'void', 'names': ['glUniform4fv'], 'arguments': 'GLint location, GLsizei count, const GLfloat* v', }, { 'return_type': 'void', 'names': ['glUniform4i'], 'arguments': 'GLint location, GLint x, GLint y, GLint z, GLint w', }, { 'return_type': 'void', 'names': ['glUniform4iv'], 'arguments': 'GLint location, GLsizei count, const GLint* v', }, { 'return_type': 'void', 'names': ['glUniformMatrix2fv'], 'arguments': 'GLint location, GLsizei count, ' 'GLboolean transpose, const GLfloat* value', }, { 'return_type': 'void', 'names': ['glUniformMatrix3fv'], 'arguments': 'GLint location, GLsizei count, ' 'GLboolean transpose, const GLfloat* value', }, { 'return_type': 'void', 'names': ['glUniformMatrix4fv'], 'arguments': 'GLint location, GLsizei count, ' 'GLboolean transpose, const GLfloat* value', }, { 'return_type': 'GLboolean', 'names': ['glUnmapBuffer', 'glUnmapBufferOES'], 'arguments': 'GLenum target', }, { 'return_type': 'void', 'names': ['glUseProgram'], 'arguments': 'GLuint program', }, { 'return_type': 'void', 'names': ['glValidateProgram'], 'arguments': 'GLuint program', }, { 'return_type': 'void', 'names': ['glVertexAttrib1f'], 'arguments': 'GLuint indx, GLfloat x', }, { 'return_type': 'void', 'names': ['glVertexAttrib1fv'], 'arguments': 'GLuint indx, const GLfloat* values', }, { 'return_type': 'void', 'names': ['glVertexAttrib2f'], 'arguments': 'GLuint indx, GLfloat x, GLfloat y', }, { 'return_type': 
'void', 'names': ['glVertexAttrib2fv'], 'arguments': 'GLuint indx, const GLfloat* values', }, { 'return_type': 'void', 'names': ['glVertexAttrib3f'], 'arguments': 'GLuint indx, GLfloat x, GLfloat y, GLfloat z', }, { 'return_type': 'void', 'names': ['glVertexAttrib3fv'], 'arguments': 'GLuint indx, const GLfloat* values', }, { 'return_type': 'void', 'names': ['glVertexAttrib4f'], 'arguments': 'GLuint indx, GLfloat x, GLfloat y, GLfloat z, GLfloat w', }, { 'return_type': 'void', 'names': ['glVertexAttrib4fv'], 'arguments': 'GLuint indx, const GLfloat* values', }, { 'return_type': 'void', 'names': ['glVertexAttribPointer'], 'arguments': 'GLuint indx, GLint size, GLenum type, GLboolean normalized, ' 'GLsizei stride, const void* ptr', }, { 'return_type': 'void', 'names': ['glViewport'], 'arguments': 'GLint x, GLint y, GLsizei width, GLsizei height', }, { 'return_type': 'void', 'names': ['glGenFencesNV'], 'arguments': 'GLsizei n, GLuint* fences', }, { 'return_type': 'void', 'names': ['glDeleteFencesNV'], 'arguments': 'GLsizei n, const GLuint* fences', }, { 'return_type': 'void', 'names': ['glSetFenceNV'], 'arguments': 'GLuint fence, GLenum condition', }, { 'return_type': 'GLboolean', 'names': ['glTestFenceNV'], 'arguments': 'GLuint fence', }, { 'return_type': 'void', 'names': ['glFinishFenceNV'], 'arguments': 'GLuint fence', }, { 'return_type': 'GLboolean', 'names': ['glIsFenceNV'], 'arguments': 'GLuint fence', }, { 'return_type': 'void', 'names': ['glGetFenceivNV'], 'arguments': 'GLuint fence, GLenum pname, GLint* params', }, { 'return_type': 'GLsync', 'names': ['glFenceSync'], 'arguments': 'GLenum condition, GLbitfield flags', }, { 'return_type': 'void', 'names': ['glDeleteSync'], 'arguments': 'GLsync sync', }, { 'return_type': 'void', 'names': ['glGetSynciv'], 'arguments': 'GLsync sync, GLenum pname, GLsizei bufSize, GLsizei* length,' 'GLint* values', }, { 'return_type': 'GLenum', 'names': ['glClientWaitSync'], 'arguments': 'GLsync sync, GLbitfield flags, GLuint64 
timeout', },
  { 'return_type': 'GLenum',
    'names': ['glWaitSync'],
    'arguments':
        'GLsync sync, GLbitfield flags, GLuint64 timeout', },
  { 'return_type': 'void',
    'names': ['glDrawArraysInstancedANGLE', 'glDrawArraysInstancedARB'],
    'arguments': 'GLenum mode, GLint first, GLsizei count, GLsizei primcount', },
  { 'return_type': 'void',
    'names': ['glDrawElementsInstancedANGLE', 'glDrawElementsInstancedARB'],
    'arguments': 'GLenum mode, GLsizei count, GLenum type, const void* indices, '
        'GLsizei primcount', },
  { 'return_type': 'void',
    'names': ['glVertexAttribDivisorANGLE', 'glVertexAttribDivisorARB'],
    'arguments': 'GLuint index, GLuint divisor', },
  { 'return_type': 'void',
    'names': ['glGenVertexArraysOES',
              'glGenVertexArraysAPPLE',
              'glGenVertexArrays'],
    'arguments': 'GLsizei n, GLuint* arrays',
    'other_extensions': ['OES_vertex_array_object',
                         'APPLE_vertex_array_object',
                         'ARB_vertex_array_object'] },
  { 'return_type': 'void',
    'names': ['glDeleteVertexArraysOES',
              'glDeleteVertexArraysAPPLE',
              'glDeleteVertexArrays'],
    'arguments': 'GLsizei n, const GLuint* arrays',
    'other_extensions': ['OES_vertex_array_object',
                         'APPLE_vertex_array_object',
                         'ARB_vertex_array_object'] },
  { 'return_type': 'void',
    'names': ['glBindVertexArrayOES',
              'glBindVertexArrayAPPLE',
              'glBindVertexArray'],
    'arguments': 'GLuint array',
    'other_extensions': ['OES_vertex_array_object',
                         'APPLE_vertex_array_object',
                         'ARB_vertex_array_object'] },
  { 'return_type': 'GLboolean',
    'names': ['glIsVertexArrayOES',
              'glIsVertexArrayAPPLE',
              'glIsVertexArray'],
    'arguments': 'GLuint array',
    'other_extensions': ['OES_vertex_array_object',
                         'APPLE_vertex_array_object',
                         'ARB_vertex_array_object'] },
  { 'return_type': 'void',
    'names': ['glDiscardFramebufferEXT', 'glInvalidateFramebuffer'],
    'arguments': 'GLenum target, GLsizei numAttachments, '
        'const GLenum* attachments' },
]

# Off-screen Mesa (OSMesa) entry points.  Record format matches GL_FUNCTIONS:
# 'return_type' and 'arguments' are C declaration fragments emitted verbatim
# into the generated bindings, and the first element of 'names' is the
# canonical name used for the typedef, Proc struct field and invocation macro
# (see GenerateHeader below).  The remaining 'names' are presumably alternate
# symbols the loader can resolve — confirm against the binding-lookup code.
OSMESA_FUNCTIONS = [
  { 'return_type': 'OSMesaContext',
    'names': ['OSMesaCreateContext'],
    'arguments': 'GLenum format, OSMesaContext sharelist', },
  { 'return_type': 'OSMesaContext',
    'names': ['OSMesaCreateContextExt'],
    'arguments':
        'GLenum format, GLint depthBits, GLint stencilBits, GLint accumBits, '
        'OSMesaContext sharelist', },
  { 'return_type': 'void',
    'names': ['OSMesaDestroyContext'],
    'arguments': 'OSMesaContext ctx', },
  { 'return_type': 'GLboolean',
    'names': ['OSMesaMakeCurrent'],
    'arguments': 'OSMesaContext ctx, void* buffer, GLenum type, GLsizei width, '
        'GLsizei height', },
  { 'return_type': 'OSMesaContext',
    'names': ['OSMesaGetCurrentContext'],
    'arguments': 'void', },
  { 'return_type': 'void',
    'names': ['OSMesaPixelStore'],
    'arguments': 'GLint pname, GLint value', },
  { 'return_type': 'void',
    'names': ['OSMesaGetIntegerv'],
    'arguments': 'GLint pname, GLint* value', },
  { 'return_type': 'GLboolean',
    'names': ['OSMesaGetDepthBuffer'],
    'arguments':
        'OSMesaContext c, GLint* width, GLint* height, GLint* bytesPerValue, '
        'void** buffer', },
  { 'return_type': 'GLboolean',
    'names': ['OSMesaGetColorBuffer'],
    'arguments': 'OSMesaContext c, GLint* width, GLint* height, GLint* format, '
        'void** buffer', },
  { 'return_type': 'OSMESAproc',
    'names': ['OSMesaGetProcAddress'],
    'arguments': 'const char* funcName', },
  { 'return_type': 'void',
    'names': ['OSMesaColorClamp'],
    'arguments': 'GLboolean enable', },
]

# EGL entry points, including ANGLE/KHR/NV/CHROMIUM extension functions.
# Same record format as GL_FUNCTIONS above.
EGL_FUNCTIONS = [
  { 'return_type': 'EGLint',
    'names': ['eglGetError'],
    'arguments': 'void', },
  { 'return_type': 'EGLDisplay',
    'names': ['eglGetDisplay'],
    'arguments': 'EGLNativeDisplayType display_id', },
  { 'return_type': 'EGLBoolean',
    'names': ['eglInitialize'],
    'arguments': 'EGLDisplay dpy, EGLint* major, EGLint* minor', },
  { 'return_type': 'EGLBoolean',
    'names': ['eglTerminate'],
    'arguments': 'EGLDisplay dpy', },
  { 'return_type': 'const char*',
    'names': ['eglQueryString'],
    'arguments': 'EGLDisplay dpy, EGLint name', },
  { 'return_type': 'EGLBoolean',
    'names': ['eglGetConfigs'],
    'arguments': 'EGLDisplay dpy, EGLConfig* configs, EGLint config_size, '
        'EGLint* num_config', },
  { 'return_type': 'EGLBoolean',
    'names': ['eglChooseConfig'],
'arguments': 'EGLDisplay dpy, const EGLint* attrib_list, EGLConfig* configs, '
        'EGLint config_size, EGLint* num_config', },
  { 'return_type': 'EGLBoolean',
    'names': ['eglGetConfigAttrib'],
    'arguments':
        'EGLDisplay dpy, EGLConfig config, EGLint attribute, EGLint* value', },
  { 'return_type': 'EGLImageKHR',
    'names': ['eglCreateImageKHR'],
    'arguments':
        'EGLDisplay dpy, EGLContext ctx, EGLenum target, EGLClientBuffer buffer, '
        'const EGLint* attrib_list',
    'other_extensions': ['EGL_KHR_image_base',
                         'EGL_KHR_gl_texture_2D_image'] },
  { 'return_type': 'EGLBoolean',
    'names': ['eglDestroyImageKHR'],
    'arguments': 'EGLDisplay dpy, EGLImageKHR image',
    'other_extensions': ['EGL_KHR_image_base'] },
  { 'return_type': 'EGLSurface',
    'names': ['eglCreateWindowSurface'],
    'arguments': 'EGLDisplay dpy, EGLConfig config, EGLNativeWindowType win, '
        'const EGLint* attrib_list', },
  { 'return_type': 'EGLSurface',
    'names': ['eglCreatePbufferSurface'],
    'arguments': 'EGLDisplay dpy, EGLConfig config, const EGLint* attrib_list', },
  { 'return_type': 'EGLSurface',
    'names': ['eglCreatePixmapSurface'],
    'arguments': 'EGLDisplay dpy, EGLConfig config, EGLNativePixmapType pixmap, '
        'const EGLint* attrib_list', },
  { 'return_type': 'EGLBoolean',
    'names': ['eglDestroySurface'],
    'arguments': 'EGLDisplay dpy, EGLSurface surface', },
  { 'return_type': 'EGLBoolean',
    'names': ['eglQuerySurface'],
    'arguments':
        'EGLDisplay dpy, EGLSurface surface, EGLint attribute, EGLint* value', },
  { 'return_type': 'EGLBoolean',
    'names': ['eglBindAPI'],
    'arguments': 'EGLenum api', },
  { 'return_type': 'EGLenum',
    'names': ['eglQueryAPI'],
    'arguments': 'void', },
  { 'return_type': 'EGLBoolean',
    'names': ['eglWaitClient'],
    'arguments': 'void', },
  { 'return_type': 'EGLBoolean',
    'names': ['eglReleaseThread'],
    'arguments': 'void', },
  { 'return_type': 'EGLSurface',
    'names': ['eglCreatePbufferFromClientBuffer'],
    'arguments':
        'EGLDisplay dpy, EGLenum buftype, void* buffer, EGLConfig config, '
        'const EGLint* attrib_list', },
  { 'return_type': 'EGLBoolean',
    'names': ['eglSurfaceAttrib'],
    'arguments':
        'EGLDisplay dpy, EGLSurface surface, EGLint attribute, EGLint value', },
  { 'return_type': 'EGLBoolean',
    'names': ['eglBindTexImage'],
    'arguments': 'EGLDisplay dpy, EGLSurface surface, EGLint buffer', },
  { 'return_type': 'EGLBoolean',
    'names': ['eglReleaseTexImage'],
    'arguments': 'EGLDisplay dpy, EGLSurface surface, EGLint buffer', },
  { 'return_type': 'EGLBoolean',
    'names': ['eglSwapInterval'],
    'arguments': 'EGLDisplay dpy, EGLint interval', },
  { 'return_type': 'EGLContext',
    'names': ['eglCreateContext'],
    'arguments': 'EGLDisplay dpy, EGLConfig config, EGLContext share_context, '
        'const EGLint* attrib_list', },
  { 'return_type': 'EGLBoolean',
    'names': ['eglDestroyContext'],
    'arguments': 'EGLDisplay dpy, EGLContext ctx', },
  { 'return_type': 'EGLBoolean',
    'names': ['eglMakeCurrent'],
    'arguments':
        'EGLDisplay dpy, EGLSurface draw, EGLSurface read, EGLContext ctx', },
  { 'return_type': 'EGLContext',
    'names': ['eglGetCurrentContext'],
    'arguments': 'void', },
  { 'return_type': 'EGLSurface',
    'names': ['eglGetCurrentSurface'],
    'arguments': 'EGLint readdraw', },
  { 'return_type': 'EGLDisplay',
    'names': ['eglGetCurrentDisplay'],
    'arguments': 'void', },
  { 'return_type': 'EGLBoolean',
    'names': ['eglQueryContext'],
    'arguments':
        'EGLDisplay dpy, EGLContext ctx, EGLint attribute, EGLint* value', },
  { 'return_type': 'EGLBoolean',
    'names': ['eglWaitGL'],
    'arguments': 'void', },
  { 'return_type': 'EGLBoolean',
    'names': ['eglWaitNative'],
    'arguments': 'EGLint engine', },
  { 'return_type': 'EGLBoolean',
    'names': ['eglSwapBuffers'],
    'arguments': 'EGLDisplay dpy, EGLSurface surface', },
  { 'return_type': 'EGLBoolean',
    'names': ['eglCopyBuffers'],
    'arguments': 'EGLDisplay dpy, EGLSurface surface, EGLNativePixmapType target', },
  { 'return_type': '__eglMustCastToProperFunctionPointerType',
    'names': ['eglGetProcAddress'],
    'arguments': 'const char* procname', },
  { 'return_type': 'EGLBoolean',
    'names': ['eglPostSubBufferNV'],
    'arguments': 'EGLDisplay dpy, EGLSurface surface, '
        'EGLint x, EGLint y, EGLint width, EGLint height', },
  { 'return_type': 'EGLBoolean',
    'names': ['eglQuerySurfacePointerANGLE'],
    'arguments':
        'EGLDisplay dpy, EGLSurface surface, EGLint attribute, void** value', },
  { 'return_type': 'EGLSyncKHR',
    'names': ['eglCreateSyncKHR'],
    'arguments': 'EGLDisplay dpy, EGLenum type, const EGLint* attrib_list',
    'other_extensions': ['EGL_KHR_fence_sync'] },
  { 'return_type': 'EGLint',
    'names': ['eglClientWaitSyncKHR'],
    'arguments': 'EGLDisplay dpy, EGLSyncKHR sync, EGLint flags, '
        'EGLTimeKHR timeout',
    'other_extensions': ['EGL_KHR_fence_sync'] },
  { 'return_type': 'EGLBoolean',
    'names': ['eglGetSyncAttribKHR'],
    'arguments': 'EGLDisplay dpy, EGLSyncKHR sync, EGLint attribute, '
        'EGLint* value',
    'other_extensions': ['EGL_KHR_fence_sync'] },
  { 'return_type': 'EGLBoolean',
    'names': ['eglDestroySyncKHR'],
    'arguments': 'EGLDisplay dpy, EGLSyncKHR sync',
    'other_extensions': ['EGL_KHR_fence_sync'] },
  { 'return_type': 'EGLBoolean',
    'names': ['eglGetSyncValuesCHROMIUM'],
    'arguments': 'EGLDisplay dpy, EGLSurface surface, '
        'EGLuint64CHROMIUM* ust, EGLuint64CHROMIUM* msc, '
        'EGLuint64CHROMIUM* sbc', },
  { 'return_type': 'EGLint',
    'names': ['eglWaitSyncKHR'],
    'arguments': 'EGLDisplay dpy, EGLSyncKHR sync, EGLint flags',
    'other_extensions': ['EGL_KHR_wait_sync'] },
]

# Windows WGL entry points (including ARB/EXT pbuffer and pixel-format
# extensions).  Same record format as GL_FUNCTIONS above.
WGL_FUNCTIONS = [
  { 'return_type': 'HGLRC',
    'names': ['wglCreateContext'],
    'arguments': 'HDC hdc', },
  { 'return_type': 'HGLRC',
    'names': ['wglCreateLayerContext'],
    'arguments': 'HDC hdc, int iLayerPlane', },
  { 'return_type': 'BOOL',
    'names': ['wglCopyContext'],
    'arguments': 'HGLRC hglrcSrc, HGLRC hglrcDst, UINT mask', },
  { 'return_type': 'BOOL',
    'names': ['wglDeleteContext'],
    'arguments': 'HGLRC hglrc', },
  { 'return_type': 'HGLRC',
    'names': ['wglGetCurrentContext'],
    'arguments': '', },
  { 'return_type': 'HDC',
    'names': ['wglGetCurrentDC'],
    'arguments': '', },
  { 'return_type': 'BOOL',
    'names': ['wglMakeCurrent'],
    'arguments': 'HDC hdc, HGLRC hglrc',
},
  { 'return_type': 'BOOL',
    'names': ['wglShareLists'],
    'arguments': 'HGLRC hglrc1, HGLRC hglrc2', },
  { 'return_type': 'BOOL',
    'names': ['wglSwapIntervalEXT'],
    'arguments': 'int interval', },
  { 'return_type': 'BOOL',
    'names': ['wglSwapLayerBuffers'],
    'arguments': 'HDC hdc, UINT fuPlanes', },
  { 'return_type': 'const char*',
    'names': ['wglGetExtensionsStringARB'],
    'arguments': 'HDC hDC', },
  { 'return_type': 'const char*',
    'names': ['wglGetExtensionsStringEXT'],
    'arguments': '', },
  { 'return_type': 'BOOL',
    'names': ['wglChoosePixelFormatARB'],
    'arguments':
        'HDC dc, const int* int_attrib_list, const float* float_attrib_list, '
        'UINT max_formats, int* formats, UINT* num_formats', },
  { 'return_type': 'HPBUFFERARB',
    'names': ['wglCreatePbufferARB'],
    'arguments': 'HDC hDC, int iPixelFormat, int iWidth, int iHeight, '
        'const int* piAttribList', },
  { 'return_type': 'HDC',
    'names': ['wglGetPbufferDCARB'],
    'arguments': 'HPBUFFERARB hPbuffer', },
  { 'return_type': 'int',
    'names': ['wglReleasePbufferDCARB'],
    'arguments': 'HPBUFFERARB hPbuffer, HDC hDC', },
  { 'return_type': 'BOOL',
    'names': ['wglDestroyPbufferARB'],
    'arguments': 'HPBUFFERARB hPbuffer', },
  { 'return_type': 'BOOL',
    'names': ['wglQueryPbufferARB'],
    'arguments': 'HPBUFFERARB hPbuffer, int iAttribute, int* piValue', },
]

# X11 GLX entry points (including SGI/MESA/EXT/ARB extensions).  Same record
# format as GL_FUNCTIONS above.
GLX_FUNCTIONS = [
  { 'return_type': 'int',
    'names': ['glXWaitVideoSyncSGI'],
    'arguments': 'int divisor, int remainder, unsigned int* count', },
  { 'return_type': 'XVisualInfo*',
    'names': ['glXChooseVisual'],
    'arguments': 'Display* dpy, int screen, int* attribList', },
  { 'return_type': 'void',
    'names': ['glXCopySubBufferMESA'],
    'arguments': 'Display* dpy, GLXDrawable drawable, '
        'int x, int y, int width, int height', },
  { 'return_type': 'GLXContext',
    'names': ['glXCreateContext'],
    'arguments':
        'Display* dpy, XVisualInfo* vis, GLXContext shareList, int direct', },
  { 'return_type': 'void',
    'names': ['glXBindTexImageEXT'],
    'arguments':
        'Display* dpy, GLXDrawable drawable, int buffer, int* attribList', },
  { 'return_type': 'void',
    'names': ['glXReleaseTexImageEXT'],
    'arguments': 'Display* dpy, GLXDrawable drawable, int buffer', },
  { 'return_type': 'void',
    'names': ['glXDestroyContext'],
    'arguments': 'Display* dpy, GLXContext ctx', },
  { 'return_type': 'int',
    'names': ['glXMakeCurrent'],
    'arguments': 'Display* dpy, GLXDrawable drawable, GLXContext ctx', },
  { 'return_type': 'void',
    'names': ['glXCopyContext'],
    'arguments':
        'Display* dpy, GLXContext src, GLXContext dst, unsigned long mask', },
  { 'return_type': 'void',
    'names': ['glXSwapBuffers'],
    'arguments': 'Display* dpy, GLXDrawable drawable', },
  { 'return_type': 'GLXPixmap',
    'names': ['glXCreateGLXPixmap'],
    'arguments': 'Display* dpy, XVisualInfo* visual, Pixmap pixmap', },
  { 'return_type': 'void',
    'names': ['glXDestroyGLXPixmap'],
    'arguments': 'Display* dpy, GLXPixmap pixmap', },
  { 'return_type': 'int',
    'names': ['glXQueryExtension'],
    'arguments': 'Display* dpy, int* errorb, int* event', },
  { 'return_type': 'int',
    'names': ['glXQueryVersion'],
    'arguments': 'Display* dpy, int* maj, int* min', },
  { 'return_type': 'int',
    'names': ['glXIsDirect'],
    'arguments': 'Display* dpy, GLXContext ctx', },
  { 'return_type': 'int',
    'names': ['glXGetConfig'],
    'arguments': 'Display* dpy, XVisualInfo* visual, int attrib, int* value', },
  { 'return_type': 'GLXContext',
    'names': ['glXGetCurrentContext'],
    'arguments': 'void', },
  { 'return_type': 'GLXDrawable',
    'names': ['glXGetCurrentDrawable'],
    'arguments': 'void', },
  { 'return_type': 'void',
    'names': ['glXWaitGL'],
    'arguments': 'void', },
  { 'return_type': 'void',
    'names': ['glXWaitX'],
    'arguments': 'void', },
  { 'return_type': 'void',
    'names': ['glXUseXFont'],
    'arguments': 'Font font, int first, int count, int list', },
  { 'return_type': 'const char*',
    'names': ['glXQueryExtensionsString'],
    'arguments': 'Display* dpy, int screen', },
  { 'return_type': 'const char*',
    'names': ['glXQueryServerString'],
    'arguments': 'Display* dpy, int screen, int name', },
  { 'return_type': 'const char*',
    'names': ['glXGetClientString'],
    'arguments': 'Display* dpy, int name', },
  { 'return_type': 'Display*',
    'names': ['glXGetCurrentDisplay'],
    'arguments': 'void', },
  { 'return_type': 'GLXFBConfig*',
    'names': ['glXChooseFBConfig'],
    'arguments': 'Display* dpy, int screen, const int* attribList, int* nitems', },
  { 'return_type': 'int',
    'names': ['glXGetFBConfigAttrib'],
    'arguments': 'Display* dpy, GLXFBConfig config, int attribute, int* value', },
  { 'return_type': 'GLXFBConfig*',
    'names': ['glXGetFBConfigs'],
    'arguments': 'Display* dpy, int screen, int* nelements', },
  { 'return_type': 'XVisualInfo*',
    'names': ['glXGetVisualFromFBConfig'],
    'arguments': 'Display* dpy, GLXFBConfig config', },
  { 'return_type': 'GLXWindow',
    'names': ['glXCreateWindow'],
    'arguments':
        'Display* dpy, GLXFBConfig config, Window win, const int* attribList', },
  { 'return_type': 'void',
    'names': ['glXDestroyWindow'],
    'arguments': 'Display* dpy, GLXWindow window', },
  { 'return_type': 'GLXPixmap',
    'names': ['glXCreatePixmap'],
    'arguments': 'Display* dpy, GLXFBConfig config, '
        'Pixmap pixmap, const int* attribList', },
  { 'return_type': 'void',
    'names': ['glXDestroyPixmap'],
    'arguments': 'Display* dpy, GLXPixmap pixmap', },
  { 'return_type': 'GLXPbuffer',
    'names': ['glXCreatePbuffer'],
    'arguments': 'Display* dpy, GLXFBConfig config, const int* attribList', },
  { 'return_type': 'void',
    'names': ['glXDestroyPbuffer'],
    'arguments': 'Display* dpy, GLXPbuffer pbuf', },
  { 'return_type': 'void',
    'names': ['glXQueryDrawable'],
    'arguments':
        'Display* dpy, GLXDrawable draw, int attribute, unsigned int* value', },
  { 'return_type': 'GLXContext',
    'names': ['glXCreateNewContext'],
    'arguments': 'Display* dpy, GLXFBConfig config, int renderType, '
        'GLXContext shareList, int direct', },
  { 'return_type': 'int',
    'names': ['glXMakeContextCurrent'],
    'arguments':
        'Display* dpy, GLXDrawable draw, GLXDrawable read, GLXContext ctx', },
  { 'return_type': 'GLXDrawable',
    'names': ['glXGetCurrentReadDrawable'],
    'arguments': 'void', },
  {
'return_type': 'int', 'names': ['glXQueryContext'], 'arguments': 'Display* dpy, GLXContext ctx, int attribute, int* value', }, { 'return_type': 'void', 'names': ['glXSelectEvent'], 'arguments': 'Display* dpy, GLXDrawable drawable, unsigned long mask', }, { 'return_type': 'void', 'names': ['glXGetSelectedEvent'], 'arguments': 'Display* dpy, GLXDrawable drawable, unsigned long* mask', }, { 'return_type': 'void', 'names': ['glXSwapIntervalMESA'], 'arguments': 'unsigned int interval', }, { 'return_type': 'void', 'names': ['glXSwapIntervalEXT'], 'arguments': 'Display* dpy, GLXDrawable drawable, int interval', }, { 'return_type': 'GLXFBConfig', 'names': ['glXGetFBConfigFromVisualSGIX'], 'arguments': 'Display* dpy, XVisualInfo* visualInfo', }, { 'return_type': 'GLXContext', 'names': ['glXCreateContextAttribsARB'], 'arguments': 'Display* dpy, GLXFBConfig config, GLXContext share_context, int direct, ' 'const int* attrib_list', }, { 'return_type': 'bool', 'names': ['glXGetSyncValuesOML'], 'arguments': 'Display* dpy, GLXDrawable drawable, int64* ust, int64* msc, ' 'int64* sbc' }, { 'return_type': 'bool', 'names': ['glXGetMscRateOML'], 'arguments': 'Display* dpy, GLXDrawable drawable, int32* numerator, ' 'int32* denominator' }, ] FUNCTION_SETS = [ [GL_FUNCTIONS, 'gl', [ 'GL/glext.h', 'GLES2/gl2ext.h', # Files below are Chromium-specific and shipped with Chromium sources. 'GL/glextchromium.h', 'GLES2/gl2chromium.h', 'GLES2/gl2extchromium.h' ], []], [OSMESA_FUNCTIONS, 'osmesa', [], []], [EGL_FUNCTIONS, 'egl', [ 'EGL/eglext.h', # Files below are Chromium-specific and shipped with Chromium sources. 'EGL/eglextchromium.h', ], [ 'EGL_ANGLE_d3d_share_handle_client_buffer', 'EGL_ANGLE_surface_d3d_texture_2d_share_handle', ], ], [WGL_FUNCTIONS, 'wgl', ['GL/wglext.h'], []], [GLX_FUNCTIONS, 'glx', ['GL/glx.h', 'GL/glxext.h'], []], ] def GenerateHeader(file, functions, set_name, used_extension_functions): """Generates gl_bindings_autogen_x.h""" # Write file header. 
file.write( """// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // This file is automatically generated. #ifndef UI_GFX_GL_GL_BINDINGS_AUTOGEN_%(name)s_H_ #define UI_GFX_GL_GL_BINDINGS_AUTOGEN_%(name)s_H_ namespace gfx { class GLContext; """ % {'name': set_name.upper()}) # Write typedefs for function pointer types. Always use the GL name for the # typedef. file.write('\n') for func in functions: file.write('typedef %s (GL_BINDING_CALL *%sProc)(%s);\n' % (func['return_type'], func['names'][0], func['arguments'])) # Write declarations for booleans indicating which extensions are available. file.write('\n') file.write("struct Extensions%s {\n" % set_name.upper()) for extension, ext_functions in used_extension_functions: file.write(' bool b_%s;\n' % extension) file.write('};\n') file.write('\n') # Write Procs struct. file.write("struct Procs%s {\n" % set_name.upper()) for func in functions: file.write(' %sProc %sFn;\n' % (func['names'][0], func['names'][0])) file.write('};\n') file.write('\n') # Write Api class. file.write( """class GL_EXPORT %(name)sApi { public: %(name)sApi(); virtual ~%(name)sApi(); """ % {'name': set_name.upper()}) for func in functions: file.write(' virtual %s %sFn(%s) = 0;\n' % (func['return_type'], func['names'][0], func['arguments'])) file.write('};\n') file.write('\n') file.write( '} // namespace gfx\n') # Write macros to invoke function pointers. Always use the GL name for the # macro. file.write('\n') for func in functions: file.write('#define %s ::gfx::g_current_%s_context->%sFn\n' % (func['names'][0], set_name.lower(), func['names'][0])) file.write('\n') file.write('#endif // UI_GFX_GL_GL_BINDINGS_AUTOGEN_%s_H_\n' % set_name.upper()) def GenerateAPIHeader(file, functions, set_name, used_extension_functions): """Generates gl_bindings_api_autogen_x.h""" # Write file header. 
file.write( """// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // This file is automatically generated. """ % {'name': set_name.upper()}) # Write API declaration. for func in functions: file.write(' virtual %s %sFn(%s) OVERRIDE;\n' % (func['return_type'], func['names'][0], func['arguments'])) file.write('\n') def GenerateMockHeader(file, functions, set_name, used_extension_functions): """Generates gl_mock_autogen_x.h""" # Write file header. file.write( """// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // This file is automatically generated. """ % {'name': set_name.upper()}) # Write API declaration. for func in functions: args = func['arguments'] if args == 'void': args = '' arg_count = 0 if len(args): arg_count = func['arguments'].count(',') + 1 file.write(' MOCK_METHOD%d(%s, %s(%s));\n' % (arg_count, func['names'][0][2:], func['return_type'], args)) file.write('\n') def GenerateInterfaceHeader( file, functions, set_name, used_extension_functions): """Generates gl_interface_autogen_x.h""" # Write file header. file.write( """// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // This file is automatically generated. """ % {'name': set_name.upper()}) # Write API declaration. for func in functions: args = func['arguments'] if args == 'void': args = '' file.write(' virtual %s %s(%s) = 0;\n' % (func['return_type'], func['names'][0][2:], args)) file.write('\n') def GenerateSource(file, functions, set_name, used_extension_functions): """Generates gl_bindings_autogen_x.cc""" # Write file header. file.write( """// Copyright (c) 2011 The Chromium Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // This file is automatically generated. #include <string> #include "base/debug/trace_event.h" #include "gpu/command_buffer/common/gles2_cmd_utils.h" #include "ui/gl/gl_bindings.h" #include "ui/gl/gl_context.h" #include "ui/gl/gl_implementation.h" #include "ui/gl/gl_%s_api_implementation.h" using gpu::gles2::GLES2Util; namespace gfx { """ % set_name.lower()) # Write definitions of function pointers. file.write('\n') file.write('static bool g_debugBindingsInitialized;\n') file.write('Driver%s g_driver_%s;\n' % (set_name.upper(), set_name.lower())) file.write('\n') # Write function to initialize the core function pointers. The code assumes # any non-NULL pointer returned by GetGLCoreProcAddress() is valid, although # it may be overwritten by an extension function pointer later. file.write('\n') file.write('void Driver%s::InitializeBindings() {\n' % set_name.upper()) for func in functions: first_name = func['names'][0] for i, name in enumerate(func['names']): if i: file.write(' if (!fn.%sFn)\n ' % first_name) file.write( ' fn.%sFn = reinterpret_cast<%sProc>(' 'GetGLCoreProcAddress("%s"));\n' % (first_name, first_name, name)) file.write('}\n') file.write('\n') # Write function to initialize the extension function pointers. This function # uses a current context to query which extensions are actually supported. 
file.write("""void Driver%s::InitializeExtensionBindings( GLContext* context) { """ % set_name.upper()) file.write(' DCHECK(context && context->IsCurrent(NULL));\n') for extension, ext_functions in used_extension_functions: file.write(' ext.b_%s = context->HasExtension("%s");\n' % (extension, extension)) file.write(' if (ext.b_%s) {\n' % (extension)) queried_entry_points = set() for entry_point_name, function_name in ext_functions: # Replace the pointer unconditionally unless this extension has several # alternatives for the same entry point (e.g., # GL_ARB_blend_func_extended). if entry_point_name in queried_entry_points: file.write(' if (!fn.%sFn)\n ' % entry_point_name) file.write( ' fn.%sFn = reinterpret_cast<%sProc>(GetGLProcAddress("%s"));\n' % (entry_point_name, entry_point_name, function_name)) queried_entry_points.add(entry_point_name) file.write(' }\n') file.write(' if (g_debugBindingsInitialized)\n') file.write(' UpdateDebugExtensionBindings();\n') file.write('}\n') file.write('\n') # Write logging wrappers for each function. 
file.write('extern "C" {\n') for func in functions: names = func['names'] return_type = func['return_type'] arguments = func['arguments'] file.write('\n') file.write('static %s GL_BINDING_CALL Debug_%s(%s) {\n' % (return_type, names[0], arguments)) argument_names = re.sub( r'(const )?[a-zA-Z0-9_]+\** ([a-zA-Z0-9_]+)', r'\2', arguments) argument_names = re.sub( r'(const )?[a-zA-Z0-9_]+\** ([a-zA-Z0-9_]+)', r'\2', argument_names) log_argument_names = re.sub( r'const char\* ([a-zA-Z0-9_]+)', r'CONSTCHAR_\1', arguments) log_argument_names = re.sub( r'(const )?[a-zA-Z0-9_]+\* ([a-zA-Z0-9_]+)', r'CONSTVOID_\2', log_argument_names) log_argument_names = re.sub( r'(?<!E)GLenum ([a-zA-Z0-9_]+)', r'GLenum_\1', log_argument_names) log_argument_names = re.sub( r'(?<!E)GLboolean ([a-zA-Z0-9_]+)', r'GLboolean_\1', log_argument_names) log_argument_names = re.sub( r'(const )?[a-zA-Z0-9_]+\** ([a-zA-Z0-9_]+)', r'\2', log_argument_names) log_argument_names = re.sub( r'(const )?[a-zA-Z0-9_]+\** ([a-zA-Z0-9_]+)', r'\2', log_argument_names) log_argument_names = re.sub( r'CONSTVOID_([a-zA-Z0-9_]+)', r'static_cast<const void*>(\1)', log_argument_names); log_argument_names = re.sub( r'CONSTCHAR_([a-zA-Z0-9_]+)', r'\1', log_argument_names); log_argument_names = re.sub( r'GLenum_([a-zA-Z0-9_]+)', r'GLES2Util::GetStringEnum(\1)', log_argument_names) log_argument_names = re.sub( r'GLboolean_([a-zA-Z0-9_]+)', r'GLES2Util::GetStringBool(\1)', log_argument_names) log_argument_names = log_argument_names.replace(',', ' << ", " <<') if argument_names == 'void' or argument_names == '': argument_names = '' log_argument_names = '' else: log_argument_names = " << " + log_argument_names function_name = names[0] if return_type == 'void': file.write(' GL_SERVICE_LOG("%s" << "(" %s << ")");\n' % (function_name, log_argument_names)) file.write(' g_driver_%s.debug_fn.%sFn(%s);\n' % (set_name.lower(), function_name, argument_names)) if 'logging_code' in func: file.write("%s\n" % func['logging_code']) else: 
file.write(' GL_SERVICE_LOG("%s" << "(" %s << ")");\n' % (function_name, log_argument_names)) file.write(' %s result = g_driver_%s.debug_fn.%sFn(%s);\n' % (return_type, set_name.lower(), function_name, argument_names)) if 'logging_code' in func: file.write("%s\n" % func['logging_code']) else: file.write(' GL_SERVICE_LOG("GL_RESULT: " << result);\n'); file.write(' return result;\n') file.write('}\n') file.write('} // extern "C"\n') # Write function to initialize the debug function pointers. file.write('\n') file.write('void Driver%s::InitializeDebugBindings() {\n' % set_name.upper()) for func in functions: first_name = func['names'][0] file.write(' if (!debug_fn.%sFn) {\n' % first_name) file.write(' debug_fn.%sFn = fn.%sFn;\n' % (first_name, first_name)) file.write(' fn.%sFn = Debug_%s;\n' % (first_name, first_name)) file.write(' }\n') file.write(' g_debugBindingsInitialized = true;\n') file.write('}\n') # Write function to update the debug function pointers to extension functions # after the extensions have been initialized. file.write('\n') file.write('void Driver%s::UpdateDebugExtensionBindings() {\n' % set_name.upper()) for extension, ext_functions in used_extension_functions: for name, _ in ext_functions: file.write(' if (debug_fn.%sFn != fn.%sFn &&\n' % (name, name)) file.write(' fn.%sFn != Debug_%s) {\n' % (name, name)) file.write(' debug_fn.%sFn = fn.%sFn;\n' % (name, name)) file.write(' fn.%sFn = Debug_%s;\n' % (name, name)) file.write(' }\n') file.write('}\n') # Write function to clear all function pointers. 
file.write('\n') file.write("""void Driver%s::ClearBindings() { memset(this, 0, sizeof(*this)); } """ % set_name.upper()) # Write GLApiBase functions for func in functions: names = func['names'] return_type = func['return_type'] arguments = func['arguments'] file.write('\n') file.write('%s %sApiBase::%sFn(%s) {\n' % (return_type, set_name.upper(), names[0], arguments)) argument_names = re.sub( r'(const )?[a-zA-Z0-9_]+\** ([a-zA-Z0-9_]+)', r'\2', arguments) argument_names = re.sub( r'(const )?[a-zA-Z0-9_]+\** ([a-zA-Z0-9_]+)', r'\2', argument_names) if argument_names == 'void' or argument_names == '': argument_names = '' function_name = names[0] if return_type == 'void': file.write(' driver_->fn.%sFn(%s);\n' % (function_name, argument_names)) else: file.write(' return driver_->fn.%sFn(%s);\n' % (function_name, argument_names)) file.write('}\n') # Write TraceGLApi functions for func in functions: names = func['names'] return_type = func['return_type'] arguments = func['arguments'] file.write('\n') file.write('%s Trace%sApi::%sFn(%s) {\n' % (return_type, set_name.upper(), names[0], arguments)) argument_names = re.sub( r'(const )?[a-zA-Z0-9_]+\** ([a-zA-Z0-9_]+)', r'\2', arguments) argument_names = re.sub( r'(const )?[a-zA-Z0-9_]+\** ([a-zA-Z0-9_]+)', r'\2', argument_names) if argument_names == 'void' or argument_names == '': argument_names = '' function_name = names[0] file.write(' TRACE_EVENT_BINARY_EFFICIENT0("gpu", "TraceGLAPI::%s")\n' % function_name) if return_type == 'void': file.write(' %s_api_->%sFn(%s);\n' % (set_name.lower(), function_name, argument_names)) else: file.write(' return %s_api_->%sFn(%s);\n' % (set_name.lower(), function_name, argument_names)) file.write('}\n') file.write('\n') file.write('} // namespace gfx\n') def GenerateMockSource(file, functions): """Generates functions that invoke a mock GLInterface""" file.write( """// Copyright (c) 2011 The Chromium Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // This file is automatically generated. #include <string.h> #include "ui/gl/gl_interface.h" namespace gfx { """) # Write function that trampoline into the GLInterface. for func in functions: file.write('\n') file.write('%s GL_BINDING_CALL Mock_%s(%s) {\n' % (func['return_type'], func['names'][0], func['arguments'])) argument_names = re.sub(r'(const )?[a-zA-Z0-9]+((\s*const\s*)?\*)* ([a-zA-Z0-9]+)', r'\4', func['arguments']) if argument_names == 'void': argument_names = '' function_name = func['names'][0][2:] if func['return_type'] == 'void': file.write(' GLInterface::GetGLInterface()->%s(%s);\n' % (function_name, argument_names)) else: file.write(' return GLInterface::GetGLInterface()->%s(%s);\n' % (function_name, argument_names)) file.write('}\n') # Write an 'invalid' function to catch code calling through uninitialized # function pointers or trying to interpret the return value of # GLProcAddress(). file.write('\n') file.write('static void MockInvalidFunction() {\n') file.write(' NOTREACHED();\n') file.write('}\n') # Write a function to lookup a mock GL function based on its name. file.write('\n') file.write('void* GL_BINDING_CALL GetMockGLProcAddress(const char* name) {\n') for func in functions: first_name = func['names'][0] file.write(' if (strcmp(name, "%s") == 0)\n' % first_name) file.write(' return reinterpret_cast<void*>(Mock_%s);\n' % first_name) # Always return a non-NULL pointer like some EGL implementations do. file.write(' return reinterpret_cast<void*>(&MockInvalidFunction);\n') file.write('}\n'); file.write('\n') file.write('} // namespace gfx\n') def ParseExtensionFunctionsFromHeader(header_file): """Parse a C extension header file and return a map from extension names to a list of functions. Args: header_file: Line-iterable C header file. Returns: Map of extension name => functions. 
""" extension_start = re.compile( r'#ifndef ((?:GL|EGL|WGL|GLX)_[A-Z]+_[a-zA-Z]\w+)') extension_function = re.compile(r'.+\s+([a-z]+\w+)\s*\(') typedef = re.compile(r'typedef .*') macro_start = re.compile(r'^#(if|ifdef|ifndef).*') macro_end = re.compile(r'^#endif.*') macro_depth = 0 current_extension = None current_extension_depth = 0 extensions = collections.defaultdict(lambda: []) for line in header_file: if macro_start.match(line): macro_depth += 1 elif macro_end.match(line): macro_depth -= 1 if macro_depth < current_extension_depth: current_extension = None match = extension_start.match(line) if match: current_extension = match.group(1) current_extension_depth = macro_depth assert current_extension not in extensions, \ "Duplicate extension: " + current_extension match = extension_function.match(line) if match and current_extension and not typedef.match(line): extensions[current_extension].append(match.group(1)) return extensions def GetExtensionFunctions(extension_headers): """Parse extension functions from a list of header files. Args: extension_headers: List of header file names. Returns: Map of extension name => list of functions. """ extensions = {} for header in extension_headers: extensions.update(ParseExtensionFunctionsFromHeader(open(header))) return extensions def GetFunctionToExtensionMap(extensions): """Construct map from a function names to extensions which define the function. Args: extensions: Map of extension name => functions. Returns: Map of function name => extension name. 
""" function_to_extensions = {} for extension, functions in extensions.items(): for function in functions: if not function in function_to_extensions: function_to_extensions[function] = [] function_to_extensions[function].append(extension) return function_to_extensions def LooksLikeExtensionFunction(function): """Heuristic to see if a function name is consistent with extension function naming.""" vendor = re.match(r'\w+?([A-Z][A-Z]+)$', function) return vendor is not None and not vendor.group(1) in ['GL', 'API', 'DC'] def GetUsedExtensionFunctions(functions, extension_headers, extra_extensions): """Determine which functions belong to extensions. Args: functions: List of (return type, function names, arguments). extension_headers: List of header file names. Returns: List of (extension name, [function name alternatives]) sorted with least preferred extensions first. """ # Parse known extensions. extensions = GetExtensionFunctions(extension_headers) functions_to_extensions = GetFunctionToExtensionMap(extensions) # Collect all used extension functions. used_extension_functions = collections.defaultdict(lambda: []) for func in functions: for name in func['names']: # Make sure we know about all extension functions. if (LooksLikeExtensionFunction(name) and not name in functions_to_extensions): raise RuntimeError('%s looks like an extension function but does not ' 'belong to any of the known extensions.' % name) if name in functions_to_extensions: extensions = functions_to_extensions[name][:] if 'other_extensions' in func: extensions.extend(func['other_extensions']) for extension in extensions: used_extension_functions[extension].append((func['names'][0], name)) # Add extensions that do not have any functions. used_extension_functions.update(dict( [(e, []) for e in extra_extensions if e not in used_extension_functions])) def ExtensionSortKey(name): # Prefer ratified extensions and EXTs. 
    # The empty string matches every name, so the loop always returns; more
    # preferred categories yield smaller (more negative) keys and sort first.
    preferences = ['_ARB_', '_OES_', '_EXT_', '']
    for i, category in enumerate(preferences):
      if category in name:
        return -i
  used_extension_functions = sorted(used_extension_functions.items(),
      key = lambda item: ExtensionSortKey(item[0]))

  return used_extension_functions


def ResolveHeader(header, header_paths):
  """Locate a header file on a colon-separated list of search paths.

  Returns the first existing candidate path, using forward slashes and made
  relative to the current working directory when the search path is
  relative.  Raises Exception if the header is found on none of the paths.
  """
  paths = header_paths.split(':')

  # Always use a path for Chromium-specific extensions. They are extracted
  # to separate files.
  paths.append('.')
  paths.append('../../gpu')

  # Relative search paths are interpreted relative to this script's own
  # directory, not the current working directory.
  root = os.path.abspath(os.path.dirname(__file__))

  for path in paths:
    result = os.path.join(path, header)
    if not os.path.isabs(path):
      result = os.path.relpath(os.path.join(root, result), os.getcwd())
    if os.path.exists(result):
      # Always use forward slashes as path separators. Otherwise backslashes
      # may be incorrectly interpreted as escape characters.
      return result.replace(os.path.sep, '/')

  raise Exception('Header %s not found.' % header)


def main(argv):
  """This is the main function."""

  parser = optparse.OptionParser()
  parser.add_option('--inputs', action='store_true')
  parser.add_option('--header-paths')

  options, args = parser.parse_args(argv)

  if options.inputs:
    # --inputs mode: list the resolved header paths (consumed by the build
    # system as input dependencies) and exit without generating anything.
    for [_, _, headers, _] in FUNCTION_SETS:
      for header in headers:
        # Python 2 print statement; this script predates Python 3.
        print ResolveHeader(header, options.header_paths)
    return 0

  # Optional positional argument: the output directory (defaults to '.').
  # NOTE(review): 'dir' shadows the builtin of the same name; kept as-is.
  if len(args) >= 1:
    dir = args[0]
  else:
    dir = '.'
  # For each function set, emit the five per-set generated files (bindings
  # header, API header, bindings source, interface header, mock header).
  for [functions, set_name, extension_headers, extensions] in FUNCTION_SETS:
    extension_headers = [ResolveHeader(h, options.header_paths)
                         for h in extension_headers]
    used_extension_functions = GetUsedExtensionFunctions(
        functions, extension_headers, extensions)

    header_file = open(
        os.path.join(dir, 'gl_bindings_autogen_%s.h' % set_name), 'wb')
    GenerateHeader(header_file, functions, set_name, used_extension_functions)
    header_file.close()

    header_file = open(
        os.path.join(dir, 'gl_bindings_api_autogen_%s.h' % set_name), 'wb')
    GenerateAPIHeader(
        header_file, functions, set_name, used_extension_functions)
    header_file.close()

    source_file = open(
        os.path.join(dir, 'gl_bindings_autogen_%s.cc' % set_name), 'wb')
    GenerateSource(source_file, functions, set_name, used_extension_functions)
    source_file.close()

    header_file = open(
        os.path.join(dir, 'gl_interface_autogen_%s.h' % set_name), 'wb')
    GenerateInterfaceHeader(
        header_file, functions, set_name, used_extension_functions)
    header_file.close()

    header_file = open(
        os.path.join(dir, 'gl_mock_autogen_%s.h' % set_name), 'wb')
    GenerateMockHeader(
        header_file, functions, set_name, used_extension_functions)
    header_file.close()

  # The mock bindings source is generated once, for the core GL set only.
  source_file = open(os.path.join(dir, 'gl_bindings_autogen_mock.cc'), 'wb')
  GenerateMockSource(source_file, GL_FUNCTIONS)
  source_file.close()

  return 0


if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
# codeparrot/github-code-clean -- dataset boundary marker left between two
# unrelated concatenated files; commented out so it is not parsed as code.
import unittest
from unittest import mock
from test import support
import subprocess
import sys
import signal
import io
import os
import errno
import tempfile
import time
import selectors
import sysconfig
import select
import shutil
import gc
import textwrap

try:
    import threading
except ImportError:
    threading = None

if support.PGO:
    raise unittest.SkipTest("test is not helpful for PGO")

mswindows = (sys.platform == "win32")

#
# Depends on the following external programs: Python
#

if mswindows:
    # Source snippet prepended to child programs on Windows: switches the
    # child's stdout to binary mode so byte-exact comparisons are not broken
    # by newline translation.
    SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), '
                                                'os.O_BINARY);')
else:
    SETBINARY = ''


class BaseTestCase(unittest.TestCase):
    # Shared setup/teardown for the subprocess tests: keeps the set of live
    # child processes empty before and after every test.

    def setUp(self):
        # Try to minimize the number of children we have so this test
        # doesn't crash on some buildbots (Alphas in particular).
        support.reap_children()

    def tearDown(self):
        # Wait for any children the test leaked, then verify the
        # module-private list of active Popen objects is empty again.
        for inst in subprocess._active:
            inst.wait()
        subprocess._cleanup()
        self.assertFalse(subprocess._active, "subprocess._active not empty")

    def assertStderrEqual(self, stderr, expected, msg=None):
        # In a debug build, stuff like "[6580 refs]" is printed to stderr at
        # shutdown time. That frustrates tests trying to check stderr produced
        # from a spawned Python process.
        actual = support.strip_python_stderr(stderr)
        # strip_python_stderr also strips whitespace, so we do too.
        expected = expected.strip()
        self.assertEqual(actual, expected, msg)


class PopenTestException(Exception):
    # Marker exception raised by PopenExecuteChildRaises below.
    pass


class PopenExecuteChildRaises(subprocess.Popen):
    """Popen subclass for testing cleanup of subprocess.PIPE filehandles
    when _execute_child fails.
""" def _execute_child(self, *args, **kwargs): raise PopenTestException("Forced Exception for Test") class ProcessTestCase(BaseTestCase): def test_io_buffered_by_default(self): p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) try: self.assertIsInstance(p.stdin, io.BufferedIOBase) self.assertIsInstance(p.stdout, io.BufferedIOBase) self.assertIsInstance(p.stderr, io.BufferedIOBase) finally: p.stdin.close() p.stdout.close() p.stderr.close() p.wait() def test_io_unbuffered_works(self): p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=0) try: self.assertIsInstance(p.stdin, io.RawIOBase) self.assertIsInstance(p.stdout, io.RawIOBase) self.assertIsInstance(p.stderr, io.RawIOBase) finally: p.stdin.close() p.stdout.close() p.stderr.close() p.wait() def test_call_seq(self): # call() function with sequence argument rc = subprocess.call([sys.executable, "-c", "import sys; sys.exit(47)"]) self.assertEqual(rc, 47) def test_call_timeout(self): # call() function with timeout argument; we want to test that the child # process gets killed when the timeout expires. If the child isn't # killed, this call will deadlock since subprocess.call waits for the # child. 
self.assertRaises(subprocess.TimeoutExpired, subprocess.call, [sys.executable, "-c", "while True: pass"], timeout=0.1) def test_check_call_zero(self): # check_call() function with zero return code rc = subprocess.check_call([sys.executable, "-c", "import sys; sys.exit(0)"]) self.assertEqual(rc, 0) def test_check_call_nonzero(self): # check_call() function with non-zero return code with self.assertRaises(subprocess.CalledProcessError) as c: subprocess.check_call([sys.executable, "-c", "import sys; sys.exit(47)"]) self.assertEqual(c.exception.returncode, 47) def test_check_output(self): # check_output() function with zero return code output = subprocess.check_output( [sys.executable, "-c", "print('BDFL')"]) self.assertIn(b'BDFL', output) def test_check_output_nonzero(self): # check_call() function with non-zero return code with self.assertRaises(subprocess.CalledProcessError) as c: subprocess.check_output( [sys.executable, "-c", "import sys; sys.exit(5)"]) self.assertEqual(c.exception.returncode, 5) def test_check_output_stderr(self): # check_output() function stderr redirected to stdout output = subprocess.check_output( [sys.executable, "-c", "import sys; sys.stderr.write('BDFL')"], stderr=subprocess.STDOUT) self.assertIn(b'BDFL', output) def test_check_output_stdin_arg(self): # check_output() can be called with stdin set to a file tf = tempfile.TemporaryFile() self.addCleanup(tf.close) tf.write(b'pear') tf.seek(0) output = subprocess.check_output( [sys.executable, "-c", "import sys; sys.stdout.write(sys.stdin.read().upper())"], stdin=tf) self.assertIn(b'PEAR', output) def test_check_output_input_arg(self): # check_output() can be called with input set to a string output = subprocess.check_output( [sys.executable, "-c", "import sys; sys.stdout.write(sys.stdin.read().upper())"], input=b'pear') self.assertIn(b'PEAR', output) def test_check_output_stdout_arg(self): # check_output() refuses to accept 'stdout' argument with self.assertRaises(ValueError) as c: output = 
subprocess.check_output( [sys.executable, "-c", "print('will not be run')"], stdout=sys.stdout) self.fail("Expected ValueError when stdout arg supplied.") self.assertIn('stdout', c.exception.args[0]) def test_check_output_stdin_with_input_arg(self): # check_output() refuses to accept 'stdin' with 'input' tf = tempfile.TemporaryFile() self.addCleanup(tf.close) tf.write(b'pear') tf.seek(0) with self.assertRaises(ValueError) as c: output = subprocess.check_output( [sys.executable, "-c", "print('will not be run')"], stdin=tf, input=b'hare') self.fail("Expected ValueError when stdin and input args supplied.") self.assertIn('stdin', c.exception.args[0]) self.assertIn('input', c.exception.args[0]) def test_check_output_timeout(self): # check_output() function with timeout arg with self.assertRaises(subprocess.TimeoutExpired) as c: output = subprocess.check_output( [sys.executable, "-c", "import sys, time\n" "sys.stdout.write('BDFL')\n" "sys.stdout.flush()\n" "time.sleep(3600)"], # Some heavily loaded buildbots (sparc Debian 3.x) require # this much time to start and print. 
timeout=3) self.fail("Expected TimeoutExpired.") self.assertEqual(c.exception.output, b'BDFL') def test_call_kwargs(self): # call() function with keyword args newenv = os.environ.copy() newenv["FRUIT"] = "banana" rc = subprocess.call([sys.executable, "-c", 'import sys, os;' 'sys.exit(os.getenv("FRUIT")=="banana")'], env=newenv) self.assertEqual(rc, 1) def test_invalid_args(self): # Popen() called with invalid arguments should raise TypeError # but Popen.__del__ should not complain (issue #12085) with support.captured_stderr() as s: self.assertRaises(TypeError, subprocess.Popen, invalid_arg_name=1) argcount = subprocess.Popen.__init__.__code__.co_argcount too_many_args = [0] * (argcount + 1) self.assertRaises(TypeError, subprocess.Popen, *too_many_args) self.assertEqual(s.getvalue(), '') def test_stdin_none(self): # .stdin is None when not redirected p = subprocess.Popen([sys.executable, "-c", 'print("banana")'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) p.wait() self.assertEqual(p.stdin, None) def test_stdout_none(self): # .stdout is None when not redirected, and the child's stdout will # be inherited from the parent. In order to test this we run a # subprocess in a subprocess: # this_test # \-- subprocess created by this test (parent) # \-- subprocess created by the parent subprocess (child) # The parent doesn't specify stdout, so the child will use the # parent's stdout. This test checks that the message printed by the # child goes to the parent stdout. The parent also checks that the # child's stdout is None. See #11963. 
code = ('import sys; from subprocess import Popen, PIPE;' 'p = Popen([sys.executable, "-c", "print(\'test_stdout_none\')"],' ' stdin=PIPE, stderr=PIPE);' 'p.wait(); assert p.stdout is None;') p = subprocess.Popen([sys.executable, "-c", code], stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) out, err = p.communicate() self.assertEqual(p.returncode, 0, err) self.assertEqual(out.rstrip(), b'test_stdout_none') def test_stderr_none(self): # .stderr is None when not redirected p = subprocess.Popen([sys.executable, "-c", 'print("banana")'], stdin=subprocess.PIPE, stdout=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stdin.close) p.wait() self.assertEqual(p.stderr, None) def _assert_python(self, pre_args, **kwargs): # We include sys.exit() to prevent the test runner from hanging # whenever python is found. args = pre_args + ["import sys; sys.exit(47)"] p = subprocess.Popen(args, **kwargs) p.wait() self.assertEqual(47, p.returncode) def test_executable(self): # Check that the executable argument works. # # On Unix (non-Mac and non-Windows), Python looks at args[0] to # determine where its standard library is, so we need the directory # of args[0] to be valid for the Popen() call to Python to succeed. # See also issue #16170 and issue #7774. doesnotexist = os.path.join(os.path.dirname(sys.executable), "doesnotexist") self._assert_python([doesnotexist, "-c"], executable=sys.executable) def test_executable_takes_precedence(self): # Check that the executable argument takes precedence over args[0]. # # Verify first that the call succeeds without the executable arg. 
pre_args = [sys.executable, "-c"] self._assert_python(pre_args) self.assertRaises(FileNotFoundError, self._assert_python, pre_args, executable="doesnotexist") @unittest.skipIf(mswindows, "executable argument replaces shell") def test_executable_replaces_shell(self): # Check that the executable argument replaces the default shell # when shell=True. self._assert_python([], executable=sys.executable, shell=True) # For use in the test_cwd* tests below. def _normalize_cwd(self, cwd): # Normalize an expected cwd (for Tru64 support). # We can't use os.path.realpath since it doesn't expand Tru64 {memb} # strings. See bug #1063571. with support.change_cwd(cwd): return os.getcwd() # For use in the test_cwd* tests below. def _split_python_path(self): # Return normalized (python_dir, python_base). python_path = os.path.realpath(sys.executable) return os.path.split(python_path) # For use in the test_cwd* tests below. def _assert_cwd(self, expected_cwd, python_arg, **kwargs): # Invoke Python via Popen, and assert that (1) the call succeeds, # and that (2) the current working directory of the child process # matches *expected_cwd*. p = subprocess.Popen([python_arg, "-c", "import os, sys; " "sys.stdout.write(os.getcwd()); " "sys.exit(47)"], stdout=subprocess.PIPE, **kwargs) self.addCleanup(p.stdout.close) p.wait() self.assertEqual(47, p.returncode) normcase = os.path.normcase self.assertEqual(normcase(expected_cwd), normcase(p.stdout.read().decode("utf-8"))) def test_cwd(self): # Check that cwd changes the cwd for the child process. temp_dir = tempfile.gettempdir() temp_dir = self._normalize_cwd(temp_dir) self._assert_cwd(temp_dir, sys.executable, cwd=temp_dir) @unittest.skipIf(mswindows, "pending resolution of issue #15533") def test_cwd_with_relative_arg(self): # Check that Popen looks for args[0] relative to cwd if args[0] # is relative. 
python_dir, python_base = self._split_python_path() rel_python = os.path.join(os.curdir, python_base) with support.temp_cwd() as wrong_dir: # Before calling with the correct cwd, confirm that the call fails # without cwd and with the wrong cwd. self.assertRaises(FileNotFoundError, subprocess.Popen, [rel_python]) self.assertRaises(FileNotFoundError, subprocess.Popen, [rel_python], cwd=wrong_dir) python_dir = self._normalize_cwd(python_dir) self._assert_cwd(python_dir, rel_python, cwd=python_dir) @unittest.skipIf(mswindows, "pending resolution of issue #15533") def test_cwd_with_relative_executable(self): # Check that Popen looks for executable relative to cwd if executable # is relative (and that executable takes precedence over args[0]). python_dir, python_base = self._split_python_path() rel_python = os.path.join(os.curdir, python_base) doesntexist = "somethingyoudonthave" with support.temp_cwd() as wrong_dir: # Before calling with the correct cwd, confirm that the call fails # without cwd and with the wrong cwd. self.assertRaises(FileNotFoundError, subprocess.Popen, [doesntexist], executable=rel_python) self.assertRaises(FileNotFoundError, subprocess.Popen, [doesntexist], executable=rel_python, cwd=wrong_dir) python_dir = self._normalize_cwd(python_dir) self._assert_cwd(python_dir, doesntexist, executable=rel_python, cwd=python_dir) def test_cwd_with_absolute_arg(self): # Check that Popen can find the executable when the cwd is wrong # if args[0] is an absolute path. python_dir, python_base = self._split_python_path() abs_python = os.path.join(python_dir, python_base) rel_python = os.path.join(os.curdir, python_base) with support.temp_dir() as wrong_dir: # Before calling with an absolute path, confirm that using a # relative path fails. 
self.assertRaises(FileNotFoundError, subprocess.Popen, [rel_python], cwd=wrong_dir) wrong_dir = self._normalize_cwd(wrong_dir) self._assert_cwd(wrong_dir, abs_python, cwd=wrong_dir) @unittest.skipIf(sys.base_prefix != sys.prefix, 'Test is not venv-compatible') def test_executable_with_cwd(self): python_dir, python_base = self._split_python_path() python_dir = self._normalize_cwd(python_dir) self._assert_cwd(python_dir, "somethingyoudonthave", executable=sys.executable, cwd=python_dir) @unittest.skipIf(sys.base_prefix != sys.prefix, 'Test is not venv-compatible') @unittest.skipIf(sysconfig.is_python_build(), "need an installed Python. See #7774") def test_executable_without_cwd(self): # For a normal installation, it should work without 'cwd' # argument. For test runs in the build directory, see #7774. self._assert_cwd(os.getcwd(), "somethingyoudonthave", executable=sys.executable) def test_stdin_pipe(self): # stdin redirection p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.exit(sys.stdin.read() == "pear")'], stdin=subprocess.PIPE) p.stdin.write(b"pear") p.stdin.close() p.wait() self.assertEqual(p.returncode, 1) def test_stdin_filedes(self): # stdin is set to open file descriptor tf = tempfile.TemporaryFile() self.addCleanup(tf.close) d = tf.fileno() os.write(d, b"pear") os.lseek(d, 0, 0) p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.exit(sys.stdin.read() == "pear")'], stdin=d) p.wait() self.assertEqual(p.returncode, 1) def test_stdin_fileobj(self): # stdin is set to open file object tf = tempfile.TemporaryFile() self.addCleanup(tf.close) tf.write(b"pear") tf.seek(0) p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.exit(sys.stdin.read() == "pear")'], stdin=tf) p.wait() self.assertEqual(p.returncode, 1) def test_stdout_pipe(self): # stdout redirection p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.stdout.write("orange")'], stdout=subprocess.PIPE) with p: self.assertEqual(p.stdout.read(), b"orange") def 
test_stdout_filedes(self): # stdout is set to open file descriptor tf = tempfile.TemporaryFile() self.addCleanup(tf.close) d = tf.fileno() p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.stdout.write("orange")'], stdout=d) p.wait() os.lseek(d, 0, 0) self.assertEqual(os.read(d, 1024), b"orange") def test_stdout_fileobj(self): # stdout is set to open file object tf = tempfile.TemporaryFile() self.addCleanup(tf.close) p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.stdout.write("orange")'], stdout=tf) p.wait() tf.seek(0) self.assertEqual(tf.read(), b"orange") def test_stderr_pipe(self): # stderr redirection p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.stderr.write("strawberry")'], stderr=subprocess.PIPE) with p: self.assertStderrEqual(p.stderr.read(), b"strawberry") def test_stderr_filedes(self): # stderr is set to open file descriptor tf = tempfile.TemporaryFile() self.addCleanup(tf.close) d = tf.fileno() p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.stderr.write("strawberry")'], stderr=d) p.wait() os.lseek(d, 0, 0) self.assertStderrEqual(os.read(d, 1024), b"strawberry") def test_stderr_fileobj(self): # stderr is set to open file object tf = tempfile.TemporaryFile() self.addCleanup(tf.close) p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.stderr.write("strawberry")'], stderr=tf) p.wait() tf.seek(0) self.assertStderrEqual(tf.read(), b"strawberry") def test_stderr_redirect_with_no_stdout_redirect(self): # test stderr=STDOUT while stdout=None (not set) # - grandchild prints to stderr # - child redirects grandchild's stderr to its stdout # - the parent should get grandchild's stderr in child's stdout p = subprocess.Popen([sys.executable, "-c", 'import sys, subprocess;' 'rc = subprocess.call([sys.executable, "-c",' ' "import sys;"' ' "sys.stderr.write(\'42\')"],' ' stderr=subprocess.STDOUT);' 'sys.exit(rc)'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() #NOTE: stdout 
should get stderr from grandchild self.assertStderrEqual(stdout, b'42') self.assertStderrEqual(stderr, b'') # should be empty self.assertEqual(p.returncode, 0) def test_stdout_stderr_pipe(self): # capture stdout and stderr to the same pipe p = subprocess.Popen([sys.executable, "-c", 'import sys;' 'sys.stdout.write("apple");' 'sys.stdout.flush();' 'sys.stderr.write("orange")'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) with p: self.assertStderrEqual(p.stdout.read(), b"appleorange") def test_stdout_stderr_file(self): # capture stdout and stderr to the same open file tf = tempfile.TemporaryFile() self.addCleanup(tf.close) p = subprocess.Popen([sys.executable, "-c", 'import sys;' 'sys.stdout.write("apple");' 'sys.stdout.flush();' 'sys.stderr.write("orange")'], stdout=tf, stderr=tf) p.wait() tf.seek(0) self.assertStderrEqual(tf.read(), b"appleorange") def test_stdout_filedes_of_stdout(self): # stdout is set to 1 (#1531862). # To avoid printing the text on stdout, we do something similar to # test_stdout_none (see above). The parent subprocess calls the child # subprocess passing stdout=1, and this test uses stdout=PIPE in # order to capture and check the output of the parent. See #11963. 
code = ('import sys, subprocess; ' 'rc = subprocess.call([sys.executable, "-c", ' ' "import os, sys; sys.exit(os.write(sys.stdout.fileno(), ' 'b\'test with stdout=1\'))"], stdout=1); ' 'assert rc == 18') p = subprocess.Popen([sys.executable, "-c", code], stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) out, err = p.communicate() self.assertEqual(p.returncode, 0, err) self.assertEqual(out.rstrip(), b'test with stdout=1') def test_stdout_devnull(self): p = subprocess.Popen([sys.executable, "-c", 'for i in range(10240):' 'print("x" * 1024)'], stdout=subprocess.DEVNULL) p.wait() self.assertEqual(p.stdout, None) def test_stderr_devnull(self): p = subprocess.Popen([sys.executable, "-c", 'import sys\n' 'for i in range(10240):' 'sys.stderr.write("x" * 1024)'], stderr=subprocess.DEVNULL) p.wait() self.assertEqual(p.stderr, None) def test_stdin_devnull(self): p = subprocess.Popen([sys.executable, "-c", 'import sys;' 'sys.stdin.read(1)'], stdin=subprocess.DEVNULL) p.wait() self.assertEqual(p.stdin, None) def test_env(self): newenv = os.environ.copy() newenv["FRUIT"] = "orange" with subprocess.Popen([sys.executable, "-c", 'import sys,os;' 'sys.stdout.write(os.getenv("FRUIT"))'], stdout=subprocess.PIPE, env=newenv) as p: stdout, stderr = p.communicate() self.assertEqual(stdout, b"orange") # Windows requires at least the SYSTEMROOT environment variable to start # Python @unittest.skipIf(sys.platform == 'win32', 'cannot test an empty env on Windows') @unittest.skipIf(sysconfig.get_config_var('Py_ENABLE_SHARED') is not None, 'the python library cannot be loaded ' 'with an empty environment') def test_empty_env(self): with subprocess.Popen([sys.executable, "-c", 'import os; ' 'print(list(os.environ.keys()))'], stdout=subprocess.PIPE, env={}) as p: stdout, stderr = p.communicate() self.assertIn(stdout.strip(), (b"[]", # Mac OS X adds __CF_USER_TEXT_ENCODING variable to an empty # environment 
b"['__CF_USER_TEXT_ENCODING']")) def test_communicate_stdin(self): p = subprocess.Popen([sys.executable, "-c", 'import sys;' 'sys.exit(sys.stdin.read() == "pear")'], stdin=subprocess.PIPE) p.communicate(b"pear") self.assertEqual(p.returncode, 1) def test_communicate_stdout(self): p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.stdout.write("pineapple")'], stdout=subprocess.PIPE) (stdout, stderr) = p.communicate() self.assertEqual(stdout, b"pineapple") self.assertEqual(stderr, None) def test_communicate_stderr(self): p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.stderr.write("pineapple")'], stderr=subprocess.PIPE) (stdout, stderr) = p.communicate() self.assertEqual(stdout, None) self.assertStderrEqual(stderr, b"pineapple") def test_communicate(self): p = subprocess.Popen([sys.executable, "-c", 'import sys,os;' 'sys.stderr.write("pineapple");' 'sys.stdout.write(sys.stdin.read())'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) self.addCleanup(p.stdin.close) (stdout, stderr) = p.communicate(b"banana") self.assertEqual(stdout, b"banana") self.assertStderrEqual(stderr, b"pineapple") def test_communicate_timeout(self): p = subprocess.Popen([sys.executable, "-c", 'import sys,os,time;' 'sys.stderr.write("pineapple\\n");' 'time.sleep(1);' 'sys.stderr.write("pear\\n");' 'sys.stdout.write(sys.stdin.read())'], universal_newlines=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.assertRaises(subprocess.TimeoutExpired, p.communicate, "banana", timeout=0.3) # Make sure we can keep waiting for it, and that we get the whole output # after it completes. (stdout, stderr) = p.communicate() self.assertEqual(stdout, "banana") self.assertStderrEqual(stderr.encode(), b"pineapple\npear\n") def test_communicate_timeout_large_output(self): # Test an expiring timeout while the child is outputting lots of data. 
p = subprocess.Popen([sys.executable, "-c", 'import sys,os,time;' 'sys.stdout.write("a" * (64 * 1024));' 'time.sleep(0.2);' 'sys.stdout.write("a" * (64 * 1024));' 'time.sleep(0.2);' 'sys.stdout.write("a" * (64 * 1024));' 'time.sleep(0.2);' 'sys.stdout.write("a" * (64 * 1024));'], stdout=subprocess.PIPE) self.assertRaises(subprocess.TimeoutExpired, p.communicate, timeout=0.4) (stdout, _) = p.communicate() self.assertEqual(len(stdout), 4 * 64 * 1024) # Test for the fd leak reported in http://bugs.python.org/issue2791. def test_communicate_pipe_fd_leak(self): for stdin_pipe in (False, True): for stdout_pipe in (False, True): for stderr_pipe in (False, True): options = {} if stdin_pipe: options['stdin'] = subprocess.PIPE if stdout_pipe: options['stdout'] = subprocess.PIPE if stderr_pipe: options['stderr'] = subprocess.PIPE if not options: continue p = subprocess.Popen((sys.executable, "-c", "pass"), **options) p.communicate() if p.stdin is not None: self.assertTrue(p.stdin.closed) if p.stdout is not None: self.assertTrue(p.stdout.closed) if p.stderr is not None: self.assertTrue(p.stderr.closed) def test_communicate_returns(self): # communicate() should return None if no redirection is active p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(47)"]) (stdout, stderr) = p.communicate() self.assertEqual(stdout, None) self.assertEqual(stderr, None) def test_communicate_pipe_buf(self): # communicate() with writes larger than pipe_buf # This test will probably deadlock rather than fail, if # communicate() does not work properly. 
x, y = os.pipe() os.close(x) os.close(y) p = subprocess.Popen([sys.executable, "-c", 'import sys,os;' 'sys.stdout.write(sys.stdin.read(47));' 'sys.stderr.write("x" * %d);' 'sys.stdout.write(sys.stdin.read())' % support.PIPE_MAX_SIZE], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) self.addCleanup(p.stdin.close) string_to_write = b"a" * support.PIPE_MAX_SIZE (stdout, stderr) = p.communicate(string_to_write) self.assertEqual(stdout, string_to_write) def test_writes_before_communicate(self): # stdin.write before communicate() p = subprocess.Popen([sys.executable, "-c", 'import sys,os;' 'sys.stdout.write(sys.stdin.read())'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) self.addCleanup(p.stdin.close) p.stdin.write(b"banana") (stdout, stderr) = p.communicate(b"split") self.assertEqual(stdout, b"bananasplit") self.assertStderrEqual(stderr, b"") def test_universal_newlines(self): p = subprocess.Popen([sys.executable, "-c", 'import sys,os;' + SETBINARY + 'buf = sys.stdout.buffer;' 'buf.write(sys.stdin.readline().encode());' 'buf.flush();' 'buf.write(b"line2\\n");' 'buf.flush();' 'buf.write(sys.stdin.read().encode());' 'buf.flush();' 'buf.write(b"line4\\n");' 'buf.flush();' 'buf.write(b"line5\\r\\n");' 'buf.flush();' 'buf.write(b"line6\\r");' 'buf.flush();' 'buf.write(b"\\nline7");' 'buf.flush();' 'buf.write(b"\\nline8");'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=1) with p: p.stdin.write("line1\n") p.stdin.flush() self.assertEqual(p.stdout.readline(), "line1\n") p.stdin.write("line3\n") p.stdin.close() self.addCleanup(p.stdout.close) self.assertEqual(p.stdout.readline(), "line2\n") self.assertEqual(p.stdout.read(6), "line3\n") self.assertEqual(p.stdout.read(), "line4\nline5\nline6\nline7\nline8") def test_universal_newlines_communicate(self): # universal newlines through 
communicate() p = subprocess.Popen([sys.executable, "-c", 'import sys,os;' + SETBINARY + 'buf = sys.stdout.buffer;' 'buf.write(b"line2\\n");' 'buf.flush();' 'buf.write(b"line4\\n");' 'buf.flush();' 'buf.write(b"line5\\r\\n");' 'buf.flush();' 'buf.write(b"line6\\r");' 'buf.flush();' 'buf.write(b"\\nline7");' 'buf.flush();' 'buf.write(b"\\nline8");'], stderr=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=1) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) (stdout, stderr) = p.communicate() self.assertEqual(stdout, "line2\nline4\nline5\nline6\nline7\nline8") def test_universal_newlines_communicate_stdin(self): # universal newlines through communicate(), with only stdin p = subprocess.Popen([sys.executable, "-c", 'import sys,os;' + SETBINARY + textwrap.dedent(''' s = sys.stdin.readline() assert s == "line1\\n", repr(s) s = sys.stdin.read() assert s == "line3\\n", repr(s) ''')], stdin=subprocess.PIPE, universal_newlines=1) (stdout, stderr) = p.communicate("line1\nline3\n") self.assertEqual(p.returncode, 0) def test_universal_newlines_communicate_input_none(self): # Test communicate(input=None) with universal newlines. # # We set stdout to PIPE because, as of this writing, a different # code path is tested when the number of pipes is zero or one. 
p = subprocess.Popen([sys.executable, "-c", "pass"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True) p.communicate() self.assertEqual(p.returncode, 0) def test_universal_newlines_communicate_stdin_stdout_stderr(self): # universal newlines through communicate(), with stdin, stdout, stderr p = subprocess.Popen([sys.executable, "-c", 'import sys,os;' + SETBINARY + textwrap.dedent(''' s = sys.stdin.buffer.readline() sys.stdout.buffer.write(s) sys.stdout.buffer.write(b"line2\\r") sys.stderr.buffer.write(b"eline2\\n") s = sys.stdin.buffer.read() sys.stdout.buffer.write(s) sys.stdout.buffer.write(b"line4\\n") sys.stdout.buffer.write(b"line5\\r\\n") sys.stderr.buffer.write(b"eline6\\r") sys.stderr.buffer.write(b"eline7\\r\\nz") ''')], stdin=subprocess.PIPE, stderr=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) (stdout, stderr) = p.communicate("line1\nline3\n") self.assertEqual(p.returncode, 0) self.assertEqual("line1\nline2\nline3\nline4\nline5\n", stdout) # Python debug build push something like "[42442 refs]\n" # to stderr at exit of subprocess. # Don't use assertStderrEqual because it strips CR and LF from output. self.assertTrue(stderr.startswith("eline2\neline6\neline7\n")) def test_universal_newlines_communicate_encodings(self): # Check that universal newlines mode works for various encodings, # in particular for encodings in the UTF-16 and UTF-32 families. # See issue #15595. # # UTF-16 and UTF-32-BE are sufficient to check both with BOM and # without, and UTF-16 and UTF-32. for encoding in ['utf-16', 'utf-32-be']: code = ("import sys; " r"sys.stdout.buffer.write('1\r\n2\r3\n4'.encode('%s'))" % encoding) args = [sys.executable, '-c', code] # We set stdin to be non-None because, as of this writing, # a different code path is used when the number of pipes is # zero or one. 
popen = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, encoding=encoding) stdout, stderr = popen.communicate(input='') self.assertEqual(stdout, '1\n2\n3\n4') def test_communicate_errors(self): for errors, expected in [ ('ignore', ''), ('replace', '\ufffd\ufffd'), ('surrogateescape', '\udc80\udc80'), ('backslashreplace', '\\x80\\x80'), ]: code = ("import sys; " r"sys.stdout.buffer.write(b'[\x80\x80]')") args = [sys.executable, '-c', code] # We set stdin to be non-None because, as of this writing, # a different code path is used when the number of pipes is # zero or one. popen = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, encoding='utf-8', errors=errors) stdout, stderr = popen.communicate(input='') self.assertEqual(stdout, '[{}]'.format(expected)) def test_no_leaking(self): # Make sure we leak no resources if not mswindows: max_handles = 1026 # too much for most UNIX systems else: max_handles = 2050 # too much for (at least some) Windows setups handles = [] tmpdir = tempfile.mkdtemp() try: for i in range(max_handles): try: tmpfile = os.path.join(tmpdir, support.TESTFN) handles.append(os.open(tmpfile, os.O_WRONLY|os.O_CREAT)) except OSError as e: if e.errno != errno.EMFILE: raise break else: self.skipTest("failed to reach the file descriptor limit " "(tried %d)" % max_handles) # Close a couple of them (should be enough for a subprocess) for i in range(10): os.close(handles.pop()) # Loop creating some subprocesses. If one of them leaks some fds, # the next loop iteration will fail by reaching the max fd limit. 
for i in range(15): p = subprocess.Popen([sys.executable, "-c", "import sys;" "sys.stdout.write(sys.stdin.read())"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) data = p.communicate(b"lime")[0] self.assertEqual(data, b"lime") finally: for h in handles: os.close(h) shutil.rmtree(tmpdir) def test_list2cmdline(self): self.assertEqual(subprocess.list2cmdline(['a b c', 'd', 'e']), '"a b c" d e') self.assertEqual(subprocess.list2cmdline(['ab"c', '\\', 'd']), 'ab\\"c \\ d') self.assertEqual(subprocess.list2cmdline(['ab"c', ' \\', 'd']), 'ab\\"c " \\\\" d') self.assertEqual(subprocess.list2cmdline(['a\\\\\\b', 'de fg', 'h']), 'a\\\\\\b "de fg" h') self.assertEqual(subprocess.list2cmdline(['a\\"b', 'c', 'd']), 'a\\\\\\"b c d') self.assertEqual(subprocess.list2cmdline(['a\\\\b c', 'd', 'e']), '"a\\\\b c" d e') self.assertEqual(subprocess.list2cmdline(['a\\\\b\\ c', 'd', 'e']), '"a\\\\b\\ c" d e') self.assertEqual(subprocess.list2cmdline(['ab', '']), 'ab ""') def test_poll(self): p = subprocess.Popen([sys.executable, "-c", "import os; os.read(0, 1)"], stdin=subprocess.PIPE) self.addCleanup(p.stdin.close) self.assertIsNone(p.poll()) os.write(p.stdin.fileno(), b'A') p.wait() # Subsequent invocations should just return the returncode self.assertEqual(p.poll(), 0) def test_wait(self): p = subprocess.Popen([sys.executable, "-c", "pass"]) self.assertEqual(p.wait(), 0) # Subsequent invocations should just return the returncode self.assertEqual(p.wait(), 0) def test_wait_timeout(self): p = subprocess.Popen([sys.executable, "-c", "import time; time.sleep(0.3)"]) with self.assertRaises(subprocess.TimeoutExpired) as c: p.wait(timeout=0.0001) self.assertIn("0.0001", str(c.exception)) # For coverage of __str__. # Some heavily loaded buildbots (sparc Debian 3.x) require this much # time to start. self.assertEqual(p.wait(timeout=3), 0) def test_invalid_bufsize(self): # an invalid type of the bufsize argument should raise # TypeError. 
with self.assertRaises(TypeError): subprocess.Popen([sys.executable, "-c", "pass"], "orange") def test_bufsize_is_none(self): # bufsize=None should be the same as bufsize=0. p = subprocess.Popen([sys.executable, "-c", "pass"], None) self.assertEqual(p.wait(), 0) # Again with keyword arg p = subprocess.Popen([sys.executable, "-c", "pass"], bufsize=None) self.assertEqual(p.wait(), 0) def _test_bufsize_equal_one(self, line, expected, universal_newlines): # subprocess may deadlock with bufsize=1, see issue #21332 with subprocess.Popen([sys.executable, "-c", "import sys;" "sys.stdout.write(sys.stdin.readline());" "sys.stdout.flush()"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, bufsize=1, universal_newlines=universal_newlines) as p: p.stdin.write(line) # expect that it flushes the line in text mode os.close(p.stdin.fileno()) # close it without flushing the buffer read_line = p.stdout.readline() try: p.stdin.close() except OSError: pass p.stdin = None self.assertEqual(p.returncode, 0) self.assertEqual(read_line, expected) def test_bufsize_equal_one_text_mode(self): # line is flushed in text mode with bufsize=1. # we should get the full line in return line = "line\n" self._test_bufsize_equal_one(line, line, universal_newlines=True) def test_bufsize_equal_one_binary_mode(self): # line is not flushed in binary mode with bufsize=1. # we should get empty response line = b'line' + os.linesep.encode() # assume ascii-based locale self._test_bufsize_equal_one(line, b'', universal_newlines=False) def test_leaking_fds_on_error(self): # see bug #5179: Popen leaks file descriptors to PIPEs if # the child fails to execute; this will eventually exhaust # the maximum number of open fds. 1024 seems a very common # value for that limit, but Windows has 2048, so we loop # 1024 times (each call leaked two fds). 
for i in range(1024): with self.assertRaises(OSError) as c: subprocess.Popen(['nonexisting_i_hope'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) # ignore errors that indicate the command was not found if c.exception.errno not in (errno.ENOENT, errno.EACCES): raise c.exception @unittest.skipIf(threading is None, "threading required") def test_double_close_on_error(self): # Issue #18851 fds = [] def open_fds(): for i in range(20): fds.extend(os.pipe()) time.sleep(0.001) t = threading.Thread(target=open_fds) t.start() try: with self.assertRaises(EnvironmentError): subprocess.Popen(['nonexisting_i_hope'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) finally: t.join() exc = None for fd in fds: # If a double close occurred, some of those fds will # already have been closed by mistake, and os.close() # here will raise. try: os.close(fd) except OSError as e: exc = e if exc is not None: raise exc @unittest.skipIf(threading is None, "threading required") def test_threadsafe_wait(self): """Issue21291: Popen.wait() needs to be threadsafe for returncode.""" proc = subprocess.Popen([sys.executable, '-c', 'import time; time.sleep(12)']) self.assertEqual(proc.returncode, None) results = [] def kill_proc_timer_thread(): results.append(('thread-start-poll-result', proc.poll())) # terminate it from the thread and wait for the result. proc.kill() proc.wait() results.append(('thread-after-kill-and-wait', proc.returncode)) # this wait should be a no-op given the above. proc.wait() results.append(('thread-after-second-wait', proc.returncode)) # This is a timing sensitive test, the failure mode is # triggered when both the main thread and this thread are in # the wait() call at once. The delay here is to allow the # main thread to most likely be blocked in its wait() call. t = threading.Timer(0.2, kill_proc_timer_thread) t.start() if mswindows: expected_errorcode = 1 else: # Should be -9 because of the proc.kill() from the thread. 
expected_errorcode = -9 # Wait for the process to finish; the thread should kill it # long before it finishes on its own. Supplying a timeout # triggers a different code path for better coverage. proc.wait(timeout=20) self.assertEqual(proc.returncode, expected_errorcode, msg="unexpected result in wait from main thread") # This should be a no-op with no change in returncode. proc.wait() self.assertEqual(proc.returncode, expected_errorcode, msg="unexpected result in second main wait.") t.join() # Ensure that all of the thread results are as expected. # When a race condition occurs in wait(), the returncode could # be set by the wrong thread that doesn't actually have it # leading to an incorrect value. self.assertEqual([('thread-start-poll-result', None), ('thread-after-kill-and-wait', expected_errorcode), ('thread-after-second-wait', expected_errorcode)], results) def test_issue8780(self): # Ensure that stdout is inherited from the parent # if stdout=PIPE is not used code = ';'.join(( 'import subprocess, sys', 'retcode = subprocess.call(' "[sys.executable, '-c', 'print(\"Hello World!\")'])", 'assert retcode == 0')) output = subprocess.check_output([sys.executable, '-c', code]) self.assertTrue(output.startswith(b'Hello World!'), ascii(output)) def test_handles_closed_on_exception(self): # If CreateProcess exits with an error, ensure the # duplicate output handles are released ifhandle, ifname = tempfile.mkstemp() ofhandle, ofname = tempfile.mkstemp() efhandle, efname = tempfile.mkstemp() try: subprocess.Popen (["*"], stdin=ifhandle, stdout=ofhandle, stderr=efhandle) except OSError: os.close(ifhandle) os.remove(ifname) os.close(ofhandle) os.remove(ofname) os.close(efhandle) os.remove(efname) self.assertFalse(os.path.exists(ifname)) self.assertFalse(os.path.exists(ofname)) self.assertFalse(os.path.exists(efname)) def test_communicate_epipe(self): # Issue 10963: communicate() should hide EPIPE p = subprocess.Popen([sys.executable, "-c", 'pass'], stdin=subprocess.PIPE, 
stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) self.addCleanup(p.stdin.close) p.communicate(b"x" * 2**20) def test_communicate_epipe_only_stdin(self): # Issue 10963: communicate() should hide EPIPE p = subprocess.Popen([sys.executable, "-c", 'pass'], stdin=subprocess.PIPE) self.addCleanup(p.stdin.close) p.wait() p.communicate(b"x" * 2**20) @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), "Requires signal.SIGUSR1") @unittest.skipUnless(hasattr(os, 'kill'), "Requires os.kill") @unittest.skipUnless(hasattr(os, 'getppid'), "Requires os.getppid") def test_communicate_eintr(self): # Issue #12493: communicate() should handle EINTR def handler(signum, frame): pass old_handler = signal.signal(signal.SIGUSR1, handler) self.addCleanup(signal.signal, signal.SIGUSR1, old_handler) args = [sys.executable, "-c", 'import os, signal;' 'os.kill(os.getppid(), signal.SIGUSR1)'] for stream in ('stdout', 'stderr'): kw = {stream: subprocess.PIPE} with subprocess.Popen(args, **kw) as process: # communicate() will be interrupted by SIGUSR1 process.communicate() # This test is Linux-ish specific for simplicity to at least have # some coverage. It is not a platform specific bug. @unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()), "Linux specific") def test_failed_child_execute_fd_leak(self): """Test for the fork() failure fd leak reported in issue16327.""" fd_directory = '/proc/%d/fd' % os.getpid() fds_before_popen = os.listdir(fd_directory) with self.assertRaises(PopenTestException): PopenExecuteChildRaises( [sys.executable, '-c', 'pass'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # NOTE: This test doesn't verify that the real _execute_child # does not close the file descriptors itself on the way out # during an exception. Code inspection has confirmed that. 
fds_after_exception = os.listdir(fd_directory) self.assertEqual(fds_before_popen, fds_after_exception) class RunFuncTestCase(BaseTestCase): def run_python(self, code, **kwargs): """Run Python code in a subprocess using subprocess.run""" argv = [sys.executable, "-c", code] return subprocess.run(argv, **kwargs) def test_returncode(self): # call() function with sequence argument cp = self.run_python("import sys; sys.exit(47)") self.assertEqual(cp.returncode, 47) with self.assertRaises(subprocess.CalledProcessError): cp.check_returncode() def test_check(self): with self.assertRaises(subprocess.CalledProcessError) as c: self.run_python("import sys; sys.exit(47)", check=True) self.assertEqual(c.exception.returncode, 47) def test_check_zero(self): # check_returncode shouldn't raise when returncode is zero cp = self.run_python("import sys; sys.exit(0)", check=True) self.assertEqual(cp.returncode, 0) def test_timeout(self): # run() function with timeout argument; we want to test that the child # process gets killed when the timeout expires. If the child isn't # killed, this call will deadlock since subprocess.run waits for the # child. 
with self.assertRaises(subprocess.TimeoutExpired): self.run_python("while True: pass", timeout=0.0001) def test_capture_stdout(self): # capture stdout with zero return code cp = self.run_python("print('BDFL')", stdout=subprocess.PIPE) self.assertIn(b'BDFL', cp.stdout) def test_capture_stderr(self): cp = self.run_python("import sys; sys.stderr.write('BDFL')", stderr=subprocess.PIPE) self.assertIn(b'BDFL', cp.stderr) def test_check_output_stdin_arg(self): # run() can be called with stdin set to a file tf = tempfile.TemporaryFile() self.addCleanup(tf.close) tf.write(b'pear') tf.seek(0) cp = self.run_python( "import sys; sys.stdout.write(sys.stdin.read().upper())", stdin=tf, stdout=subprocess.PIPE) self.assertIn(b'PEAR', cp.stdout) def test_check_output_input_arg(self): # check_output() can be called with input set to a string cp = self.run_python( "import sys; sys.stdout.write(sys.stdin.read().upper())", input=b'pear', stdout=subprocess.PIPE) self.assertIn(b'PEAR', cp.stdout) def test_check_output_stdin_with_input_arg(self): # run() refuses to accept 'stdin' with 'input' tf = tempfile.TemporaryFile() self.addCleanup(tf.close) tf.write(b'pear') tf.seek(0) with self.assertRaises(ValueError, msg="Expected ValueError when stdin and input args supplied.") as c: output = self.run_python("print('will not be run')", stdin=tf, input=b'hare') self.assertIn('stdin', c.exception.args[0]) self.assertIn('input', c.exception.args[0]) def test_check_output_timeout(self): with self.assertRaises(subprocess.TimeoutExpired) as c: cp = self.run_python(( "import sys, time\n" "sys.stdout.write('BDFL')\n" "sys.stdout.flush()\n" "time.sleep(3600)"), # Some heavily loaded buildbots (sparc Debian 3.x) require # this much time to start and print. 
timeout=3, stdout=subprocess.PIPE) self.assertEqual(c.exception.output, b'BDFL') # output is aliased to stdout self.assertEqual(c.exception.stdout, b'BDFL') def test_run_kwargs(self): newenv = os.environ.copy() newenv["FRUIT"] = "banana" cp = self.run_python(('import sys, os;' 'sys.exit(33 if os.getenv("FRUIT")=="banana" else 31)'), env=newenv) self.assertEqual(cp.returncode, 33) @unittest.skipIf(mswindows, "POSIX specific tests") class POSIXProcessTestCase(BaseTestCase): def setUp(self): super().setUp() self._nonexistent_dir = "/_this/pa.th/does/not/exist" def _get_chdir_exception(self): try: os.chdir(self._nonexistent_dir) except OSError as e: # This avoids hard coding the errno value or the OS perror() # string and instead capture the exception that we want to see # below for comparison. desired_exception = e desired_exception.strerror += ': ' + repr(self._nonexistent_dir) else: self.fail("chdir to nonexistent directory %s succeeded." % self._nonexistent_dir) return desired_exception def test_exception_cwd(self): """Test error in the child raised in the parent for a bad cwd.""" desired_exception = self._get_chdir_exception() try: p = subprocess.Popen([sys.executable, "-c", ""], cwd=self._nonexistent_dir) except OSError as e: # Test that the child process chdir failure actually makes # it up to the parent process as the correct exception. self.assertEqual(desired_exception.errno, e.errno) self.assertEqual(desired_exception.strerror, e.strerror) else: self.fail("Expected OSError: %s" % desired_exception) def test_exception_bad_executable(self): """Test error in the child raised in the parent for a bad executable.""" desired_exception = self._get_chdir_exception() try: p = subprocess.Popen([sys.executable, "-c", ""], executable=self._nonexistent_dir) except OSError as e: # Test that the child process exec failure actually makes # it up to the parent process as the correct exception. 
self.assertEqual(desired_exception.errno, e.errno) self.assertEqual(desired_exception.strerror, e.strerror) else: self.fail("Expected OSError: %s" % desired_exception) def test_exception_bad_args_0(self): """Test error in the child raised in the parent for a bad args[0].""" desired_exception = self._get_chdir_exception() try: p = subprocess.Popen([self._nonexistent_dir, "-c", ""]) except OSError as e: # Test that the child process exec failure actually makes # it up to the parent process as the correct exception. self.assertEqual(desired_exception.errno, e.errno) self.assertEqual(desired_exception.strerror, e.strerror) else: self.fail("Expected OSError: %s" % desired_exception) def test_restore_signals(self): # Code coverage for both values of restore_signals to make sure it # at least does not blow up. # A test for behavior would be complex. Contributions welcome. subprocess.call([sys.executable, "-c", ""], restore_signals=True) subprocess.call([sys.executable, "-c", ""], restore_signals=False) def test_start_new_session(self): # For code coverage of calling setsid(). We don't care if we get an # EPERM error from it depending on the test execution environment, that # still indicates that it was called. 
try: output = subprocess.check_output( [sys.executable, "-c", "import os; print(os.getpgid(os.getpid()))"], start_new_session=True) except OSError as e: if e.errno != errno.EPERM: raise else: parent_pgid = os.getpgid(os.getpid()) child_pgid = int(output) self.assertNotEqual(parent_pgid, child_pgid) def test_run_abort(self): # returncode handles signal termination with support.SuppressCrashReport(): p = subprocess.Popen([sys.executable, "-c", 'import os; os.abort()']) p.wait() self.assertEqual(-p.returncode, signal.SIGABRT) def test_CalledProcessError_str_signal(self): err = subprocess.CalledProcessError(-int(signal.SIGABRT), "fake cmd") error_string = str(err) # We're relying on the repr() of the signal.Signals intenum to provide # the word signal, the signal name and the numeric value. self.assertIn("signal", error_string.lower()) # We're not being specific about the signal name as some signals have # multiple names and which name is revealed can vary. self.assertIn("SIG", error_string) self.assertIn(str(signal.SIGABRT), error_string) def test_CalledProcessError_str_unknown_signal(self): err = subprocess.CalledProcessError(-9876543, "fake cmd") error_string = str(err) self.assertIn("unknown signal 9876543.", error_string) def test_CalledProcessError_str_non_zero(self): err = subprocess.CalledProcessError(2, "fake cmd") error_string = str(err) self.assertIn("non-zero exit status 2.", error_string) def test_preexec(self): # DISCLAIMER: Setting environment variables is *not* a good use # of a preexec_fn. This is merely a test. 
p = subprocess.Popen([sys.executable, "-c", 'import sys,os;' 'sys.stdout.write(os.getenv("FRUIT"))'], stdout=subprocess.PIPE, preexec_fn=lambda: os.putenv("FRUIT", "apple")) with p: self.assertEqual(p.stdout.read(), b"apple") def test_preexec_exception(self): def raise_it(): raise ValueError("What if two swallows carried a coconut?") try: p = subprocess.Popen([sys.executable, "-c", ""], preexec_fn=raise_it) except subprocess.SubprocessError as e: self.assertTrue( subprocess._posixsubprocess, "Expected a ValueError from the preexec_fn") except ValueError as e: self.assertIn("coconut", e.args[0]) else: self.fail("Exception raised by preexec_fn did not make it " "to the parent process.") class _TestExecuteChildPopen(subprocess.Popen): """Used to test behavior at the end of _execute_child.""" def __init__(self, testcase, *args, **kwargs): self._testcase = testcase subprocess.Popen.__init__(self, *args, **kwargs) def _execute_child(self, *args, **kwargs): try: subprocess.Popen._execute_child(self, *args, **kwargs) finally: # Open a bunch of file descriptors and verify that # none of them are the same as the ones the Popen # instance is using for stdin/stdout/stderr. 
devzero_fds = [os.open("/dev/zero", os.O_RDONLY) for _ in range(8)] try: for fd in devzero_fds: self._testcase.assertNotIn( fd, (self.stdin.fileno(), self.stdout.fileno(), self.stderr.fileno()), msg="At least one fd was closed early.") finally: for fd in devzero_fds: os.close(fd) @unittest.skipIf(not os.path.exists("/dev/zero"), "/dev/zero required.") def test_preexec_errpipe_does_not_double_close_pipes(self): """Issue16140: Don't double close pipes on preexec error.""" def raise_it(): raise subprocess.SubprocessError( "force the _execute_child() errpipe_data path.") with self.assertRaises(subprocess.SubprocessError): self._TestExecuteChildPopen( self, [sys.executable, "-c", "pass"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=raise_it) def test_preexec_gc_module_failure(self): # This tests the code that disables garbage collection if the child # process will execute any Python. def raise_runtime_error(): raise RuntimeError("this shouldn't escape") enabled = gc.isenabled() orig_gc_disable = gc.disable orig_gc_isenabled = gc.isenabled try: gc.disable() self.assertFalse(gc.isenabled()) subprocess.call([sys.executable, '-c', ''], preexec_fn=lambda: None) self.assertFalse(gc.isenabled(), "Popen enabled gc when it shouldn't.") gc.enable() self.assertTrue(gc.isenabled()) subprocess.call([sys.executable, '-c', ''], preexec_fn=lambda: None) self.assertTrue(gc.isenabled(), "Popen left gc disabled.") gc.disable = raise_runtime_error self.assertRaises(RuntimeError, subprocess.Popen, [sys.executable, '-c', ''], preexec_fn=lambda: None) del gc.isenabled # force an AttributeError self.assertRaises(AttributeError, subprocess.Popen, [sys.executable, '-c', ''], preexec_fn=lambda: None) finally: gc.disable = orig_gc_disable gc.isenabled = orig_gc_isenabled if not enabled: gc.disable() @unittest.skipIf( sys.platform == 'darwin', 'setrlimit() seems to fail on OS X') def test_preexec_fork_failure(self): # The internal code did not preserve the 
previous exception when # re-enabling garbage collection try: from resource import getrlimit, setrlimit, RLIMIT_NPROC except ImportError as err: self.skipTest(err) # RLIMIT_NPROC is specific to Linux and BSD limits = getrlimit(RLIMIT_NPROC) [_, hard] = limits setrlimit(RLIMIT_NPROC, (0, hard)) self.addCleanup(setrlimit, RLIMIT_NPROC, limits) try: subprocess.call([sys.executable, '-c', ''], preexec_fn=lambda: None) except BlockingIOError: # Forking should raise EAGAIN, translated to BlockingIOError pass else: self.skipTest('RLIMIT_NPROC had no effect; probably superuser') def test_args_string(self): # args is a string fd, fname = tempfile.mkstemp() # reopen in text mode with open(fd, "w", errors="surrogateescape") as fobj: fobj.write("#!%s\n" % support.unix_shell) fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" % sys.executable) os.chmod(fname, 0o700) p = subprocess.Popen(fname) p.wait() os.remove(fname) self.assertEqual(p.returncode, 47) def test_invalid_args(self): # invalid arguments should raise ValueError self.assertRaises(ValueError, subprocess.call, [sys.executable, "-c", "import sys; sys.exit(47)"], startupinfo=47) self.assertRaises(ValueError, subprocess.call, [sys.executable, "-c", "import sys; sys.exit(47)"], creationflags=47) def test_shell_sequence(self): # Run command through the shell (sequence) newenv = os.environ.copy() newenv["FRUIT"] = "apple" p = subprocess.Popen(["echo $FRUIT"], shell=1, stdout=subprocess.PIPE, env=newenv) with p: self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple") def test_shell_string(self): # Run command through the shell (string) newenv = os.environ.copy() newenv["FRUIT"] = "apple" p = subprocess.Popen("echo $FRUIT", shell=1, stdout=subprocess.PIPE, env=newenv) with p: self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple") def test_call_string(self): # call() function with string argument on UNIX fd, fname = tempfile.mkstemp() # reopen in text mode with open(fd, "w", errors="surrogateescape") as 
fobj: fobj.write("#!%s\n" % support.unix_shell) fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" % sys.executable) os.chmod(fname, 0o700) rc = subprocess.call(fname) os.remove(fname) self.assertEqual(rc, 47) def test_specific_shell(self): # Issue #9265: Incorrect name passed as arg[0]. shells = [] for prefix in ['/bin', '/usr/bin/', '/usr/local/bin']: for name in ['bash', 'ksh']: sh = os.path.join(prefix, name) if os.path.isfile(sh): shells.append(sh) if not shells: # Will probably work for any shell but csh. self.skipTest("bash or ksh required for this test") sh = '/bin/sh' if os.path.isfile(sh) and not os.path.islink(sh): # Test will fail if /bin/sh is a symlink to csh. shells.append(sh) for sh in shells: p = subprocess.Popen("echo $0", executable=sh, shell=True, stdout=subprocess.PIPE) with p: self.assertEqual(p.stdout.read().strip(), bytes(sh, 'ascii')) def _kill_process(self, method, *args): # Do not inherit file handles from the parent. # It should fix failures on some platforms. # Also set the SIGINT handler to the default to make sure it's not # being ignored (some tests rely on that.) old_handler = signal.signal(signal.SIGINT, signal.default_int_handler) try: p = subprocess.Popen([sys.executable, "-c", """if 1: import sys, time sys.stdout.write('x\\n') sys.stdout.flush() time.sleep(30) """], close_fds=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) finally: signal.signal(signal.SIGINT, old_handler) # Wait for the interpreter to be completely initialized before # sending any signal. p.stdout.read(1) getattr(p, method)(*args) return p @unittest.skipIf(sys.platform.startswith(('netbsd', 'openbsd')), "Due to known OS bug (issue #16762)") def _kill_dead_process(self, method, *args): # Do not inherit file handles from the parent. # It should fix failures on some platforms. 
p = subprocess.Popen([sys.executable, "-c", """if 1: import sys, time sys.stdout.write('x\\n') sys.stdout.flush() """], close_fds=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # Wait for the interpreter to be completely initialized before # sending any signal. p.stdout.read(1) # The process should end after this time.sleep(1) # This shouldn't raise even though the child is now dead getattr(p, method)(*args) p.communicate() def test_send_signal(self): p = self._kill_process('send_signal', signal.SIGINT) _, stderr = p.communicate() self.assertIn(b'KeyboardInterrupt', stderr) self.assertNotEqual(p.wait(), 0) def test_kill(self): p = self._kill_process('kill') _, stderr = p.communicate() self.assertStderrEqual(stderr, b'') self.assertEqual(p.wait(), -signal.SIGKILL) def test_terminate(self): p = self._kill_process('terminate') _, stderr = p.communicate() self.assertStderrEqual(stderr, b'') self.assertEqual(p.wait(), -signal.SIGTERM) def test_send_signal_dead(self): # Sending a signal to a dead process self._kill_dead_process('send_signal', signal.SIGINT) def test_kill_dead(self): # Killing a dead process self._kill_dead_process('kill') def test_terminate_dead(self): # Terminating a dead process self._kill_dead_process('terminate') def _save_fds(self, save_fds): fds = [] for fd in save_fds: inheritable = os.get_inheritable(fd) saved = os.dup(fd) fds.append((fd, saved, inheritable)) return fds def _restore_fds(self, fds): for fd, saved, inheritable in fds: os.dup2(saved, fd, inheritable=inheritable) os.close(saved) def check_close_std_fds(self, fds): # Issue #9905: test that subprocess pipes still work properly with # some standard fds closed stdin = 0 saved_fds = self._save_fds(fds) for fd, saved, inheritable in saved_fds: if fd == 0: stdin = saved break try: for fd in fds: os.close(fd) out, err = subprocess.Popen([sys.executable, "-c", 'import sys;' 'sys.stdout.write("apple");' 'sys.stdout.flush();' 'sys.stderr.write("orange")'], 
stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() err = support.strip_python_stderr(err) self.assertEqual((out, err), (b'apple', b'orange')) finally: self._restore_fds(saved_fds) def test_close_fd_0(self): self.check_close_std_fds([0]) def test_close_fd_1(self): self.check_close_std_fds([1]) def test_close_fd_2(self): self.check_close_std_fds([2]) def test_close_fds_0_1(self): self.check_close_std_fds([0, 1]) def test_close_fds_0_2(self): self.check_close_std_fds([0, 2]) def test_close_fds_1_2(self): self.check_close_std_fds([1, 2]) def test_close_fds_0_1_2(self): # Issue #10806: test that subprocess pipes still work properly with # all standard fds closed. self.check_close_std_fds([0, 1, 2]) def test_small_errpipe_write_fd(self): """Issue #15798: Popen should work when stdio fds are available.""" new_stdin = os.dup(0) new_stdout = os.dup(1) try: os.close(0) os.close(1) # Side test: if errpipe_write fails to have its CLOEXEC # flag set this should cause the parent to think the exec # failed. Extremely unlikely: everyone supports CLOEXEC. 
subprocess.Popen([ sys.executable, "-c", "print('AssertionError:0:CLOEXEC failure.')"]).wait() finally: # Restore original stdin and stdout os.dup2(new_stdin, 0) os.dup2(new_stdout, 1) os.close(new_stdin) os.close(new_stdout) def test_remapping_std_fds(self): # open up some temporary files temps = [tempfile.mkstemp() for i in range(3)] try: temp_fds = [fd for fd, fname in temps] # unlink the files -- we won't need to reopen them for fd, fname in temps: os.unlink(fname) # write some data to what will become stdin, and rewind os.write(temp_fds[1], b"STDIN") os.lseek(temp_fds[1], 0, 0) # move the standard file descriptors out of the way saved_fds = self._save_fds(range(3)) try: # duplicate the file objects over the standard fd's for fd, temp_fd in enumerate(temp_fds): os.dup2(temp_fd, fd) # now use those files in the "wrong" order, so that subprocess # has to rearrange them in the child p = subprocess.Popen([sys.executable, "-c", 'import sys; got = sys.stdin.read();' 'sys.stdout.write("got %s"%got); sys.stderr.write("err")'], stdin=temp_fds[1], stdout=temp_fds[2], stderr=temp_fds[0]) p.wait() finally: self._restore_fds(saved_fds) for fd in temp_fds: os.lseek(fd, 0, 0) out = os.read(temp_fds[2], 1024) err = support.strip_python_stderr(os.read(temp_fds[0], 1024)) self.assertEqual(out, b"got STDIN") self.assertEqual(err, b"err") finally: for fd in temp_fds: os.close(fd) def check_swap_fds(self, stdin_no, stdout_no, stderr_no): # open up some temporary files temps = [tempfile.mkstemp() for i in range(3)] temp_fds = [fd for fd, fname in temps] try: # unlink the files -- we won't need to reopen them for fd, fname in temps: os.unlink(fname) # save a copy of the standard file descriptors saved_fds = self._save_fds(range(3)) try: # duplicate the temp files over the standard fd's 0, 1, 2 for fd, temp_fd in enumerate(temp_fds): os.dup2(temp_fd, fd) # write some data to what will become stdin, and rewind os.write(stdin_no, b"STDIN") os.lseek(stdin_no, 0, 0) # now use those files 
in the given order, so that subprocess # has to rearrange them in the child p = subprocess.Popen([sys.executable, "-c", 'import sys; got = sys.stdin.read();' 'sys.stdout.write("got %s"%got); sys.stderr.write("err")'], stdin=stdin_no, stdout=stdout_no, stderr=stderr_no) p.wait() for fd in temp_fds: os.lseek(fd, 0, 0) out = os.read(stdout_no, 1024) err = support.strip_python_stderr(os.read(stderr_no, 1024)) finally: self._restore_fds(saved_fds) self.assertEqual(out, b"got STDIN") self.assertEqual(err, b"err") finally: for fd in temp_fds: os.close(fd) # When duping fds, if there arises a situation where one of the fds is # either 0, 1 or 2, it is possible that it is overwritten (#12607). # This tests all combinations of this. def test_swap_fds(self): self.check_swap_fds(0, 1, 2) self.check_swap_fds(0, 2, 1) self.check_swap_fds(1, 0, 2) self.check_swap_fds(1, 2, 0) self.check_swap_fds(2, 0, 1) self.check_swap_fds(2, 1, 0) def test_surrogates_error_message(self): def prepare(): raise ValueError("surrogate:\uDCff") try: subprocess.call( [sys.executable, "-c", "pass"], preexec_fn=prepare) except ValueError as err: # Pure Python implementations keeps the message self.assertIsNone(subprocess._posixsubprocess) self.assertEqual(str(err), "surrogate:\uDCff") except subprocess.SubprocessError as err: # _posixsubprocess uses a default message self.assertIsNotNone(subprocess._posixsubprocess) self.assertEqual(str(err), "Exception occurred in preexec_fn.") else: self.fail("Expected ValueError or subprocess.SubprocessError") def test_undecodable_env(self): for key, value in (('test', 'abc\uDCFF'), ('test\uDCFF', '42')): encoded_value = value.encode("ascii", "surrogateescape") # test str with surrogates script = "import os; print(ascii(os.getenv(%s)))" % repr(key) env = os.environ.copy() env[key] = value # Use C locale to get ASCII for the locale encoding to force # surrogate-escaping of \xFF in the child process; otherwise it can # be decoded as-is if the default locale is latin-1. 
env['LC_ALL'] = 'C' if sys.platform.startswith("aix"): # On AIX, the C locale uses the Latin1 encoding decoded_value = encoded_value.decode("latin1", "surrogateescape") else: # On other UNIXes, the C locale uses the ASCII encoding decoded_value = value stdout = subprocess.check_output( [sys.executable, "-c", script], env=env) stdout = stdout.rstrip(b'\n\r') self.assertEqual(stdout.decode('ascii'), ascii(decoded_value)) # test bytes key = key.encode("ascii", "surrogateescape") script = "import os; print(ascii(os.getenvb(%s)))" % repr(key) env = os.environ.copy() env[key] = encoded_value stdout = subprocess.check_output( [sys.executable, "-c", script], env=env) stdout = stdout.rstrip(b'\n\r') self.assertEqual(stdout.decode('ascii'), ascii(encoded_value)) def test_bytes_program(self): abs_program = os.fsencode(sys.executable) path, program = os.path.split(sys.executable) program = os.fsencode(program) # absolute bytes path exitcode = subprocess.call([abs_program, "-c", "pass"]) self.assertEqual(exitcode, 0) # absolute bytes path as a string cmd = b"'" + abs_program + b"' -c pass" exitcode = subprocess.call(cmd, shell=True) self.assertEqual(exitcode, 0) # bytes program, unicode PATH env = os.environ.copy() env["PATH"] = path exitcode = subprocess.call([program, "-c", "pass"], env=env) self.assertEqual(exitcode, 0) # bytes program, bytes PATH envb = os.environb.copy() envb[b"PATH"] = os.fsencode(path) exitcode = subprocess.call([program, "-c", "pass"], env=envb) self.assertEqual(exitcode, 0) def test_pipe_cloexec(self): sleeper = support.findfile("input_reader.py", subdir="subprocessdata") fd_status = support.findfile("fd_status.py", subdir="subprocessdata") p1 = subprocess.Popen([sys.executable, sleeper], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False) self.addCleanup(p1.communicate, b'') p2 = subprocess.Popen([sys.executable, fd_status], stdout=subprocess.PIPE, close_fds=False) output, error = p2.communicate() result_fds = 
set(map(int, output.split(b','))) unwanted_fds = set([p1.stdin.fileno(), p1.stdout.fileno(), p1.stderr.fileno()]) self.assertFalse(result_fds & unwanted_fds, "Expected no fds from %r to be open in child, " "found %r" % (unwanted_fds, result_fds & unwanted_fds)) def test_pipe_cloexec_real_tools(self): qcat = support.findfile("qcat.py", subdir="subprocessdata") qgrep = support.findfile("qgrep.py", subdir="subprocessdata") subdata = b'zxcvbn' data = subdata * 4 + b'\n' p1 = subprocess.Popen([sys.executable, qcat], stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=False) p2 = subprocess.Popen([sys.executable, qgrep, subdata], stdin=p1.stdout, stdout=subprocess.PIPE, close_fds=False) self.addCleanup(p1.wait) self.addCleanup(p2.wait) def kill_p1(): try: p1.terminate() except ProcessLookupError: pass def kill_p2(): try: p2.terminate() except ProcessLookupError: pass self.addCleanup(kill_p1) self.addCleanup(kill_p2) p1.stdin.write(data) p1.stdin.close() readfiles, ignored1, ignored2 = select.select([p2.stdout], [], [], 10) self.assertTrue(readfiles, "The child hung") self.assertEqual(p2.stdout.read(), data) p1.stdout.close() p2.stdout.close() def test_close_fds(self): fd_status = support.findfile("fd_status.py", subdir="subprocessdata") fds = os.pipe() self.addCleanup(os.close, fds[0]) self.addCleanup(os.close, fds[1]) open_fds = set(fds) # add a bunch more fds for _ in range(9): fd = os.open(os.devnull, os.O_RDONLY) self.addCleanup(os.close, fd) open_fds.add(fd) for fd in open_fds: os.set_inheritable(fd, True) p = subprocess.Popen([sys.executable, fd_status], stdout=subprocess.PIPE, close_fds=False) output, ignored = p.communicate() remaining_fds = set(map(int, output.split(b','))) self.assertEqual(remaining_fds & open_fds, open_fds, "Some fds were closed") p = subprocess.Popen([sys.executable, fd_status], stdout=subprocess.PIPE, close_fds=True) output, ignored = p.communicate() remaining_fds = set(map(int, output.split(b','))) self.assertFalse(remaining_fds & 
open_fds, "Some fds were left open") self.assertIn(1, remaining_fds, "Subprocess failed") # Keep some of the fd's we opened open in the subprocess. # This tests _posixsubprocess.c's proper handling of fds_to_keep. fds_to_keep = set(open_fds.pop() for _ in range(8)) p = subprocess.Popen([sys.executable, fd_status], stdout=subprocess.PIPE, close_fds=True, pass_fds=()) output, ignored = p.communicate() remaining_fds = set(map(int, output.split(b','))) self.assertFalse(remaining_fds & fds_to_keep & open_fds, "Some fds not in pass_fds were left open") self.assertIn(1, remaining_fds, "Subprocess failed") @unittest.skipIf(sys.platform.startswith("freebsd") and os.stat("/dev").st_dev == os.stat("/dev/fd").st_dev, "Requires fdescfs mounted on /dev/fd on FreeBSD.") def test_close_fds_when_max_fd_is_lowered(self): """Confirm that issue21618 is fixed (may fail under valgrind).""" fd_status = support.findfile("fd_status.py", subdir="subprocessdata") # This launches the meat of the test in a child process to # avoid messing with the larger unittest processes maximum # number of file descriptors. # This process launches: # +--> Process that lowers its RLIMIT_NOFILE aftr setting up # a bunch of high open fds above the new lower rlimit. # Those are reported via stdout before launching a new # process with close_fds=False to run the actual test: # +--> The TEST: This one launches a fd_status.py # subprocess with close_fds=True so we can find out if # any of the fds above the lowered rlimit are still open. p = subprocess.Popen([sys.executable, '-c', textwrap.dedent( ''' import os, resource, subprocess, sys, textwrap open_fds = set() # Add a bunch more fds to pass down. for _ in range(40): fd = os.open(os.devnull, os.O_RDONLY) open_fds.add(fd) # Leave a two pairs of low ones available for use by the # internal child error pipe and the stdout pipe. # We also leave 10 more open as some Python buildbots run into # "too many open files" errors during the test if we do not. 
for fd in sorted(open_fds)[:14]: os.close(fd) open_fds.remove(fd) for fd in open_fds: #self.addCleanup(os.close, fd) os.set_inheritable(fd, True) max_fd_open = max(open_fds) # Communicate the open_fds to the parent unittest.TestCase process. print(','.join(map(str, sorted(open_fds)))) sys.stdout.flush() rlim_cur, rlim_max = resource.getrlimit(resource.RLIMIT_NOFILE) try: # 29 is lower than the highest fds we are leaving open. resource.setrlimit(resource.RLIMIT_NOFILE, (29, rlim_max)) # Launch a new Python interpreter with our low fd rlim_cur that # inherits open fds above that limit. It then uses subprocess # with close_fds=True to get a report of open fds in the child. # An explicit list of fds to check is passed to fd_status.py as # letting fd_status rely on its default logic would miss the # fds above rlim_cur as it normally only checks up to that limit. subprocess.Popen( [sys.executable, '-c', textwrap.dedent(""" import subprocess, sys subprocess.Popen([sys.executable, %r] + [str(x) for x in range({max_fd})], close_fds=True).wait() """.format(max_fd=max_fd_open+1))], close_fds=False).wait() finally: resource.setrlimit(resource.RLIMIT_NOFILE, (rlim_cur, rlim_max)) ''' % fd_status)], stdout=subprocess.PIPE) output, unused_stderr = p.communicate() output_lines = output.splitlines() self.assertEqual(len(output_lines), 2, msg="expected exactly two lines of output:\n%r" % output) opened_fds = set(map(int, output_lines[0].strip().split(b','))) remaining_fds = set(map(int, output_lines[1].strip().split(b','))) self.assertFalse(remaining_fds & opened_fds, msg="Some fds were left open.") # Mac OS X Tiger (10.4) has a kernel bug: sometimes, the file # descriptor of a pipe closed in the parent process is valid in the # child process according to fstat(), but the mode of the file # descriptor is invalid, and read or write raise an error. 
@support.requires_mac_ver(10, 5) def test_pass_fds(self): fd_status = support.findfile("fd_status.py", subdir="subprocessdata") open_fds = set() for x in range(5): fds = os.pipe() self.addCleanup(os.close, fds[0]) self.addCleanup(os.close, fds[1]) os.set_inheritable(fds[0], True) os.set_inheritable(fds[1], True) open_fds.update(fds) for fd in open_fds: p = subprocess.Popen([sys.executable, fd_status], stdout=subprocess.PIPE, close_fds=True, pass_fds=(fd, )) output, ignored = p.communicate() remaining_fds = set(map(int, output.split(b','))) to_be_closed = open_fds - {fd} self.assertIn(fd, remaining_fds, "fd to be passed not passed") self.assertFalse(remaining_fds & to_be_closed, "fd to be closed passed") # pass_fds overrides close_fds with a warning. with self.assertWarns(RuntimeWarning) as context: self.assertFalse(subprocess.call( [sys.executable, "-c", "import sys; sys.exit(0)"], close_fds=False, pass_fds=(fd, ))) self.assertIn('overriding close_fds', str(context.warning)) def test_pass_fds_inheritable(self): script = support.findfile("fd_status.py", subdir="subprocessdata") inheritable, non_inheritable = os.pipe() self.addCleanup(os.close, inheritable) self.addCleanup(os.close, non_inheritable) os.set_inheritable(inheritable, True) os.set_inheritable(non_inheritable, False) pass_fds = (inheritable, non_inheritable) args = [sys.executable, script] args += list(map(str, pass_fds)) p = subprocess.Popen(args, stdout=subprocess.PIPE, close_fds=True, pass_fds=pass_fds) output, ignored = p.communicate() fds = set(map(int, output.split(b','))) # the inheritable file descriptor must be inherited, so its inheritable # flag must be set in the child process after fork() and before exec() self.assertEqual(fds, set(pass_fds), "output=%a" % output) # inheritable flag must not be changed in the parent process self.assertEqual(os.get_inheritable(inheritable), True) self.assertEqual(os.get_inheritable(non_inheritable), False) def test_stdout_stdin_are_single_inout_fd(self): with 
io.open(os.devnull, "r+") as inout: p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"], stdout=inout, stdin=inout) p.wait() def test_stdout_stderr_are_single_inout_fd(self): with io.open(os.devnull, "r+") as inout: p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"], stdout=inout, stderr=inout) p.wait() def test_stderr_stdin_are_single_inout_fd(self): with io.open(os.devnull, "r+") as inout: p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"], stderr=inout, stdin=inout) p.wait() def test_wait_when_sigchild_ignored(self): # NOTE: sigchild_ignore.py may not be an effective test on all OSes. sigchild_ignore = support.findfile("sigchild_ignore.py", subdir="subprocessdata") p = subprocess.Popen([sys.executable, sigchild_ignore], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() self.assertEqual(0, p.returncode, "sigchild_ignore.py exited" " non-zero with this error:\n%s" % stderr.decode('utf-8')) def test_select_unbuffered(self): # Issue #11459: bufsize=0 should really set the pipes as # unbuffered (and therefore let select() work properly). select = support.import_module("select") p = subprocess.Popen([sys.executable, "-c", 'import sys;' 'sys.stdout.write("apple")'], stdout=subprocess.PIPE, bufsize=0) f = p.stdout self.addCleanup(f.close) try: self.assertEqual(f.read(4), b"appl") self.assertIn(f, select.select([f], [], [], 0.0)[0]) finally: p.wait() def test_zombie_fast_process_del(self): # Issue #12650: on Unix, if Popen.__del__() was called before the # process exited, it wouldn't be added to subprocess._active, and would # remain a zombie. 
# spawn a Popen, and delete its reference before it exits p = subprocess.Popen([sys.executable, "-c", 'import sys, time;' 'time.sleep(0.2)'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) ident = id(p) pid = p.pid with support.check_warnings(('', ResourceWarning)): p = None # check that p is in the active processes list self.assertIn(ident, [id(o) for o in subprocess._active]) def test_leak_fast_process_del_killed(self): # Issue #12650: on Unix, if Popen.__del__() was called before the # process exited, and the process got killed by a signal, it would never # be removed from subprocess._active, which triggered a FD and memory # leak. # spawn a Popen, delete its reference and kill it p = subprocess.Popen([sys.executable, "-c", 'import time;' 'time.sleep(3)'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) ident = id(p) pid = p.pid with support.check_warnings(('', ResourceWarning)): p = None os.kill(pid, signal.SIGKILL) # check that p is in the active processes list self.assertIn(ident, [id(o) for o in subprocess._active]) # let some time for the process to exit, and create a new Popen: this # should trigger the wait() of p time.sleep(0.2) with self.assertRaises(OSError) as c: with subprocess.Popen(['nonexisting_i_hope'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) as proc: pass # p should have been wait()ed on, and removed from the _active list self.assertRaises(OSError, os.waitpid, pid, 0) self.assertNotIn(ident, [id(o) for o in subprocess._active]) def test_close_fds_after_preexec(self): fd_status = support.findfile("fd_status.py", subdir="subprocessdata") # this FD is used as dup2() target by preexec_fn, and should be closed # in the child process fd = os.dup(1) self.addCleanup(os.close, fd) p = subprocess.Popen([sys.executable, fd_status], stdout=subprocess.PIPE, close_fds=True, preexec_fn=lambda: os.dup2(1, fd)) output, ignored 
= p.communicate() remaining_fds = set(map(int, output.split(b','))) self.assertNotIn(fd, remaining_fds) @support.cpython_only def test_fork_exec(self): # Issue #22290: fork_exec() must not crash on memory allocation failure # or other errors import _posixsubprocess gc_enabled = gc.isenabled() try: # Use a preexec function and enable the garbage collector # to force fork_exec() to re-enable the garbage collector # on error. func = lambda: None gc.enable() for args, exe_list, cwd, env_list in ( (123, [b"exe"], None, [b"env"]), ([b"arg"], 123, None, [b"env"]), ([b"arg"], [b"exe"], 123, [b"env"]), ([b"arg"], [b"exe"], None, 123), ): with self.assertRaises(TypeError): _posixsubprocess.fork_exec( args, exe_list, True, [], cwd, env_list, -1, -1, -1, -1, 1, 2, 3, 4, True, True, func) finally: if not gc_enabled: gc.disable() @support.cpython_only def test_fork_exec_sorted_fd_sanity_check(self): # Issue #23564: sanity check the fork_exec() fds_to_keep sanity check. import _posixsubprocess gc_enabled = gc.isenabled() try: gc.enable() for fds_to_keep in ( (-1, 2, 3, 4, 5), # Negative number. ('str', 4), # Not an int. (18, 23, 42, 2**63), # Out of range. (5, 4), # Not sorted. (6, 7, 7, 8), # Duplicate. ): with self.assertRaises( ValueError, msg='fds_to_keep={}'.format(fds_to_keep)) as c: _posixsubprocess.fork_exec( [b"false"], [b"false"], True, fds_to_keep, None, [b"env"], -1, -1, -1, -1, 1, 2, 3, 4, True, True, None) self.assertIn('fds_to_keep', str(c.exception)) finally: if not gc_enabled: gc.disable() def test_communicate_BrokenPipeError_stdin_close(self): # By not setting stdout or stderr or a timeout we force the fast path # that just calls _stdin_write() internally due to our mock. proc = subprocess.Popen([sys.executable, '-c', 'pass']) with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin: mock_proc_stdin.close.side_effect = BrokenPipeError proc.communicate() # Should swallow BrokenPipeError from close. 
mock_proc_stdin.close.assert_called_with() def test_communicate_BrokenPipeError_stdin_write(self): # By not setting stdout or stderr or a timeout we force the fast path # that just calls _stdin_write() internally due to our mock. proc = subprocess.Popen([sys.executable, '-c', 'pass']) with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin: mock_proc_stdin.write.side_effect = BrokenPipeError proc.communicate(b'stuff') # Should swallow the BrokenPipeError. mock_proc_stdin.write.assert_called_once_with(b'stuff') mock_proc_stdin.close.assert_called_once_with() def test_communicate_BrokenPipeError_stdin_flush(self): # Setting stdin and stdout forces the ._communicate() code path. # python -h exits faster than python -c pass (but spams stdout). proc = subprocess.Popen([sys.executable, '-h'], stdin=subprocess.PIPE, stdout=subprocess.PIPE) with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin, \ open(os.devnull, 'wb') as dev_null: mock_proc_stdin.flush.side_effect = BrokenPipeError # because _communicate registers a selector using proc.stdin... mock_proc_stdin.fileno.return_value = dev_null.fileno() # _communicate() should swallow BrokenPipeError from flush. proc.communicate(b'stuff') mock_proc_stdin.flush.assert_called_once_with() def test_communicate_BrokenPipeError_stdin_close_with_timeout(self): # Setting stdin and stdout forces the ._communicate() code path. # python -h exits faster than python -c pass (but spams stdout). proc = subprocess.Popen([sys.executable, '-h'], stdin=subprocess.PIPE, stdout=subprocess.PIPE) with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin: mock_proc_stdin.close.side_effect = BrokenPipeError # _communicate() should swallow BrokenPipeError from close. 
proc.communicate(timeout=999) mock_proc_stdin.close.assert_called_once_with() @unittest.skipUnless(mswindows, "Windows specific tests") class Win32ProcessTestCase(BaseTestCase): def test_startupinfo(self): # startupinfo argument # We uses hardcoded constants, because we do not want to # depend on win32all. STARTF_USESHOWWINDOW = 1 SW_MAXIMIZE = 3 startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags = STARTF_USESHOWWINDOW startupinfo.wShowWindow = SW_MAXIMIZE # Since Python is a console process, it won't be affected # by wShowWindow, but the argument should be silently # ignored subprocess.call([sys.executable, "-c", "import sys; sys.exit(0)"], startupinfo=startupinfo) def test_creationflags(self): # creationflags argument CREATE_NEW_CONSOLE = 16 sys.stderr.write(" a DOS box should flash briefly ...\n") subprocess.call(sys.executable + ' -c "import time; time.sleep(0.25)"', creationflags=CREATE_NEW_CONSOLE) def test_invalid_args(self): # invalid arguments should raise ValueError self.assertRaises(ValueError, subprocess.call, [sys.executable, "-c", "import sys; sys.exit(47)"], preexec_fn=lambda: 1) self.assertRaises(ValueError, subprocess.call, [sys.executable, "-c", "import sys; sys.exit(47)"], stdout=subprocess.PIPE, close_fds=True) def test_close_fds(self): # close file descriptors rc = subprocess.call([sys.executable, "-c", "import sys; sys.exit(47)"], close_fds=True) self.assertEqual(rc, 47) def test_shell_sequence(self): # Run command through the shell (sequence) newenv = os.environ.copy() newenv["FRUIT"] = "physalis" p = subprocess.Popen(["set"], shell=1, stdout=subprocess.PIPE, env=newenv) with p: self.assertIn(b"physalis", p.stdout.read()) def test_shell_string(self): # Run command through the shell (string) newenv = os.environ.copy() newenv["FRUIT"] = "physalis" p = subprocess.Popen("set", shell=1, stdout=subprocess.PIPE, env=newenv) with p: self.assertIn(b"physalis", p.stdout.read()) def test_shell_encodings(self): # Run command through the shell 
(string) for enc in ['ansi', 'oem']: newenv = os.environ.copy() newenv["FRUIT"] = "physalis" p = subprocess.Popen("set", shell=1, stdout=subprocess.PIPE, env=newenv, encoding=enc) with p: self.assertIn("physalis", p.stdout.read(), enc) def test_call_string(self): # call() function with string argument on Windows rc = subprocess.call(sys.executable + ' -c "import sys; sys.exit(47)"') self.assertEqual(rc, 47) def _kill_process(self, method, *args): # Some win32 buildbot raises EOFError if stdin is inherited p = subprocess.Popen([sys.executable, "-c", """if 1: import sys, time sys.stdout.write('x\\n') sys.stdout.flush() time.sleep(30) """], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) with p: # Wait for the interpreter to be completely initialized before # sending any signal. p.stdout.read(1) getattr(p, method)(*args) _, stderr = p.communicate() self.assertStderrEqual(stderr, b'') returncode = p.wait() self.assertNotEqual(returncode, 0) def _kill_dead_process(self, method, *args): p = subprocess.Popen([sys.executable, "-c", """if 1: import sys, time sys.stdout.write('x\\n') sys.stdout.flush() sys.exit(42) """], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) with p: # Wait for the interpreter to be completely initialized before # sending any signal. 
p.stdout.read(1) # The process should end after this time.sleep(1) # This shouldn't raise even though the child is now dead getattr(p, method)(*args) _, stderr = p.communicate() self.assertStderrEqual(stderr, b'') rc = p.wait() self.assertEqual(rc, 42) def test_send_signal(self): self._kill_process('send_signal', signal.SIGTERM) def test_kill(self): self._kill_process('kill') def test_terminate(self): self._kill_process('terminate') def test_send_signal_dead(self): self._kill_dead_process('send_signal', signal.SIGTERM) def test_kill_dead(self): self._kill_dead_process('kill') def test_terminate_dead(self): self._kill_dead_process('terminate') class MiscTests(unittest.TestCase): def test_getoutput(self): self.assertEqual(subprocess.getoutput('echo xyzzy'), 'xyzzy') self.assertEqual(subprocess.getstatusoutput('echo xyzzy'), (0, 'xyzzy')) # we use mkdtemp in the next line to create an empty directory # under our exclusive control; from that, we can invent a pathname # that we _know_ won't exist. This is guaranteed to fail. 
dir = None try: dir = tempfile.mkdtemp() name = os.path.join(dir, "foo") status, output = subprocess.getstatusoutput( ("type " if mswindows else "cat ") + name) self.assertNotEqual(status, 0) finally: if dir is not None: os.rmdir(dir) def test__all__(self): """Ensure that __all__ is populated properly.""" intentionally_excluded = {"list2cmdline", "Handle"} exported = set(subprocess.__all__) possible_exports = set() import types for name, value in subprocess.__dict__.items(): if name.startswith('_'): continue if isinstance(value, (types.ModuleType,)): continue possible_exports.add(name) self.assertEqual(exported, possible_exports - intentionally_excluded) @unittest.skipUnless(hasattr(selectors, 'PollSelector'), "Test needs selectors.PollSelector") class ProcessTestCaseNoPoll(ProcessTestCase): def setUp(self): self.orig_selector = subprocess._PopenSelector subprocess._PopenSelector = selectors.SelectSelector ProcessTestCase.setUp(self) def tearDown(self): subprocess._PopenSelector = self.orig_selector ProcessTestCase.tearDown(self) @unittest.skipUnless(mswindows, "Windows-specific tests") class CommandsWithSpaces (BaseTestCase): def setUp(self): super().setUp() f, fname = tempfile.mkstemp(".py", "te st") self.fname = fname.lower () os.write(f, b"import sys;" b"sys.stdout.write('%d %s' % (len(sys.argv), [a.lower () for a in sys.argv]))" ) os.close(f) def tearDown(self): os.remove(self.fname) super().tearDown() def with_spaces(self, *args, **kwargs): kwargs['stdout'] = subprocess.PIPE p = subprocess.Popen(*args, **kwargs) with p: self.assertEqual( p.stdout.read ().decode("mbcs"), "2 [%r, 'ab cd']" % self.fname ) def test_shell_string_with_spaces(self): # call() function with string argument with spaces on Windows self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname, "ab cd"), shell=1) def test_shell_sequence_with_spaces(self): # call() function with sequence argument with spaces on Windows self.with_spaces([sys.executable, self.fname, "ab cd"], shell=1) def 
test_noshell_string_with_spaces(self): # call() function with string argument with spaces on Windows self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname, "ab cd")) def test_noshell_sequence_with_spaces(self): # call() function with sequence argument with spaces on Windows self.with_spaces([sys.executable, self.fname, "ab cd"]) class ContextManagerTests(BaseTestCase): def test_pipe(self): with subprocess.Popen([sys.executable, "-c", "import sys;" "sys.stdout.write('stdout');" "sys.stderr.write('stderr');"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) as proc: self.assertEqual(proc.stdout.read(), b"stdout") self.assertStderrEqual(proc.stderr.read(), b"stderr") self.assertTrue(proc.stdout.closed) self.assertTrue(proc.stderr.closed) def test_returncode(self): with subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(100)"]) as proc: pass # __exit__ calls wait(), so the returncode should be set self.assertEqual(proc.returncode, 100) def test_communicate_stdin(self): with subprocess.Popen([sys.executable, "-c", "import sys;" "sys.exit(sys.stdin.read() == 'context')"], stdin=subprocess.PIPE) as proc: proc.communicate(b"context") self.assertEqual(proc.returncode, 1) def test_invalid_args(self): with self.assertRaises(FileNotFoundError) as c: with subprocess.Popen(['nonexisting_i_hope'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) as proc: pass def test_broken_pipe_cleanup(self): """Broken pipe error should not prevent wait() (Issue 21619)""" proc = subprocess.Popen([sys.executable, '-c', 'pass'], stdin=subprocess.PIPE, bufsize=support.PIPE_MAX_SIZE*2) proc = proc.__enter__() # Prepare to send enough data to overflow any OS pipe buffering and # guarantee a broken pipe error. Data is held in BufferedWriter # buffer until closed. 
proc.stdin.write(b'x' * support.PIPE_MAX_SIZE) self.assertIsNone(proc.returncode) # EPIPE expected under POSIX; EINVAL under Windows self.assertRaises(OSError, proc.__exit__, None, None, None) self.assertEqual(proc.returncode, 0) self.assertTrue(proc.stdin.closed) def test_main(): unit_tests = (ProcessTestCase, POSIXProcessTestCase, Win32ProcessTestCase, MiscTests, ProcessTestCaseNoPoll, CommandsWithSpaces, ContextManagerTests, RunFuncTestCase, ) support.run_unittest(*unit_tests) support.reap_children() if __name__ == "__main__": unittest.main()
codeparrot/github-code-clean
# -*- coding: utf-8 -*- #!/usr/bin/env python # # Inventory # import wx import wx.grid as gridlib import sys,re import faulthandler import json import time import numpy import xml.etree.cElementTree as ET import wx.lib.masked as masked import datetime import pout import handy_utils as HUD from button_stuff import ButtonOps from decimal import Decimal, ROUND_HALF_UP from db_related import SQConnect, LookupDB from var_operations import VarOps, LoadSaveList global debug debug = False class MainOptionsTab(wx.Panel): def __init__(self, *args, **kwargs): """""" wx.Panel.__init__(self, *args, **kwargs) self.SetName('MainOptionsTab') firstSizer = wx.BoxSizer(wx.VERTICAL) secondlevelSizer = wx.BoxSizer(wx.HORIZONTAL) combochoice_list = [('Department', 'genOptions_department_combobox', 'department'), ('Category', 'genOptions_category_combobox', 'category'), ('Sub-Category', 'genOptions_subcategory_combobox', 'subcategory'), ('Material', 'genOptions_location_combobox','location'), ('Unit Type', 'genOptions_unittype_combobox','unit_type'), ('G/L Post', 'genOptions_glpost_txtctrl', 'postacct')] for label, name, field in combochoice_list: box = wx.StaticBox(self, label=label) boxSizer = wx.StaticBoxSizer(box, wx.HORIZONTAL) if 'combobox' in name: ctrl = HUD.RH_ComboBox(self, name=name, choices=[' ',], style=wx.CB_SORT) if 'txtctrl' in name: ctrl = HUD.RH_MTextCtrl(self, -1, size=(65, -1), mask='###-###', name=name) ctrl.tableName = 'item_options' ctrl.fieldName = field boxSizer.Add(ctrl, 0, wx.ALL,3) secondlevelSizer.Add(boxSizer, 0, wx.ALL|wx.EXPAND, 3) thirdlevelSizer = wx.BoxSizer(wx.HORIZONTAL) ItemTypeChoices = ['Controlled', 'Non-Controlled', 'Matrixed', 'BOM', 'Tag Along', 'Serial No. 
Track', 'Mfg Coupon'] ctrl = HUD.RH_RadioBox(self, choices=ItemTypeChoices, label='Item Type', name="genOptions_itemType_radiobox", style=wx.RA_SPECIFY_COLS) for i in range(2,7): ctrl.EnableItem(i,enable=False) ctrl.tableName = 'item_options' ctrl.fieldName = 'item_type' thirdlevelSizer.Add(ctrl, 0, wx.ALL|wx.EXPAND, 3) box = wx.StaticBox(self, label="Age") sizer = wx.StaticBoxSizer(box, wx.HORIZONTAL) ctrl = HUD.RH_NumCtrl(self, -1, value=0, name="genOptions_agepopup_numctrl", integerWidth=2, fractionWidth=0) ctrl.tableName = 'item_options' ctrl.fieldName = 'agepopup' sizer.Add(ctrl, 0, wx.ALL|wx.EXPAND, 3) thirdlevelSizer.Add(sizer, 0, wx.ALL|wx.EXPAND, 3) fourthlevelSizer = wx.BoxSizer(wx.HORIZONTAL) POSOptionsChoices = ['Prompt for Quantity', 'Assume 1 Sold', 'Prompt for Price - Quantity Calculated'] ctrl = HUD.RH_RadioBox(self, -1, choices=POSOptionsChoices, label='POS Options', name="genOptions_POSoptions_radiobox", style=wx.RA_SPECIFY_COLS) ctrl.tableName = 'item_options' ctrl.fieldName = 'pos_options' fourthlevelSizer.Add(ctrl, 0, wx.ALL|wx.EXPAND, 3) fifthlevelSizer = wx.BoxSizer(wx.HORIZONTAL) txt = wx.StaticText(self, -1, label="Units\nin Package") ctrl = HUD.RH_NumCtrl(self, -1, value=1, name='genOptions_units_in_package_numctrl', min=1, selectOnEntry=True, style=0, integerWidth=6, fractionWidth=0) ctrl.tableName = 'item_options' ctrl.fieldName = 'unitsinpackage' ctrl.Bind(wx.EVT_KILL_FOCUS, self.NeverZero) fifthlevelSizer.Add(ctrl, 0, wx.ALL|wx.EXPAND, 5) fifthlevelSizer.Add(txt, 0, wx.ALL|wx.EXPAND, 5) flc_list = [('Food Stamp Exempt','genOptions_foodStampExempt_checkbox','foodstampexempt'), ('Loyalty Exempt','genOptions_loyaltyExempt_checkbox','loyaltyexempt'), ('Consignment','genOptions_consignment_checkbox','consignment'), ('Closeout','genOptions_closeout_checkbox','closeout')] for label, name, field in flc_list: cb = HUD.RH_CheckBox(self, label=label, name=name) cb.tableName = 'item_options' cb.fieldName = field fifthlevelSizer.Add(cb, 0, 
wx.ALL|wx.EXPAND, 5) if 'foodstampexempt' in name: cb.SetValue(True) sixthlevelSizer = wx.BoxSizer(wx.HORIZONTAL) lvl6_list = [('Part Number','genOptions_partNumber_txtctrl', 140, 'item_detailed', 'part_num'), ('OEM Part Number','genOptions_oemNumber_txtctrl', 140, 'item_detailed', 'oempart_num'), ('Aisle #','genOptions_aisleNums_combobox', 50, 'item_detailed', 'aisle_num'), ('4ft Section #','genOptions_sectionNums_combobox', 80,'item_options', 'section_num'), ('Extra Location','genOptions_extraPlaces_combobox', 140, 'item_options', 'extra_places')] for label, name, sized, table, field in lvl6_list: box = wx.StaticBox(self, label=label) boxSizer = wx.StaticBoxSizer(box, wx.HORIZONTAL) if 'txtctrl' in name: ctrl = HUD.RH_TextCtrl(self, -1, size = (sized,-1), name = name) if 'combobox' in name: ctrl = HUD.RH_ComboBox(self, -1, choices = [], name = name, size = (sized, -1)) ctrl.Disable() if 'aisleNums' in name: ctrl.Bind(wx.EVT_COMBOBOX, self.onAisleNum) ctrl.Enable() ctrl.tableName = table ctrl.fieldName = field boxSizer.Add(ctrl, 0, wx.ALL|wx.EXPAND, 3) sixthlevelSizer.Add(boxSizer, 0, wx.ALL|wx.EXPAND, 5) lvl6b_Sizer = wx.BoxSizer(wx.HORIZONTAL) lvl6b_list = [('Kit Number','genOptions_kitNumber_txtctrl', 'item_detailed','kit_num'), ('Pieces','genOptions_kitPieces_numctrl', 'item_detailed','kit_pieces')] box = wx.StaticBox(self, label='Bin Kits') boxSizer = wx.StaticBoxSizer(box, wx.HORIZONTAL) for label, name, table, field in lvl6b_list: text = wx.StaticText(self, label=label) if 'txtctrl' in name: ctrl = HUD.RH_TextCtrl(self, -1, name=name) if 'numctrl' in name: ctrl = HUD.RH_NumCtrl(self, -1, name = name, integerWidth = 3, fractionWidth = 0) ctrl.tableName = table ctrl.fieldName = field boxSizer.Add(text, 0, wx.ALL, 3) boxSizer.Add(ctrl, 0, wx.ALL, 3) lvl6b_Sizer.Add(boxSizer, 0) seventhlevelSizer = wx.BoxSizer(wx.HORIZONTAL) ctrl = HUD.RH_CheckBox(self, -1, label="Deactivated", name="genOptions_deactivated_checkbox") ctrl.tableName = 'item_options' 
ctrl.fieldName = 'deactived' ctrl.SetForegroundColour('Red') seventhlevelSizer.Add(ctrl, 0) numberofitemstxt = wx.StaticText(self, -1, label="Number Here", name='genOptions_numberofitems_text') returnd = HUD.QueryOps().QueryCheck('item_detailed') numofItems = '{:,}'.format(returnd) inventoryCount = "Number of Items : {}\t".format(str(numofItems)) numberofitemstxt.SetLabel(inventoryCount) seventhlevelSizer.AddStretchSpacer( prop=3) seventhlevelSizer.Add(numberofitemstxt, 0, wx.ALL|wx.EXPAND, 15) firstSizer.Add(secondlevelSizer, 0, wx.ALL|wx.EXPAND, 5) firstSizer.Add(thirdlevelSizer, 0, wx.ALL|wx.EXPAND, 5) firstSizer.Add(fourthlevelSizer, 0, wx.ALL|wx.EXPAND, 5) firstSizer.Add(fifthlevelSizer, 0, wx.ALL|wx.EXPAND, 5) firstSizer.Add(sixthlevelSizer, 0, wx.ALL|wx.EXPAND, 5) firstSizer.Add(lvl6b_Sizer, 0, wx.ALL|wx.EXPAND, 5) firstSizer.AddStretchSpacer(prop=1) firstSizer.Add(seventhlevelSizer, 1, wx.ALL|wx.EXPAND, 10) self.SetSizer(firstSizer) firstSizer.Fit(self) # self.Layout() wx.CallAfter(self.LoadDefaults, event='') def NeverZero(self, event): """ Field can never be set to zero """ valued = event.GetEventObject() raw_value = valued.GetValue() named = valued.GetName() edit_ctrl = wx.FindWindowByName(named) if raw_value == 0: new_value = 1 else: new_value = raw_value if re.search('(txtctrl|numctrl)', named, re.I): edit_ctrl.SetValue(new_value) def onAisleNum(self, event): obj = event.GetEventObject() name = obj.GetName() value = wx.FindWindowByName(name).GetValue() sectionNums = wx.FindWindowByName('genOptions_sectionNums_combobox') locations = wx.FindWindowByName('genOptions_extraPlaces_combobox') if value != '0': sectionNums.Enable() locations.Disable() locations.SetValue('') else: locations.Enable() sectionNums.Disable() sectionNums.SetValue('') def LoadDefaults(self, event): DeptCat_list = [('department', 'genOptions_department_combobox'), ('category', 'genOptions_category_combobox'), ('subcategory', 'genOptions_subcategory_combobox'), ('location', 
'genOptions_location_combobox'), ('unittype', 'genOptions_unittype_combobox'), ('extra_places', 'genOptions_extraPlaces_combobox'), ('num_of_aisles', 'genOptions_aisleNums_combobox'), ('num_of_sections', 'genOptions_sectionNums_combobox')] for field, name in DeptCat_list: print(f'Field : {field} ; Name : {name}') item = wx.Window.FindWindowByName(name) item.LoadDefaults('organizations', field, 'abuser', 'rhp') def OnSave(self): upc = wx.FindWindowByName('inventory_itemNumber_txtctrl').GetCtrl() genOpts_list = ['genOptions_department_combobox', 'genOptions_category_combobox', 'genOptions_subcategory_combobox', 'genOptions_location_combobox', 'genOptions_glpost_txtctrl', 'genOptions_unittype_combobox', 'genOptions_itemType_radiobox', 'genOptions_agepopup_numctrl', 'genOptions_POSoptions_radiobox', 'genOptions_units_in_package_numctrl', 'genOptions_foodStampExempt_checkbox', 'genOptions_loyaltyExempt_checkbox', 'genOptions_consignment_checkbox', 'genOptions_partNumber_txtctrl', 'genOptions_oemNumber_txtctrl', 'genOptions_deactivated_checkbox'] for name, table, field in genOpts_list: item = wx.Window.FindWindowByName(name) item.OnSave('upc', upc) def Clear(self): genOptions_default_list = ['genOptions_department_combobox', 'genOptions_category_combobox', 'genOptions_subcategory_combobox', 'genOptions_location_combobox', 'genOptions_glpost_txtctrl', 'genOptions_unittype_combobox', 'genOptions_itemType_radiobox', 'genOptions_agepopup_numctrl', 'genOptions_POSoptions_radiobox', 'genOptions_units_in_package_numctrl', 'genOptions_foodStampExempt_checkbox', 'genOptions_loyaltyExempt_checkbox', 'genOptions_consignment_checkbox', 'genOptions_partNumber_txtctrl', 'genOptions_oemNumber_txtctrl', 'genOptions_deactivated_checkbox'] for name in genOptions_default_list: item = wx.FindWindowByName(name) item.Clear() class page_item_detail(wx.Panel): def __init__(self, *args, **kwargs): """""" wx.Panel.__init__(self, *args, **kwargs) self.SetName('PageItemDetailTab') MainSizer = 
wx.BoxSizer(wx.VERTICAL) level1Sizer = wx.BoxSizer(wx.HORIZONTAL) level1Col2Sizer = wx.BoxSizer(wx.VERTICAL) # altlookupBox = wx.StaticBox(self, label="Alt Lookup") # altlookupBoxSizer = wx.StaticBoxSizer(altlookupBox, wx.HORIZONTAL) # altlookup = HUD.AltLookup(self) # # altlookupBoxSizer.Add(altlookup, 0) # level1Sizer.Add(altlookupBoxSizer, 0) level1Col3Sizer = wx.BoxSizer(wx.VERTICAL) level1Col3Sizer.Add((20,20),1) lockdisc_list = [('Do Not Discount','details_donotdiscount_checkbox','item_detailed2', 'do_not_discount')] for label,name, table, field in lockdisc_list: ctrl = HUD.RH_CheckBox(self, label=label, name=name) ctrl.tableName = table ctrl.fieldName = field level1Col3Sizer.Add(ctrl, 1) level1Sizer.Add(level1Col3Sizer, 1) box = wx.StaticBox(self, label="Tax Level Exemptions") boxSizer = wx.StaticBoxSizer(box, wx.HORIZONTAL) taxlvl_list =['1','2','3','4','never'] for item in taxlvl_list: taxName = 'details_taxlvl_{}_checkbox'.format(item) ctrl = HUD.RH_CheckBox(self, label=item, name=taxName) ctrl.tableName = 'item_detailed2' if 'never' in item: item = '_{}'.format(item) ctrl.fieldName = 'tax{}'.format(item) boxSizer.Add(ctrl, 0) level1Col4Sizer = wx.BoxSizer(wx.VERTICAL) level1Col4Sizer.Add((20,20),0) level1Col4Sizer.Add(boxSizer, 1) level1Sizer.Add((20,20),0) level1Sizer.Add(level1Col4Sizer, 1) txt = wx.StaticText(self, label="Tax Override") ctrl = HUD.RH_NumCtrl(self, -1, value=0, name='details_taxoverride_numctrl', integerWidth=4, fractionWidth=4) ctrl.tableName = 'item_detailed2' ctrl.fieldName = 'override_tax_rate' level1Col4Sizer.Add(txt, 0, wx.ALIGN_CENTER) level1Col4Sizer.Add(ctrl, 0, wx.ALIGN_CENTER) level1Col5Sizer = wx.BoxSizer(wx.VERTICAL) SalesReviewButton = wx.Button(self, -1, label="Sales Purchase\n\nReview", size=(140,60), name='details_salesReview_button') level1Col5Sizer.Add((20,20),1) level1Col5Sizer.Add(SalesReviewButton, 0) level1Sizer.Add(level1Col5Sizer, 1) level1Col6Sizer = wx.BoxSizer(wx.VERTICAL) ctrl = HUD.AltLookup(self, 
lbsize=(150,120), lbname='details_altlookup_listbox', tableName='item_detailed', fieldName='altlookup', boxlabel='Alt Lookups') level1Sizer.Add(ctrl, 0) level2Sizer = wx.BoxSizer(wx.HORIZONTAL) costd_list = [('Average Cost','details_avgcost_numctrl', 'item_detailed','avg_cost'), ('Last Cost','details_lastcost_numctrl', 'item_detailed','last_cost'), ('Starting Margin','details_startingMargin_numctrl','item_margin','general_margin')] for label,name, table, field in costd_list: cost_text = wx.StaticText(self, -1, label=label+":") ctrl = HUD.RH_NumCtrl(self, -1, value=0, name=name, integerWidth=6, fractionWidth=2) ctrl.tableName = table ctrl.fieldName = field ctrl.SetValue('0.000') level2Sizer.Add(cost_text, 0, wx.ALL,2) level2Sizer.Add(ctrl, 0, wx.ALL,2) level3Sizer = wx.BoxSizer(wx.HORIZONTAL) level3ColSub1Sizer = wx.BoxSizer(wx.VERTICAL) # query = "SELECT name,scheme_list,reduce_by from item_pricing_schemes" # returnd = HUD.SQConnect(query, '').ALL() returnd = LookupDB('item_pricing_schemes').General('name, scheme_list, reduce_by') pout.v(returnd) priceschema_list = returnd box = wx.StaticBox(self, label="Pricing\nSchemes") boxSizer = wx.StaticBoxSizer(box, wx.VERTICAL) boxSizer.Add((20,20), 0) xx=0 rb = wx.Button(self, id=wx.ID_ANY, label="RESET", name="details_pricescema_RESET_button") rb.Bind(wx.EVT_BUTTON, self.OnPriceSchemes) boxSizer.Add(rb, 0) print(f'Returnd : {returnd}') for i in returnd: print(i) for label, scheme_list, reduce_by in returnd: rb = HUD.RH_Button(self, id=wx.ID_ANY, label=label, name=scheme_list) rb.Bind(wx.EVT_BUTTON, self.OnPriceSchemes) boxSizer.Add(rb, 0) xx+=1 level3ColSub1Sizer.Add(boxSizer, 0) level3Col1Sizer = wx.BoxSizer(wx.VERTICAL) # checking noPenny status & rounding type self.noPenny, self.rndScheme = False, '3' try: #(self.noPenny, self.rndScheme) = LookupDB('tax_tables').Specific('TAX','tax_name','no_pennies_rounding, RNDscheme') returnd = LookupDB('tax_tables').Specific('TAX','tax_name','no_pennies_rounding, RNDscheme') 
pout.v(returnd) except: pout.v('Add Tax Info') exit() self.roundtype = {'1':'ROUND_DOWN','2':'ROUND_HALF_UP','3':'ROUND_UP'} if self.rndScheme == 0: self.rndScheme = 3 self.rnd = str(self.rndScheme) self.retail_grid = HUD.Retail_Grid(self, name='inv_details_cost_grid') level3Col1Sizer.Add(self.retail_grid, 0) level3Sizer.Add(level3ColSub1Sizer, 0) level3Sizer.Add((10,10),0) level3Sizer.Add(level3Col1Sizer, 0) level3Col2Sizer = wx.BoxSizer(wx.VERTICAL) SalePriceOption_text = wx.StaticText(self, label="Sales Price Option\nBuy X get Y at Sale Price") level3Col2Sizer.Add((20,20),0) #level3Col2Sizer.Add(PendingPriceChanges_button, 1) level3Col2Sizer.Add((20,20),0) level3Col2Sizer.Add(SalePriceOption_text, 1) buyget_Sizer = wx.BoxSizer(wx.HORIZONTAL) buyget_list = [('Buy','details_buy_numctrl','buyX'), ('Get','details_get_numctrl','getY')] for label,name, field in buyget_list: box = wx.StaticBox(self, label=label) boxSizer = wx.StaticBoxSizer(box, wx.VERTICAL) ctrl = HUD.RH_NumCtrl(self, -1, value=0, name=name, integerWidth=3, fractionWidth=0) ctrl.tableName = 'item_detailed2' ctrl.fieldName = field boxSizer.Add(ctrl, 0) buyget_Sizer.Add(boxSizer, 0) buyget_Sizer.Add((5,5), 0) level3Col2Sizer.Add(buyget_Sizer, 0) level3Sizer.Add((10,10),0) level3Sizer.Add(level3Col2Sizer, 0) level3Col3Sizer = wx.BoxSizer(wx.VERTICAL) order_grid = HUD.Order_Grid(self, name='inv_details_orderctrl_grid') order_grid.tableName = 'item_detailed2' order_grid.fieldName = 'orderctrl' level3Col3Sizer.Add(order_grid, 0) level3Sizer.Add(level3Col3Sizer, 0) self.eidx = 0 self.sidx = 0 level3Col4Sizer = wx.BoxSizer(wx.VERTICAL) box = wx.StaticBox(self, label="Sale Info", style=wx.TE_CENTER) boxSizer = wx.StaticBoxSizer(box, wx.VERTICAL) # ---- Date Controls on Item Detail(1) dateCtrls_list = [('details_saleDateBegin_datectrl', 'Begin', self.OnSaleBeginDateChange, 'item_detailed2','sale_begin'), ('details_saleDateEnd_datectrl',' End', self.OnSaleEndDateChange, 'item_detailed2', 'sale_end')] for 
dateName, label, handler, table, field in dateCtrls_list: saledate_Sizer = wx.BoxSizer(wx.HORIZONTAL) saledate_text = wx.StaticText(self, -1, label=label) datepicker = HUD.RH_DatePickerCtrl(self, name=dateName, style=wx.adv.DP_ALLOWNONE) datepicker.tableName = table datepicker.fieldName = field datepicker.Bind(wx.adv.EVT_DATE_CHANGED, handler) datepicker.Bind(wx.EVT_LEFT_DCLICK, self.onDoubleClick) datepicker.SetValue(wx.DateTime(1,1,1969)) saledate_Sizer.Add(saledate_text, 0) saledate_Sizer.Add((10,10),0) saledate_Sizer.Add(datepicker, 0) boxSizer.Add(saledate_Sizer, 0,wx.ALL,3) level3Col4Sizer.Add(boxSizer, 0) level3Sizer.Add((30,30),0) # ---- Time Controls on Item Detail(1) timectrls_list = [('details_saleTimeBegin_timectrl', 'Daily From: ', '12:00am',self.OnSaleBeginTimeChange, 'item_detailed2','sale_begin_time'), ('details_saleTimeEnd_timectrl', 'Daily To: ','11:59pm',self.OnSaleEndTimeChange, 'item_detailed2','sale_end_time')] for timeName, time_text, timectrl_value, handler, table, field in timectrls_list: saletime_Sizer = wx.BoxSizer(wx.HORIZONTAL) timeCtrl_text = wx.StaticText(self, -1, label=time_text) timeCtrl = HUD.RH_TimeCtrl(self,-1, display_seconds=False, fmt24hr=False, useFixedWidthFont=True, value=timectrl_value, pos=(250,70), name=timeName) timeCtrl.tableName = table timeCtrl.fieldName = field timeCtrl.Bind(wx.lib.masked.timectrl.EVT_TIMEUPDATE, handler) saletime_Sizer.Add(timeCtrl_text, 0,wx.ALL,3) saletime_Sizer.Add(timeCtrl, 0,wx.ALL,3) boxSizer.Add(saletime_Sizer, 0) level3Sizer.Add(level3Col4Sizer, 0) MainSizer.Add(level1Sizer, 0) MainSizer.Add((5,5),0) MainSizer.Add(level2Sizer, 0) MainSizer.Add((5,5),0) MainSizer.Add(level3Sizer, 0) self.SetSizer(MainSizer) def OnLoad(self): upc = wx.FindWindowByName('inventory_itemNumber_txtctrl').GetCtrl() details_default_list = ['details_altlookup_listbox', 'details_altlookup_listbox_txtctrl', 'details_donotdiscount_checkbox', 'details_taxlvl_1_checkbox', 'details_taxlvl_2_checkbox', 
'details_taxlvl_3_checkbox', 'details_taxlvl_4_checkbox', 'details_taxlvl_never_checkbox', 'details_taxoverride_numctrl', 'details_avgcost_numctrl', 'details_lastcost_numctrl', 'details_startingMargin_numctrl', 'details_buy_numctrl', 'details_get_numctrl', 'inv_details_orderctrl_grid', 'details_saleDateBegin_datectrl', 'details_saleDateEnd_datectrl', 'details_saleTimeBegin_timectrl', 'details_saleTimeEnd_timectrl'] for name in details_default_list: item = wx.FindWindowByName(name) item.OnLoad('upc', upc) def OnSave(self): upc = wx.FindWindowByName('inventory_itemNumber_txtctrl').GetCtrl() details_default_list = ['details_altlookup_listbox', 'details_altlookup_listbox_txtctrl', 'details_donotdiscount_checkbox', 'details_taxlvl_1_checkbox', 'details_taxlvl_2_checkbox', 'details_taxlvl_3_checkbox', 'details_taxlvl_4_checkbox', 'details_taxlvl_never_checkbox', 'details_taxoverride_numctrl', 'details_avgcost_numctrl', 'details_lastcost_numctrl', 'details_startingMargin_numctrl', 'details_buy_numctrl', 'details_get_numctrl', 'inv_details_orderctrl_grid', 'details_saleDateBegin_datectrl', 'details_saleDateEnd_datectrl', 'details_saleTimeBegin_timectrl', 'details_saleTimeEnd_timectrl'] for name in details_default_list: item = wx.FindWindowByName(name) item.OnSave('upc', upc) def Clear(self): details_default_list = ['details_altlookup_listbox', 'details_altlookup_listbox_txtctrl', 'details_donotdiscount_checkbox', 'details_taxlvl_1_checkbox', 'details_taxlvl_2_checkbox', 'details_taxlvl_3_checkbox', 'details_taxlvl_4_checkbox', 'details_taxlvl_never_checkbox', 'details_taxoverride_numctrl', 'details_avgcost_numctrl', 'details_lastcost_numctrl', 'details_startingMargin_numctrl', 'details_buy_numctrl', 'details_get_numctrl', 'inv_details_orderctrl_grid', 'details_saleDateBegin_datectrl', 'details_saleDateEnd_datectrl', 'details_saleTimeBegin_timectrl', 'details_saleTimeEnd_timectrl'] for name in details_default_list: item = wx.FindWindowByName(name) item.Clear() def 
onDoubleClick(self, event): obj = event.GetEventObject() name = obj.GetName() today = datetime.date.today() wx.FindWindowByName(name).SetCtrl(today) def OnCellChange(self,event): debug = False obj = event.GetEventObject() named = obj.GetName() row = event.GetRow() col = event.GetCol() grid = wx.FindWindowByName(named) colname = grid.GetColLabelValue(col) grid.Refresh() if 'inv_details_cost_grid' in named: raw_value = grid.GetCellValue(row,col).strip() # numeric check if all(x in '0123456789.+-' for x in raw_value): # convert to float and limit to 2 decimals valued = Decimal(raw_value) if colname == 'Unit': if valued == '0': valued = '1' valued = RO.DoRound(valued, '1') else: valued = RO.DoRound(valued, '1.00') grid.SetCellValue(row,col,str(valued)) else: basic_list = ['1','$0.00','Margin','0.0000'] grid.SetCellValue(row,col,basic_list[col]) HUD.GridOps(grid.GetName()).GridFocusNGo(row, col) return # avgcost = wx.FindWindowByName('details_avgcost_numctrl').GetCtrl() #if avgcost: for yy in range(grid.GetNumberCols()): header = grid.GetColLabelValue(yy) if 'Unit' in header: unit = grid.GetCellValue(row,yy) if 'Margin %' in header: calcMargin = grid.GetCellValue(row,yy) if 'Price' in header: retail_from = grid.GetCellValue(row,yy) if colname == 'Margin %': if Decimal(calcMargin) == 0: newretail_almost = avgcost newretail = RO.DoRound(newretail_almost, '1.00') newMargin = '0.000' else: newretail_almost = RO.DoMargin(avgcost,calcMargin,unit) newretail = RO.DoRound(newretail_almost,'1.00') newMargin = RO.DoRound(calcMargin,'1.000') set_values = [('Price',newretail), ('Margin %',newMargin)] fillgrid = HUD.GridOps(grid.GetName()).FillGrid(set_values,row=row) if colname == 'Price': grossMargin = RO.GetMargin(avgcost,retail_from,unit) Margindot4 = RO.DoRound(grossMargin,'1.000') grid.SetCellValue(row,3,str(Margindot4)) if colname == 'Unit': newretail_almost = RO.DoMargin(avgcost,calcMargin,unit) newretail = RO.DoRound(newretail_almost, '1.00') 
                grid.SetCellValue(row, 1, str(newretail))

    def OnSaleBeginTimeChange(self, event):
        """Sale-begin time picker changed. NOTE(review): the value is read but
        never stored — looks like a stub; confirm intended behavior."""
        debug = False
        onSaleTimeBegin = event.GetEventObject().GetValue()

    def OnSaleEndTimeChange(self, event):
        """Sale-end time picker changed (value currently unused — see note above method)."""
        debug = False
        onSaleTimeEnd = event.GetEventObject().GetValue()

    def OnSaleBeginDateChange(self, event):
        """Sale-begin date picker changed (ISO-formatted value currently unused)."""
        debug = False
        fdateCtrl = event.GetEventObject()
        onSaleDateBegin = fdateCtrl.GetValue().FormatISODate()

    def OnSaleEndDateChange(self, event):
        """Sale-end date picker changed (ISO-formatted value currently unused)."""
        debug = False
        fdateCtrl = event.GetEventObject()
        onSaleDateEnd = fdateCtrl.GetValue().FormatISODate()

    def OnCompareButtonAltLookup(self, event):
        """Placeholder — not implemented."""
        debug = False
        pass

    def OnPriceSchemes(self, event):
        """Apply the pricing scheme named by the pressed button to the cost grid.

        'RESET' restores default Unit/Price/Margin rows; otherwise the scheme's
        unit tiers and reduce-by factors are fed to HUD.Pricing and each tier
        is written to one grid row.
        """
        debug = False
        grid = wx.FindWindowByName('inv_details_cost_grid')
        WhichScheme = event.GetEventObject()
        Scheme_Name = WhichScheme.GetLabel()
        # control name encodes the tiers, e.g. "1-PK-X2" — split on dashes
        Scheme_List = WhichScheme.GetName().split("-")
        if Scheme_Name == 'RESET':
            reset_list = [('Unit', '1'), ('Price', '0.00'), ('Margin %', '0.0000')]
            for xx in range(grid.GetNumberRows()):
                fillgrid = HUD.GridOps(grid.GetName()).FillGrid(reset_list, row=xx)
        else:
            if 'PK' in Scheme_List:
                # package scheme: derive pack/bulk tiers from units-in-package
                (each, pack, bulk) = Scheme_List
                UIPctrl = wx.FindWindowByName('genOptions_units_in_package_numctrl').GetCtrl()
                if not UIPctrl:
                    UIPctrl = 1
                if Decimal(UIPctrl) == Decimal(each):
                    UIPctrl = Decimal(each) * 2
                Bulkd = Decimal(UIPctrl) * Decimal(bulk.strip('X'))
                Scheme_List = str(each), str(UIPctrl), str(Bulkd)
            currPrice = wx.FindWindowByName('inv_details_cost_grid').GetCellValue(0, 1)
            avgCostL = wx.FindWindowByName('details_avgcost_numctrl').GetCtrl()
            # query = "SELECT reduce_by from item_pricing_schemes WHERE name=(?)"
            # data = (Scheme_Name,)
            # returnd = HUD.SQConnect(query, data).ONE()
            returnd = LookupDB('item_pricing_schemes').Specific(Scheme_Name, 'name', 'reduce_by')
            type(returnd)  # NOTE(review): no-op statement — likely leftover debugging
            if '-' in returnd[0]:
                Reduceby = returnd[0].split('-')
            else:
                Reduceby = list(returnd)
            startingMargin_L = wx.FindWindowByName('details_startingMargin_numctrl').GetCtrl()
            if startingMargin_L == 0:
                startingMargin_L = HUD.RetailOps().StartingMargin('details_startingMargin_numctrl')
            if Decimal(avgCostL) == 0:
                # cannot price without a cost basis
                wx.MessageBox('Average Cost Not Set', 'Info', wx.OK)
                return
            Schemed = HUD.Pricing('C', Scheme_List, Reduceby, avgCostL, startingMargin_L, currPrice)
            schemeLen = len(Schemed.Scheme())
            priceScheme_dict = Schemed.Scheme()
            xx = 0
            for key in Scheme_List:
                setList = [('Unit', key), ('Price', str(Decimal(priceScheme_dict[key][0]))),
                           ('Margin %', str(Decimal(priceScheme_dict[key][1])))]
                fillGrid = HUD.GridOps(grid.GetName()).FillGrid(setList, row=xx)
                xx += 1

#-------------------------------------
class page_detail_pg2(wx.Panel):
    """Second "Item Detail" tab: quantities, operation dates, commission,
    substitution, store location and product image for the current item."""

    def __init__(self, *args, **kwargs):
        """Build the tab's two-column layout of bound controls."""
        wx.Panel.__init__(self, *args, **kwargs)
        self.SetName('PageDetailPage2Tab')
        self.nameList = HUD.LoadSaveList()
        MainSizer = wx.BoxSizer(wx.HORIZONTAL)
        Duo1Sizer = wx.BoxSizer(wx.VERTICAL)
        D1level1Sizer = wx.BoxSizer(wx.HORIZONTAL)
        cb_list = [('Credit Book Exempt', 'details2_creditbookExempt_checkbox', 'item_detailed2', 'credit_book_exempt'),
                   ('Delete when out', 'details2_deleteWhenOut_checkbox', 'item_detailed2', 'delete_when_out')]
        for label, name, table, field in cb_list:
            ctrl = HUD.RH_CheckBox(self, -1, label=label, name=name)
            ctrl.tableName = table
            ctrl.fieldName = field
            # NOTE(review): this REPLACES self.nameList (a HUD.LoadSaveList)
            # with a plain string on every iteration, yet OnLoad/OnSave call
            # self.nameList.get(). Probably meant an add/append call — confirm
            # against the HUD.LoadSaveList API.
            self.nameList = name
            D1level1Sizer.Add(ctrl, 0)
            D1level1Sizer.Add((10, 10), 0)
        D1level2Sizer = wx.BoxSizer(wx.HORIZONTAL)
        QuantityData_box = wx.StaticBox(self, label="Quantities")
        QuantityDataSizer = wx.StaticBoxSizer(QuantityData_box, wx.HORIZONTAL)
        txtctrl_list = [('On Hand', 'details2_onHand_numctrl', 'item_detailed', 'quantity_on_hand'),
                        ('Committed', 'details2_committed_numctrl', 'item_detailed', 'quantity_committed'),
                        ('On Layaway', 'details2_onLayaway_numctrl', 'item_detailed', 'quantity_on_layaway')]
        for label, name, table, field in txtctrl_list:
            Sizer = wx.BoxSizer(wx.VERTICAL)
            text = wx.StaticText(self, label=label)
            ctrl = HUD.RH_NumCtrl(self, -1, value=0, name=name, integerWidth=6, fractionWidth=3)
            ctrl.tableName = table
            ctrl.fieldName = field
            self.nameList = name  # NOTE(review): same overwrite issue as above
            Sizer.Add(text, 0, wx.ALL | wx.ALIGN_CENTER, 2)
            Sizer.Add(ctrl, 0, wx.ALL | wx.ALIGN_CENTER, 2)
            QuantityDataSizer.Add(Sizer, 0, wx.ALL, 2)
            QuantityDataSizer.Add((20, 20), 0)
        D1level2Sizer.Add(QuantityDataSizer, 0)
        D1level3Sizer = wx.BoxSizer(wx.HORIZONTAL)
        OperationDates_box = wx.StaticBox(self, label="Operation Dates")
        OpDateSizer = wx.StaticBoxSizer(OperationDates_box, wx.HORIZONTAL)
        opdat_list = [('Last Sale', 'details2_lastSale_datectrl', 'item_detailed2', 'last_saledate'),
                      ('Last Return', 'details2_lastReturn_datectrl', 'item_detailed2', 'last_returndate'),
                      ('Last Maint', 'details2_lastMaint_datectrl', 'item_detailed2', 'maintdate'),
                      ('Added', 'details2_added_datectrl', 'item_detailed2', 'added_date')]
        for label, name, table, field in opdat_list:
            OPSizer = wx.BoxSizer(wx.VERTICAL)
            OP_text = wx.StaticText(self, label=label)
            ctrl = HUD.RH_DatePickerCtrl(self, name=name, style=wx.adv.DP_ALLOWNONE)
            ctrl.tableName = table
            ctrl.fieldName = field
            self.nameList = name  # NOTE(review): same overwrite issue as above
            OPSizer.Add(OP_text, 0, wx.ALL | wx.ALIGN_CENTER, 2)
            OPSizer.Add(ctrl, 0, wx.ALL | wx.ALIGN_CENTER, 2)
            OpDateSizer.Add(OPSizer, 0)
            OpDateSizer.Add((20, 20), 0)
        D1level3Sizer.Add(OpDateSizer, 0)
        D1level4Sizer = wx.BoxSizer(wx.HORIZONTAL)
        box = wx.StaticBox(self, label="Commission Data")
        boxSizer = wx.StaticBoxSizer(box, wx.VERTICAL)
        CD_1Sizer = wx.BoxSizer(wx.HORIZONTAL)
        ctrl = HUD.RH_CheckBox(self, -1, label="Override other commissions with %:",
                               name="details2_commissionOverride_checkbox")
        ctrl.tableName = 'item_detailed2'
        ctrl.fieldName = 'override_commission'
        self.nameList = 'details2_commissionOverride_checkbox'
        CD_1Sizer.Add(ctrl, 0)
        ctrl = HUD.RH_NumCtrl(self, -1, value=0, name='details2_commissionOverride_numctrl',
                              integerWidth=5, fractionWidth=3)
        ctrl.tableName = 'item_detailed2'
        ctrl.fieldName = 'over_commission'
        self.nameList = 'details2_commissionOverride_numctrl'
        CD_1Sizer.Add(ctrl, 0)
        boxSizer.Add(CD_1Sizer, 0)
        CD_2Sizer = wx.BoxSizer(wx.HORIZONTAL)
        text = wx.StaticText(self, label="or fixed amount:")
        ctrl = HUD.RH_NumCtrl(self, -1, value=0, name="details2_commissionFixedAmt_numctrl",
                              integerWidth=5, fractionWidth=2)
        ctrl.tableName = 'item_detailed2'
        ctrl.fieldName = 'over_fixd_comm'
        self.nameList = "details2_commissionFixedAmt_numctrl"
        CD_2Sizer.Add(text, 0)
        CD_2Sizer.Add(ctrl, 0)
        boxSizer.Add(CD_2Sizer, 0, wx.ALL | wx.ALIGN_RIGHT, 2)
        D1level4Sizer.Add(boxSizer, 0)
        Duo1Sizer.Add((20, 20), 0)
        Duo1Sizer.Add(D1level1Sizer, 0)
        Duo1Sizer.Add((20, 20), 0)
        Duo1Sizer.Add(D1level2Sizer, 0)
        Duo1Sizer.Add((20, 20), 0)
        Duo1Sizer.Add(D1level3Sizer, 0)
        Duo1Sizer.Add((20, 20), 0)
        Duo1Sizer.Add(D1level4Sizer, 0)
        Duo2Sizer = wx.BoxSizer(wx.VERTICAL)
        D2level1Sizer = wx.BoxSizer(wx.HORIZONTAL)
        text = wx.StaticText(self, label="Case Break #")
        ctrl = HUD.RH_TextCtrl(self, -1, value='', size=(120, -1), name="details2_casebreaknum_txtctrl")
        ctrl.tableName = 'item_detailed2'
        ctrl.fieldName = 'case_break_num'
        self.nameList = "details2_casebreaknum_txtctrl"
        D2level1Sizer.Add(text, 0)
        D2level1Sizer.Add(ctrl, 0)
        D2level2Sizer = wx.BoxSizer(wx.HORIZONTAL)
        ctrl = HUD.RH_CheckBox(self, -1, label='Substitute with :', name="details2_subitem_checkbox")
        ctrl.tableName = 'item_detailed2'
        ctrl.fieldName = 'substituteYN'
        self.nameList = "details2_subitem_checkbox"
        D2level2Sizer.Add(ctrl, 0)
        ctrl = HUD.RH_TextCtrl(self, -1, size=(120, -1), name="details2_subitem_txtctrl")
        ctrl.tableName = 'item_detailed2'
        ctrl.fieldName = 'substitute_num'
        self.nameList = "details2_subitem_txtctrl"
        D2level2Sizer.Add(ctrl, 0)
        D2level3Sizer = wx.BoxSizer(wx.HORIZONTAL)
        D2level3_list = [('Store\nLocation', 'details2_storeloc_txtctrl', 120, 'item_detailed2', 'location')]
        spacerd = len(D2level3_list)
        cntd = 1
        for label, name, sized, table, field in D2level3_list:
            text = wx.StaticText(self, label=label)
            if 'weight' in name:
                ctrl = HUD.RH_NumCtrl(self, -1, value=0, name=name, integerWidth=5, fractionWidth=3)
            else:
                ctrl = HUD.RH_TextCtrl(self, -1, name=name)
            ctrl.tableName = table
            ctrl.fieldName = field
            self.nameList = name
            D2level3Sizer.Add(text, 0, wx.ALL | wx.ALIGN_CENTER, 2)
            D2level3Sizer.Add(ctrl, 0, wx.ALL, 3)
            cntd += 1
            if cntd < spacerd:
                D2level3Sizer.Add((10, 10), 0)
        D2level4_box = wx.StaticBox(self, label="Product Image")
        D2level4_boxSizer = wx.StaticBoxSizer(D2level4_box, wx.VERTICAL)
        imageload = HUD.ButtonOps().Icons('empty')
        self.png = wx.StaticBitmap(self, -1, wx.Bitmap(imageload, wx.BITMAP_TYPE_ANY),
                                   size=(300, 300), style=wx.SUNKEN_BORDER)
        ctrl = HUD.RH_FilePickerCtrl(self, wx.ID_ANY, message='Please select PNG',
                                     wildcard='*.png', size=(500, 20),
                                     name='details2_imageloc_txtctrl')
        ctrl.tableName = 'item_detailed2'
        ctrl.fieldName = 'image_loc'
        self.nameList = 'details2_imageloc_txtctrl'
        D2level4_boxSizer.Add(self.png, 0)
        D2level4_boxSizer.Add(ctrl, 0)
        Duo2Sizer.Add(D2level1Sizer, 0, flag=wx.ALIGN_RIGHT)
        Duo2Sizer.Add((10, 10), 0)
        Duo2Sizer.Add(D2level2Sizer, 0, flag=wx.ALIGN_RIGHT)
        Duo2Sizer.Add((10, 10), 0)
        Duo2Sizer.Add(D2level3Sizer, 0, flag=wx.ALIGN_RIGHT)
        Duo2Sizer.Add((10, 10), 0)
        Duo2Sizer.Add(D2level4_boxSizer, 0, flag=wx.ALIGN_RIGHT)
        MainSizer.Add(Duo1Sizer, 0)
        MainSizer.Add(Duo2Sizer, 0)
        self.SetSizer(MainSizer)

    def OnNumbersOnly(self, event):
        """Check for numeric entry and limit to 2 decimals.

        Accepted result is stored in self.value.
        NOTE(review): self.edit is never assigned in this class — this handler
        looks copied from elsewhere and would raise AttributeError if fired;
        confirm where self.edit is supposed to come from.
        """
        debug = False
        valued = event.GetEventObject()
        raw_value = valued.GetValue().strip()
        # numeric check
        if all(x in '0123456789.+-' for x in raw_value):
            # convert to float and limit to 2 decimals
            self.value = round(float(raw_value), 2)
            self.edit.ChangeValue(str(self.value))
        else:
            self.edit.ChangeValue("Numbers only")

    def OnLoad(self, event):
        """Load every registered control for the current upc.

        NOTE(review): relies on self.nameList.get() returning the registered
        control names, but __init__ repeatedly reassigns self.nameList to a
        plain string — verify against HUD.LoadSaveList.
        """
        upc = wx.FindWindowByName('inventory_itemNumber_txtctrl').GetCtrl()
        ctrl_listd = self.nameList.get()
        for name in ctrl_listd:
            item = wx.FindWindowByName(name)
            item.OnLoad(whereField='upc', whereValue=upc)

    def OnSave(self):
        """Persist every registered control for the current upc (see OnLoad note)."""
        upc = wx.FindWindowByName('inventory_itemNumber_txtctrl').GetCtrl()
        ctrl_listd = self.nameList.get()
        for name in ctrl_listd:
            item = wx.FindWindowByName(name)
item.OnSave(whereField='upc',whereValue=upc) def Clear(self): ctrl_listd = self.nameList.get() for name in ctrl_listd: item = wx.FindWindowByName(name) item.Clear() #---------------------------------------- class vendorTab(wx.Panel): def __init__(self, *args, **kwargs): """""" vendorID = kwargs.pop('vendorID') wx.Panel.__init__(self, *args, **kwargs) named = 'Vendor{}DataTab'.format(vendorID) self.SetName(named) self.num = vendorID self.vendorID = 'vendor'+str(vendorID) MainSizer = wx.BoxSizer(wx.VERTICAL) level1Sizer = wx.BoxSizer(wx.HORIZONTAL) level1bSizer = wx.BoxSizer(wx.HORIZONTAL) level2Sizer = wx.BoxSizer(wx.HORIZONTAL) text = wx.StaticText(self, -1, label="Vendor #") level2Sizer.Add(text, 0) vendor_ctrlList = [('vendordata_{}_vendorNum_txtctrl'.format(self.vendorID),60,wx.TE_PROCESS_ENTER), ('vendordata_{}_vendorName_txtctrl'.format(self.vendorID),180,wx.TE_READONLY)] for name,sized,style in vendor_ctrlList: ctrl = HUD.RH_TextCtrl(self, -1, size=(sized, -1), name=name, style=style) if 'vendorNum' in name: ctrl.Bind(wx.EVT_TEXT_ENTER, self.CheckVendorName) ctrl.SetFocus() level2Sizer.Add(ctrl, 0) level2Sizer.Add((10,10),0) level3Sizer = wx.BoxSizer(wx.HORIZONTAL) vendorpagetxt_list = [('Order #', 'vendordata_{}_orderNum_txtctrl'.format(self.vendorID),120), ('Last Retail\nUnit Price:', 'vendordata_{}_lastRetail_numctrl'.format(self.vendorID),120), ('Last Unit\nUnit Cost:', 'vendordata_{}_lastCost_numctrl'.format(self.vendorID), 120)] #('Retail Units\nin Order:','vendordata_'+self.vendorID+'_retailUnits_numctrl',80)] for label,name,sized in vendorpagetxt_list: text = wx.StaticText(self, -1, label=label) if 'numctrl' in name: ctrl = HUD.RH_NumCtrl(self, -1, value=0, name=name, integerWidth=6, fractionWidth=2) else: ctrl = HUD.RH_MTextCtrl(self, -1, size=(sized,35), name=name, formatcodes='!') level3Sizer.Add(text, 0) level3Sizer.Add(ctrl, 0) level4Sizer = wx.BoxSizer(wx.HORIZONTAL) lvl4_list = [('Lead Time','vendordata_'+self.vendorID+'_leadtime_numctrl'), 
('Minimum Order','vendordata_'+self.vendorID+'_minimumOrder_numctrl')] for label,name in lvl4_list: text = wx.StaticText(self, -1, label=label) ctrl = HUD.RH_NumCtrl(self, -1, value=0, name=name, integerWidth=3, fractionWidth=0) level4Sizer.Add(text, 0) level4Sizer.Add(ctrl, 0) level4Sizer.Add((20,10),0) level5Sizer = wx.BoxSizer(wx.HORIZONTAL) lvl5_list = [('Last Order','vendordata_'+self.vendorID+'_lastOrder_numctrl'), ('Date','vendordata_'+self.vendorID+'_lastOrder_datectrl')] #('Outstanding','vendordata_'+self.vendorID+'_outstanding_numctrl')] for label,name in lvl5_list: text = wx.StaticText(self, wx.ID_ANY, label=label) if 'datectrl' in name: ctrl = HUD.RH_DatePickerCtrl(self, name=name, style=wx.adv.DP_ALLOWNONE) ctrl.SetValue(wx.DateTime(1,1,1969)) if 'numctrl' in name: ctrl = HUD.RH_NumCtrl(self, -1, value=0, name=name, integerWidth=3, fractionWidth=0) level5Sizer.Add(text, 0) level5Sizer.Add(ctrl, 0) level5Sizer.Add((20,10), 0) sizer_list = [level1Sizer, level2Sizer, level3Sizer, level4Sizer, level5Sizer] for sizer in sizer_list: MainSizer.Add(sizer, 0) MainSizer.Add((10,10),0) self.SetSizer(MainSizer) def CheckVendorName(self, event): debug = False obj = event.GetEventObject() named = obj.GetName() vendorNum = obj.GetValue().upper().strip() queryWhere = 'vend_num=(?) OR name LIKE (?)' queryData = (vendorNum,vendorNum,) cnt_returnd = HUD.QueryOps().QueryChecks('vendor_basic_info',queryWhere,queryData) if cnt_returnd == 0: wx.FindWindowByName(named).SetCtrl('') return if cnt_returnd == 1: query = 'SELECT vend_num,name FROM vendor_basic_info WHERE vend_num=(?) 
OR name LIKE (?)' data = (vendorNum,vendorNum,) returnd = HUD.SQConnect(query, data).ONE() (vendor_numd, vendor_named) = returnd[0] elif cnt_returnd > 1: style = wx.DEFAULT_FRAME_STYLE & (wx.CLOSE_BOX) & (~wx.MAXIMIZE_BOX) VendorLookup_D = HUD.VendorFindDialog(self, title="Customer Lookup", vendNumber=vendorNum,style=style) VendorLookup_D.ShowModal() try: VendorLookup_D.itemPicked VendorLookup_D.Destroy() except: VendorLookup_D.Destroy() return self.vendPicked = VendorLookup_D.itemPicked.upper().strip() self.vendPicked = self.vendPicked.ljust(5) # query = 'SELECT vend_num,name FROM vendor_basic_info WHERE vend_num=(?)' # data = (self.vendPicked,) # returnd = HUD.SQConnect(query, data).ONE() returnd = LookupDB('vendor_basic_info').Specific(self.vendPicked, 'vend_num', 'vend_num, name') (vendor_numd, vendor_named) = returnd vendor_name_list = [('vendordata_{}_vendorNum_txtctrl'.format(self.vendorID),vendor_numd), ('vendordata_{}_vendorName_txtctrl'.format(self.vendorID),vendor_named)] for name,value in vendor_name_list: item = wx.FindWindowByName(name) item.SetValue(value) def OnLoad(self): upc = wx.FindWindowByName('inventory_itemNumber_txtctrl').GetCtrl() for i in range(1,7): vendor_list = [('vendordata_vendor{}_vendorNum_txtctrl'.format(i), 'vendorNum'), ('vendordata_vendor{}_orderNum_txtctrl'.format(i), 'orderNum'), ('vendordata_vendor{}_lastRetail_numctrl'.format(i), 'lastRetail'), ('vendordata_vendor{}_lastCost_numctrl'.format(i), 'lastCost'), ('vendordata_vendor{}_leadtime_numctrl'.format(i), 'leadTime'), ('vendordata_vendor{}_minimumOrder_numctrl'.format(i), 'minOrder'), ('vendordata_vendor{}_lastOrder_datectrl'.format(i), 'lastOrder')] selfield = f'vendor{i}' returnd = LookupDB('item_detailed').Specific(upc, 'upc', selfield) try: vend_dict = json.loads(returnd) except TypeError as e: print(e) for item, key in vendor_list: ctrl = wx.FindWindowByName(item).SetCtrl(vend_dict[key]) # for name, table, field in vendor_list: # # query = '''SELECT {} # # FROM {} # # 
WHERE upc=(?)'''.format(field, table) # # data = [upc,] # # returnd = HUD.SQConnect(query, data).ONE() # #returnd = LookupDB(table).Specific(upc, 'upc', field) # #ret = VarOps().DeTupler(returnd) # #wx.FindWindowByName(name).SetCtrl(ret) # pout.v(f"Name : {name} ; Table : {table} ; Field : {field} ; UPC : {upc}") # item = wx.Window.FindWindowByName(name) # item.OnLoad(table, field, 'upc', upc) def OnSave(self): upc = wx.FindWindowByName('inventory_itemNumber_txtctrl').GetCtrl() vend_dict = {} for i in range(1,7): vendor_list = [('vendordata_vendor{}_vendorNum_txtctrl'.format(i), 'vendorNum'), ('vendordata_vendor{}_orderNum_txtctrl'.format(i), 'orderNum'), ('vendordata_vendor{}_lastRetail_numctrl'.format(i), 'lastRetail'), ('vendordata_vendor{}_lastCost_numctrl'.format(i), 'lastCost'), ('vendordata_vendor{}_leadtime_numctrl'.format(i), 'leadTime'), ('vendordata_vendor{}_minimumOrder_numctrl'.format(i), 'minOrder'), ('vendordata_vendor{}_lastOrder_datectrl'.format(i), 'lastOrder')] dm = HUD.DictMaker() for name, key in vendor_list: val = wx.FindWindowByName(name).GetCtrl() dm.add(key, val) setField = f'vendor{i}' vend_dict = dm.get() pout.b('DM : {}'.format(vend_dict)) vend_dict_JSON = json.dumps(vend_dict) LookupDB('item_detailed').UpdateSingle(setField, vend_dict_JSON, 'upc', upc) #item = wx.Window.FindWindowByName(name) #item.OnSave(table, field, 'upc', upc) # fieldSet, dataSet, table = HUD.QueryOps().Commaize(vendor_list) # # query = '''UPDATE {} # # SET {} # # WHERE upc=(?)'''.format(table, fieldSet) # data = dataSet + [upc,] # # returnd = HUD.SQConnect(query, data).ONE() # returnd = LookupDB(table).UpdateGroup(fieldSet, 'upc', data) def Clear(self): vendor_list = ['vendor1','vendor2','vendor3','vendor4','vendor5','vendor6'] for num in vendor_list: vendordata_default_list = ['vendordata_{}_vendorNum_txtctrl'.format(num), 'vendordata_{}_vendorName_txtctrl'.format(num), 'vendordata_{}_lastRetail_numctrl'.format(num), 'vendordata_{}_orderNum_txtctrl'.format(num), 
'vendordata_{}_lastRetail_numctrl'.format(num), 'vendordata_{}_retailUnits_numctrl'.format(num), 'vendordata_{}_leadtime_numctrl'.format(num), 'vendordata_{}_minimumOrder_numctrl'.format(num), 'vendordata_{}_lastOrder_numctrl'.format(num), 'vendordata_{}_lastOrder_datectrl'.format(num), 'vendordata_{}_outstanding_numctrl'.format(num)] for name in vendordata_default_list: clear = wx.FindWindowByName(name).Clear() #---------------------------------- class page_vendordata(wx.Panel): def __init__(self, *args, **kwargs): """""" wx.Panel.__init__(self, *args, **kwargs) sizer = wx.BoxSizer(wx.VERTICAL) nestednb = wx.Notebook(self, wx.ID_ANY, name='vendordata_vendor_notebook') vendorTabs = [1, 2, 3, 4, 5, 6] for tab in vendorTabs: page = vendorTab(nestednb, vendorID=tab) nestednb.AddPage(page, "Vendor {}".format(tab)) #page1 = vendorTab(nestednb, vendorID=1) #page2 = vendorTab(nestednb, vendorID=2) #page3 = vendorTab(nestednb, vendorID=3) #page4 = vendorTab(nestednb, vendorID=4) #page5 = vendorTab(nestednb, vendorID=5) #page6 = vendorTab(nestednb, vendorID=6) #nestednb.AddPage(page1, "Vendor 1") #nestednb.AddPage(page2, "Vendor 2") #nestednb.AddPage(page3, "Vendor 3") #nestednb.AddPage(page4, "Vendor 4") #nestednb.AddPage(page5, "Vendor 5") #nestednb.AddPage(page6, "Vendor 6") sizer.Add(nestednb, 1, wx.ALL|wx.EXPAND, 5) self.SetSizer(sizer) self.Layout() #---------------------------------------- class page_notes(wx.Panel): def __init__(self, *args, **kwargs): """""" wx.Panel.__init__(self, *args, **kwargs) self.SetName('NotesTab') MainSizer = wx.BoxSizer(wx.VERTICAL) self.ItemNumberd = wx.FindWindowByName('inventory_itemNumber_txtctrl').GetCtrl() yellowback = (246,241,87) self.ctrl = HUD.RH_TextCtrl(self, -1, size=(700,500), name="notes_notes_txtctrl", style=wx.TE_MULTILINE) self.ctrl.SetBackgroundColour(yellowback) MainSizer.Add(self.ctrl, 0, wx.ALL|wx.ALIGN_CENTER, 5) self.SetSizer(MainSizer) def OnLoad(self, event): # query = '''SELECT {} # FROM {} # WHERE 
upc=(?)'''.format('notes','item_notes') # data = [self.ItemNumberd,] # returnd = HUD.SQConnect(query, data).ONE() returnd = LookupDB('item_notes').Specific(self.ItemNumberd, 'upc', 'notes') ret = VarOps().DeTupler(returnd) self.ctrl.SetValue(ret) #CO('notes_notes_txtctrl').SetCtrl(ret) def OnSave(self): returnd = LookupDB('item_notes').UpdateSingle('notes',self.ctrl.GetValue(),'upc',self.ItemNumberd) def Clear(self): notes_default_list = ['notes_notes_txtctrl'] for name in notes_default_list: clear = wx.FindWindowByName(name).ClearCtrl() #---------------------------------------- class page_pos_saleslinks(wx.Panel): def __init__(self, *args, **kwargs): """""" wx.Panel.__init__(self, *args, **kwargs) self.SetName('posSalesLinks_Tab') MainSizer = wx.BoxSizer(wx.VERTICAL) Level1Sizer = wx.BoxSizer(wx.HORIZONTAL) self.grid = HUD.POSLinks_Grid(self, name='inv_posSalesLinks_poslinks_grid') Level1Sizer.Add(self.grid, 0) MainSizer.Add(Level1Sizer, 0, wx.ALL|wx.ALIGN_CENTER,10) self.SetSizer(MainSizer) #---------------------------------------- class custInstructionTab(wx.Panel): def __init__(self, *args, **kwargs): """""" infoType = kwargs.pop('infoType') wx.Panel.__init__(self, *args, **kwargs) self.tabType = infoType named = f'CustInstruct_{self.tabType}_Tab' self.SetName(named) MainSizer = wx.BoxSizer(wx.VERTICAL) Level1Sizer = wx.BoxSizer(wx.HORIZONTAL) self.ItemNumberd = wx.FindWindowByName('inventory_itemNumber_txtctrl').GetCtrl() Level2Sizer = wx.BoxSizer(wx.HORIZONTAL) idx = 0 print_choices = ['Always Print','Prompt Before Printing'] radiobox = HUD.RH_RadioBox(self, -1, label='Print Options', choices=print_choices, name='custinst_'+self.tabType+'_printOptions_radiobox') Level2Sizer.Add(radiobox, 0) std_dialog_combobox = HUD.RH_ComboBox(self, -1, name='custinst_'+self.tabType+'_stdDialog_combobox') Level2Sizer.Add((250,10),0) Level2Sizer.Add(std_dialog_combobox, 0, wx.ALL|wx.EXPAND|wx.ALIGN_RIGHT, 5) ctrl = HUD.RH_TextCtrl(self, -1, 
size=(800,300),name='custinst_'+self.tabType+'_custinstruct_txtctrl',style=wx.TE_MULTILINE) infoButton = wx.FindWindowByName('custinst_custInfo_button') MainSizer.Add(Level1Sizer, 0) MainSizer.Add(Level2Sizer, 0, wx.ALL, 15) MainSizer.Add(ctrl, 0, wx.ALL|wx.ALIGN_CENTER, 5) self.SetSizer(MainSizer) wx.CallAfter(self.OnLoad, event='') def OnLoad(self, event): custInst_list = ['Info','ReturnPolicy','Warranty'] for typed in custInst_list: irw = re.sub('Policy', '', typed, flags=re.I).lower() clist = [('print_{}_options'.format(irw),'custinst_{}_printOptions_radiobox'.format(typed), 'item_cust_instructions'), ('{}_box'.format(irw),'custinst_{}_custinstruct_txtctrl'.format(typed), 'item_cust_instructions')] for field, name, table in clist: returnd = LookupDB('table').Specific(self.ItemNumberd,'upc',field) ret = VarOps().DeTupler(returnd) wx.FindWindowByName(name).SetCtrl(ret) def OnSave(self, upc): custInst_list = ['Info','ReturnPolicy','Warranty'] for typed in custInst_list: irw = re.sub('Policy', '', typed, flags=re.I).lower() clist = [('print_{}_options'.format(irw),'custinst_{}_printOptions_radiobox'.format(typed), 'item_cust_instructions'), ('{}_box'.format(irw),'custinst_{}_custinstruct_txtctrl'.format(typed), 'item_cust_instructions')] for field, name, table in clist: item = wx.FindWindowByName(name).GetCtrl() # query = '''UPDATE {} # SET {}=(?) # WHERE upc=(?) 
# '''.format(table, field) # data = [item, self.ItemNumberd,] # returnd = HUD.SQConnect(query, data).ONE() returnd = LookupDB(table).UpdateSingle(field, item, 'upc',self.ItemNumberd) def Clear(self): custInst_list = ['Info','ReturnPolicy','Warranty'] for typed in custInst_list: custinst_default_list = ['custinst_'+typed+'_printOptions_radiobox','custinst_'+typed+'_custinstruct_txtctrl'] for name in custinst_default_list: clear = wx.FindWindowByName(name).ClearCtrl() class page_customer_instructions(wx.Panel): def __init__(self, *args, **kwargs): """""" wx.Panel.__init__(self, *args, **kwargs) sizer = wx.BoxSizer(wx.VERTICAL) nestednb = wx.Notebook(self, wx.ID_ANY, name='custinst_info_notebook') tabType = [("Info", "Information"), ("ReturnPolicy", "Return Policy"), ("Warranty", "Warranty")] for field_part, header in tabType: page = custInstructionTab(nestednb, infoType=field_part) nestednb.AddPage(page, header) sizer.Add(nestednb, 1, wx.ALL|wx.EXPAND, 5) self.SetSizer(sizer) self.Layout() def OnLoad(self, event): pass def OnSave(self, event): pass def Clear(self): pass #---------------------------------------- class page_consignment(wx.Panel): def __init__(self, *args, **kwargs): """""" wx.Panel.__init__(self, *args, **kwargs) self.SetName('PageConsignmentTab') MainSizer = wx.BoxSizer(wx.VERTICAL) Level1Sizer = wx.BoxSizer(wx.HORIZONTAL) consign_list = [('Vendor # ','consignment_vendor_txtctrl'), ('Override Standard Fee :', 'consignment_overrideFee_txtctrl')] for label, name in consign_list: text = wx.StaticText(self, -1, label=label) txtctrl = HUD.RH_TextCtrl(self, -1, size=(80,-1), name=name) Level1Sizer.Add(text, 0,wx.ALL,5) Level1Sizer.Add(txtctrl, 1,wx.ALL, 5) Level1Sizer.Add((30,30),0) MainSizer.Add(Level1Sizer, 0) self.SetSizer(MainSizer) def OnLoad(self, event): conList = [('consignment_vendor_txtctrl','')] pass def OnSave(self, event): pass def Clear(self): consign_list = ['consignment_vendor_txtctrl', 'consignment_overrideFee_txtctrl'] for name in 
consign_list: wx.FindWindowByName(name).ClearCtrl() #---------------------------------------- class page_8(wx.Panel): def __init__(self, *args, **kwargs): """""" wx.Panel.__init__(self, *args, **kwargs) #---------------------------------------- class page_daily_movement(wx.Panel): def __init__(self, *args, **kwargs): """""" wx.Panel.__init__(self, *args, **kwargs) MainSizer = wx.BoxSizer(wx.VERTICAL) level1Sizer = wx.BoxSizer(wx.HORIZONTAL) date_list = [('Begin Date','inv_dailyMovement_movement_start_datectrl'), ('End Date','inv_dailyMovement_movement_end_datectrl')] for label, name in date_list: box = wx.StaticBox(self, label=label) boxSizer = wx.StaticBoxSizer(box, wx.HORIZONTAL) datectrl = HUD.RH_DatePickerCtrl(self, -1, name=name, style=wx.adv.DP_ALLOWNONE) today = datetime.date.today() datectrl.SetValue(wx.DateTime().Today()) boxSizer.Add(datectrl, 0) level1Sizer.Add(boxSizer, 0, wx.ALL|wx.CENTER, 5) button = wx.Button(self, -1, label='GO', name='inv_dailyMovement_go_button') button.Bind(wx.EVT_BUTTON, self.checkTransactions) level1Sizer.Add((10,10), 0) level1Sizer.Add(button, 0, wx.ALL|wx.ALIGN_CENTER, 3) MainSizer.Add(level1Sizer, 0, wx.ALL|wx.CENTER, 3) grid = HUD.Activity_Grid(self,name='inv_dailyMovement_movement_grid') MainSizer.Add(grid, 0, wx.ALL|wx.LEFT, 5) #graph = HUD.SalesGraph(self) #MainSizer.Add(graph, 0, wx.ALL|wx.RIGHT, 5) self.SetSizer(MainSizer) def ActivityCheck(self, event): debug = False pass def checkTransactions(self, event): """ Check Transactions for Item Sold on this Date """ itemNumber = wx.FindWindowByName('inventory_itemNumber_txtctrl').GetCtrl() debug = True fields = 'date,quantity,total_price' # query = 'SELECT date, quantity, avg_cost, total_price FROM transactions WHERE upc=(?)' # data = (itemNumber,) # returnd = HUD.SQConnect(query,data).ALL() returnd = LookupDB('transactions').Specific(itemNumber, 'upc', 'date,quantity,avg_cost,total_price') begin_date = 
wx.FindWindowByName('inv_dailyMovement_movement_start_datectrl').GetCtrl() end_date = wx.FindWindowByName('inv_dailyMovement_movement_end_datectrl').GetCtrl() gridname = 'inv_dailyMovement_movement_grid' VarOps().GetTyped(returnd) HUD.GridOps(gridname).AlterGrid(returnd) if returnd is not None: idx = 0 accum = {} for dated, qty, avgcost, price in returnd: if dated is None: continue VarOps().GetTyped(begin_date) VarOps().GetTyped(dated) VarOps().GetTyped(end_date) gross_profit = Decimal(qty)*(Decimal(price) - Decimal(avgcost)) begin = datetime.datetime.strptime(begin_date, '%Y-%m-%d').date() end = datetime.datetime.strptime(end_date, '%Y-%m-%d').date() if begin <= dated <= end: if dated in accum: qty_d = accum[dated][0] + qty accum[dated][0]=qty_d accum[dated][2] += Decimal(gross_profit) else: accum[dated]=[qty, avgcost, gross_profit] idx = 0 for key, value in list(accum.items()): setList = [('Activity Date',key),('Sales Volume',value[0]),('Sales Amount',value[1]), ('Gross Profit', RO.DoRound(value[2], '1.00'))] HUD.GridOps(gridname).FillGrid(setList,row=idx) idx += 1 #---------------------------------------- class DetailsTab(wx.Panel): def __init__(self, *args, **kwargs): """""" wx.Panel.__init__(self, *args, **kwargs) sizer = wx.BoxSizer(wx.VERTICAL) nestednb = wx.Notebook(self, wx.ID_ANY, name='inventory_details_notebook') tabType = [('Item Detail', page_item_detail), ('Item Detail(2)', page_detail_pg2), ('Vendor Data', page_vendordata), ('Notes', page_notes), ('POS Sales Links', page_pos_saleslinks), ('Consignment', page_consignment), ('', page_8), ('Movement', page_daily_movement)] for title, tabpage in tabType: page = tabpage(nestednb) nestednb.AddPage(page, title) # page1 = page_item_detail(nestednb) # page2 = page_detail_pg2(nestednb) # page3 = page_vendordata(nestednb) # page4 = page_notes(nestednb) # page5 = page_pos_saleslinks(nestednb) # page6 = page_customer_instructions(nestednb) # page7 = page_consignment(nestednb) # page8 = page_8(nestednb) # page9 = 
page_daily_movement(nestednb) # # nestednb.AddPage(page1, "Item_Detail") # nestednb.AddPage(page2, "Item_Detail(2)") # nestednb.AddPage(page3, "Vendor Data") # nestednb.AddPage(page4, "Notes") # nestednb.AddPage(page5, "POS Sales Links") # nestednb.AddPage(page6, "Customer Instructions") # nestednb.AddPage(page7, "Consignment") # nestednb.AddPage(page8, " ") # nestednb.AddPage(page9, "Daily Movement") sizer.Add(nestednb, 1, wx.ALL|wx.EXPAND, 5) self.SetSizer(sizer) self.Layout() #---------------------------------------------------------------------- class StartPanel(wx.Panel): """""" def __init__(self, *args, **kwargs): wx.Panel.__init__(self, *args, **kwargs) self.SetName('StartPanel') lookupSizer = wx.BoxSizer(wx.VERTICAL) debug = False IconBar_list =[('Save', self.OnSave), ('Undo', self.OnUndo), ('Find', self.OnFind), ('Add', self.OnAdd), ('Delete', self.OnMinus), ('Receiving', self.OnReceive), ('Exit', self.OnExitButton)] iconbar = HUD.IconPanel(self, iconList=IconBar_list) lookupSizer.Add(iconbar, 0, wx.EXPAND) level1Sizer = wx.BoxSizer(wx.HORIZONTAL) name = 'inventory_itemNumber_txtctrl' ctrl = HUD.RH_TextCtrl(self, -1, size=(250,-1), name=name, style=wx.TE_PROCESS_ENTER|wx.TE_PROCESS_TAB) ctrl.SetFocus() ctrl.SelectAll() ctrl.SetHint('Item Number') ctrl.SetToolTip(wx.ToolTip('Enter Item Number Here & press Enter')) ctrl.Bind(wx.EVT_KEY_DOWN, self.onCatchKey) ctrl.fieldName = 'upc' ctrl.tableName = 'item_detailed' level1Sizer.Add(ctrl, 0, wx.ALL, 3) name = 'inventory_itemDescription_txtctrl' ctrl = HUD.RH_TextCtrl(self, -1, size=(350,-1), name=name, style=wx.TE_PROCESS_TAB) ctrl.SetHint('Item Description') ctrl.SetToolTip(wx.ToolTip('Enter Item Description Here')) ctrl.Bind(wx.EVT_KILL_FOCUS, HUD.EventOps().CheckMeasurements) ctrl.fieldName = 'description' ctrl.tableName = 'item_detailed' level1Sizer.Add(ctrl, 0, wx.ALL, 3) # for label, name, sized, table, field in lvl1_list: # box = wx.StaticBox(self, label=label) # boxSizer = wx.StaticBoxSizer(box, 
wx.HORIZONTAL) # if 'itemNumber' in name: # ctrl = HUD.RH_TextCtrl(self, -1, # size=(sized, 21), # name=name, # style=wx.TE_PROCESS_ENTER|wx.TE_PROCESS_TAB) # ctrl.SetFocus() # ctrl.SelectAll() # ctrl.Bind(wx.EVT_KEY_DOWN, self.onCatchKey) # if 'itemDescription' in name: # ctrl = HUD.RH_TextCtrl(self, -1, # size=(sized, 21), # name=name, # style=wx.TE_PROCESS_TAB) # ctrl.Bind(wx.EVT_KILL_FOCUS, HUD.EventOps().CheckMeasurements) # ctrl.fieldName = field # ctrl.tableName = table # boxSizer.Add(ctrl, 0, wx.EXPAND|wx.ALL,3) # level1Sizer.Add(boxSizer, 0, wx.ALL,3) countreturn = HUD.QueryOps().QueryCheck('item_detailed') pout.v(countreturn) if countreturn > 0: returnd = LookupDB('item_detailed').General('upc',limit=1) pout.v(str(type(returnd)),returnd) itemNumber = wx.FindWindowByName('inventory_itemNumber_txtctrl') itemNumber.SetCtrl(returnd) wx.CallAfter(self.OnItemNumber, event=None, upc=itemNumber.GetValue().strip()) lookupSizer.Add(level1Sizer, 0) level2Sizer = wx.BoxSizer(wx.HORIZONTAL) notebook = wx.Notebook(self, -1, name='inventory_main_notebook') tabOne = MainOptionsTab(notebook) notebook.AddPage(tabOne, "General Options") tabTwo = DetailsTab(notebook) notebook.AddPage(tabTwo, "Details") notebook.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGED, self.OnPageChanged) level2Sizer.Add(notebook, 1, wx.ALL|wx.EXPAND, 5) lookupSizer.Add(level2Sizer, 0) self.SetSizer(lookupSizer) self.Layout() def OnPageChanged(self, event): debug = False obj = event.GetEventObject() named = obj.GetName() notebook = wx.FindWindowByName(named) old = notebook.GetPageText(event.GetOldSelection()) new = notebook.GetPageText(event.GetSelection()) itemNumber = wx.FindWindowByName('inventory_itemNumber_txtctrl') if 'General Options' in new : itemNumber.SetEditable(True) else: itemNumber.SetEditable(False) event.Skip() def OnUndo(self, event): debug = False wx.CallAfter(self.OnItemNumber, event='') def OnExitButton(self, event): debug = False item = wx.FindWindowByName('Inventory_Frame') item.Close() def 
OnFind(self, event): debug = False itemNumber = wx.FindWindowByName('inventory_itemNumber_txtctrl').GetCtrl() style = wx.DEFAULT_FRAME_STYLE & (wx.CLOSE_BOX) & (~wx.MAXIMIZE_BOX) ItemLookupD = HUD.ItemLookupDialog(self, title="Item Lookup", style=style, itemNumber=itemNumber) ItemLookupD.ShowModal() try: self.itempick = ItemLookupD.itemPicked ItemLookupD.Destroy() except: ItemLookupD.Destroy() else: wx.CallAfter(self.OnItemNumber, event='') itemNum = wx.FindWindowByName('inventory_itemNumber_txtctrl').SetCtrl(self.itempick.upper().strip()) wx.CallAfter(self.OnItemNumber,event=None, upc=self.itempick) def OnReceive(self, event): debug = False style = wx.DEFAULT_FRAME_STYLE & (wx.CLOSE_BOX) & (~wx.MAXIMIZE_BOX) recdlg = HUD.ReceivingDialog(self, title='Receiving Utility', style=style) recdlg.ShowModal() def OnMinus(self, event): """ Delete Record """ debug = False itemNumberd = wx.FindWindowByName('inventory_itemNumber_txtctrl').GetCtrl().strip() itemDesc = wx.FindWindowByName('inventory_itemDescription_txtctrl').GetCtrl() yesno = wx.MessageBox('Delete Item #{0} ?'.format(itemNumberd),'Delete Item',wx.YES_NO) if yesno == wx.YES: table_list = ['item_options','item_detailed','item_detailed2', 'item_vendor_data','item_notes','item_sales_links', 'item_cust_instructions','item_options','item_history'] for table in table_list: query = "DELETE FROM {0} WHERE upc=(?)".format(table) data = (itemNumberd,) countreturn = HUD.SQConnect(query, data).ONE() query = 'select upc from item_detailed limit 1' data = '' getNew = HUD.SQConnect(query, data).ONE()[0] wx.FindWindowByName('inventory_itemNumber_txtctrl').SetCtrl(getNew) wx.CallAfter(self.OnItemNumber, event='') def OnAdd(self, event): debug = False item_list = ['inventory_itemDescription_txtctrl', 'inventory_itemNumber_txtctrl'] for name in item_list: item = wx.FindWindowByName(name) item.SetEditable(True) item.Clear() tab_list = 
['MainOptionsTab','PageItemDetailTab','inv_details_cost_grid','PageDetailPage2Tab','NotesTab','posSalesLinks_Tab','PageConsignmentTab'] for name in tab_list: tab = wx.FindWindowByName(name) print(f'Tab : {name}') tab.Clear() for i in range(1,7): tab = wx.FindWindowByName('Vendor{}DataTab'.format(i)) tab.Clear() custInst_list = ['Info','ReturnPolicy','Warranty'] for item in custInst_list: tab = wx.FindWindowByName('CustInstruct_{}_Tab'.format(item)) if VarOps().CheckNone(tab) is not None: tab.Clear() #---- Flash Add Dialog style = wx.DEFAULT_FRAME_STYLE & (wx.CLOSE_BOX) & (~wx.MAXIMIZE_BOX) FlashAdd_D = HUD.FlashAddInventory(self, title="Add Item", size=(1000,800), style=style) FlashAdd_D.ShowModal() # try: self.itemPicked = FlashAdd_D.itemPicked item = wx.FindWindowByName('inventory_itemNumber_txtctrl').SetValue(str(self.itemPicked)) FlashAdd_D.Destroy() except: FlashAdd_D.Destroy() return #EndOnAdd def OnSave(self, event): debug = True ItemNumberd = wx.FindWindowByName('inventory_itemNumber_txtctrl').GetCtrl().upper().strip() table_list = ['item_options','item_detailed','item_detailed2','item_vendor_data','item_notes', 'item_sales_links','item_cust_instructions','item_options','item_history', 'item_retails'] HUD.QueryOps().CheckEntryExist('upc',ItemNumberd,table_list) #retail_grid = HUD.Retail_Grid(self, name='inv_details_cost_grid') #retail_grid.OnSave(upc=ItemNumberd) #Retails Grid grid = wx.FindWindowByName('inv_details_cost_grid') grid.OnSave(upc=ItemNumberd) a = '-'*60 #General Options TabPage save_list1 = [('genOptions_department_combobox'), ('genOptions_category_combobox'), ('genOptions_subcategory_combobox'), ('genOptions_location_combobox'), ('genOptions_glpost_txtctrl'), ('genOptions_unittype_combobox'), ('genOptions_itemType_radiobox'), ('genOptions_agepopup_numctrl'), ('genOptions_POSoptions_radiobox'), ('genOptions_units_in_package_numctrl'), ('genOptions_foodStampExempt_checkbox'), ('genOptions_loyaltyExempt_checkbox'), 
('genOptions_consignment_checkbox'), ('genOptions_aisleNums_combobox'), ('genOptions_extraPlaces_combobox'), ('genOptions_sectionNums_combobox'), ('genOptions_deactivated_checkbox'), ('genOptions_closeout_checkbox'), ('genOptions_partNumber_txtctrl'), ('genOptions_oemNumber_txtctrl'), ('inventory_itemDescription_txtctrl'), ('genOptions_kitNumber_txtctrl'), ('genOptions_kitPieces_numctrl')] for name in save_list1: item = wx.Window.FindWindowByName(name) item.OnSave('upc', ItemNumberd) # save_lists = [save_list1, save_list2] # for listd in save_lists: # for name,table,column_header in listd: # if 'listbox' in name: # LCO(name).ListBoxClearSpaces() # fieldSet, dataSet, table = HUD.QueryOps().Commaize(listd) # if len(dataSet) > 0: # query = '''UPDATE {} # SET {} # WHERE upc=(?)'''.format(table,fieldSet) # data = dataSet + [ItemNumberd,] # call_db = HUD.SQConnect(query, data).ONE() fieldSet='' dataSet = [] numv = 0 for num in range(1,7): tab = wx.FindWindowByName('Vendor{}DataTab'.format(num)) tab.OnSave() retail_list = [('inv_details_cost_grid','item_detailed','retails')] save_list0 = [('details_altlookup_listbox','item_detailed', 'altlookup'), ('details_avgcost_numctrl','item_detailed','avg_cost'), ('details_lastcost_numctrl','item_detailed','last_cost')] save_list1 = [('details_donotdiscount_checkbox','item_detailed2','do_not_discount'), ('details_taxlvl_1_checkbox','item_detailed2','tax1'), ('details_taxlvl_2_checkbox','item_detailed2','tax2'), ('details_taxlvl_3_checkbox','item_detailed2','tax3'), ('details_taxlvl_4_checkbox','item_detailed2','tax4'), ('details_taxlvl_never_checkbox','item_detailed2','tax_never'), ('details_taxoverride_numctrl','item_detailed2','override_tax_rate'), ('details_buy_numctrl','item_detailed2','buyx'), ('details_get_numctrl','item_detailed2','gety'), ('details_saleDateBegin_datectrl','item_detailed2','sale_begin'), ('details_saleDateEnd_datectrl','item_detailed2','sale_end'), 
('details_saleTimeBegin_timectrl','item_detailed2','sale_begin_time'), ('details_saleTimeEnd_timectrl','item_detailed2','sale_end_time'), ('inv_details_orderctrl_grid','item_detailed2','orderctrl')] save_list = [save_list0, save_list1] for listd in save_list: for name,table,column_header in listd: pout.v(f'Name : {name}') if 'listbox' in name: HUD.ListBox_Ops(name).ListBoxClearSpaces() item = wx.FindWindowByName(name) item.OnSave(table, column_header, 'upc', ItemNumberd) # fieldSet, dataSet, table = HUD.QueryOps().Commaize(listd) # if len(dataSet) > 0: # query = 'UPDATE {0} SET {1} WHERE upc=(?)'.format(table,fieldSet) # data = dataSet + [ItemNumberd,] # call_db = HUD.SQConnect(query, data).ONE() save_list2 = [('details2_creditbookExempt_checkbox', 'item_detailed2','credit_book_exempt'), ('details2_deleteWhenOut_checkbox', 'item_detailed2','delete_when_out'), ('details2_lastSale_datectrl','item_detailed2','last_saledate'), ('details2_lastReturn_datectrl','item_detailed2','last_returndate'), ('details2_lastMaint_datectrl','item_detailed2','maintdate'), ('details2_added_datectrl','item_detailed2','added_date'), ('details2_commissionOverride_checkbox','item_detailed2','override_commission'), ('details2_commissionOverride_numctrl','item_detailed2','over_commission'), ('details2_commissionFixedAmt_numctrl','item_detailed2','over_fixd_comm'), ('details2_casebreaknum_txtctrl','item_detailed2','case_break_num'), ('details2_subitem_checkbox','item_detailed2','substituteyn'), ('details2_subitem_txtctrl','item_detailed2','substitute_num'), ('details2_storeloc_txtctrl','item_detailed2','location')] # ('details2_weight_numctrl','item_detailed2','weight'), # ('details2_tareweight_numctrl','item_detailed2','tare_weight')] save_list3 = [('details2_onHand_numctrl','item_detailed','quantity_on_hand'), ('details2_committed_numctrl','item_detailed','quantity_committed'), ('details2_onLayaway_numctrl','item_detailed','quantity_on_layaway')] save_list4 = 
[('inv_posSalesLinks_poslinks_grid','item_sales_links','sales_links')] save_list5 = [('notes_notes_txtctrl','item_notes','notes')] save_list = [save_list2, save_list3, save_list4, save_list5] for listd in save_list: for name, table, field in listd: item = wx.FindWindowByName(name) pout.v(f"name : {name}") item.OnSave(table, field, 'upc', ItemNumberd) HUD.RecordOps('item_detailed2').UpdateRecordDate('maintdate','upc',ItemNumberd, 'details2_lastMaint_datectrl') for item in ['Info','ReturnPolicy','Warranty']: tabname = f'CustInstruct_{item}_tab' tab = wx.FindWindowByName(tabname) pout.v(f"item : {item} ; tabName : {tabname}") tab.OnSave(ItemNumberd) # a = wx.FindWindowByName('PageAltLookups') # a.OnSave() def onCatchKey(self, event): debug = False keycode = event.GetKeyCode() if keycode in [wx.WXK_RETURN, wx.WXK_NUMPAD_ENTER, wx.WXK_TAB]: self.OnItemNumber(event=None) event.EventObject.Navigate() event.Skip() def ItemLookup(self, upc, itemNumber_name=None): debug = False upc if upc == None: return query = 'SELECT upc FROM item_detailed WHERE upc=(?)' data = (upc,) returnd = HUD.SQConnect(query, data).ONE() if not returnd: whereFrom = '''upc LIKE (?) OR altlookup LIKE (?) OR description LIKE (?) OR oempart_num LIKE (?) 
OR part_num LIKE (?)''' query = "SELECT upc FROM item_detailed WHERE {0}".format(whereFrom) data = (upc, upc, upc, upc, upc,) returnd = HUD.SQConnect(query, data).ALL() numresult = len(returnd) if numresult == 0: wx.MessageBox('Item Not Found','Info',wx.OK) query = 'SELECT upc FROM item_detailed LIMIT 1' data = '' returnd = HUD.SQConnect(query, data).ONE() if itemNumber_name is not None: wx.FindWindowByName('inventory_itemNumber_txtctrl').SetCtrl(returnd[0]) #wx.CallAfter(self.OnItemNumber, event='') found = False return returnd[0], found if numresult > 1: itemreturnd = list(sum(returnd, ())) #Debugger("{0} Create Modal Window for Individual Choosing...".format('*'*15)) style = wx.DEFAULT_FRAME_STYLE & (wx.CLOSE_BOX) & (~wx.MAXIMIZE_BOX) ItemLookupD = HUD.ItemLookupDialog(self, title="Item Lookup", style=style, itemNumber=upc) ItemLookupD.ShowModal() itempick = ItemLookupD.itemPicked try: itempick except: if itemNumber_name is not None: wx.FindWindowByName('inventory_itemNumber_txtctrl').SetCtrl((str(self.itempick))) ItemLookupD.Destroy() else: itempick = VarOps().DeTupler(returnd) typed = type(itempick) found = True return itempick, found def OnItemNumber(self, event, upc=None): msg = "*** Check Table for Item Number & fill Variables with data ***" itemNumber_name = 'inventory_itemNumber_txtctrl' focus = wx.FindWindowByName(itemNumber_name).SetFocus() ItemNumberd = wx.FindWindowByName(itemNumber_name).GetCtrl().upper().strip() wx.FindWindowByName(itemNumber_name).SetCtrl(ItemNumberd) self.itempick, found = self.ItemLookup(ItemNumberd, itemNumber_name) # if found is False: wx.CallAfter(self.OnItemNumber, event='') if self.itempick: wx.FindWindowByName('inventory_itemNumber_txtctrl').SetCtrl(str(self.itempick).strip()) genOptions_load_list = [('genOptions_department_combobox', 'item_options', 'department'), ('genOptions_category_combobox', 'item_options', 'category'), ('genOptions_subcategory_combobox', 'item_options', 'subcategory'), 
('genOptions_location_combobox', 'item_options', 'location'), ('genOptions_glpost_txtctrl', 'item_options', 'postacct'), ('genOptions_unittype_combobox', 'item_options', 'unit_type'), ('genOptions_itemType_radiobox', 'item_options', 'item_type'), ('genOptions_agepopup_numctrl', 'item_options', 'agepopup'), ('genOptions_POSoptions_radiobox', 'item_options', 'posoptions'), ('genOptions_units_in_package_numctrl', 'item_options', 'unitsinpackage'), ('genOptions_foodStampExempt_checkbox', 'item_options', 'foodstampexempt'), ('genOptions_loyaltyExempt_checkbox', 'item_options', 'loyaltyexempt'), ('genOptions_consignment_checkbox', 'item_options', 'consignment'), ('genOptions_partNumber_txtctrl', 'item_detailed', 'part_num'), ('genOptions_oemNumber_txtctrl', 'item_detailed', 'oempart_num'), ('genOptions_deactivated_checkbox', 'item_options', 'deactivated'), ('genOptions_aisleNums_combobox', 'item_options', 'aisle_num'), ('genOptions_extraPlaces_combobox', 'item_options', 'extra_places'), ('genOptions_sectionNums_combobox', 'item_options', 'section_num'), ('inventory_itemDescription_txtctrl', 'item_detailed', 'description'), ('genOptions_kitNumber_txtctrl', 'item_detailed', 'kit_num'), ('genOptions_kitPieces_numctrl', 'item_detailed', 'kit_pieces')] for name, table, column_header in genOptions_load_list: query = '''SELECT {0} FROM {1} WHERE upc=(?)'''.format(column_header,table) data = (self.itempick,) returnd = HUD.SQConnect(query, data).ONE() pout.v(f'Name :{name} ; returnd : {returnd}') a = wx.FindWindowByName(name).ReturndSet(returnd,0) details_load_list = ['details_altlookup_listbox', 'details_donotdiscount_checkbox', 'details_taxlvl_1_checkbox', 'details_taxlvl_2_checkbox', 'details_taxlvl_3_checkbox', 'details_taxlvl_4_checkbox', 'details_taxlvl_never_checkbox', 'details_taxoverride_numctrl', 'details_avgcost_numctrl', 'details_lastcost_numctrl', 'details_buy_numctrl', 'details_get_numctrl', 'details_saleDateBegin_datectrl', 'details_saleDateEnd_datectrl', 
'details_saleTimeBegin_timectrl', 'details_saleTimeEnd_timectrl', 'inv_details_orderctrl_grid'] for name in details_load_list: item = wx.FindWindowByName(name) pout.b(name) item.OnLoad(whereField='upc', whereValue=self.itempick) details2_load_list = [('details2_creditbookExempt_checkbox', 'item_detailed2','credit_book_exempt'), ('details2_deleteWhenOut_checkbox', 'item_detailed2','delete_when_out'), ('details2_onHand_numctrl','item_detailed', 'quantity_on_hand'), ('details2_committed_numctrl','item_detailed', 'quantity_committed'), ('details2_onLayaway_numctrl','item_detailed', 'quantity_on_layaway'), ('details2_lastSale_datectrl','item_detailed2', 'last_saledate'), ('details2_lastReturn_datectrl','item_detailed2', 'last_returndate'), ('details2_lastMaint_datectrl','item_detailed2', 'maintdate'), ('details2_added_datectrl','item_detailed2', 'added_date'), ('details2_commissionOverride_checkbox', 'item_detailed2','override_commission'), ('details2_commissionOverride_numctrl', 'item_detailed2','over_commission'), ('details2_commissionFixedAmt_numctrl', 'item_detailed2','over_fixd_comm'), ('details2_casebreaknum_txtctrl', 'item_detailed2','case_break_num'), ('details2_subitem_checkbox','item_detailed2', 'substituteyn'), ('details2_subitem_txtctrl','item_detailed2', 'substitute_num'), ('details2_storeloc_txtctrl','item_detailed2', 'location'), ('inv_posSalesLinks_poslinks_grid','item_sales_links', 'sales_links'), ('notes_notes_txtctrl','item_notes','notes')] # ('details2_weight_numctrl','item_detailed2', 'weight'), # ('details2_tareweight_numctrl','item_detailed2', 'tare_weight'), for name,table,column_header in details2_load_list: query = 'SELECT {0} FROM {1} WHERE upc=(?)'.format(column_header,table) data = (self.itempick,) returnd = HUD.SQConnect(query, data).ONE() a = wx.FindWindowByName(name).ReturndSet(returnd,0) #--- Alt Lookups ----# # a = wx.FindWindowByName('PageAltLookups') # a.OnLoad() #---- Vendor ----# for numvend in range(1,7): tab = 
wx.FindWindowByName('Vendor{}DataTab'.format(numvend)) tab.OnLoad() #---- Customer Info ----# item_box_list = [('custinst_Info_custinstruct_txtctrl', 'item_cust_instructions', 'info_box'), ('custinst_ReturnPolicy_custinstruct_txtctrl', 'item_cust_instructions', 'return_box'), ('custinst_Warranty_custinstruct_txtctrl', 'item_cust_instructions','warranty_box'), ('custinst_Info_printOptions_radiobox', 'item_cust_instructions','print_info_options'), ('custinst_ReturnPolicy_printOptions_radiobox','item_cust_instructions','print_return_options'), ('custinst_Warranty_printOptions_radiobox','item_cust_instructions','print_warranty_options'), ('custinst_Info_stdDialog_combobox', 'item_cust_instructions','info_dialog'), ('custinst_Return_stdDialog_combobox','item_cust_instructions','return_dialog'), ('custinst_Warranty_stdDialog_combobox','item_cust_instructions','warranty_dialog')] for name, table, field in item_box_list: query = '''SELECT {} FROM {} WHERE upc=(?)'''.format(field, table) data = (self.itempick,) returnd = HUD.SQConnect(query, data).ONE() ret = VarOps().CheckNone(returnd) wx.FindWindowByName(name).SetCtrl(ret) #----------- Cost Grid Load HUD.QueryOps().CheckEntryExist('upc', self.itempick.upper().strip(), ['item_retails']) grid = wx.FindWindowByName('inv_details_cost_grid') grid.Load(upc=self.itempick) #-------- # def MarginUpdate(self, avg_cost, retail, unit, debug=False): # ''' Readjust Margin in margin Column according to avg_cost ''' # actual_retail = Decimal(retail)/Decimal(unit) # gross_profit = Decimal(actual_retail) - Decimal(avg_cost) # deci_margin = Decimal(gross_profit) / Decimal(actual_retail) # perc_margin = Decimal(deci_margin) * Decimal(100) # percentage_margin = RO.DoRound(perc_margin, '1.000') # return percentage_margin #EndSetVariables class InventoryScreen(wx.Frame): def __init__(self, *args, **kwargs): kwargs['style'] = wx.DEFAULT_FRAME_STYLE & (~wx.CLOSE_BOX | wx.MAXIMIZE_BOX | wx.MINIMIZE_BOX) kwargs['title'] = "RHP Inventory Management" 
        kwargs['size'] = (1200, 800)
        wx.Frame.__init__(self, None, *args, **kwargs)

        # The frame hosts a single StartPanel that fills the client area.
        self.panel_one = StartPanel(self)
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.sizer.Add(self.panel_one, 1, wx.EXPAND)
        self.SetSizer(self.sizer)
        self.Layout()


# Run the program: build the top-level frame and enter the wx main loop.
# The frame is named 'Inventory_Frame' because child widgets locate it by name
# (StartPanel.OnExitButton closes it via wx.FindWindowByName('Inventory_Frame')).
if __name__ == "__main__":
    app = wx.App(False)
    frame = InventoryScreen()
    frame.Centre()
    frame.SetName('Inventory_Frame')
    frame.Show()
    app.MainLoop()
codeparrot/github-code-clean
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- import datetime from typing import Dict, List, Optional, Union from azure.core.exceptions import HttpResponseError import msrest.serialization from ._network_management_client_enums import * class AddressSpace(msrest.serialization.Model): """AddressSpace contains an array of IP address ranges that can be used by subnets of the virtual network. :param address_prefixes: A list of address blocks reserved for this virtual network in CIDR notation. :type address_prefixes: list[str] """ _attribute_map = { 'address_prefixes': {'key': 'addressPrefixes', 'type': '[str]'}, } def __init__( self, *, address_prefixes: Optional[List[str]] = None, **kwargs ): super(AddressSpace, self).__init__(**kwargs) self.address_prefixes = address_prefixes class Resource(msrest.serialization.Model): """Common resource representation. Variables are only populated by the server, and will be ignored when sending a request. :param id: Resource ID. :type id: str :ivar name: Resource name. :vartype name: str :ivar type: Resource type. :vartype type: str :param location: Resource location. :type location: str :param tags: A set of tags. Resource tags. 
:type tags: dict[str, str] """ _validation = { 'name': {'readonly': True}, 'type': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'location': {'key': 'location', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, } def __init__( self, *, id: Optional[str] = None, location: Optional[str] = None, tags: Optional[Dict[str, str]] = None, **kwargs ): super(Resource, self).__init__(**kwargs) self.id = id self.name = None self.type = None self.location = location self.tags = tags class ApplicationGateway(Resource): """Application gateway resource. Variables are only populated by the server, and will be ignored when sending a request. :param id: Resource ID. :type id: str :ivar name: Resource name. :vartype name: str :ivar type: Resource type. :vartype type: str :param location: Resource location. :type location: str :param tags: A set of tags. Resource tags. :type tags: dict[str, str] :param etag: A unique read-only string that changes whenever the resource is updated. :type etag: str :param zones: A list of availability zones denoting where the resource needs to come from. :type zones: list[str] :param identity: The identity of the application gateway, if configured. :type identity: ~azure.mgmt.network.v2019_07_01.models.ManagedServiceIdentity :param sku: SKU of the application gateway resource. :type sku: ~azure.mgmt.network.v2019_07_01.models.ApplicationGatewaySku :param ssl_policy: SSL policy of the application gateway resource. :type ssl_policy: ~azure.mgmt.network.v2019_07_01.models.ApplicationGatewaySslPolicy :ivar operational_state: Operational state of the application gateway resource. Possible values include: "Stopped", "Starting", "Running", "Stopping". :vartype operational_state: str or ~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayOperationalState :param gateway_ip_configurations: Subnets of the application gateway resource. 
For default limits, see `Application Gateway limits <https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits>`_. :type gateway_ip_configurations: list[~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayIPConfiguration] :param authentication_certificates: Authentication certificates of the application gateway resource. For default limits, see `Application Gateway limits <https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits>`_. :type authentication_certificates: list[~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayAuthenticationCertificate] :param trusted_root_certificates: Trusted Root certificates of the application gateway resource. For default limits, see `Application Gateway limits <https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits>`_. :type trusted_root_certificates: list[~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayTrustedRootCertificate] :param ssl_certificates: SSL certificates of the application gateway resource. For default limits, see `Application Gateway limits <https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits>`_. :type ssl_certificates: list[~azure.mgmt.network.v2019_07_01.models.ApplicationGatewaySslCertificate] :param frontend_ip_configurations: Frontend IP addresses of the application gateway resource. For default limits, see `Application Gateway limits <https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits>`_. :type frontend_ip_configurations: list[~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayFrontendIPConfiguration] :param frontend_ports: Frontend ports of the application gateway resource. For default limits, see `Application Gateway limits <https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits>`_. 
:type frontend_ports: list[~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayFrontendPort] :param probes: Probes of the application gateway resource. :type probes: list[~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayProbe] :param backend_address_pools: Backend address pool of the application gateway resource. For default limits, see `Application Gateway limits <https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits>`_. :type backend_address_pools: list[~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayBackendAddressPool] :param backend_http_settings_collection: Backend http settings of the application gateway resource. For default limits, see `Application Gateway limits <https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits>`_. :type backend_http_settings_collection: list[~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayBackendHttpSettings] :param http_listeners: Http listeners of the application gateway resource. For default limits, see `Application Gateway limits <https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits>`_. :type http_listeners: list[~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayHttpListener] :param url_path_maps: URL path map of the application gateway resource. For default limits, see `Application Gateway limits <https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits>`_. :type url_path_maps: list[~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayUrlPathMap] :param request_routing_rules: Request routing rules of the application gateway resource. :type request_routing_rules: list[~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayRequestRoutingRule] :param rewrite_rule_sets: Rewrite rules for the application gateway resource. 
:type rewrite_rule_sets: list[~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayRewriteRuleSet] :param redirect_configurations: Redirect configurations of the application gateway resource. For default limits, see `Application Gateway limits <https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits>`_. :type redirect_configurations: list[~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayRedirectConfiguration] :param web_application_firewall_configuration: Web application firewall configuration. :type web_application_firewall_configuration: ~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayWebApplicationFirewallConfiguration :param firewall_policy: Reference of the FirewallPolicy resource. :type firewall_policy: ~azure.mgmt.network.v2019_07_01.models.SubResource :param enable_http2: Whether HTTP2 is enabled on the application gateway resource. :type enable_http2: bool :param enable_fips: Whether FIPS is enabled on the application gateway resource. :type enable_fips: bool :param autoscale_configuration: Autoscale Configuration. :type autoscale_configuration: ~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayAutoscaleConfiguration :param resource_guid: The resource GUID property of the application gateway resource. :type resource_guid: str :ivar provisioning_state: The provisioning state of the application gateway resource. Possible values include: "Succeeded", "Updating", "Deleting", "Failed". :vartype provisioning_state: str or ~azure.mgmt.network.v2019_07_01.models.ProvisioningState :param custom_error_configurations: Custom error configurations of the application gateway resource. 
:type custom_error_configurations: list[~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayCustomError] """ _validation = { 'name': {'readonly': True}, 'type': {'readonly': True}, 'operational_state': {'readonly': True}, 'provisioning_state': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'location': {'key': 'location', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'etag': {'key': 'etag', 'type': 'str'}, 'zones': {'key': 'zones', 'type': '[str]'}, 'identity': {'key': 'identity', 'type': 'ManagedServiceIdentity'}, 'sku': {'key': 'properties.sku', 'type': 'ApplicationGatewaySku'}, 'ssl_policy': {'key': 'properties.sslPolicy', 'type': 'ApplicationGatewaySslPolicy'}, 'operational_state': {'key': 'properties.operationalState', 'type': 'str'}, 'gateway_ip_configurations': {'key': 'properties.gatewayIPConfigurations', 'type': '[ApplicationGatewayIPConfiguration]'}, 'authentication_certificates': {'key': 'properties.authenticationCertificates', 'type': '[ApplicationGatewayAuthenticationCertificate]'}, 'trusted_root_certificates': {'key': 'properties.trustedRootCertificates', 'type': '[ApplicationGatewayTrustedRootCertificate]'}, 'ssl_certificates': {'key': 'properties.sslCertificates', 'type': '[ApplicationGatewaySslCertificate]'}, 'frontend_ip_configurations': {'key': 'properties.frontendIPConfigurations', 'type': '[ApplicationGatewayFrontendIPConfiguration]'}, 'frontend_ports': {'key': 'properties.frontendPorts', 'type': '[ApplicationGatewayFrontendPort]'}, 'probes': {'key': 'properties.probes', 'type': '[ApplicationGatewayProbe]'}, 'backend_address_pools': {'key': 'properties.backendAddressPools', 'type': '[ApplicationGatewayBackendAddressPool]'}, 'backend_http_settings_collection': {'key': 'properties.backendHttpSettingsCollection', 'type': '[ApplicationGatewayBackendHttpSettings]'}, 'http_listeners': {'key': 'properties.httpListeners', 
'type': '[ApplicationGatewayHttpListener]'}, 'url_path_maps': {'key': 'properties.urlPathMaps', 'type': '[ApplicationGatewayUrlPathMap]'}, 'request_routing_rules': {'key': 'properties.requestRoutingRules', 'type': '[ApplicationGatewayRequestRoutingRule]'}, 'rewrite_rule_sets': {'key': 'properties.rewriteRuleSets', 'type': '[ApplicationGatewayRewriteRuleSet]'}, 'redirect_configurations': {'key': 'properties.redirectConfigurations', 'type': '[ApplicationGatewayRedirectConfiguration]'}, 'web_application_firewall_configuration': {'key': 'properties.webApplicationFirewallConfiguration', 'type': 'ApplicationGatewayWebApplicationFirewallConfiguration'}, 'firewall_policy': {'key': 'properties.firewallPolicy', 'type': 'SubResource'}, 'enable_http2': {'key': 'properties.enableHttp2', 'type': 'bool'}, 'enable_fips': {'key': 'properties.enableFips', 'type': 'bool'}, 'autoscale_configuration': {'key': 'properties.autoscaleConfiguration', 'type': 'ApplicationGatewayAutoscaleConfiguration'}, 'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'custom_error_configurations': {'key': 'properties.customErrorConfigurations', 'type': '[ApplicationGatewayCustomError]'}, } def __init__( self, *, id: Optional[str] = None, location: Optional[str] = None, tags: Optional[Dict[str, str]] = None, etag: Optional[str] = None, zones: Optional[List[str]] = None, identity: Optional["ManagedServiceIdentity"] = None, sku: Optional["ApplicationGatewaySku"] = None, ssl_policy: Optional["ApplicationGatewaySslPolicy"] = None, gateway_ip_configurations: Optional[List["ApplicationGatewayIPConfiguration"]] = None, authentication_certificates: Optional[List["ApplicationGatewayAuthenticationCertificate"]] = None, trusted_root_certificates: Optional[List["ApplicationGatewayTrustedRootCertificate"]] = None, ssl_certificates: Optional[List["ApplicationGatewaySslCertificate"]] = None, frontend_ip_configurations: 
Optional[List["ApplicationGatewayFrontendIPConfiguration"]] = None, frontend_ports: Optional[List["ApplicationGatewayFrontendPort"]] = None, probes: Optional[List["ApplicationGatewayProbe"]] = None, backend_address_pools: Optional[List["ApplicationGatewayBackendAddressPool"]] = None, backend_http_settings_collection: Optional[List["ApplicationGatewayBackendHttpSettings"]] = None, http_listeners: Optional[List["ApplicationGatewayHttpListener"]] = None, url_path_maps: Optional[List["ApplicationGatewayUrlPathMap"]] = None, request_routing_rules: Optional[List["ApplicationGatewayRequestRoutingRule"]] = None, rewrite_rule_sets: Optional[List["ApplicationGatewayRewriteRuleSet"]] = None, redirect_configurations: Optional[List["ApplicationGatewayRedirectConfiguration"]] = None, web_application_firewall_configuration: Optional["ApplicationGatewayWebApplicationFirewallConfiguration"] = None, firewall_policy: Optional["SubResource"] = None, enable_http2: Optional[bool] = None, enable_fips: Optional[bool] = None, autoscale_configuration: Optional["ApplicationGatewayAutoscaleConfiguration"] = None, resource_guid: Optional[str] = None, custom_error_configurations: Optional[List["ApplicationGatewayCustomError"]] = None, **kwargs ): super(ApplicationGateway, self).__init__(id=id, location=location, tags=tags, **kwargs) self.etag = etag self.zones = zones self.identity = identity self.sku = sku self.ssl_policy = ssl_policy self.operational_state = None self.gateway_ip_configurations = gateway_ip_configurations self.authentication_certificates = authentication_certificates self.trusted_root_certificates = trusted_root_certificates self.ssl_certificates = ssl_certificates self.frontend_ip_configurations = frontend_ip_configurations self.frontend_ports = frontend_ports self.probes = probes self.backend_address_pools = backend_address_pools self.backend_http_settings_collection = backend_http_settings_collection self.http_listeners = http_listeners self.url_path_maps = url_path_maps 
        # Tail of ApplicationGateway.__init__: straight attribute assignments.
        # provisioning_state is server-populated (readonly in _validation), so it
        # is initialised to None rather than taken from a parameter.
        self.request_routing_rules = request_routing_rules
        self.rewrite_rule_sets = rewrite_rule_sets
        self.redirect_configurations = redirect_configurations
        self.web_application_firewall_configuration = web_application_firewall_configuration
        self.firewall_policy = firewall_policy
        self.enable_http2 = enable_http2
        self.enable_fips = enable_fips
        self.autoscale_configuration = autoscale_configuration
        self.resource_guid = resource_guid
        self.provisioning_state = None  # read-only; populated by the service
        self.custom_error_configurations = custom_error_configurations


class SubResource(msrest.serialization.Model):
    """Reference to another subresource.

    Base class for most Application Gateway child models below; only carries
    the ARM resource ID used to reference the subresource.

    :param id: Resource ID.
    :type id: str
    """

    # Maps Python attribute names to wire-format JSON keys and msrest type strings.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        **kwargs
    ):
        super(SubResource, self).__init__(**kwargs)
        self.id = id


class ApplicationGatewayAuthenticationCertificate(SubResource):
    """Authentication certificates of an application gateway.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the authentication certificate that is unique within an Application
     Gateway.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :param type: Type of the resource.
    :type type: str
    :param data: Certificate public data.
    :type data: str
    :ivar provisioning_state: The provisioning state of the authentication certificate resource.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2019_07_01.models.ProvisioningState
    """

    _validation = {
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'data': {'key': 'properties.data', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        name: Optional[str] = None,
        etag: Optional[str] = None,
        type: Optional[str] = None,
        data: Optional[str] = None,
        **kwargs
    ):
        super(ApplicationGatewayAuthenticationCertificate, self).__init__(id=id, **kwargs)
        self.name = name
        self.etag = etag
        self.type = type
        self.data = data
        self.provisioning_state = None  # read-only; populated by the service


class ApplicationGatewayAutoscaleConfiguration(msrest.serialization.Model):
    """Application Gateway autoscale configuration.

    All required parameters must be populated in order to send to Azure.

    :param min_capacity: Required. Lower bound on number of Application Gateway capacity.
    :type min_capacity: int
    :param max_capacity: Upper bound on number of Application Gateway capacity.
    :type max_capacity: int
    """

    # Service-side constraints enforced by msrest before the request is sent.
    _validation = {
        'min_capacity': {'required': True, 'minimum': 0},
        'max_capacity': {'minimum': 2},
    }

    _attribute_map = {
        'min_capacity': {'key': 'minCapacity', 'type': 'int'},
        'max_capacity': {'key': 'maxCapacity', 'type': 'int'},
    }

    def __init__(
        self,
        *,
        min_capacity: int,
        max_capacity: Optional[int] = None,
        **kwargs
    ):
        super(ApplicationGatewayAutoscaleConfiguration, self).__init__(**kwargs)
        self.min_capacity = min_capacity
        self.max_capacity = max_capacity


class ApplicationGatewayAvailableSslOptions(Resource):
    """Response for ApplicationGatewayAvailableSslOptions API service call.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param predefined_policies: List of available Ssl predefined policy.
    :type predefined_policies: list[~azure.mgmt.network.v2019_07_01.models.SubResource]
    :param default_policy: Name of the Ssl predefined policy applied by default to application
     gateway. Possible values include: "AppGwSslPolicy20150501", "AppGwSslPolicy20170401",
     "AppGwSslPolicy20170401S".
    :type default_policy: str or
     ~azure.mgmt.network.v2019_07_01.models.ApplicationGatewaySslPolicyName
    :param available_cipher_suites: List of available Ssl cipher suites.
    :type available_cipher_suites: list[str or
     ~azure.mgmt.network.v2019_07_01.models.ApplicationGatewaySslCipherSuite]
    :param available_protocols: List of available Ssl protocols.
    :type available_protocols: list[str or
     ~azure.mgmt.network.v2019_07_01.models.ApplicationGatewaySslProtocol]
    """

    # name/type are server-assigned and must not be sent on requests.
    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'predefined_policies': {'key': 'properties.predefinedPolicies', 'type': '[SubResource]'},
        'default_policy': {'key': 'properties.defaultPolicy', 'type': 'str'},
        'available_cipher_suites': {'key': 'properties.availableCipherSuites', 'type': '[str]'},
        'available_protocols': {'key': 'properties.availableProtocols', 'type': '[str]'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        location: Optional[str] = None,
        tags: Optional[Dict[str, str]] = None,
        predefined_policies: Optional[List["SubResource"]] = None,
        default_policy: Optional[Union[str, "ApplicationGatewaySslPolicyName"]] = None,
        available_cipher_suites: Optional[List[Union[str, "ApplicationGatewaySslCipherSuite"]]] = None,
        available_protocols: Optional[List[Union[str, "ApplicationGatewaySslProtocol"]]] = None,
        **kwargs
    ):
        super(ApplicationGatewayAvailableSslOptions, self).__init__(id=id, location=location, tags=tags, **kwargs)
        self.predefined_policies = predefined_policies
        self.default_policy = default_policy
        self.available_cipher_suites = available_cipher_suites
        self.available_protocols = available_protocols


class ApplicationGatewayAvailableSslPredefinedPolicies(msrest.serialization.Model):
    """Response for ApplicationGatewayAvailableSslOptions API service call.

    Paged collection: ``next_link`` is followed by the client to fetch further pages.

    :param value: List of available Ssl predefined policy.
    :type value:
     list[~azure.mgmt.network.v2019_07_01.models.ApplicationGatewaySslPredefinedPolicy]
    :param next_link: URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ApplicationGatewaySslPredefinedPolicy]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: Optional[List["ApplicationGatewaySslPredefinedPolicy"]] = None,
        next_link: Optional[str] = None,
        **kwargs
    ):
        super(ApplicationGatewayAvailableSslPredefinedPolicies, self).__init__(**kwargs)
        self.value = value
        self.next_link = next_link


class ApplicationGatewayAvailableWafRuleSetsResult(msrest.serialization.Model):
    """Response for ApplicationGatewayAvailableWafRuleSets API service call.

    :param value: The list of application gateway rule sets.
    :type value: list[~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayFirewallRuleSet]
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ApplicationGatewayFirewallRuleSet]'},
    }

    def __init__(
        self,
        *,
        value: Optional[List["ApplicationGatewayFirewallRuleSet"]] = None,
        **kwargs
    ):
        super(ApplicationGatewayAvailableWafRuleSetsResult, self).__init__(**kwargs)
        self.value = value


class ApplicationGatewayBackendAddress(msrest.serialization.Model):
    """Backend address of an application gateway.

    :param fqdn: Fully qualified domain name (FQDN).
    :type fqdn: str
    :param ip_address: IP address.
    :type ip_address: str
    """

    _attribute_map = {
        'fqdn': {'key': 'fqdn', 'type': 'str'},
        'ip_address': {'key': 'ipAddress', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        fqdn: Optional[str] = None,
        ip_address: Optional[str] = None,
        **kwargs
    ):
        super(ApplicationGatewayBackendAddress, self).__init__(**kwargs)
        self.fqdn = fqdn
        self.ip_address = ip_address


class ApplicationGatewayBackendAddressPool(SubResource):
    """Backend Address Pool of an application gateway.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the backend address pool that is unique within an Application Gateway.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :param type: Type of the resource.
    :type type: str
    :param backend_ip_configurations: Collection of references to IPs defined in network
     interfaces.
    :type backend_ip_configurations:
     list[~azure.mgmt.network.v2019_07_01.models.NetworkInterfaceIPConfiguration]
    :param backend_addresses: Backend addresses.
    :type backend_addresses:
     list[~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayBackendAddress]
    :ivar provisioning_state: The provisioning state of the backend address pool resource.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2019_07_01.models.ProvisioningState
    """

    _validation = {
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'backend_ip_configurations': {'key': 'properties.backendIPConfigurations', 'type': '[NetworkInterfaceIPConfiguration]'},
        'backend_addresses': {'key': 'properties.backendAddresses', 'type': '[ApplicationGatewayBackendAddress]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        name: Optional[str] = None,
        etag: Optional[str] = None,
        type: Optional[str] = None,
        backend_ip_configurations: Optional[List["NetworkInterfaceIPConfiguration"]] = None,
        backend_addresses: Optional[List["ApplicationGatewayBackendAddress"]] = None,
        **kwargs
    ):
        super(ApplicationGatewayBackendAddressPool, self).__init__(id=id, **kwargs)
        self.name = name
        self.etag = etag
        self.type = type
        self.backend_ip_configurations = backend_ip_configurations
        self.backend_addresses = backend_addresses
        self.provisioning_state = None  # read-only; populated by the service


class ApplicationGatewayBackendHealth(msrest.serialization.Model):
    """Response for ApplicationGatewayBackendHealth API service call.

    :param backend_address_pools: A list of ApplicationGatewayBackendHealthPool resources.
    :type backend_address_pools:
     list[~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayBackendHealthPool]
    """

    _attribute_map = {
        'backend_address_pools': {'key': 'backendAddressPools', 'type': '[ApplicationGatewayBackendHealthPool]'},
    }

    def __init__(
        self,
        *,
        backend_address_pools: Optional[List["ApplicationGatewayBackendHealthPool"]] = None,
        **kwargs
    ):
        super(ApplicationGatewayBackendHealth, self).__init__(**kwargs)
        self.backend_address_pools = backend_address_pools


class ApplicationGatewayBackendHealthHttpSettings(msrest.serialization.Model):
    """Application gateway BackendHealthHttp settings.

    Pairs one backend HTTP settings resource with the per-server health results
    observed through it.

    :param backend_http_settings: Reference of an ApplicationGatewayBackendHttpSettings resource.
    :type backend_http_settings:
     ~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayBackendHttpSettings
    :param servers: List of ApplicationGatewayBackendHealthServer resources.
    :type servers:
     list[~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayBackendHealthServer]
    """

    _attribute_map = {
        'backend_http_settings': {'key': 'backendHttpSettings', 'type': 'ApplicationGatewayBackendHttpSettings'},
        'servers': {'key': 'servers', 'type': '[ApplicationGatewayBackendHealthServer]'},
    }

    def __init__(
        self,
        *,
        backend_http_settings: Optional["ApplicationGatewayBackendHttpSettings"] = None,
        servers: Optional[List["ApplicationGatewayBackendHealthServer"]] = None,
        **kwargs
    ):
        super(ApplicationGatewayBackendHealthHttpSettings, self).__init__(**kwargs)
        self.backend_http_settings = backend_http_settings
        self.servers = servers


class ApplicationGatewayBackendHealthOnDemand(msrest.serialization.Model):
    """Result of on demand test probe.

    :param backend_address_pool: Reference of an ApplicationGatewayBackendAddressPool resource.
    :type backend_address_pool:
     ~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayBackendAddressPool
    :param backend_health_http_settings: Application gateway BackendHealthHttp settings.
    :type backend_health_http_settings:
     ~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayBackendHealthHttpSettings
    """

    _attribute_map = {
        'backend_address_pool': {'key': 'backendAddressPool', 'type': 'ApplicationGatewayBackendAddressPool'},
        'backend_health_http_settings': {'key': 'backendHealthHttpSettings', 'type': 'ApplicationGatewayBackendHealthHttpSettings'},
    }

    def __init__(
        self,
        *,
        backend_address_pool: Optional["ApplicationGatewayBackendAddressPool"] = None,
        backend_health_http_settings: Optional["ApplicationGatewayBackendHealthHttpSettings"] = None,
        **kwargs
    ):
        super(ApplicationGatewayBackendHealthOnDemand, self).__init__(**kwargs)
        self.backend_address_pool = backend_address_pool
        self.backend_health_http_settings = backend_health_http_settings


class ApplicationGatewayBackendHealthPool(msrest.serialization.Model):
    """Application gateway BackendHealth pool.

    :param backend_address_pool: Reference of an ApplicationGatewayBackendAddressPool resource.
    :type backend_address_pool:
     ~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayBackendAddressPool
    :param backend_http_settings_collection: List of ApplicationGatewayBackendHealthHttpSettings
     resources.
    :type backend_http_settings_collection:
     list[~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayBackendHealthHttpSettings]
    """

    _attribute_map = {
        'backend_address_pool': {'key': 'backendAddressPool', 'type': 'ApplicationGatewayBackendAddressPool'},
        'backend_http_settings_collection': {'key': 'backendHttpSettingsCollection', 'type': '[ApplicationGatewayBackendHealthHttpSettings]'},
    }

    def __init__(
        self,
        *,
        backend_address_pool: Optional["ApplicationGatewayBackendAddressPool"] = None,
        backend_http_settings_collection: Optional[List["ApplicationGatewayBackendHealthHttpSettings"]] = None,
        **kwargs
    ):
        super(ApplicationGatewayBackendHealthPool, self).__init__(**kwargs)
        self.backend_address_pool = backend_address_pool
        self.backend_http_settings_collection = backend_http_settings_collection


class ApplicationGatewayBackendHealthServer(msrest.serialization.Model):
    """Health of a single application gateway backend server.

    (The generated summary previously read "Application gateway backendhealth http
    settings", which describes a different model; the fields below are per-server
    health data.)

    :param address: IP address or FQDN of backend server.
    :type address: str
    :param ip_configuration: Reference of IP configuration of backend server.
    :type ip_configuration:
     ~azure.mgmt.network.v2019_07_01.models.NetworkInterfaceIPConfiguration
    :param health: Health of backend server. Possible values include: "Unknown", "Up", "Down",
     "Partial", "Draining".
    :type health: str or
     ~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayBackendHealthServerHealth
    :param health_probe_log: Health Probe Log.
    :type health_probe_log: str
    """

    _attribute_map = {
        'address': {'key': 'address', 'type': 'str'},
        'ip_configuration': {'key': 'ipConfiguration', 'type': 'NetworkInterfaceIPConfiguration'},
        'health': {'key': 'health', 'type': 'str'},
        'health_probe_log': {'key': 'healthProbeLog', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        address: Optional[str] = None,
        ip_configuration: Optional["NetworkInterfaceIPConfiguration"] = None,
        health: Optional[Union[str, "ApplicationGatewayBackendHealthServerHealth"]] = None,
        health_probe_log: Optional[str] = None,
        **kwargs
    ):
        super(ApplicationGatewayBackendHealthServer, self).__init__(**kwargs)
        self.address = address
        self.ip_configuration = ip_configuration
        self.health = health
        self.health_probe_log = health_probe_log


class ApplicationGatewayBackendHttpSettings(SubResource):
    """Backend address pool settings of an application gateway.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the backend http settings that is unique within an Application Gateway.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :param type: Type of the resource.
    :type type: str
    :param port: The destination port on the backend.
    :type port: int
    :param protocol: The protocol used to communicate with the backend. Possible values include:
     "Http", "Https".
    :type protocol: str or ~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayProtocol
    :param cookie_based_affinity: Cookie based affinity. Possible values include: "Enabled",
     "Disabled".
    :type cookie_based_affinity: str or
     ~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayCookieBasedAffinity
    :param request_timeout: Request timeout in seconds. Application Gateway will fail the request
     if response is not received within RequestTimeout. Acceptable values are from 1 second to
     86400 seconds.
    :type request_timeout: int
    :param probe: Probe resource of an application gateway.
    :type probe: ~azure.mgmt.network.v2019_07_01.models.SubResource
    :param authentication_certificates: Array of references to application gateway authentication
     certificates.
    :type authentication_certificates: list[~azure.mgmt.network.v2019_07_01.models.SubResource]
    :param trusted_root_certificates: Array of references to application gateway trusted root
     certificates.
    :type trusted_root_certificates: list[~azure.mgmt.network.v2019_07_01.models.SubResource]
    :param connection_draining: Connection draining of the backend http settings resource.
    :type connection_draining:
     ~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayConnectionDraining
    :param host_name: Host header to be sent to the backend servers.
    :type host_name: str
    :param pick_host_name_from_backend_address: Whether to pick host header should be picked from
     the host name of the backend server. Default value is false.
    :type pick_host_name_from_backend_address: bool
    :param affinity_cookie_name: Cookie name to use for the affinity cookie.
    :type affinity_cookie_name: str
    :param probe_enabled: Whether the probe is enabled. Default value is false.
    :type probe_enabled: bool
    :param path: Path which should be used as a prefix for all HTTP requests. Null means no path
     will be prefixed. Default value is null.
    :type path: str
    :ivar provisioning_state: The provisioning state of the backend HTTP settings resource.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2019_07_01.models.ProvisioningState
    """

    _validation = {
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'port': {'key': 'properties.port', 'type': 'int'},
        'protocol': {'key': 'properties.protocol', 'type': 'str'},
        'cookie_based_affinity': {'key': 'properties.cookieBasedAffinity', 'type': 'str'},
        'request_timeout': {'key': 'properties.requestTimeout', 'type': 'int'},
        'probe': {'key': 'properties.probe', 'type': 'SubResource'},
        'authentication_certificates': {'key': 'properties.authenticationCertificates', 'type': '[SubResource]'},
        'trusted_root_certificates': {'key': 'properties.trustedRootCertificates', 'type': '[SubResource]'},
        'connection_draining': {'key': 'properties.connectionDraining', 'type': 'ApplicationGatewayConnectionDraining'},
        'host_name': {'key': 'properties.hostName', 'type': 'str'},
        'pick_host_name_from_backend_address': {'key': 'properties.pickHostNameFromBackendAddress', 'type': 'bool'},
        'affinity_cookie_name': {'key': 'properties.affinityCookieName', 'type': 'str'},
        'probe_enabled': {'key': 'properties.probeEnabled', 'type': 'bool'},
        'path': {'key': 'properties.path', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        name: Optional[str] = None,
        etag: Optional[str] = None,
        type: Optional[str] = None,
        port: Optional[int] = None,
        protocol: Optional[Union[str, "ApplicationGatewayProtocol"]] = None,
        cookie_based_affinity: Optional[Union[str, "ApplicationGatewayCookieBasedAffinity"]] = None,
        request_timeout: Optional[int] = None,
        probe: Optional["SubResource"] = None,
        authentication_certificates: Optional[List["SubResource"]] = None,
        trusted_root_certificates: Optional[List["SubResource"]] = None,
        connection_draining: Optional["ApplicationGatewayConnectionDraining"] = None,
        host_name: Optional[str] = None,
        pick_host_name_from_backend_address: Optional[bool] = None,
        affinity_cookie_name: Optional[str] = None,
        probe_enabled: Optional[bool] = None,
        path: Optional[str] = None,
        **kwargs
    ):
        super(ApplicationGatewayBackendHttpSettings, self).__init__(id=id, **kwargs)
        self.name = name
        self.etag = etag
        self.type = type
        self.port = port
        self.protocol = protocol
        self.cookie_based_affinity = cookie_based_affinity
        self.request_timeout = request_timeout
        self.probe = probe
        self.authentication_certificates = authentication_certificates
        self.trusted_root_certificates = trusted_root_certificates
        self.connection_draining = connection_draining
        self.host_name = host_name
        self.pick_host_name_from_backend_address = pick_host_name_from_backend_address
        self.affinity_cookie_name = affinity_cookie_name
        self.probe_enabled = probe_enabled
        self.path = path
        self.provisioning_state = None  # read-only; populated by the service


class ApplicationGatewayConnectionDraining(msrest.serialization.Model):
    """Connection draining allows open connections to a backend server to be active for a
    specified time after the backend server got removed from the configuration.

    All required parameters must be populated in order to send to Azure.

    :param enabled: Required. Whether connection draining is enabled or not.
    :type enabled: bool
    :param drain_timeout_in_sec: Required. The number of seconds connection draining is active.
     Acceptable values are from 1 second to 3600 seconds.
:type drain_timeout_in_sec: int """ _validation = { 'enabled': {'required': True}, 'drain_timeout_in_sec': {'required': True, 'maximum': 3600, 'minimum': 1}, } _attribute_map = { 'enabled': {'key': 'enabled', 'type': 'bool'}, 'drain_timeout_in_sec': {'key': 'drainTimeoutInSec', 'type': 'int'}, } def __init__( self, *, enabled: bool, drain_timeout_in_sec: int, **kwargs ): super(ApplicationGatewayConnectionDraining, self).__init__(**kwargs) self.enabled = enabled self.drain_timeout_in_sec = drain_timeout_in_sec class ApplicationGatewayCustomError(msrest.serialization.Model): """Customer error of an application gateway. :param status_code: Status code of the application gateway customer error. Possible values include: "HttpStatus403", "HttpStatus502". :type status_code: str or ~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayCustomErrorStatusCode :param custom_error_page_url: Error page URL of the application gateway customer error. :type custom_error_page_url: str """ _attribute_map = { 'status_code': {'key': 'statusCode', 'type': 'str'}, 'custom_error_page_url': {'key': 'customErrorPageUrl', 'type': 'str'}, } def __init__( self, *, status_code: Optional[Union[str, "ApplicationGatewayCustomErrorStatusCode"]] = None, custom_error_page_url: Optional[str] = None, **kwargs ): super(ApplicationGatewayCustomError, self).__init__(**kwargs) self.status_code = status_code self.custom_error_page_url = custom_error_page_url class ApplicationGatewayFirewallDisabledRuleGroup(msrest.serialization.Model): """Allows to disable rules within a rule group or an entire rule group. All required parameters must be populated in order to send to Azure. :param rule_group_name: Required. The name of the rule group that will be disabled. :type rule_group_name: str :param rules: The list of rules that will be disabled. If null, all rules of the rule group will be disabled. 
:type rules: list[int] """ _validation = { 'rule_group_name': {'required': True}, } _attribute_map = { 'rule_group_name': {'key': 'ruleGroupName', 'type': 'str'}, 'rules': {'key': 'rules', 'type': '[int]'}, } def __init__( self, *, rule_group_name: str, rules: Optional[List[int]] = None, **kwargs ): super(ApplicationGatewayFirewallDisabledRuleGroup, self).__init__(**kwargs) self.rule_group_name = rule_group_name self.rules = rules class ApplicationGatewayFirewallExclusion(msrest.serialization.Model): """Allow to exclude some variable satisfy the condition for the WAF check. All required parameters must be populated in order to send to Azure. :param match_variable: Required. The variable to be excluded. :type match_variable: str :param selector_match_operator: Required. When matchVariable is a collection, operate on the selector to specify which elements in the collection this exclusion applies to. :type selector_match_operator: str :param selector: Required. When matchVariable is a collection, operator used to specify which elements in the collection this exclusion applies to. :type selector: str """ _validation = { 'match_variable': {'required': True}, 'selector_match_operator': {'required': True}, 'selector': {'required': True}, } _attribute_map = { 'match_variable': {'key': 'matchVariable', 'type': 'str'}, 'selector_match_operator': {'key': 'selectorMatchOperator', 'type': 'str'}, 'selector': {'key': 'selector', 'type': 'str'}, } def __init__( self, *, match_variable: str, selector_match_operator: str, selector: str, **kwargs ): super(ApplicationGatewayFirewallExclusion, self).__init__(**kwargs) self.match_variable = match_variable self.selector_match_operator = selector_match_operator self.selector = selector class ApplicationGatewayFirewallRule(msrest.serialization.Model): """A web application firewall rule. All required parameters must be populated in order to send to Azure. :param rule_id: Required. The identifier of the web application firewall rule. 
:type rule_id: int :param description: The description of the web application firewall rule. :type description: str """ _validation = { 'rule_id': {'required': True}, } _attribute_map = { 'rule_id': {'key': 'ruleId', 'type': 'int'}, 'description': {'key': 'description', 'type': 'str'}, } def __init__( self, *, rule_id: int, description: Optional[str] = None, **kwargs ): super(ApplicationGatewayFirewallRule, self).__init__(**kwargs) self.rule_id = rule_id self.description = description class ApplicationGatewayFirewallRuleGroup(msrest.serialization.Model): """A web application firewall rule group. All required parameters must be populated in order to send to Azure. :param rule_group_name: Required. The name of the web application firewall rule group. :type rule_group_name: str :param description: The description of the web application firewall rule group. :type description: str :param rules: Required. The rules of the web application firewall rule group. :type rules: list[~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayFirewallRule] """ _validation = { 'rule_group_name': {'required': True}, 'rules': {'required': True}, } _attribute_map = { 'rule_group_name': {'key': 'ruleGroupName', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, 'rules': {'key': 'rules', 'type': '[ApplicationGatewayFirewallRule]'}, } def __init__( self, *, rule_group_name: str, rules: List["ApplicationGatewayFirewallRule"], description: Optional[str] = None, **kwargs ): super(ApplicationGatewayFirewallRuleGroup, self).__init__(**kwargs) self.rule_group_name = rule_group_name self.description = description self.rules = rules class ApplicationGatewayFirewallRuleSet(Resource): """A web application firewall rule set. Variables are only populated by the server, and will be ignored when sending a request. :param id: Resource ID. :type id: str :ivar name: Resource name. :vartype name: str :ivar type: Resource type. :vartype type: str :param location: Resource location. 
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar provisioning_state: The provisioning state of the web application firewall rule set.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2019_07_01.models.ProvisioningState
    :param rule_set_type: The type of the web application firewall rule set.
    :type rule_set_type: str
    :param rule_set_version: The version of the web application firewall rule set type.
    :type rule_set_version: str
    :param rule_groups: The rule groups of the web application firewall rule set.
    :type rule_groups:
     list[~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayFirewallRuleGroup]
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'rule_set_type': {'key': 'properties.ruleSetType', 'type': 'str'},
        'rule_set_version': {'key': 'properties.ruleSetVersion', 'type': 'str'},
        'rule_groups': {'key': 'properties.ruleGroups', 'type': '[ApplicationGatewayFirewallRuleGroup]'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        location: Optional[str] = None,
        tags: Optional[Dict[str, str]] = None,
        rule_set_type: Optional[str] = None,
        rule_set_version: Optional[str] = None,
        rule_groups: Optional[List["ApplicationGatewayFirewallRuleGroup"]] = None,
        **kwargs
    ):
        super(ApplicationGatewayFirewallRuleSet, self).__init__(id=id, location=location, tags=tags, **kwargs)
        self.provisioning_state = None  # read-only; populated by the service
        self.rule_set_type = rule_set_type
        self.rule_set_version = rule_set_version
        self.rule_groups = rule_groups


class ApplicationGatewayFrontendIPConfiguration(SubResource):
    """Frontend IP configuration of an application gateway.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the frontend IP configuration that is unique within an Application
     Gateway.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :param type: Type of the resource.
    :type type: str
    :param private_ip_address: PrivateIPAddress of the network interface IP Configuration.
    :type private_ip_address: str
    :param private_ip_allocation_method: The private IP address allocation method. Possible
     values include: "Static", "Dynamic".
    :type private_ip_allocation_method: str or
     ~azure.mgmt.network.v2019_07_01.models.IPAllocationMethod
    :param subnet: Reference of the subnet resource.
    :type subnet: ~azure.mgmt.network.v2019_07_01.models.SubResource
    :param public_ip_address: Reference of the PublicIP resource.
    :type public_ip_address: ~azure.mgmt.network.v2019_07_01.models.SubResource
    :ivar provisioning_state: The provisioning state of the frontend IP configuration resource.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2019_07_01.models.ProvisioningState
    """

    _validation = {
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'private_ip_address': {'key': 'properties.privateIPAddress', 'type': 'str'},
        'private_ip_allocation_method': {'key': 'properties.privateIPAllocationMethod', 'type': 'str'},
        'subnet': {'key': 'properties.subnet', 'type': 'SubResource'},
        'public_ip_address': {'key': 'properties.publicIPAddress', 'type': 'SubResource'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        name: Optional[str] = None,
        etag: Optional[str] = None,
        type: Optional[str] = None,
        private_ip_address: Optional[str] = None,
        private_ip_allocation_method: Optional[Union[str, "IPAllocationMethod"]] = None,
        subnet: Optional["SubResource"] = None,
        public_ip_address: Optional["SubResource"] = None,
        **kwargs
    ):
        super(ApplicationGatewayFrontendIPConfiguration, self).__init__(id=id, **kwargs)
        self.name = name
        self.etag = etag
        self.type = type
        self.private_ip_address = private_ip_address
        self.private_ip_allocation_method = private_ip_allocation_method
        self.subnet = subnet
        self.public_ip_address = public_ip_address
        self.provisioning_state = None  # read-only; populated by the service


class ApplicationGatewayFrontendPort(SubResource):
    """Frontend port of an application gateway.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the frontend port that is unique within an Application Gateway.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :param type: Type of the resource.
    :type type: str
    :param port: Frontend port.
:type port: int :ivar provisioning_state: The provisioning state of the frontend port resource. Possible values include: "Succeeded", "Updating", "Deleting", "Failed". :vartype provisioning_state: str or ~azure.mgmt.network.v2019_07_01.models.ProvisioningState """ _validation = { 'provisioning_state': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'etag': {'key': 'etag', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'port': {'key': 'properties.port', 'type': 'int'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, } def __init__( self, *, id: Optional[str] = None, name: Optional[str] = None, etag: Optional[str] = None, type: Optional[str] = None, port: Optional[int] = None, **kwargs ): super(ApplicationGatewayFrontendPort, self).__init__(id=id, **kwargs) self.name = name self.etag = etag self.type = type self.port = port self.provisioning_state = None class ApplicationGatewayHeaderConfiguration(msrest.serialization.Model): """Header configuration of the Actions set in Application Gateway. :param header_name: Header name of the header configuration. :type header_name: str :param header_value: Header value of the header configuration. :type header_value: str """ _attribute_map = { 'header_name': {'key': 'headerName', 'type': 'str'}, 'header_value': {'key': 'headerValue', 'type': 'str'}, } def __init__( self, *, header_name: Optional[str] = None, header_value: Optional[str] = None, **kwargs ): super(ApplicationGatewayHeaderConfiguration, self).__init__(**kwargs) self.header_name = header_name self.header_value = header_value class ApplicationGatewayHttpListener(SubResource): """Http listener of an application gateway. Variables are only populated by the server, and will be ignored when sending a request. :param id: Resource ID. :type id: str :param name: Name of the HTTP listener that is unique within an Application Gateway. 
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :param type: Type of the resource.
    :type type: str
    :param frontend_ip_configuration: Frontend IP configuration resource of an application
     gateway.
    :type frontend_ip_configuration: ~azure.mgmt.network.v2019_07_01.models.SubResource
    :param frontend_port: Frontend port resource of an application gateway.
    :type frontend_port: ~azure.mgmt.network.v2019_07_01.models.SubResource
    :param protocol: Protocol of the HTTP listener. Possible values include: "Http", "Https".
    :type protocol: str or ~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayProtocol
    :param host_name: Host name of HTTP listener.
    :type host_name: str
    :param ssl_certificate: SSL certificate resource of an application gateway.
    :type ssl_certificate: ~azure.mgmt.network.v2019_07_01.models.SubResource
    :param require_server_name_indication: Applicable only if protocol is https. Enables SNI for
     multi-hosting.
    :type require_server_name_indication: bool
    :ivar provisioning_state: The provisioning state of the HTTP listener resource. Possible
     values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2019_07_01.models.ProvisioningState
    :param custom_error_configurations: Custom error configurations of the HTTP listener.
    :type custom_error_configurations:
     list[~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayCustomError]
    """

    _validation = {
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'frontend_ip_configuration': {'key': 'properties.frontendIPConfiguration', 'type': 'SubResource'},
        'frontend_port': {'key': 'properties.frontendPort', 'type': 'SubResource'},
        'protocol': {'key': 'properties.protocol', 'type': 'str'},
        'host_name': {'key': 'properties.hostName', 'type': 'str'},
        'ssl_certificate': {'key': 'properties.sslCertificate', 'type': 'SubResource'},
        'require_server_name_indication': {'key': 'properties.requireServerNameIndication', 'type': 'bool'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'custom_error_configurations': {'key': 'properties.customErrorConfigurations', 'type': '[ApplicationGatewayCustomError]'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        name: Optional[str] = None,
        etag: Optional[str] = None,
        type: Optional[str] = None,
        frontend_ip_configuration: Optional["SubResource"] = None,
        frontend_port: Optional["SubResource"] = None,
        protocol: Optional[Union[str, "ApplicationGatewayProtocol"]] = None,
        host_name: Optional[str] = None,
        ssl_certificate: Optional["SubResource"] = None,
        require_server_name_indication: Optional[bool] = None,
        custom_error_configurations: Optional[List["ApplicationGatewayCustomError"]] = None,
        **kwargs
    ):
        super(ApplicationGatewayHttpListener, self).__init__(id=id, **kwargs)
        self.name = name
        self.etag = etag
        self.type = type
        self.frontend_ip_configuration = frontend_ip_configuration
        self.frontend_port = frontend_port
        self.protocol = protocol
        self.host_name = host_name
        self.ssl_certificate = ssl_certificate
        self.require_server_name_indication = require_server_name_indication
        # Read-only: populated by the server, never sent in requests.
        self.provisioning_state = None
        self.custom_error_configurations = custom_error_configurations


class ApplicationGatewayIPConfiguration(SubResource):
    """IP configuration of an application gateway. Currently 1 public and 1 private IP
    configuration is allowed.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the IP configuration that is unique within an Application Gateway.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :param type: Type of the resource.
    :type type: str
    :param subnet: Reference of the subnet resource. A subnet from where application gateway gets
     its private address.
    :type subnet: ~azure.mgmt.network.v2019_07_01.models.SubResource
    :ivar provisioning_state: The provisioning state of the application gateway IP configuration
     resource. Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2019_07_01.models.ProvisioningState
    """

    _validation = {
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'subnet': {'key': 'properties.subnet', 'type': 'SubResource'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        name: Optional[str] = None,
        etag: Optional[str] = None,
        type: Optional[str] = None,
        subnet: Optional["SubResource"] = None,
        **kwargs
    ):
        super(ApplicationGatewayIPConfiguration, self).__init__(id=id, **kwargs)
        self.name = name
        self.etag = etag
        self.type = type
        self.subnet = subnet
        # Read-only: populated by the server, never sent in requests.
        self.provisioning_state = None


class ApplicationGatewayListResult(msrest.serialization.Model):
    """Response for ListApplicationGateways API service call.

    :param value: List of an application gateways in a resource group.
    :type value: list[~azure.mgmt.network.v2019_07_01.models.ApplicationGateway]
    :param next_link: URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ApplicationGateway]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: Optional[List["ApplicationGateway"]] = None,
        next_link: Optional[str] = None,
        **kwargs
    ):
        super(ApplicationGatewayListResult, self).__init__(**kwargs)
        self.value = value
        self.next_link = next_link


class ApplicationGatewayOnDemandProbe(msrest.serialization.Model):
    """Details of on demand test probe request.

    :param protocol: The protocol used for the probe. Possible values include: "Http", "Https".
    :type protocol: str or ~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayProtocol
    :param host: Host name to send the probe to.
    :type host: str
    :param path: Relative path of probe. Valid path starts from '/'. Probe is sent to
     :code:`<Protocol>`://:code:`<host>`::code:`<port>`:code:`<path>`.
    :type path: str
    :param timeout: The probe timeout in seconds. Probe marked as failed if valid response is not
     received with this timeout period. Acceptable values are from 1 second to 86400 seconds.
    :type timeout: int
    :param pick_host_name_from_backend_http_settings: Whether the host header should be picked
     from the backend http settings. Default value is false.
    :type pick_host_name_from_backend_http_settings: bool
    :param match: Criterion for classifying a healthy probe response.
    :type match: ~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayProbeHealthResponseMatch
    :param backend_address_pool: Reference of backend pool of application gateway to which probe
     request will be sent.
    :type backend_address_pool: ~azure.mgmt.network.v2019_07_01.models.SubResource
    :param backend_http_settings: Reference of backend http setting of application gateway to be
     used for test probe.
    :type backend_http_settings: ~azure.mgmt.network.v2019_07_01.models.SubResource
    """

    _attribute_map = {
        'protocol': {'key': 'protocol', 'type': 'str'},
        'host': {'key': 'host', 'type': 'str'},
        'path': {'key': 'path', 'type': 'str'},
        'timeout': {'key': 'timeout', 'type': 'int'},
        'pick_host_name_from_backend_http_settings': {'key': 'pickHostNameFromBackendHttpSettings', 'type': 'bool'},
        'match': {'key': 'match', 'type': 'ApplicationGatewayProbeHealthResponseMatch'},
        'backend_address_pool': {'key': 'backendAddressPool', 'type': 'SubResource'},
        'backend_http_settings': {'key': 'backendHttpSettings', 'type': 'SubResource'},
    }

    def __init__(
        self,
        *,
        protocol: Optional[Union[str, "ApplicationGatewayProtocol"]] = None,
        host: Optional[str] = None,
        path: Optional[str] = None,
        timeout: Optional[int] = None,
        pick_host_name_from_backend_http_settings: Optional[bool] = None,
        match: Optional["ApplicationGatewayProbeHealthResponseMatch"] = None,
        backend_address_pool: Optional["SubResource"] = None,
        backend_http_settings: Optional["SubResource"] = None,
        **kwargs
    ):
        super(ApplicationGatewayOnDemandProbe, self).__init__(**kwargs)
        self.protocol = protocol
        self.host = host
        self.path = path
        self.timeout = timeout
        self.pick_host_name_from_backend_http_settings = pick_host_name_from_backend_http_settings
        self.match = match
        self.backend_address_pool = backend_address_pool
        self.backend_http_settings = backend_http_settings


class ApplicationGatewayPathRule(SubResource):
    """Path rule of URL path map of an application gateway.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the path rule that is unique within an Application Gateway.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :param type: Type of the resource.
    :type type: str
    :param paths: Path rules of URL path map.
    :type paths: list[str]
    :param backend_address_pool: Backend address pool resource of URL path map path rule.
    :type backend_address_pool: ~azure.mgmt.network.v2019_07_01.models.SubResource
    :param backend_http_settings: Backend http settings resource of URL path map path rule.
    :type backend_http_settings: ~azure.mgmt.network.v2019_07_01.models.SubResource
    :param redirect_configuration: Redirect configuration resource of URL path map path rule.
    :type redirect_configuration: ~azure.mgmt.network.v2019_07_01.models.SubResource
    :param rewrite_rule_set: Rewrite rule set resource of URL path map path rule.
    :type rewrite_rule_set: ~azure.mgmt.network.v2019_07_01.models.SubResource
    :ivar provisioning_state: The provisioning state of the path rule resource. Possible values
     include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2019_07_01.models.ProvisioningState
    """

    _validation = {
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'paths': {'key': 'properties.paths', 'type': '[str]'},
        'backend_address_pool': {'key': 'properties.backendAddressPool', 'type': 'SubResource'},
        'backend_http_settings': {'key': 'properties.backendHttpSettings', 'type': 'SubResource'},
        'redirect_configuration': {'key': 'properties.redirectConfiguration', 'type': 'SubResource'},
        'rewrite_rule_set': {'key': 'properties.rewriteRuleSet', 'type': 'SubResource'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        name: Optional[str] = None,
        etag: Optional[str] = None,
        type: Optional[str] = None,
        paths: Optional[List[str]] = None,
        backend_address_pool: Optional["SubResource"] = None,
        backend_http_settings: Optional["SubResource"] = None,
        redirect_configuration: Optional["SubResource"] = None,
        rewrite_rule_set: Optional["SubResource"] = None,
        **kwargs
    ):
        super(ApplicationGatewayPathRule, self).__init__(id=id, **kwargs)
        self.name = name
        self.etag = etag
        self.type = type
        self.paths = paths
        self.backend_address_pool = backend_address_pool
        self.backend_http_settings = backend_http_settings
        self.redirect_configuration = redirect_configuration
        self.rewrite_rule_set = rewrite_rule_set
        # Read-only: populated by the server, never sent in requests.
        self.provisioning_state = None


class ApplicationGatewayProbe(SubResource):
    """Probe of the application gateway.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the probe that is unique within an Application Gateway.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :param type: Type of the resource.
    :type type: str
    :param protocol: The protocol used for the probe. Possible values include: "Http", "Https".
    :type protocol: str or ~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayProtocol
    :param host: Host name to send the probe to.
    :type host: str
    :param path: Relative path of probe. Valid path starts from '/'. Probe is sent to
     :code:`<Protocol>`://:code:`<host>`::code:`<port>`:code:`<path>`.
    :type path: str
    :param interval: The probing interval in seconds. This is the time interval between two
     consecutive probes. Acceptable values are from 1 second to 86400 seconds.
    :type interval: int
    :param timeout: The probe timeout in seconds. Probe marked as failed if valid response is not
     received with this timeout period. Acceptable values are from 1 second to 86400 seconds.
    :type timeout: int
    :param unhealthy_threshold: The probe retry count. Backend server is marked down after
     consecutive probe failure count reaches UnhealthyThreshold. Acceptable values are from 1
     second to 20.
    :type unhealthy_threshold: int
    :param pick_host_name_from_backend_http_settings: Whether the host header should be picked
     from the backend http settings. Default value is false.
    :type pick_host_name_from_backend_http_settings: bool
    :param min_servers: Minimum number of servers that are always marked healthy. Default value
     is 0.
    :type min_servers: int
    :param match: Criterion for classifying a healthy probe response.
    :type match: ~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayProbeHealthResponseMatch
    :ivar provisioning_state: The provisioning state of the probe resource. Possible values
     include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2019_07_01.models.ProvisioningState
    :param port: Custom port which will be used for probing the backend servers. The valid value
     ranges from 1 to 65535. In case not set, port from http settings will be used. This property
     is valid for Standard_v2 and WAF_v2 only.
    :type port: int
    """

    _validation = {
        'provisioning_state': {'readonly': True},
        'port': {'maximum': 65535, 'minimum': 1},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'protocol': {'key': 'properties.protocol', 'type': 'str'},
        'host': {'key': 'properties.host', 'type': 'str'},
        'path': {'key': 'properties.path', 'type': 'str'},
        'interval': {'key': 'properties.interval', 'type': 'int'},
        'timeout': {'key': 'properties.timeout', 'type': 'int'},
        'unhealthy_threshold': {'key': 'properties.unhealthyThreshold', 'type': 'int'},
        'pick_host_name_from_backend_http_settings': {'key': 'properties.pickHostNameFromBackendHttpSettings', 'type': 'bool'},
        'min_servers': {'key': 'properties.minServers', 'type': 'int'},
        'match': {'key': 'properties.match', 'type': 'ApplicationGatewayProbeHealthResponseMatch'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'port': {'key': 'properties.port', 'type': 'int'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        name: Optional[str] = None,
        etag: Optional[str] = None,
        type: Optional[str] = None,
        protocol: Optional[Union[str, "ApplicationGatewayProtocol"]] = None,
        host: Optional[str] = None,
        path: Optional[str] = None,
        interval: Optional[int] = None,
        timeout: Optional[int] = None,
        unhealthy_threshold: Optional[int] = None,
        pick_host_name_from_backend_http_settings: Optional[bool] = None,
        min_servers: Optional[int] = None,
        match: Optional["ApplicationGatewayProbeHealthResponseMatch"] = None,
        port: Optional[int] = None,
        **kwargs
    ):
        super(ApplicationGatewayProbe, self).__init__(id=id, **kwargs)
        self.name = name
        self.etag = etag
        self.type = type
        self.protocol = protocol
        self.host = host
        self.path = path
        self.interval = interval
        self.timeout = timeout
        self.unhealthy_threshold = unhealthy_threshold
        self.pick_host_name_from_backend_http_settings = pick_host_name_from_backend_http_settings
        self.min_servers = min_servers
        self.match = match
        # Read-only: populated by the server, never sent in requests.
        self.provisioning_state = None
        self.port = port


class ApplicationGatewayProbeHealthResponseMatch(msrest.serialization.Model):
    """Application gateway probe health response match.

    :param body: Body that must be contained in the health response. Default value is empty.
    :type body: str
    :param status_codes: Allowed ranges of healthy status codes. Default range of healthy status
     codes is 200-399.
    :type status_codes: list[str]
    """

    _attribute_map = {
        'body': {'key': 'body', 'type': 'str'},
        'status_codes': {'key': 'statusCodes', 'type': '[str]'},
    }

    def __init__(
        self,
        *,
        body: Optional[str] = None,
        status_codes: Optional[List[str]] = None,
        **kwargs
    ):
        super(ApplicationGatewayProbeHealthResponseMatch, self).__init__(**kwargs)
        self.body = body
        self.status_codes = status_codes


class ApplicationGatewayRedirectConfiguration(SubResource):
    """Redirect configuration of an application gateway.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the redirect configuration that is unique within an Application Gateway.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :param type: Type of the resource.
    :type type: str
    :param redirect_type: HTTP redirection type. Possible values include: "Permanent", "Found",
     "SeeOther", "Temporary".
    :type redirect_type: str or
     ~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayRedirectType
    :param target_listener: Reference to a listener to redirect the request to.
    :type target_listener: ~azure.mgmt.network.v2019_07_01.models.SubResource
    :param target_url: Url to redirect the request to.
    :type target_url: str
    :param include_path: Include path in the redirected url.
    :type include_path: bool
    :param include_query_string: Include query string in the redirected url.
    :type include_query_string: bool
    :param request_routing_rules: Request routing specifying redirect configuration.
    :type request_routing_rules: list[~azure.mgmt.network.v2019_07_01.models.SubResource]
    :param url_path_maps: Url path maps specifying default redirect configuration.
    :type url_path_maps: list[~azure.mgmt.network.v2019_07_01.models.SubResource]
    :param path_rules: Path rules specifying redirect configuration.
    :type path_rules: list[~azure.mgmt.network.v2019_07_01.models.SubResource]
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'redirect_type': {'key': 'properties.redirectType', 'type': 'str'},
        'target_listener': {'key': 'properties.targetListener', 'type': 'SubResource'},
        'target_url': {'key': 'properties.targetUrl', 'type': 'str'},
        'include_path': {'key': 'properties.includePath', 'type': 'bool'},
        'include_query_string': {'key': 'properties.includeQueryString', 'type': 'bool'},
        'request_routing_rules': {'key': 'properties.requestRoutingRules', 'type': '[SubResource]'},
        'url_path_maps': {'key': 'properties.urlPathMaps', 'type': '[SubResource]'},
        'path_rules': {'key': 'properties.pathRules', 'type': '[SubResource]'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        name: Optional[str] = None,
        etag: Optional[str] = None,
        type: Optional[str] = None,
        redirect_type: Optional[Union[str, "ApplicationGatewayRedirectType"]] = None,
        target_listener: Optional["SubResource"] = None,
        target_url: Optional[str] = None,
        include_path: Optional[bool] = None,
        include_query_string: Optional[bool] = None,
        request_routing_rules: Optional[List["SubResource"]] = None,
        url_path_maps: Optional[List["SubResource"]] = None,
        path_rules: Optional[List["SubResource"]] = None,
        **kwargs
    ):
        super(ApplicationGatewayRedirectConfiguration, self).__init__(id=id, **kwargs)
        self.name = name
        self.etag = etag
        self.type = type
        self.redirect_type = redirect_type
        self.target_listener = target_listener
        self.target_url = target_url
        self.include_path = include_path
        self.include_query_string = include_query_string
        self.request_routing_rules = request_routing_rules
        self.url_path_maps = url_path_maps
        self.path_rules = path_rules


class ApplicationGatewayRequestRoutingRule(SubResource):
    """Request routing rule of an application gateway.
    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the request routing rule that is unique within an Application Gateway.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :param type: Type of the resource.
    :type type: str
    :param rule_type: Rule type. Possible values include: "Basic", "PathBasedRouting".
    :type rule_type: str or
     ~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayRequestRoutingRuleType
    :param priority: Priority of the request routing rule.
    :type priority: int
    :param backend_address_pool: Backend address pool resource of the application gateway.
    :type backend_address_pool: ~azure.mgmt.network.v2019_07_01.models.SubResource
    :param backend_http_settings: Backend http settings resource of the application gateway.
    :type backend_http_settings: ~azure.mgmt.network.v2019_07_01.models.SubResource
    :param http_listener: Http listener resource of the application gateway.
    :type http_listener: ~azure.mgmt.network.v2019_07_01.models.SubResource
    :param url_path_map: URL path map resource of the application gateway.
    :type url_path_map: ~azure.mgmt.network.v2019_07_01.models.SubResource
    :param rewrite_rule_set: Rewrite Rule Set resource in Basic rule of the application gateway.
    :type rewrite_rule_set: ~azure.mgmt.network.v2019_07_01.models.SubResource
    :param redirect_configuration: Redirect configuration resource of the application gateway.
    :type redirect_configuration: ~azure.mgmt.network.v2019_07_01.models.SubResource
    :ivar provisioning_state: The provisioning state of the request routing rule resource.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2019_07_01.models.ProvisioningState
    """

    _validation = {
        'priority': {'maximum': 20000, 'minimum': 1},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'rule_type': {'key': 'properties.ruleType', 'type': 'str'},
        'priority': {'key': 'properties.priority', 'type': 'int'},
        'backend_address_pool': {'key': 'properties.backendAddressPool', 'type': 'SubResource'},
        'backend_http_settings': {'key': 'properties.backendHttpSettings', 'type': 'SubResource'},
        'http_listener': {'key': 'properties.httpListener', 'type': 'SubResource'},
        'url_path_map': {'key': 'properties.urlPathMap', 'type': 'SubResource'},
        'rewrite_rule_set': {'key': 'properties.rewriteRuleSet', 'type': 'SubResource'},
        'redirect_configuration': {'key': 'properties.redirectConfiguration', 'type': 'SubResource'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        name: Optional[str] = None,
        etag: Optional[str] = None,
        type: Optional[str] = None,
        rule_type: Optional[Union[str, "ApplicationGatewayRequestRoutingRuleType"]] = None,
        priority: Optional[int] = None,
        backend_address_pool: Optional["SubResource"] = None,
        backend_http_settings: Optional["SubResource"] = None,
        http_listener: Optional["SubResource"] = None,
        url_path_map: Optional["SubResource"] = None,
        rewrite_rule_set: Optional["SubResource"] = None,
        redirect_configuration: Optional["SubResource"] = None,
        **kwargs
    ):
        super(ApplicationGatewayRequestRoutingRule, self).__init__(id=id, **kwargs)
        self.name = name
        self.etag = etag
        self.type = type
        self.rule_type = rule_type
        self.priority = priority
        self.backend_address_pool = backend_address_pool
        self.backend_http_settings = backend_http_settings
        self.http_listener = http_listener
        self.url_path_map = url_path_map
        self.rewrite_rule_set = rewrite_rule_set
        self.redirect_configuration = redirect_configuration
        # Read-only: populated by the server, never sent in requests.
        self.provisioning_state = None


class ApplicationGatewayRewriteRule(msrest.serialization.Model):
    """Rewrite rule of an application gateway.

    :param name: Name of the rewrite rule that is unique within an Application Gateway.
    :type name: str
    :param rule_sequence: Rule Sequence of the rewrite rule that determines the order of
     execution of a particular rule in a RewriteRuleSet.
    :type rule_sequence: int
    :param conditions: Conditions based on which the action set execution will be evaluated.
    :type conditions:
     list[~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayRewriteRuleCondition]
    :param action_set: Set of actions to be done as part of the rewrite Rule.
    :type action_set:
     ~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayRewriteRuleActionSet
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'rule_sequence': {'key': 'ruleSequence', 'type': 'int'},
        'conditions': {'key': 'conditions', 'type': '[ApplicationGatewayRewriteRuleCondition]'},
        'action_set': {'key': 'actionSet', 'type': 'ApplicationGatewayRewriteRuleActionSet'},
    }

    def __init__(
        self,
        *,
        name: Optional[str] = None,
        rule_sequence: Optional[int] = None,
        conditions: Optional[List["ApplicationGatewayRewriteRuleCondition"]] = None,
        action_set: Optional["ApplicationGatewayRewriteRuleActionSet"] = None,
        **kwargs
    ):
        super(ApplicationGatewayRewriteRule, self).__init__(**kwargs)
        self.name = name
        self.rule_sequence = rule_sequence
        self.conditions = conditions
        self.action_set = action_set


class ApplicationGatewayRewriteRuleActionSet(msrest.serialization.Model):
    """Set of actions in the Rewrite Rule in Application Gateway.

    :param request_header_configurations: Request Header Actions in the Action Set.
    :type request_header_configurations:
     list[~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayHeaderConfiguration]
    :param response_header_configurations: Response Header Actions in the Action Set.
    :type response_header_configurations:
     list[~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayHeaderConfiguration]
    """

    _attribute_map = {
        'request_header_configurations': {'key': 'requestHeaderConfigurations', 'type': '[ApplicationGatewayHeaderConfiguration]'},
        'response_header_configurations': {'key': 'responseHeaderConfigurations', 'type': '[ApplicationGatewayHeaderConfiguration]'},
    }

    def __init__(
        self,
        *,
        request_header_configurations: Optional[List["ApplicationGatewayHeaderConfiguration"]] = None,
        response_header_configurations: Optional[List["ApplicationGatewayHeaderConfiguration"]] = None,
        **kwargs
    ):
        super(ApplicationGatewayRewriteRuleActionSet, self).__init__(**kwargs)
        self.request_header_configurations = request_header_configurations
        self.response_header_configurations = response_header_configurations


class ApplicationGatewayRewriteRuleCondition(msrest.serialization.Model):
    """Set of conditions in the Rewrite Rule in Application Gateway.

    :param variable: The condition parameter of the RewriteRuleCondition.
    :type variable: str
    :param pattern: The pattern, either fixed string or regular expression, that evaluates the
     truthfulness of the condition.
    :type pattern: str
    :param ignore_case: Setting this parameter to truth value with force the pattern to do a case
     in-sensitive comparison.
    :type ignore_case: bool
    :param negate: Setting this value as truth will force to check the negation of the condition
     given by the user.
    :type negate: bool
    """

    _attribute_map = {
        'variable': {'key': 'variable', 'type': 'str'},
        'pattern': {'key': 'pattern', 'type': 'str'},
        'ignore_case': {'key': 'ignoreCase', 'type': 'bool'},
        'negate': {'key': 'negate', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        variable: Optional[str] = None,
        pattern: Optional[str] = None,
        ignore_case: Optional[bool] = None,
        negate: Optional[bool] = None,
        **kwargs
    ):
        super(ApplicationGatewayRewriteRuleCondition, self).__init__(**kwargs)
        self.variable = variable
        self.pattern = pattern
        self.ignore_case = ignore_case
        self.negate = negate


class ApplicationGatewayRewriteRuleSet(SubResource):
    """Rewrite rule set of an application gateway.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the rewrite rule set that is unique within an Application Gateway.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param rewrite_rules: Rewrite rules in the rewrite rule set.
    :type rewrite_rules:
     list[~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayRewriteRule]
    :ivar provisioning_state: The provisioning state of the rewrite rule set resource. Possible
     values include: "Succeeded", "Updating", "Deleting", "Failed".
:vartype provisioning_state: str or ~azure.mgmt.network.v2019_07_01.models.ProvisioningState """ _validation = { 'etag': {'readonly': True}, 'provisioning_state': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'etag': {'key': 'etag', 'type': 'str'}, 'rewrite_rules': {'key': 'properties.rewriteRules', 'type': '[ApplicationGatewayRewriteRule]'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, } def __init__( self, *, id: Optional[str] = None, name: Optional[str] = None, rewrite_rules: Optional[List["ApplicationGatewayRewriteRule"]] = None, **kwargs ): super(ApplicationGatewayRewriteRuleSet, self).__init__(id=id, **kwargs) self.name = name self.etag = None self.rewrite_rules = rewrite_rules self.provisioning_state = None class ApplicationGatewaySku(msrest.serialization.Model): """SKU of an application gateway. :param name: Name of an application gateway SKU. Possible values include: "Standard_Small", "Standard_Medium", "Standard_Large", "WAF_Medium", "WAF_Large", "Standard_v2", "WAF_v2". :type name: str or ~azure.mgmt.network.v2019_07_01.models.ApplicationGatewaySkuName :param tier: Tier of an application gateway. Possible values include: "Standard", "WAF", "Standard_v2", "WAF_v2". :type tier: str or ~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayTier :param capacity: Capacity (instance count) of an application gateway. 
    :type capacity: int
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'tier': {'key': 'tier', 'type': 'str'},
        'capacity': {'key': 'capacity', 'type': 'int'},
    }

    def __init__(
        self,
        *,
        name: Optional[Union[str, "ApplicationGatewaySkuName"]] = None,
        tier: Optional[Union[str, "ApplicationGatewayTier"]] = None,
        capacity: Optional[int] = None,
        **kwargs
    ):
        super(ApplicationGatewaySku, self).__init__(**kwargs)
        self.name = name
        self.tier = tier
        self.capacity = capacity


class ApplicationGatewaySslCertificate(SubResource):
    """SSL certificates of an application gateway.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the SSL certificate that is unique within an Application Gateway.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :param type: Type of the resource.
    :type type: str
    :param data: Base-64 encoded pfx certificate. Only applicable in PUT Request.
    :type data: str
    :param password: Password for the pfx file specified in data. Only applicable in PUT request.
    :type password: str
    :param public_cert_data: Base-64 encoded Public cert data corresponding to pfx specified in
     data. Only applicable in GET request.
    :type public_cert_data: str
    :param key_vault_secret_id: Secret Id of (base-64 encoded unencrypted pfx) 'Secret' or
     'Certificate' object stored in KeyVault.
    :type key_vault_secret_id: str
    :ivar provisioning_state: The provisioning state of the SSL certificate resource. Possible
     values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2019_07_01.models.ProvisioningState
    """

    _validation = {
        'provisioning_state': {'readonly': True},
    }

    # 'properties.*' wire keys address fields nested under the resource's
    # properties object in the REST payload.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'data': {'key': 'properties.data', 'type': 'str'},
        'password': {'key': 'properties.password', 'type': 'str'},
        'public_cert_data': {'key': 'properties.publicCertData', 'type': 'str'},
        'key_vault_secret_id': {'key': 'properties.keyVaultSecretId', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        name: Optional[str] = None,
        etag: Optional[str] = None,
        type: Optional[str] = None,
        data: Optional[str] = None,
        password: Optional[str] = None,
        public_cert_data: Optional[str] = None,
        key_vault_secret_id: Optional[str] = None,
        **kwargs
    ):
        super(ApplicationGatewaySslCertificate, self).__init__(id=id, **kwargs)
        self.name = name
        self.etag = etag
        self.type = type
        self.data = data
        self.password = password
        self.public_cert_data = public_cert_data
        self.key_vault_secret_id = key_vault_secret_id
        self.provisioning_state = None  # server-populated

class ApplicationGatewaySslPolicy(msrest.serialization.Model):
    """Application Gateway Ssl policy.

    :param disabled_ssl_protocols: Ssl protocols to be disabled on application gateway.
    :type disabled_ssl_protocols: list[str or
     ~azure.mgmt.network.v2019_07_01.models.ApplicationGatewaySslProtocol]
    :param policy_type: Type of Ssl Policy. Possible values include: "Predefined", "Custom".
    :type policy_type: str or
     ~azure.mgmt.network.v2019_07_01.models.ApplicationGatewaySslPolicyType
    :param policy_name: Name of Ssl predefined policy. Possible values include:
     "AppGwSslPolicy20150501", "AppGwSslPolicy20170401", "AppGwSslPolicy20170401S".
    :type policy_name: str or
     ~azure.mgmt.network.v2019_07_01.models.ApplicationGatewaySslPolicyName
    :param cipher_suites: Ssl cipher suites to be enabled in the specified order to application
     gateway.
    :type cipher_suites: list[str or
     ~azure.mgmt.network.v2019_07_01.models.ApplicationGatewaySslCipherSuite]
    :param min_protocol_version: Minimum version of Ssl protocol to be supported on application
     gateway. Possible values include: "TLSv1_0", "TLSv1_1", "TLSv1_2".
    :type min_protocol_version: str or
     ~azure.mgmt.network.v2019_07_01.models.ApplicationGatewaySslProtocol
    """

    _attribute_map = {
        'disabled_ssl_protocols': {'key': 'disabledSslProtocols', 'type': '[str]'},
        'policy_type': {'key': 'policyType', 'type': 'str'},
        'policy_name': {'key': 'policyName', 'type': 'str'},
        'cipher_suites': {'key': 'cipherSuites', 'type': '[str]'},
        'min_protocol_version': {'key': 'minProtocolVersion', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        disabled_ssl_protocols: Optional[List[Union[str, "ApplicationGatewaySslProtocol"]]] = None,
        policy_type: Optional[Union[str, "ApplicationGatewaySslPolicyType"]] = None,
        policy_name: Optional[Union[str, "ApplicationGatewaySslPolicyName"]] = None,
        cipher_suites: Optional[List[Union[str, "ApplicationGatewaySslCipherSuite"]]] = None,
        min_protocol_version: Optional[Union[str, "ApplicationGatewaySslProtocol"]] = None,
        **kwargs
    ):
        super(ApplicationGatewaySslPolicy, self).__init__(**kwargs)
        self.disabled_ssl_protocols = disabled_ssl_protocols
        self.policy_type = policy_type
        self.policy_name = policy_name
        self.cipher_suites = cipher_suites
        self.min_protocol_version = min_protocol_version


class ApplicationGatewaySslPredefinedPolicy(SubResource):
    """An Ssl predefined policy.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the Ssl predefined policy.
    :type name: str
    :param cipher_suites: Ssl cipher suites to be enabled in the specified order for application
     gateway.
    :type cipher_suites: list[str or
     ~azure.mgmt.network.v2019_07_01.models.ApplicationGatewaySslCipherSuite]
    :param min_protocol_version: Minimum version of Ssl protocol to be supported on application
     gateway. Possible values include: "TLSv1_0", "TLSv1_1", "TLSv1_2".
    :type min_protocol_version: str or
     ~azure.mgmt.network.v2019_07_01.models.ApplicationGatewaySslProtocol
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'cipher_suites': {'key': 'properties.cipherSuites', 'type': '[str]'},
        'min_protocol_version': {'key': 'properties.minProtocolVersion', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        name: Optional[str] = None,
        cipher_suites: Optional[List[Union[str, "ApplicationGatewaySslCipherSuite"]]] = None,
        min_protocol_version: Optional[Union[str, "ApplicationGatewaySslProtocol"]] = None,
        **kwargs
    ):
        super(ApplicationGatewaySslPredefinedPolicy, self).__init__(id=id, **kwargs)
        self.name = name
        self.cipher_suites = cipher_suites
        self.min_protocol_version = min_protocol_version


class ApplicationGatewayTrustedRootCertificate(SubResource):
    """Trusted Root certificates of an application gateway.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the trusted root certificate that is unique within an Application
     Gateway.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :param type: Type of the resource.
    :type type: str
    :param data: Certificate public data.
    :type data: str
    :param key_vault_secret_id: Secret Id of (base-64 encoded unencrypted pfx) 'Secret' or
     'Certificate' object stored in KeyVault.
    :type key_vault_secret_id: str
    :ivar provisioning_state: The provisioning state of the trusted root certificate resource.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2019_07_01.models.ProvisioningState
    """

    _validation = {
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'data': {'key': 'properties.data', 'type': 'str'},
        'key_vault_secret_id': {'key': 'properties.keyVaultSecretId', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        name: Optional[str] = None,
        etag: Optional[str] = None,
        type: Optional[str] = None,
        data: Optional[str] = None,
        key_vault_secret_id: Optional[str] = None,
        **kwargs
    ):
        super(ApplicationGatewayTrustedRootCertificate, self).__init__(id=id, **kwargs)
        self.name = name
        self.etag = etag
        self.type = type
        self.data = data
        self.key_vault_secret_id = key_vault_secret_id
        self.provisioning_state = None  # server-populated


class ApplicationGatewayUrlPathMap(SubResource):
    """UrlPathMaps give a url path to the backend mapping information for PathBasedRouting.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the URL path map that is unique within an Application Gateway.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource is updated.
    :type etag: str
    :param type: Type of the resource.
    :type type: str
    :param default_backend_address_pool: Default backend address pool resource of URL path map.
    :type default_backend_address_pool: ~azure.mgmt.network.v2019_07_01.models.SubResource
    :param default_backend_http_settings: Default backend http settings resource of URL path map.
    :type default_backend_http_settings: ~azure.mgmt.network.v2019_07_01.models.SubResource
    :param default_rewrite_rule_set: Default Rewrite rule set resource of URL path map.
    :type default_rewrite_rule_set: ~azure.mgmt.network.v2019_07_01.models.SubResource
    :param default_redirect_configuration: Default redirect configuration resource of URL path
     map.
    :type default_redirect_configuration: ~azure.mgmt.network.v2019_07_01.models.SubResource
    :param path_rules: Path rule of URL path map resource.
    :type path_rules: list[~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayPathRule]
    :ivar provisioning_state: The provisioning state of the URL path map resource. Possible
     values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2019_07_01.models.ProvisioningState
    """

    _validation = {
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'default_backend_address_pool': {'key': 'properties.defaultBackendAddressPool', 'type': 'SubResource'},
        'default_backend_http_settings': {'key': 'properties.defaultBackendHttpSettings', 'type': 'SubResource'},
        'default_rewrite_rule_set': {'key': 'properties.defaultRewriteRuleSet', 'type': 'SubResource'},
        'default_redirect_configuration': {'key': 'properties.defaultRedirectConfiguration', 'type': 'SubResource'},
        'path_rules': {'key': 'properties.pathRules', 'type': '[ApplicationGatewayPathRule]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        name: Optional[str] = None,
        etag: Optional[str] = None,
        type: Optional[str] = None,
        default_backend_address_pool: Optional["SubResource"] = None,
        default_backend_http_settings: Optional["SubResource"] = None,
        default_rewrite_rule_set: Optional["SubResource"] = None,
        default_redirect_configuration: Optional["SubResource"] = None,
        path_rules: Optional[List["ApplicationGatewayPathRule"]] = None,
        **kwargs
    ):
        super(ApplicationGatewayUrlPathMap, self).__init__(id=id, **kwargs)
        self.name = name
        self.etag = etag
        self.type = type
        self.default_backend_address_pool = default_backend_address_pool
        self.default_backend_http_settings = default_backend_http_settings
        self.default_rewrite_rule_set = default_rewrite_rule_set
        self.default_redirect_configuration = default_redirect_configuration
        self.path_rules = path_rules
        self.provisioning_state = None  # server-populated


class ApplicationGatewayWebApplicationFirewallConfiguration(msrest.serialization.Model):
    """Application gateway web application firewall configuration.

    All required parameters must be populated in order to send to Azure.

    :param enabled: Required. Whether the web application firewall is enabled or not.
    :type enabled: bool
    :param firewall_mode: Required. Web application firewall mode. Possible values include:
     "Detection", "Prevention".
    :type firewall_mode: str or
     ~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayFirewallMode
    :param rule_set_type: Required. The type of the web application firewall rule set. Possible
     values are: 'OWASP'.
    :type rule_set_type: str
    :param rule_set_version: Required. The version of the rule set type.
    :type rule_set_version: str
    :param disabled_rule_groups: The disabled rule groups.
    :type disabled_rule_groups:
     list[~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayFirewallDisabledRuleGroup]
    :param request_body_check: Whether allow WAF to check request Body.
    :type request_body_check: bool
    :param max_request_body_size: Maximum request body size for WAF.
    :type max_request_body_size: int
    :param max_request_body_size_in_kb: Maximum request body size in Kb for WAF.
    :type max_request_body_size_in_kb: int
    :param file_upload_limit_in_mb: Maximum file upload size in Mb for WAF.
    :type file_upload_limit_in_mb: int
    :param exclusions: The exclusion list.
    :type exclusions:
     list[~azure.mgmt.network.v2019_07_01.models.ApplicationGatewayFirewallExclusion]
    """

    # Required fields plus numeric bounds enforced client-side before sending
    # (body-size limits bounded to the 8..128 range; upload limit non-negative).
    _validation = {
        'enabled': {'required': True},
        'firewall_mode': {'required': True},
        'rule_set_type': {'required': True},
        'rule_set_version': {'required': True},
        'max_request_body_size': {'maximum': 128, 'minimum': 8},
        'max_request_body_size_in_kb': {'maximum': 128, 'minimum': 8},
        'file_upload_limit_in_mb': {'minimum': 0},
    }

    _attribute_map = {
        'enabled': {'key': 'enabled', 'type': 'bool'},
        'firewall_mode': {'key': 'firewallMode', 'type': 'str'},
        'rule_set_type': {'key': 'ruleSetType', 'type': 'str'},
        'rule_set_version': {'key': 'ruleSetVersion', 'type': 'str'},
        'disabled_rule_groups': {'key': 'disabledRuleGroups', 'type': '[ApplicationGatewayFirewallDisabledRuleGroup]'},
        'request_body_check': {'key': 'requestBodyCheck', 'type': 'bool'},
        'max_request_body_size': {'key': 'maxRequestBodySize', 'type': 'int'},
        'max_request_body_size_in_kb': {'key': 'maxRequestBodySizeInKb', 'type': 'int'},
        'file_upload_limit_in_mb': {'key': 'fileUploadLimitInMb', 'type': 'int'},
        'exclusions': {'key': 'exclusions', 'type': '[ApplicationGatewayFirewallExclusion]'},
    }

    def __init__(
        self,
        *,
        enabled: bool,
        firewall_mode: Union[str, "ApplicationGatewayFirewallMode"],
        rule_set_type: str,
        rule_set_version: str,
        disabled_rule_groups: Optional[List["ApplicationGatewayFirewallDisabledRuleGroup"]] = None,
        request_body_check: Optional[bool] = None,
        max_request_body_size: Optional[int] = None,
        max_request_body_size_in_kb: Optional[int] = None,
        file_upload_limit_in_mb: Optional[int] = None,
        exclusions: Optional[List["ApplicationGatewayFirewallExclusion"]] = None,
        **kwargs
    ):
        super(ApplicationGatewayWebApplicationFirewallConfiguration, self).__init__(**kwargs)
        self.enabled = enabled
        self.firewall_mode = firewall_mode
        self.rule_set_type = rule_set_type
        self.rule_set_version = rule_set_version
        self.disabled_rule_groups = disabled_rule_groups
        self.request_body_check = request_body_check
        self.max_request_body_size = max_request_body_size
        self.max_request_body_size_in_kb = max_request_body_size_in_kb
        self.file_upload_limit_in_mb = file_upload_limit_in_mb
        self.exclusions = exclusions


class FirewallPolicyRuleCondition(msrest.serialization.Model):
    """Properties of a rule.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: ApplicationRuleCondition, NetworkRuleCondition.

    All required parameters must be populated in order to send to Azure.

    :param name: Name of the rule condition.
    :type name: str
    :param description: Description of the rule condition.
    :type description: str
    :param rule_condition_type: Required. Rule Condition Type.Constant filled by server. Possible
     values include: "ApplicationRuleCondition", "NetworkRuleCondition".
    :type rule_condition_type: str or
     ~azure.mgmt.network.v2019_07_01.models.FirewallPolicyRuleConditionType
    """

    _validation = {
        'rule_condition_type': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'rule_condition_type': {'key': 'ruleConditionType', 'type': 'str'},
    }

    # Polymorphic subtype map: the wire value of 'ruleConditionType' selects
    # which subclass is instantiated on deserialization.
    _subtype_map = {
        'rule_condition_type': {'ApplicationRuleCondition': 'ApplicationRuleCondition', 'NetworkRuleCondition': 'NetworkRuleCondition'}
    }

    def __init__(
        self,
        *,
        name: Optional[str] = None,
        description: Optional[str] = None,
        **kwargs
    ):
        super(FirewallPolicyRuleCondition, self).__init__(**kwargs)
        self.name = name
        self.description = description
        self.rule_condition_type = None  # type: Optional[str]


class ApplicationRuleCondition(FirewallPolicyRuleCondition):
    """Rule condition of type application.

    All required parameters must be populated in order to send to Azure.

    :param name: Name of the rule condition.
    :type name: str
    :param description: Description of the rule condition.
    :type description: str
    :param rule_condition_type: Required. Rule Condition Type.Constant filled by server.
     Possible values include: "ApplicationRuleCondition", "NetworkRuleCondition".
    :type rule_condition_type: str or
     ~azure.mgmt.network.v2019_07_01.models.FirewallPolicyRuleConditionType
    :param source_addresses: List of source IP addresses for this rule.
    :type source_addresses: list[str]
    :param destination_addresses: List of destination IP addresses or Service Tags.
    :type destination_addresses: list[str]
    :param protocols: Array of Application Protocols.
    :type protocols:
     list[~azure.mgmt.network.v2019_07_01.models.FirewallPolicyRuleConditionApplicationProtocol]
    :param target_fqdns: List of FQDNs for this rule condition.
    :type target_fqdns: list[str]
    :param fqdn_tags: List of FQDN Tags for this rule condition.
    :type fqdn_tags: list[str]
    """

    _validation = {
        'rule_condition_type': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'rule_condition_type': {'key': 'ruleConditionType', 'type': 'str'},
        'source_addresses': {'key': 'sourceAddresses', 'type': '[str]'},
        'destination_addresses': {'key': 'destinationAddresses', 'type': '[str]'},
        'protocols': {'key': 'protocols', 'type': '[FirewallPolicyRuleConditionApplicationProtocol]'},
        'target_fqdns': {'key': 'targetFqdns', 'type': '[str]'},
        'fqdn_tags': {'key': 'fqdnTags', 'type': '[str]'},
    }

    def __init__(
        self,
        *,
        name: Optional[str] = None,
        description: Optional[str] = None,
        source_addresses: Optional[List[str]] = None,
        destination_addresses: Optional[List[str]] = None,
        protocols: Optional[List["FirewallPolicyRuleConditionApplicationProtocol"]] = None,
        target_fqdns: Optional[List[str]] = None,
        fqdn_tags: Optional[List[str]] = None,
        **kwargs
    ):
        super(ApplicationRuleCondition, self).__init__(name=name, description=description, **kwargs)
        # Discriminator constant identifying this polymorphic subtype on the wire.
        self.rule_condition_type = 'ApplicationRuleCondition'  # type: str
        self.source_addresses = source_addresses
        self.destination_addresses = destination_addresses
        self.protocols = protocols
        self.target_fqdns = target_fqdns
        self.fqdn_tags = fqdn_tags


class ApplicationSecurityGroup(Resource):
    """An application security group in a resource group.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar resource_guid: The resource GUID property of the application security group resource.
     It uniquely identifies a resource, even if the user changes its name or migrate the resource
     across subscriptions or resource groups.
    :vartype resource_guid: str
    :ivar provisioning_state: The provisioning state of the application security group resource.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2019_07_01.models.ProvisioningState
    """

    # All of these are server-populated (readonly); __init__ leaves them as None.
    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'resource_guid': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        location: Optional[str] = None,
        tags: Optional[Dict[str, str]] = None,
        **kwargs
    ):
        super(ApplicationSecurityGroup, self).__init__(id=id, location=location, tags=tags, **kwargs)
        self.etag = None
        self.resource_guid = None
        self.provisioning_state = None


class ApplicationSecurityGroupListResult(msrest.serialization.Model):
    """A list of application security groups.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: A list of application security groups.
    :type value: list[~azure.mgmt.network.v2019_07_01.models.ApplicationSecurityGroup]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ApplicationSecurityGroup]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: Optional[List["ApplicationSecurityGroup"]] = None,
        **kwargs
    ):
        super(ApplicationSecurityGroupListResult, self).__init__(**kwargs)
        self.value = value
        self.next_link = None  # server-populated paging link


class AuthorizationListResult(msrest.serialization.Model):
    """Response for ListAuthorizations API service call retrieves all authorizations that belongs
    to an ExpressRouteCircuit.
    :param value: The authorizations in an ExpressRoute Circuit.
    :type value: list[~azure.mgmt.network.v2019_07_01.models.ExpressRouteCircuitAuthorization]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ExpressRouteCircuitAuthorization]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: Optional[List["ExpressRouteCircuitAuthorization"]] = None,
        next_link: Optional[str] = None,
        **kwargs
    ):
        super(AuthorizationListResult, self).__init__(**kwargs)
        self.value = value
        # Unlike the read-only list results in this module, next_link here is a
        # settable parameter (no 'readonly' validation on this class).
        self.next_link = next_link


class AutoApprovedPrivateLinkService(msrest.serialization.Model):
    """The information of an AutoApprovedPrivateLinkService.

    :param private_link_service: The id of the private link service resource.
    :type private_link_service: str
    """

    _attribute_map = {
        'private_link_service': {'key': 'privateLinkService', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        private_link_service: Optional[str] = None,
        **kwargs
    ):
        super(AutoApprovedPrivateLinkService, self).__init__(**kwargs)
        self.private_link_service = private_link_service


class AutoApprovedPrivateLinkServicesResult(msrest.serialization.Model):
    """An array of private link service id that can be linked to a private end point with auto
    approved.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: An array of auto approved private link service.
    :type value: list[~azure.mgmt.network.v2019_07_01.models.AutoApprovedPrivateLinkService]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[AutoApprovedPrivateLinkService]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: Optional[List["AutoApprovedPrivateLinkService"]] = None,
        **kwargs
    ):
        super(AutoApprovedPrivateLinkServicesResult, self).__init__(**kwargs)
        self.value = value
        self.next_link = None  # server-populated paging link


class Availability(msrest.serialization.Model):
    """Availability of the metric.

    :param time_grain: The time grain of the availability.
    :type time_grain: str
    :param retention: The retention of the availability.
    :type retention: str
    :param blob_duration: Duration of the availability blob.
    :type blob_duration: str
    """

    _attribute_map = {
        'time_grain': {'key': 'timeGrain', 'type': 'str'},
        'retention': {'key': 'retention', 'type': 'str'},
        'blob_duration': {'key': 'blobDuration', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        time_grain: Optional[str] = None,
        retention: Optional[str] = None,
        blob_duration: Optional[str] = None,
        **kwargs
    ):
        super(Availability, self).__init__(**kwargs)
        self.time_grain = time_grain
        self.retention = retention
        self.blob_duration = blob_duration


class AvailableDelegation(msrest.serialization.Model):
    """The serviceName of an AvailableDelegation indicates a possible delegation for a subnet.

    :param name: The name of the AvailableDelegation resource.
    :type name: str
    :param id: A unique identifier of the AvailableDelegation resource.
    :type id: str
    :param type: Resource type.
    :type type: str
    :param service_name: The name of the service and resource.
    :type service_name: str
    :param actions: Describes the actions permitted to the service upon delegation.
    :type actions: list[str]
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'service_name': {'key': 'serviceName', 'type': 'str'},
        'actions': {'key': 'actions', 'type': '[str]'},
    }

    def __init__(
        self,
        *,
        name: Optional[str] = None,
        id: Optional[str] = None,
        type: Optional[str] = None,
        service_name: Optional[str] = None,
        actions: Optional[List[str]] = None,
        **kwargs
    ):
        super(AvailableDelegation, self).__init__(**kwargs)
        self.name = name
        self.id = id
        self.type = type
        self.service_name = service_name
        self.actions = actions


class AvailableDelegationsResult(msrest.serialization.Model):
    """An array of available delegations.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: An array of available delegations.
    :type value: list[~azure.mgmt.network.v2019_07_01.models.AvailableDelegation]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[AvailableDelegation]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: Optional[List["AvailableDelegation"]] = None,
        **kwargs
    ):
        super(AvailableDelegationsResult, self).__init__(**kwargs)
        self.value = value
        self.next_link = None  # server-populated paging link


class AvailablePrivateEndpointType(msrest.serialization.Model):
    """The information of an AvailablePrivateEndpointType.

    :param name: The name of the service and resource.
    :type name: str
    :param id: A unique identifier of the AvailablePrivateEndpoint Type resource.
    :type id: str
    :param type: Resource type.
    :type type: str
    :param resource_name: The name of the service and resource.
    :type resource_name: str
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'resource_name': {'key': 'resourceName', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: Optional[str] = None,
        id: Optional[str] = None,
        type: Optional[str] = None,
        resource_name: Optional[str] = None,
        **kwargs
    ):
        super(AvailablePrivateEndpointType, self).__init__(**kwargs)
        self.name = name
        self.id = id
        self.type = type
        self.resource_name = resource_name


class AvailablePrivateEndpointTypesResult(msrest.serialization.Model):
    """An array of available PrivateEndpoint types.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: An array of available privateEndpoint type.
    :type value: list[~azure.mgmt.network.v2019_07_01.models.AvailablePrivateEndpointType]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[AvailablePrivateEndpointType]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: Optional[List["AvailablePrivateEndpointType"]] = None,
        **kwargs
    ):
        super(AvailablePrivateEndpointTypesResult, self).__init__(**kwargs)
        self.value = value
        self.next_link = None  # server-populated paging link


class AvailableProvidersList(msrest.serialization.Model):
    """List of available countries with details.

    All required parameters must be populated in order to send to Azure.

    :param countries: Required. List of available countries.
    :type countries:
     list[~azure.mgmt.network.v2019_07_01.models.AvailableProvidersListCountry]
    """

    _validation = {
        'countries': {'required': True},
    }

    _attribute_map = {
        'countries': {'key': 'countries', 'type': '[AvailableProvidersListCountry]'},
    }

    def __init__(
        self,
        *,
        countries: List["AvailableProvidersListCountry"],  # required — no default
        **kwargs
    ):
        super(AvailableProvidersList, self).__init__(**kwargs)
        self.countries = countries


class AvailableProvidersListCity(msrest.serialization.Model):
    """City or town details.

    :param city_name: The city or town name.
    :type city_name: str
    :param providers: A list of Internet service providers.
    :type providers: list[str]
    """

    _attribute_map = {
        'city_name': {'key': 'cityName', 'type': 'str'},
        'providers': {'key': 'providers', 'type': '[str]'},
    }

    def __init__(
        self,
        *,
        city_name: Optional[str] = None,
        providers: Optional[List[str]] = None,
        **kwargs
    ):
        super(AvailableProvidersListCity, self).__init__(**kwargs)
        self.city_name = city_name
        self.providers = providers


class AvailableProvidersListCountry(msrest.serialization.Model):
    """Country details.

    :param country_name: The country name.
    :type country_name: str
    :param providers: A list of Internet service providers.
    :type providers: list[str]
    :param states: List of available states in the country.
    :type states: list[~azure.mgmt.network.v2019_07_01.models.AvailableProvidersListState]
    """

    _attribute_map = {
        'country_name': {'key': 'countryName', 'type': 'str'},
        'providers': {'key': 'providers', 'type': '[str]'},
        'states': {'key': 'states', 'type': '[AvailableProvidersListState]'},
    }

    def __init__(
        self,
        *,
        country_name: Optional[str] = None,
        providers: Optional[List[str]] = None,
        states: Optional[List["AvailableProvidersListState"]] = None,
        **kwargs
    ):
        super(AvailableProvidersListCountry, self).__init__(**kwargs)
        self.country_name = country_name
        self.providers = providers
        self.states = states


class AvailableProvidersListParameters(msrest.serialization.Model):
    """Constraints that determine the list of available Internet service providers.

    :param azure_locations: A list of Azure regions.
    :type azure_locations: list[str]
    :param country: The country for available providers list.
    :type country: str
    :param state: The state for available providers list.
    :type state: str
    :param city: The city or town for available providers list.
    :type city: str
    """

    _attribute_map = {
        'azure_locations': {'key': 'azureLocations', 'type': '[str]'},
        'country': {'key': 'country', 'type': 'str'},
        'state': {'key': 'state', 'type': 'str'},
        'city': {'key': 'city', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        azure_locations: Optional[List[str]] = None,
        country: Optional[str] = None,
        state: Optional[str] = None,
        city: Optional[str] = None,
        **kwargs
    ):
        super(AvailableProvidersListParameters, self).__init__(**kwargs)
        self.azure_locations = azure_locations
        self.country = country
        self.state = state
        self.city = city


class AvailableProvidersListState(msrest.serialization.Model):
    """State details.

    :param state_name: The state name.
    :type state_name: str
    :param providers: A list of Internet service providers.
    :type providers: list[str]
    :param cities: List of available cities or towns in the state.
    :type cities: list[~azure.mgmt.network.v2019_07_01.models.AvailableProvidersListCity]
    """

    _attribute_map = {
        'state_name': {'key': 'stateName', 'type': 'str'},
        'providers': {'key': 'providers', 'type': '[str]'},
        'cities': {'key': 'cities', 'type': '[AvailableProvidersListCity]'},
    }

    def __init__(
        self,
        *,
        state_name: Optional[str] = None,
        providers: Optional[List[str]] = None,
        cities: Optional[List["AvailableProvidersListCity"]] = None,
        **kwargs
    ):
        super(AvailableProvidersListState, self).__init__(**kwargs)
        self.state_name = state_name
        self.providers = providers
        self.cities = cities


class AzureAsyncOperationResult(msrest.serialization.Model):
    """The response body contains the status of the specified asynchronous operation, indicating
    whether it has succeeded, is in progress, or has failed. Note that this status is distinct
    from the HTTP status code returned for the Get Operation Status operation itself. If the
    asynchronous operation succeeded, the response body includes the HTTP status code for the
    successful request. If the asynchronous operation failed, the response body includes the HTTP
    status code for the failed request and error information regarding the failure.

    :param status: Status of the Azure async operation. Possible values include: "InProgress",
     "Succeeded", "Failed".
    :type status: str or ~azure.mgmt.network.v2019_07_01.models.NetworkOperationStatus
    :param error: Details of the error occurred during specified asynchronous operation.
    :type error: ~azure.mgmt.network.v2019_07_01.models.Error
    """

    _attribute_map = {
        'status': {'key': 'status', 'type': 'str'},
        'error': {'key': 'error', 'type': 'Error'},
    }

    def __init__(
        self,
        *,
        status: Optional[Union[str, "NetworkOperationStatus"]] = None,
        error: Optional["Error"] = None,
        **kwargs
    ):
        super(AzureAsyncOperationResult, self).__init__(**kwargs)
        self.status = status
        self.error = error


class AzureFirewall(Resource):
    """Azure Firewall resource.
Variables are only populated by the server, and will be ignored when sending a request. :param id: Resource ID. :type id: str :ivar name: Resource name. :vartype name: str :ivar type: Resource type. :vartype type: str :param location: Resource location. :type location: str :param tags: A set of tags. Resource tags. :type tags: dict[str, str] :param zones: A list of availability zones denoting where the resource needs to come from. :type zones: list[str] :ivar etag: A unique read-only string that changes whenever the resource is updated. :vartype etag: str :param application_rule_collections: Collection of application rule collections used by Azure Firewall. :type application_rule_collections: list[~azure.mgmt.network.v2019_07_01.models.AzureFirewallApplicationRuleCollection] :param nat_rule_collections: Collection of NAT rule collections used by Azure Firewall. :type nat_rule_collections: list[~azure.mgmt.network.v2019_07_01.models.AzureFirewallNatRuleCollection] :param network_rule_collections: Collection of network rule collections used by Azure Firewall. :type network_rule_collections: list[~azure.mgmt.network.v2019_07_01.models.AzureFirewallNetworkRuleCollection] :param ip_configurations: IP configuration of the Azure Firewall resource. :type ip_configurations: list[~azure.mgmt.network.v2019_07_01.models.AzureFirewallIPConfiguration] :ivar provisioning_state: The provisioning state of the Azure firewall resource. Possible values include: "Succeeded", "Updating", "Deleting", "Failed". :vartype provisioning_state: str or ~azure.mgmt.network.v2019_07_01.models.ProvisioningState :param threat_intel_mode: The operation mode for Threat Intelligence. Possible values include: "Alert", "Deny", "Off". :type threat_intel_mode: str or ~azure.mgmt.network.v2019_07_01.models.AzureFirewallThreatIntelMode :param virtual_hub: The virtualHub to which the firewall belongs. 
:type virtual_hub: ~azure.mgmt.network.v2019_07_01.models.SubResource :param firewall_policy: The firewallPolicy associated with this azure firewall. :type firewall_policy: ~azure.mgmt.network.v2019_07_01.models.SubResource :ivar hub_ip_addresses: IP addresses associated with AzureFirewall. :vartype hub_ip_addresses: ~azure.mgmt.network.v2019_07_01.models.HubIPAddresses """ _validation = { 'name': {'readonly': True}, 'type': {'readonly': True}, 'etag': {'readonly': True}, 'provisioning_state': {'readonly': True}, 'hub_ip_addresses': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'location': {'key': 'location', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'zones': {'key': 'zones', 'type': '[str]'}, 'etag': {'key': 'etag', 'type': 'str'}, 'application_rule_collections': {'key': 'properties.applicationRuleCollections', 'type': '[AzureFirewallApplicationRuleCollection]'}, 'nat_rule_collections': {'key': 'properties.natRuleCollections', 'type': '[AzureFirewallNatRuleCollection]'}, 'network_rule_collections': {'key': 'properties.networkRuleCollections', 'type': '[AzureFirewallNetworkRuleCollection]'}, 'ip_configurations': {'key': 'properties.ipConfigurations', 'type': '[AzureFirewallIPConfiguration]'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'threat_intel_mode': {'key': 'properties.threatIntelMode', 'type': 'str'}, 'virtual_hub': {'key': 'properties.virtualHub', 'type': 'SubResource'}, 'firewall_policy': {'key': 'properties.firewallPolicy', 'type': 'SubResource'}, 'hub_ip_addresses': {'key': 'properties.hubIpAddresses', 'type': 'HubIPAddresses'}, } def __init__( self, *, id: Optional[str] = None, location: Optional[str] = None, tags: Optional[Dict[str, str]] = None, zones: Optional[List[str]] = None, application_rule_collections: Optional[List["AzureFirewallApplicationRuleCollection"]] = None, 
nat_rule_collections: Optional[List["AzureFirewallNatRuleCollection"]] = None, network_rule_collections: Optional[List["AzureFirewallNetworkRuleCollection"]] = None, ip_configurations: Optional[List["AzureFirewallIPConfiguration"]] = None, threat_intel_mode: Optional[Union[str, "AzureFirewallThreatIntelMode"]] = None, virtual_hub: Optional["SubResource"] = None, firewall_policy: Optional["SubResource"] = None, **kwargs ): super(AzureFirewall, self).__init__(id=id, location=location, tags=tags, **kwargs) self.zones = zones self.etag = None self.application_rule_collections = application_rule_collections self.nat_rule_collections = nat_rule_collections self.network_rule_collections = network_rule_collections self.ip_configurations = ip_configurations self.provisioning_state = None self.threat_intel_mode = threat_intel_mode self.virtual_hub = virtual_hub self.firewall_policy = firewall_policy self.hub_ip_addresses = None class AzureFirewallApplicationRule(msrest.serialization.Model): """Properties of an application rule. :param name: Name of the application rule. :type name: str :param description: Description of the rule. :type description: str :param source_addresses: List of source IP addresses for this rule. :type source_addresses: list[str] :param protocols: Array of ApplicationRuleProtocols. :type protocols: list[~azure.mgmt.network.v2019_07_01.models.AzureFirewallApplicationRuleProtocol] :param target_fqdns: List of FQDNs for this rule. :type target_fqdns: list[str] :param fqdn_tags: List of FQDN Tags for this rule. 
:type fqdn_tags: list[str] """ _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, 'source_addresses': {'key': 'sourceAddresses', 'type': '[str]'}, 'protocols': {'key': 'protocols', 'type': '[AzureFirewallApplicationRuleProtocol]'}, 'target_fqdns': {'key': 'targetFqdns', 'type': '[str]'}, 'fqdn_tags': {'key': 'fqdnTags', 'type': '[str]'}, } def __init__( self, *, name: Optional[str] = None, description: Optional[str] = None, source_addresses: Optional[List[str]] = None, protocols: Optional[List["AzureFirewallApplicationRuleProtocol"]] = None, target_fqdns: Optional[List[str]] = None, fqdn_tags: Optional[List[str]] = None, **kwargs ): super(AzureFirewallApplicationRule, self).__init__(**kwargs) self.name = name self.description = description
# codeparrot/github-code-clean  (dataset file-boundary marker; not part of either script)
#####################################################################
#                                                                   #
#  Refinement Script with SHELX and CCP4/REFMAC5                    #
#                                                                   #
#  Copyright: Molecular Images 2005                                 #
#                                                                   #
#  This script is distributed under the same conditions as MIFit    #
#                                                                   #
#####################################################################
# NOTE(review): legacy Python 2 script -- it uses print statements and the
# `dircache` module (removed in Python 3), so it must run under Python 2.

import sys
import os
import time
import string
import math
import dircache
import getopt

import ccp4check


def Usage():
    """Print command-line usage for this refinement driver to stdout."""
    print "Usage: %s [options]" % sys.argv[0]
    print "Options are:"
    print " -p,--pdbfile=FILE the pdb file"
    print " -m,--mtzfile=FILE the mtz file"
    print " -l,--libfile=FILE the library file. Default: no file"
    print " -d,--workdir=DIR The working directory"
    print " -e,--engine=ENGINE One of refmac5 (default), shelx, or rigid"
    print " -w,--weight=NUM The weighting factor. Default: 0.1"
    print " -c,--cycles=NUM Number of refinement cycles to run"
    print " --water_cycles=NUM Number of water cycles to run. Default: 0"
    print " -t,--tls_file=FILE TLS specification file. Default: no file"
    print " -s,--shelx_dir=DIR Path to shelx executables. Default: $SHELXBIN"
    print " -h,--mifithome=DIR Path to MIFit. Default: no path"
    print " --bref_type=TYPE B-factor refinement type: anisotropic or none. Default: none"
    print " --max_res=NUM Maximum resolution. Default: no value"
    print " -?,--help this help"


def Run(argv=None):
    """Parse command-line options and drive a refinement job (REFMAC5,
    SHELX, or REFMAC5 rigid-body) in the requested working directory."""
    if argv is None:
        argv=sys.argv

    # Path to MIFit installation to find phi-psi data
    mifit_root = 'none'

    # Initialize
    quote = """'"""  # a literal single-quote; used when parsing quoted space-group names
    job_prefix = 'refine_'
    pdbfile = 'none'
    mtzfile = 'none'
    libfile = 'none'
    workingdir = 'none'
    runid = '1'  # job serial number; later bumped from project_history.txt / existing refine_*.pdb files
    projectlog = 'project_history.txt'
    ref_engine = 'refmac5'
    # MTZ column labels, filled in later by parsing the mtzdump header
    flabel = 'none'
    sigflabel = 'none'
    rfreelabel = 'none'
    anomlabel = 'none'
    siganomlabel = 'none'
    resolution_mtz = 'none'
    # Refinement statistics parsed from the engine's output
    rwork = 'none'
    rfree = 'none'
    rmsd_bonds = 'none'
    percent_phi_psi = 'none'
    percent_rotamer = 'none'
    # Paths to the SHELX executables (resolved from --shelx_dir / $SHELXBIN)
    shelxpro = 'none'
    shelxh = 'none'
    max_res = 'none'
    weight = 'none'
    bref_type = 'none'
    freeflag = '0'  # flag value passed to REFMAC's FREE keyword
    number_molecules = '1'
    res_number_X_high = 0  # highest residue number seen in chain X (reserved for waters)
    int_res_number = 0
    number_sym_ops = 1
    cycles = '5'
    water_pick = 'no'
    water_cycles = 0
    big_cycles = 1  # outer refine/water-pick iterations; water_cycles + 1 when picking
    water_count = 0
    missing_protein_chain = 'no'
    validate = 'yes'
    max_conformers = 10
    disorder = 'no'
    extra_links = 'no'  # set 'yes' if the library file declares extra covalent links
    resolution_output = '?'
    tlsfile = 'none'
    shelx_directory = 'none'
    seq_chain_prev = '?'
filename_log_full = 'none' filename_pdb_full = 'none' filename_mtz_full = 'none' filename_refmac_full = 'none' filename_anom_full = 'none' number_chain_list = 0 aList_chains = [] aList_nterm = [] aList_cterm = [] parseLine = [] aLine = [] labelList = [] colList = [] aList_chain_store = [] aList_res_number_store = [] aList_res_name_store = [] aList_sequence_chain_id = [] pdb_annotate = [] aList_sequence_chain = [] aList_sequence_number = [] aList_sequence_resname = [] aList_missing_residues = [] aList_current_residue_atoms = [] aList_allatoms_chain = [] aList_allatoms_res_number = [] aList_allatoms_res_name = [] aList_allatoms_atom_name = [] aList_SEQRES = [] aList_rotamer_chain = [] aList_rotamer_resno = [] aList_rotamer_resname = [] aList_bonds_chain = [] aList_bonds_resno = [] aList_bonds_resname = [] aList_angles_chain = [] aList_angles_resno = [] aList_angles_resname = [] aList_contacts_chain = [] aList_contacts_resno = [] aList_contacts_resname = [] aList_chiral_chain = [] aList_chiral_resno = [] aList_chiral_resname = [] aList_cis_chain = [] aList_cis_resno = [] aList_cis_resname = [] aList_rama_chain = [] aList_rama_resno = [] aList_rama_resname = [] aList_omega_chain = [] aList_omega_resno = [] aList_omega_resname = [] aList_density_chain = [] aList_density_resno = [] aList_density_resname = [] aList_disorder_chain = [] aList_disorder_resno = [] aList_disorder_resname = [] aList_errors = [] aList_phi_all = [] aList_psi_all = [] aList_phipsi_prob_all = [] aList_phi_gly = [] aList_psi_gly = [] aList_phipsi_prob_gly = [] aList_phi_pro = [] aList_psi_pro = [] aList_phipsi_prob_pro = [] aList_peak_x = [] aList_peak_y = [] aList_peak_z = [] aList_x = [] aList_y = [] aList_z = [] bond_list = 'no' angle_list = 'no' contact_list = 'no' chiral_list = 'no' iteration_final = 'no' chain_id_prev = '?' res_number_prev = '?' 
num_residues = 0.0 amino_acid_count = 0.0 count_phipsi = 0.0 count_rotamer = 0.0 phipsi_gen_datafile = 'none' phipsi_gly_datafile = 'none' phipsi_pro_datafile = 'none' number_phipsi_gen_table = 0 number_phipsi_gly_table = 0 number_phipsi_pro_table = 0 # Omega threshhold is 4 x true sd from peak at 178.9 # allowed phi-psi: gen 99.95% data, 41.5% area, GLY 99.8% data, 63% area, PRO 99.8% data, 18.1% area omega_peak = 178.9 omega_thresh = 4.0 * 5.6 phipsi_thresh_gen = 0.00847 phipsi_thresh_gly = 0.00384 phipsi_thresh_pro = 0.0015 # Atom lists aList_GLY_atoms = ['N','CA','C','O'] aList_ALA_atoms = ['N','CA','C','O','CB'] aList_VAL_atoms = ['N','CA','C','O','CB','CG1','CG2'] aList_ILE_atoms = ['N','CA','C','O','CB','CG1','CG2','CD1'] aList_LEU_atoms = ['N','CA','C','O','CB','CG','CD1','CD2'] aList_PHE_atoms = ['N','CA','C','O','CB','CG','CD1','CE1','CZ','CE2','CD2'] aList_PRO_atoms = ['N','CA','C','O','CB','CG','CD'] aList_MET_atoms = ['N','CA','C','O','CB','CG','SD','CE'] aList_TRP_atoms = ['N','CA','C','O','CB','CG','CD1','NE1','CE2','CD2','CE3','CZ3','CH2','CZ2'] aList_CYS_atoms = ['N','CA','C','O','CB','SG'] aList_SER_atoms = ['N','CA','C','O','CB','OG'] aList_THR_atoms = ['N','CA','C','O','CB','OG1','CG2'] aList_ASN_atoms = ['N','CA','C','O','CB','CG','OD1','ND2'] aList_GLN_atoms = ['N','CA','C','O','CB','CD','CG','OE1','NE2'] aList_TYR_atoms = ['N','CA','C','O','CB','CG','CD1','CE1','CZ','OH','CE2','CD2'] aList_HIS_atoms = ['N','CA','C','O','CB','CG','ND1','CE1','NE2','CD2'] aList_ASP_atoms = ['N','CA','C','O','CB','CG','OD1','OD2'] aList_GLU_atoms = ['N','CA','C','O','CB','CD','CG','OE1','OE2'] aList_LYS_atoms = ['N','CA','C','O','CB','CG','CD','CE','NZ'] aList_ARG_atoms = ['N','CA','C','O','CB','CD','CG','NE','CZ','NH1','NH2'] # Platform test_platform = sys.platform # Read args number_of_args = len(argv) args = argv[1:] optlist, args = getopt.getopt( args,'p:m:l:d:e:w:c:t:s:h:?', ["pdbfile=","mtzfile=","libfile=","workdir=","engine=", 
"weight=","cycles=","water_cycles=", "tls_file=","shelx_dir=","mifithome=", "bref_type=","max_res=","help"]) number_of_inputs = len(optlist) if number_of_inputs==0: Usage() return count = 0 while count < number_of_inputs: aList = optlist[count] number_of_list_inputs = len(aList) arg_value="" if number_of_list_inputs >=1: arg_value = aList[0] if arg_value == '-?' or arg_value=='--help': Usage() return if number_of_list_inputs >=2: param_value = aList[1] if arg_value == '-p' or arg_value=="--pdbfile": pdbfile = param_value elif arg_value == '-m' or arg_value=="--mtzfile": mtzfile = param_value elif arg_value == '-l' or arg_value=="--libfile": libfile = param_value elif arg_value == '-d' or arg_value=="--workdir": workingdir = param_value elif arg_value == '-e' or arg_value=="--engine": ref_engine = param_value elif arg_value == '-w' or arg_value=="--weight": weight = param_value elif arg_value == '--max_res': max_res = param_value elif arg_value == '-bref_type': bref_type = param_value elif arg_value == '-c' or arg_value=="--cycles": cycles = param_value elif arg_value == '--water_cycles': water_cycles = int(param_value) elif arg_value == '-t' or arg_value=="--tls_file": tlsfile = param_value elif arg_value == '-s' or arg_value=="--shelx_dir": shelx_directory = param_value elif arg_value == '-h' or arg_value=="--mifithome": mifit_root = param_value count = count + 1 ccp4,error = ccp4check.ccp4check() if not ccp4: print '\n' + error + '\n' time.sleep(4) return 1 fileexists = os.path.exists(pdbfile) if fileexists == 0: print 'The PDB file was not found ',pdbfile time.sleep(4) return 1 fileexists = os.path.exists(mtzfile) if fileexists == 0: print 'The MTZ file was not found ',mtzfile time.sleep(4) return 1 fileexists = os.path.exists(workingdir) if fileexists == 0: print 'The working directory was not found ',workingdir time.sleep(4) return 1 fileexists = os.path.exists(libfile) if fileexists == 0 and os.path.basename(libfile) != 'none': print 'The library file was not 
found ',libfile time.sleep(4) return 1 fileexists = os.path.exists(tlsfile) if fileexists == 0 and os.path.basename(tlsfile) != 'none': print 'The TLS specification file was not found ',tlsfile time.sleep(4) return 1 if ref_engine != 'shelx' and ref_engine != 'rigid' and ref_engine != 'refmac5': print 'The refinement type must be one of shelx/rigid/refmac5' time.sleep(4) return 1 if weight == 'none': weight = '0.1' if water_cycles > 0: water_pick = 'yes' big_cycles = water_cycles + 1 if bref_type == 'none': bref_type = 'isotropic' # Check MIFit installation to access phi-psi data files (try environment variable, default and direct path) if mifit_root != 'none': mifit_root_data = os.path.join(mifit_root,'data') phipsi_gen_datafile = os.path.join(mifit_root_data,'rama500-general.data') phipsi_gly_datafile = os.path.join(mifit_root_data,'rama500-gly-sym-nosec.data') phipsi_pro_datafile = os.path.join(mifit_root_data,'rama500-pro.data') # If needed, get determine SHELX bin directory path if ref_engine == 'shelx': # Determine installation from a possible environment variable find_shelx_bin = os.environ.keys().count('SHELXBIN') if find_shelx_bin != 0: shelx_directory = os.environ['SHELXBIN'] # Determine from input parameter if shelx_directory != 'none': fileexists = os.path.exists(shelx_directory) if fileexists != 0: if test_platform.find('win') > -1: shelxpro = os.path.join(shelx_directory,'shelxpro.exe') shelxh = os.path.join(shelx_directory,'shelxh.exe') else: shelxpro = os.path.join(shelx_directory,'shelxpro') shelxh = os.path.join(shelx_directory,'shelxh') # Confirm we have SHELXH and SHELXPRO fileexists = os.path.exists(shelxpro) if fileexists == 0: print 'The SHELXPRO executable was not found ',shelxpro time.sleep(4) return 1 fileexists = os.path.exists(shelxh) if fileexists == 0: print 'The SHELXH executable was not found ',shelxh time.sleep(4) return 1 # Create a CCP4 temp space for temporary files idcode = '000000' gmt = time.gmtime(time.time()) fmt = '%H%M%S' 
idcode = time.strftime(fmt,gmt) path_scratch = 'temp_' + idcode working_ccp4_scratch = os.path.join(ccp4.scr,path_scratch) fileexists = os.path.exists(working_ccp4_scratch) if fileexists == 0: os.mkdir(working_ccp4_scratch) os.environ['CCP4_SCR'] = working_ccp4_scratch # Go to working area os.chdir(workingdir) # Copy MTZ file to working area file = open(mtzfile,'rb') allLines = file.readlines() file.close() file = open('mi_refine_unsorted.mtz','wb') file.writelines(allLines) file.close() # Use CCP4/CAD to ensure that data is sorted properly for subsequent CCP4/FFT process fileexists = os.path.exists('mi_refine.mtz') if fileexists != 0: os.remove('mi_refine.mtz') file = open('mi_cad.inp','w') file.write('LABIN FILE_NUMBER 1 ALL\n') file.write('END\n') file.close() runcad = 'cad HKLIN1 mi_refine_unsorted.mtz HKLOUT mi_refine.mtz < mi_cad.inp > mi_cad.log' os.system(runcad) fileexists = os.path.exists('mi_refine.mtz') if fileexists != 0: os.remove('mi_refine_unsorted.mtz') os.remove('mi_cad.log') os.remove('mi_cad.inp') else: print 'The CAD run to resort the data seems to have failed' time.sleep(4) return 1 # Extract MTZ labels for F and SD(F) and Rfree set file = open('mi_mtzdump.inp','w') file.write('HEADER\n') file.write('SYMMETRY\n') file.write('END\n') file.close() runmtz = 'mtzdump HKLIN mi_refine.mtz < mi_mtzdump.inp > mi_mtzdump.log' os.system(runmtz) file = open('mi_mtzdump.log','r') allLines = file.readlines() file.close() os.remove('mi_mtzdump.log') os.remove('mi_mtzdump.inp') read_columns = 'no' read_labels = 'no' read_resolution = 'no' read_cell = 'no' for eachLine in allLines: line_length = len(eachLine) if read_columns == 'yes' and line_length > 1: colList = eachLine.split() read_columns = 'no' if read_labels == 'yes' and line_length > 1: labelList = eachLine.split() read_labels = 'no' if read_resolution == 'yes' and line_length > 1: parseLine = eachLine.split() resolution_mtz = parseLine[5] read_resolution = 'no' if read_cell == 'yes' and line_length > 
1: parseLine = eachLine.split() acell_mtz = parseLine[0] bcell_mtz = parseLine[1] ccell_mtz = parseLine[2] alpha_mtz = parseLine[3] beta_mtz = parseLine[4] gamma_mtz = parseLine[5] acell_mtz = float(acell_mtz) bcell_mtz = float(bcell_mtz) ccell_mtz = float(ccell_mtz) alpha_mtz = float(alpha_mtz) beta_mtz = float(beta_mtz) gamma_mtz = float(gamma_mtz) acell_mtz = round(acell_mtz,3) bcell_mtz = round(bcell_mtz,3) ccell_mtz = round(ccell_mtz,3) alpha_mtz = round(alpha_mtz,2) beta_mtz = round(beta_mtz,2) gamma_mtz = round(gamma_mtz,2) read_cell = 'no' if eachLine.find('* Number of Symmetry Operations') > -1: parseLine = eachLine.split('=') number_sym_ops = parseLine[1] number_sym_ops = int(number_sym_ops) if eachLine.find('* Column Labels :') > -1: read_columns = 'yes' if eachLine.find('* Column Types :') > -1: read_labels = 'yes' if eachLine.find('Resolution Range') > -1: read_resolution = 'yes' if eachLine.find('* Cell Dimensions :') > -1: read_cell = 'yes' if eachLine.find('* Space Group =') > -1: # SG number and name eachLine = eachLine.strip() parseLine = eachLine.split('=') space_group_out = parseLine[1] parseLine = space_group_out.split(quote) space_group = parseLine[0] space_group = space_group.strip() space_group_mtz = parseLine[1] space_group_mtz = space_group_mtz.strip() list_length = len(labelList) count = 0 while count < list_length: if labelList[count] == 'F' and flabel == 'none': flabel = colList[count] if labelList[count] == 'Q' and sigflabel == 'none': sigflabel = colList[count] if labelList[count] == 'I' and rfreelabel == 'none': rfreelabel = colList[count] if labelList[count] == 'D' and anomlabel == 'none': anomlabel = colList[count] if labelList[count] == 'Q' and siganomlabel == 'none': if anomlabel != 'none' and colList[count] != sigflabel: siganomlabel = colList[count] count = count + 1 if flabel == 'none' or sigflabel == 'none' or rfreelabel == 'none': print 'MTZ labels for F,sd(F) or Rfree-data could not be established' time.sleep(4) return 1 # 
Use CCP4/CAD to capture any anomalous difference data if anomlabel != 'none' and siganomlabel != 'none': fileexists = os.path.exists('mi_anommap.mtz') if fileexists != 0: os.remove('mi_anommap.mtz') file = open('mi_cad.inp','w') file.write('LABIN FILE_NUMBER 1 E1=') file.write(anomlabel) file.write(' E2=') file.write(siganomlabel) file.write('\n') # Avoid issues of the anomalous data exceeding the refinement resolution if max_res != 'none': file.write('RESOLUTION FILE_NUMBER 1 1000.0 ') file.write(max_res) file.write('/n') file.write('END\n') file.close() runcad = 'cad HKLIN1 mi_refine.mtz HKLOUT mi_anommap.mtz < mi_cad.inp > mi_cad.log' os.system(runcad) fileexists = os.path.exists('mi_anommap.mtz') if fileexists != 0: os.remove('mi_cad.log') os.remove('mi_cad.inp') else: print 'The CAD run to extract anomalous difference data seems to have failed' time.sleep(4) return 1 # Set resolution if max_res == 'none': resolution_output = resolution_mtz else: resolution_output = max_res # Copy coordinates to local working area and collect information on content offset = 0 res_count = 0 cryst_found = 'no' file = open(pdbfile,'r') allLines = file.readlines() file.close() # Precheck to look for CRYST1 record for eachLine in allLines: tag = eachLine[0:6] tag = tag.strip() if tag == 'CRYST1': cryst_found = 'yes' file = open('mi_refine.pdb','w') # Add CRYST1 record if not present if cryst_found == 'no': acell_mtz = '%.3f'%(acell_mtz) acell_mtz = str(acell_mtz) acell_mtz = acell_mtz.rjust(9) bcell_mtz = '%.3f'%(bcell_mtz) bcell_mtz = str(bcell_mtz) bcell_mtz = bcell_mtz.rjust(9) ccell_mtz = '%.3f'%(ccell_mtz) ccell_mtz = str(ccell_mtz) ccell_mtz = ccell_mtz.rjust(9) alpha_mtz = '%.2f'%(alpha_mtz) alpha_mtz = str(alpha_mtz) alpha_mtz = alpha_mtz.rjust(7) beta_mtz = '%.2f'%(beta_mtz) beta_mtz = str(beta_mtz) beta_mtz = beta_mtz.rjust(7) gamma_mtz = '%.2f'%(gamma_mtz) gamma_mtz = str(gamma_mtz) gamma_mtz = gamma_mtz.rjust(7) aLine = 'CRYST1' + acell_mtz + bcell_mtz + ccell_mtz \ + 
alpha_mtz + beta_mtz + gamma_mtz + ' ' + space_group_mtz file.write(aLine) file.write('\n') # Read/write file contents seq_chain_prev = '?' for eachLine in allLines: tag = eachLine[0:6] tag = tag.strip() # Parse SEQRES records into chain,name,dummy-number lists if tag == 'SEQRES': SEQRES = eachLine.strip() aList_SEQRES.append(SEQRES) seq_chain = eachLine[11:12] seq_chain = seq_chain.strip() seqLine = eachLine[19:70] parseLine = seqLine.split() if seq_chain != seq_chain_prev: aList_sequence_chain_id.append(seq_chain) seq_chain_prev = seq_chain length = len(parseLine) count = 0 while count < length: res_name = parseLine[count] aList_sequence_chain.append(seq_chain) aList_sequence_number.append('?') aList_sequence_resname.append(res_name) count = count + 1 # Ensure pdb/mtz cell compatibility and check CRYST1 integrity if tag == 'CRYST1': parseLine = eachLine.split() length = len(parseLine) if length > 7: acell_pdb = parseLine[1] bcell_pdb = parseLine[2] ccell_pdb = parseLine[3] alpha_pdb = parseLine[4] beta_pdb = parseLine[5] gamma_pdb = parseLine[6] acell_pdb = float(acell_pdb) bcell_pdb = float(bcell_pdb) ccell_pdb = float(ccell_pdb) alpha_pdb = float(alpha_pdb) beta_pdb = float(beta_pdb) gamma_pdb = float(gamma_pdb) a_dif = acell_pdb - acell_mtz b_dif = bcell_pdb - bcell_mtz c_dif = ccell_pdb - ccell_mtz alpha_dif = alpha_pdb - alpha_mtz beta_dif = beta_pdb - beta_mtz gamma_dif = gamma_pdb - gamma_mtz a_dif = abs(a_dif) b_dif = abs(b_dif) c_dif = abs(c_dif) alpha_dif = abs(alpha_dif) beta_dif = abs(beta_dif) gamma_dif = abs(gamma_dif) # Fix to mtz values if disagreeing and write a new CRYST1 record if a_dif > 0.1 or b_dif > 0.1 or c_dif > 0.1 \ or alpha_dif > 0.1 or beta_dif > 0.1 or gamma_dif > 0.1: acell_mtz = '%.3f'%(acell_mtz) acell_mtz = str(acell_mtz) acell_mtz = acell_mtz.rjust(9) bcell_mtz = '%.3f'%(bcell_mtz) bcell_mtz = str(bcell_mtz) bcell_mtz = bcell_mtz.rjust(9) ccell_mtz = '%.3f'%(ccell_mtz) ccell_mtz = str(ccell_mtz) ccell_mtz = ccell_mtz.rjust(9) 
alpha_mtz = '%.2f'%(alpha_mtz) alpha_mtz = str(alpha_mtz) alpha_mtz = alpha_mtz.rjust(7) beta_mtz = '%.2f'%(beta_mtz) beta_mtz = str(beta_mtz) beta_mtz = beta_mtz.rjust(7) gamma_mtz = '%.2f'%(gamma_mtz) gamma_mtz = str(gamma_mtz) gamma_mtz = gamma_mtz.rjust(7) eachLine = 'CRYST1' + acell_mtz + bcell_mtz + ccell_mtz\ + alpha_mtz + beta_mtz + gamma_mtz + ' ' + space_group_mtz # Check atom information if tag == 'ATOM' or tag == 'HETATM': chain_id = eachLine[21:22] chain_id = chain_id.strip() res_number = eachLine[22:26] res_number = res_number.strip() res_name = eachLine[17:20] res_name = res_name.strip() atom_name = eachLine[12:16] # Check and fix potential atom justification issues for common ions atom_justify = 'OK' if atom_name == ' NA ': atom_name = 'NA ' atom_justify = 'notOK' if atom_name == ' MG ': atom_name = 'MG ' atom_justify = 'notOK' if atom_name == ' CL ': atom_name = 'CL ' atom_justify = 'notOK' if atom_name == ' CR ': atom_name = 'CR ' atom_justify = 'notOK' if atom_name == ' MN ': atom_name = 'MN ' atom_justify = 'notOK' if atom_name == ' FE ': atom_name = 'FE ' atom_justify = 'notOK' if atom_name == ' CO ': atom_name = 'CO ' atom_justify = 'notOK' if atom_name == ' NI ': atom_name = 'NI ' atom_justify = 'notOK' if atom_name == ' CU ': atom_name = 'CU ' atom_justify = 'notOK' if atom_name == ' ZN ': atom_name = 'ZN ' atom_justify = 'notOK' if atom_name == ' SE ': atom_name = 'SE ' atom_justify = 'notOK' if atom_name == ' BR ': atom_name = 'BR ' atom_justify = 'notOK' if atom_name == ' CS ': atom_name = 'CS ' atom_justify = 'notOK' if atom_justify == 'notOK': eachLine_fix = eachLine[0:12] + atom_name + eachLine[16:80] eachLine = eachLine_fix # Obtain residue (CA) count if eachLine.find(' CA ') > -1: res_count = res_count + 1 # Check for waters if res_name == 'HOH': water_count = water_count + 1 # Get highest residue number in the chain X we will assign for waters if chain_id == 'X': int_res_number = int(res_number) if int_res_number > 
res_number_X_high: res_number_X_high = int_res_number # Obtain protein chain names and terminii count = 0 found = 'no' if res_name != 'HOH': if chain_id == ' ': missing_protein_chain = 'yes' while count < number_chain_list: if chain_id == aList_chains[count]: found = 'yes' count = count + 1 if found == 'no': aList_nterm.append(res_number) aList_chains.append(chain_id) number_chain_list = len(aList_chains) if number_chain_list > 1: aList_cterm.append(res_number_prev) res_number_prev = res_number # Write record but eliminate SCALE records to avoid issues with changed cells and CISPEP # records since it it better if they are recomputed following refitting if tag != 'SCALE1' and tag != 'SCALE2' and tag != 'SCALE3' and tag != 'CISPEP': eachLine = eachLine.strip() file.write(eachLine) file.write('\n') file.close() aList_cterm.append(res_number_prev) number_chain_list = len(aList_chains) if cryst_found == 'no': print 'There was no CRYST1 record in the coordinate file - stopping\n' time.sleep(4) return 1 # Set water picking defaults depending on molecule size water_add = 0.25*res_count water_add = int(water_add) # Copy library file (if any) to temp area. REFMAC5 read requires a full path. 
if os.path.basename(libfile) != 'none': file = open(libfile,'r') allLines = file.readlines() file.close() # Check for extra protein-ligand covalent links for eachLine in allLines: if eachLine.find('data_link_list') > -1: extra_links = 'yes' temp_lib = os.path.join(ccp4.scr,'mi_templib.lib') file = open(temp_lib,'w') file.writelines(allLines) file.close() else: temp_lib = 'none' # Set general file names fileexists = os.path.exists(projectlog) if fileexists != 0: file = open(projectlog,'r') allLines = file.readlines() file.close() for eachLine in allLines: if eachLine.find('Job ID') > -1 and eachLine.find('refine') > -1: aList = eachLine.split('_') runid = aList[1] runid_int = int(runid) runid_int = runid_int + 1 runid = str(runid_int) job_id = job_prefix + runid # Fix case where there are unrecorded refinement files - get highest serial number filename_pdb = job_id + '.pdb' filename_pdb_full = os.path.join(workingdir,filename_pdb) fileexists = os.path.exists(filename_pdb_full) if fileexists != 0: runid_prev = 0 aList_dir = dircache.listdir(workingdir) number_files = len(aList_dir) count = 0 while count < number_files: afile = aList_dir[count] if afile.find('refine_') > -1 and afile.find('.pdb') > -1: afile_tag = afile.replace('refine_','') afile_tag = afile_tag.replace('.pdb','') if afile_tag.isdigit() == 1: runid_int = int(afile_tag) if runid_int > runid_prev: runid_prev = runid_int runid_int = runid_int + 1 runid = str(runid_int) job_id = job_prefix + runid count = count + 1 # filename_log = job_id + '.log' filename_pdb = job_id + '.pdb' filename_mtz = job_id + '.mtz' filename_tls = job_id + '.tls' errorfile = job_id + '_errors.txt' filename_log_full = os.path.join(workingdir,filename_log) filename_pdb_full = os.path.join(workingdir,filename_pdb) filename_mtz_full = os.path.join(workingdir,filename_mtz) filename_errors_full = os.path.join(workingdir,errorfile) filename_tls_full = os.path.join(workingdir,filename_tls) fileexists = os.path.exists(filename_log) if 
fileexists != 0: os.remove(filename_log) fileexists = os.path.exists(filename_pdb) if fileexists != 0: os.remove(filename_pdb) fileexists = os.path.exists(filename_mtz) if fileexists != 0: os.remove(filename_mtz) fileexists = os.path.exists(filename_tls) if fileexists != 0: os.remove(filename_tls) ################################################# # Start rigid-body refinement (REFMAC5) section # ################################################# if ref_engine == 'rigid': print '\nStarting REFMAC5 rigid-body refinement process' print 'CCP4 scratch space:',working_ccp4_scratch print 'Job-ID:',job_id print 'Using mtz data:',flabel,',',sigflabel,',',rfreelabel # REFMAC specific file names filename_in = job_id + '.inp' filename_refmac_temp = job_id + '.refmac' filename_refmac = job_id + '_cif.txt' filename_refmac_full = os.path.join(workingdir,filename_refmac) # Setup file = open(filename_in,'w') file.write('LABIN FP=') file.write(flabel) file.write(' SIGFP=') file.write(sigflabel) file.write(' FREE=') file.write(rfreelabel) file.write('\nLABOUT FC=FC PHIC=PHIC DELFWT=DELFWT PHDELWT=PHDELFWT FWT=FWT FOM=FOM\n') if max_res != 'none': file.write('RESOLUTION 100.0 ') file.write(max_res) file.write('\n') file.write('FREE ') file.write(freeflag) file.write('\nREFI TYPE RIGID\n') file.write('REFI RESI MLKF\n') file.write('REFI BREF OVERall METH CGMAT\n') file.write('SCAL TYPE BULK LSSC ANIS FIXBulk SCBUlk 0.78 BBULK 180.0\n') file.write('SOLVENT NO\n') file.write('MAKE_RESTRAINTS HYDR N\n') file.write('RIGIDbody NCYC 12\n') # Set group definitions by chain-id count = 0 while count < number_chain_list: group_number = count + 1 group_number = str(group_number) chain_id = aList_chains[count] nterm = aList_nterm[count] cterm = aList_cterm[count] file.write('RIGIDbody GROUP ') file.write(group_number) file.write(' FROM ') file.write(nterm) file.write(' ') file.write(chain_id) file.write(' TO ') file.write(cterm) file.write(' ') file.write(chain_id) file.write('\n') count = count + 
1 # file.write('MONI FEW\n') file.write('BINS 10\n') file.write('USECWD\n') file.write('PNAME noid\n') file.write('DNAME ') file.write(job_id) file.write('\n') file.write('END\n') file.close() # Run runrefine = 'refmac5 HKLIN mi_refine.mtz XYZIN mi_refine.pdb XYZOUT mi_refine_out.pdb HKLOUT '\ + filename_mtz + ' < ' + filename_in + ' > ' + filename_log os.system(runrefine) # Clean-up and rename os.remove(filename_in) fileexists = os.path.exists('mi_refine_out.pdb') if fileexists != 0: os.rename('mi_refine_out.pdb',filename_pdb) print 'Output PDB file:',filename_pdb else: 'REFMAC5 rigid-body o/p coordinate file was not found' time.sleep(4) return 1 fileexists = os.path.exists(filename_mtz) if fileexists != 0: print 'Output MTZ file:',filename_mtz else: 'REFMAC rigid-body o/p phased data file was not found' time.sleep(4) return 1 fileexists = os.path.exists(filename_refmac) if fileexists != 0: os.remove(filename_refmac) fileexists = os.path.exists('mi_refine.mtz') if fileexists != 0: os.remove('mi_refine.mtz') fileexists = os.path.exists(filename_refmac_temp) if fileexists != 0: os.rename(filename_refmac_temp,filename_refmac) print 'Output CIF log file:',filename_refmac # Parse global summary file = open(filename_refmac,'r') allLines = file.readlines() file.close() for eachLine in allLines: if eachLine.find('_refine.ls_R_factor_R_work') > -1: parseLine = eachLine.split() rwork = parseLine[1] if eachLine.find('_refine.ls_R_factor_R_free') > -1: parseLine = eachLine.split() rfree = parseLine[1] else: print 'REFMAC o/p CIF log file was not found' return 1 fileexists = os.path.exists(filename_log) if fileexists != 0: print 'Output REFMAC5 log:',filename_log else: print 'The REFMAC5 log file was not found' time.sleep(4) return 1 print 'Rwork=',rwork,' Rfree=',rfree ########################### # Start REFMAC5 section # ########################### if ref_engine == 'refmac5': print '\nStarting REFMAC5 process' print 'CCP4 scratch space:',working_ccp4_scratch print 
'Job-ID:',job_id print 'Using mtz data:',flabel,',',sigflabel,',',rfreelabel if os.path.basename(libfile) != 'none': print 'Using library file:',libfile # REFMAC specific file names filename_in = job_id + '.inp' filename_refmac_temp = job_id + '.refmac' filename_refmac = job_id + '_cif.txt' filename_refmac_full = os.path.join(workingdir,filename_refmac) # Establish TLS file if os.path.basename(tlsfile) != 'none': file = open(tlsfile,'r') allLines = file.readlines() file.close() file = open('mi_temp.tls','w') file.writelines(allLines) file.close() tls_files = ' TLSIN mi_temp.tls TLSOUT ' + filename_tls + ' ' else: tls_files = ' ' # Setup file = open(filename_in,'w') file.write('LABIN FP=') file.write(flabel) file.write(' SIGFP=') file.write(sigflabel) file.write(' FREE=') file.write(rfreelabel) file.write('\nLABOUT FC=FC PHIC=PHIC DELFWT=DELFWT PHDELWT=PHDELFWT FWT=FWT FOM=FOM\n') file.write('FREE ') file.write(freeflag) file.write('\n') # Options/defaults if max_res != 'none': file.write('RESOLUTION 100.0 ') file.write(max_res) file.write('\n') if bref_type == 'anisotropic': file.write('REFI BREF ANISotropic METH CGMAT\n') else: file.write('REFI BREF ISOT METH CGMAT\n') # Standard setup file.write('SCAL TYPE SIMPLE LSSC ANIS\n') file.write('SOLVENT YES\n') file.write('REFI TYPE RESTtrained\n') file.write('REFI RESI MLKF\n') # TLS option - set uniform B, establish TLS then refine residual B-factors if os.path.basename(tlsfile) != 'none': file.write('REFI TLSC 20\n') file.write('BFAC SET 30.0\n') file.write('WEIGH MATRIX ') file.write(weight) file.write('\n') if extra_links == 'yes': file.write('MAKE_RESTRAINTS LINK Y\n') file.write('MAKE_RESTRAINTS CISP Y\n') file.write('MAKE_RESTRAINTS SS Y\n') file.write('MAKE_RESTRAINTS HYDR N\n') file.write('BFAC 1 2.0 2.5 3.0 4.5\n') file.write('NCYC ') file.write(cycles) file.write('\n') # validation monitor file.write('MONI DIST 6.0\n') file.write('MONI ANGL 8.0\n') file.write('MONI TORSION 10.0\n') file.write('MONI PLANE 
10.0\n') file.write('MONI VANderwaals 4.25\n') file.write('MONI CHIRAL 8.0\n') file.write('MONI BFACTOR 4.0\n') file.write('BINS 20\n') file.write('USECWD\n') file.write('PNAME noid\n') file.write('DNAME ') file.write(job_id) file.write('\n') file.write('END\n') file.close() # Run process over number of big cycles count = 0 while count < big_cycles: print 'Refining' if os.path.basename(libfile) == 'none': runrefine = 'refmac5 HKLIN mi_refine.mtz XYZIN mi_refine.pdb XYZOUT mi_refine_out.pdb HKLOUT '\ + filename_mtz + tls_files + ' < ' + filename_in + ' > ' + filename_log else: runrefine = 'refmac5 HKLIN mi_refine.mtz XYZIN mi_refine.pdb LIBIN ' + temp_lib + \ ' XYZOUT mi_refine_out.pdb HKLOUT ' + filename_mtz + tls_files + ' < ' + filename_in + ' > ' + filename_log os.system(runrefine) # Check run completed fileexists = os.path.exists(filename_mtz) if fileexists == 0: print 'REFMAC o/p phased data file was not found' time.sleep(4) return 1 fileexists = os.path.exists('mi_refine_out.pdb') if fileexists == 0: print 'REFMAC o/p coordinate file was not found' time.sleep(4) return 1 # # Apply water picking option # if water_pick == 'yes' and count < water_cycles: print 'Water picking' # 1FF map file = open('mi_fft.inp','w') file.write('LABIN F1=DELFWT PHI=PHDELFWT\n') file.write('END\n') file.close() runfft = 'fft HKLIN ' + filename_mtz + ' MAPOUT mi_1ff.map < mi_fft.inp > mi_fft.log' os.system(runfft) fileexists = os.path.exists('mi_1ff.map') if fileexists == 0: print 'FFT for water picking failed' time.sleep(4) return 1 else: os.remove('mi_fft.inp') os.remove('mi_fft.log') # Setup crystal araound the protein file = open('mi_mapmask.inp','w') file.write('BORDER 5.0\n') file.write('EXTEND XTAL\n') file.write('END\n') file.close() runmapmask = 'mapmask XYZIN mi_refine_out.pdb MAPIN mi_1ff.map MAPOUT mi_1ff_masked.map < mi_mapmask.inp > mi_mapmask.log' os.system(runmapmask) fileexists = os.path.exists('mi_1ff_masked.map') if fileexists == 0: print 'MAPMASK for water 
picking failed' time.sleep(4) return 1 else: os.remove('mi_mapmask.inp') os.remove('mi_mapmask.log') os.remove('mi_1ff.map') # Water peak picking file = open('mi_peakmax.inp','w') file.write('THRESHOLD RMS 4.0\n') file.write('OUTPUT PDB\n') file.write('BFACTOR 30.0 1.0\n') file.write('RESIDUE HOH\n') file.write('ATNAME O\n') file.write('CHAIN X\n') file.write('NUMPEAKS 500\n') file.write('EXCLUDE EDGE\n') file.write('END\n') file.close() runpeakmax = 'peakmax MAPIN mi_1ff_masked.map XYZOUT mi_refine_peaks.pdb < mi_peakmax.inp > mi_peakmax_wat.log' os.system(runpeakmax) fileexists = os.path.exists('mi_refine_peaks.pdb') if fileexists == 0: print 'PEAKMAX run failed' time.sleep(4) return 1 # Water peak reduction by symmetry and protein proximity file = open('mi_watpeak.inp','w') file.write('DISTANCE 2.3 3.5\n') file.write('CHAIN X\n') file.write('SYMMETRY ') file.write(space_group) file.write('\nEND\n') file.close() runwatpeak = 'watpeak XYZIN mi_refine_out.pdb PEAKS mi_refine_peaks.pdb XYZOUT mi_refine_wat.pdb < mi_watpeak.inp > mi_watpeak.log' os.system(runwatpeak) fileexists = os.path.exists('mi_refine_wat.pdb') if fileexists == 0: print 'WATPEAK run failed' time.sleep(4) return 1 # Capture water atom records up to limit # Adjust for ascending numbering within the water chain file = open('mi_refine_wat.pdb','r') allLines = file.readlines() file.close() aList_waters = [] water_pick_counter = 1 for eachLine in allLines: tag = eachLine[0:6] tag = tag.strip() if tag == 'ATOM' or tag == 'HETATM': water_pick_counter = water_pick_counter + 1 if water_pick_counter < water_add: res_number_X_high = res_number_X_high + 1 str_res_number = str(res_number_X_high) str_res_number = str_res_number.rjust(4) atom_water_record = eachLine[0:22] + str_res_number + eachLine[26:80] atom_water_record = atom_water_record.strip() aList_waters.append(atom_water_record) # Clean-up coordinate file debris from water picking fileexists = os.path.exists('mi_refine_peaks.pdb') if fileexists != 0: 
os.remove('mi_refine_peaks.pdb') fileexists = os.path.exists('mi_refine_wat.pdb') if fileexists != 0: os.remove('mi_refine_wat.pdb') # Rewrite current PDB file ready to append new waters file = open('mi_refine_out.pdb','r') allLines = file.readlines() file.close() os.remove('mi_refine_out.pdb') file = open('mi_refine_out.pdb','w') for eachLine in allLines: tag = eachLine[0:6] tag = tag.strip() if tag != 'END' and tag != 'CONECT': file.write(eachLine) # Add new waters number_water_list = len(aList_waters) count_rec = 0 while count_rec < number_water_list: aLine = aList_waters[count_rec] file.write(aLine) file.write('\n') count_rec = count_rec + 1 file.write('END\n') file.close() print 'Number of waters added: ',number_water_list # Clean-up os.remove('mi_1ff_masked.map') os.remove('mi_peakmax.inp') os.remove('mi_peakmax_wat.log') os.remove('mi_watpeak.inp') os.remove('mi_watpeak.log') # # end of water picking option # os.remove('mi_refine.pdb') os.rename('mi_refine_out.pdb','mi_refine.pdb') count = count + 1 # Clean-up and rename os.remove(filename_in) print 'Output MTZ file:',filename_mtz fileexists = os.path.exists('mi_refine.pdb') if fileexists != 0: os.rename('mi_refine.pdb',filename_pdb) print 'Output PDB file:',filename_pdb else: 'REFMAC5 o/p coordinate file was not found' time.sleep(4) return 1 fileexists = os.path.exists(filename_refmac) if fileexists != 0: os.remove(filename_refmac) fileexists = os.path.exists('mi_refine.mtz') if fileexists != 0: os.remove('mi_refine.mtz') fileexists = os.path.exists('mi_temp.tls') if fileexists != 0: os.remove('mi_temp.tls') fileexists = os.path.exists(filename_refmac_temp) if fileexists != 0: os.rename(filename_refmac_temp,filename_refmac) print 'Output CIF log file:',filename_refmac # Parse global summary file = open(filename_refmac,'r') allLines = file.readlines() file.close() for eachLine in allLines: if eachLine.find('_refine.ls_R_factor_R_work') > -1: parseLine = eachLine.split() rwork = parseLine[1] if 
eachLine.find('_refine.ls_R_factor_R_free') > -1: parseLine = eachLine.split() rfree = parseLine[1] if eachLine.find('r_bond_refined_d') > -1: parseLine = eachLine.split() rmsd_bonds = parseLine[2] else: 'REFMAC o/p CIF log file was not found' return 1 fileexists = os.path.exists(filename_log) if fileexists != 0: print 'Output REFMAC5 log:',filename_log else: print 'The REFMAC5 log file was not found' time.sleep(4) return 1 print 'Rwork=',rwork,' Rfree=',rfree,' RMSD(bonds)=',rmsd_bonds ########################## # End of REFMAC5 section # ########################## ####################### # Start SHELX section # ####################### if ref_engine == 'shelx': print '\nStarting SHELX refinement process' print 'CCP4 scratch space:',working_ccp4_scratch print 'Job-ID:',job_id print 'Using mtz data:',flabel,',',sigflabel if missing_protein_chain == 'yes': print 'Chain identifiers must be assigned for all protein atoms' time.sleep(4) return 1 if os.path.basename(libfile) != 'none': print 'Using library file:',libfile filelib = open(libfile,'r') allLiblines = filelib.readlines() filelib.close() # Base .hkl and .ins root files name on pdb file name ins_file = job_id + '.ins' hkl_file = job_id + '.hkl' ins_file_full = os.path.join(workingdir,ins_file) hkl_file_full = os.path.join(workingdir,hkl_file) filename_lst = job_id + '.lst' filename_res = job_id + '.res' filename_fcf = job_id + '.fcf' filename_mtz = job_id + '.mtz' filename_lst_full = os.path.join(workingdir,filename_lst) filename_res_full = os.path.join(workingdir,filename_res) filename_fcf_full = os.path.join(workingdir,filename_fcf) filename_mtz_full = os.path.join(workingdir,filename_mtz) fileexists = os.path.exists(ins_file) if fileexists != 0: os.remove(ins_file) fileexists = os.path.exists(hkl_file) if fileexists != 0: os.remove(hkl_file) # Setup reflection file in SHELX format file = open('mi_mtz2various.inp','w') file.write('LABIN FP=') file.write(flabel) file.write(' SIGFP=') file.write(sigflabel) 
file.write(' FREE=') file.write(rfreelabel) file.write('\n') if max_res != 'none': file.write('RESOLUTION 1000.0 ') file.write(max_res) file.write('\n') file.write('OUTPUT SHELX\n') file.write('EXCLUDE FREER 0\n') file.write('END\n') file.close() runmtz = 'mtz2various HKLIN mi_refine.mtz HKLOUT mi_refine.hkl < mi_mtz2various.inp > mi_mtz2various.log' os.system(runmtz) fileexists = os.path.exists('mi_mtz2various.inp') if fileexists != 0: os.remove('mi_mtz2various.inp') fileexists = os.path.exists('mi_mtz2various.log') if fileexists != 0: os.remove('mi_mtz2various.log') fileexists = os.path.exists('mi_refine.hkl') if fileexists != 0: file = open('mi_refine.hkl','r') allLines = file.readlines() file.close() os.remove('mi_refine.hkl') file = open(hkl_file,'w') for eachLine in allLines: write_reflection = 'yes' if eachLine.find('TITLE') > -1 or eachLine.find('CELL') > -1 or eachLine.find('ZERR') > -1\ or eachLine.find('LATT') > -1 or eachLine.find('SYMM') > -1 or eachLine.find('HKLF') > -1: write_reflection = 'no' if write_reflection == 'yes': file.write(eachLine) file.close() else: print 'File format conversion for SHELXH seems to have failed' time.sleep(4) return 1 # Setup input coordinates/restraints file (.ins) file with SHELXPRO print 'Running SHELXPRO' fileexists = os.path.exists('mi_shelxpro.inp') if fileexists != 0: os.remove('mi_shelxpro.inp') file = open('mi_shelxpro.inp','w') if test_platform.find('win') > -1: file.write('mi_shelxpro\n') file.write('I\n') file.write('\n') file.write(ins_file) file.write('\nmi_refine.pdb\n') file.write('Written by MIFit\n') file.write('\n') file.write('\n') file.write('\n') file.write('\n') file.write('C\n') # Chain offsets (+1000 etc) since SHELX does not support chains if number_chain_list > 0: count = 0 while count < number_chain_list: file.write('\n') count = count + 1 # List N-terminii final_chain = number_chain_list - 1 count = 0 while count < number_chain_list: nterm = aList_nterm[count] file.write(nterm) if count < 
final_chain: file.write('=\n') else: file.write('\n') count = count + 1 # List C-terminii count = 0 while count < number_chain_list: cterm = aList_cterm[count] file.write(cterm) if count < final_chain: file.write('=\n') else: file.write('\n') count = count + 1 file.write('\n') file.write('\n') file.write('N\n') file.write('3\n') file.write('\n') file.write('Q\n') file.write('\n') file.close() # Execute SHELXPRO to obtain mi_refine.ins runshelxpro = '"' + shelxpro + '"' + ' < mi_shelxpro.inp > mi_shelxpro.log' os.system(runshelxpro) os.remove('mi_shelxpro.inp') os.remove('mi_shelxpro.log') # Adjust run parameters, insert restraints and rename fileexists = os.path.exists(ins_file) if fileexists != 0: file = open(ins_file,'r') allLines = file.readlines() file.close() os.remove(ins_file) # Read/write to adjust ins file file = open(ins_file,'w') for eachLine in allLines: write_flag = 'no' # Capture number of molecules for PDB write later if eachLine.find('ZERR') > -1: parseLine = eachLine.split() number_molecules = parseLine[1] if eachLine.find('WGHT') > -1: # Insert any extra restraints if os.path.basename(libfile) != 'none': for eachLibline in allLiblines: file.write(eachLibline) file.write('\n') # Insert weight file.write('WGHT ') file.write(weight) if bref_type == 'anisotropic': file.write('\nANIS\n') write_flag = 'yes' # Number of refinement cycles if eachLine.find('CGLS') > -1: file.write('CGLS ') file.write(cycles) file.write('\n') write_flag = 'yes' if write_flag == 'no': file.write(eachLine) file.close() else: print 'SHELXPRO run failed to generate .ins file' time.sleep(4) return 1 fileexists = os.path.exists('mi_shelxpro.pro') if fileexists != 0: os.remove('mi_shelxpro.pro') fileexists = os.path.exists('mi_shelxpro.ps') if fileexists != 0: os.remove('mi_shelxpro.ps') # Execute SHELXH refinement job print 'Running SHELXH' runshelx = '"' + shelxh + '"' + ' ' + job_id + ' > mi_shelxh.log' os.system(runshelx) fileexists = os.path.exists('mi_shelxh.log') if 
fileexists != 0: file = open('mi_shelxh.log','r') allLines = file.readlines() file.close() for eachLine in allLines: if eachLine.find('R1') > -1: parseLine = eachLine.split() rwork = parseLine[2] os.rename('mi_shelxh.log',filename_log) else: print 'SHELXH job did not run' return 1 fileexists = os.path.exists(filename_lst) if fileexists == 0: print 'SHELXH lst file was not created' time.sleep(4) return 1 fileexists = os.path.exists(filename_res) if fileexists == 0: print 'SHELXH res file was not created' time.sleep(4) return 1 fileexists = os.path.exists(filename_fcf) if fileexists == 0: print 'SHELXH fcf file was not created' time.sleep(4) return 1 print 'Rwork=',rwork # Append water information (note that waters are renumbered 1,2,3..) if water_count > 0: pr_water_count = str(water_count) aList_chains.append(' ') aList_nterm.append('1') aList_cterm.append(pr_water_count) number_chain_list = number_chain_list + 1 # Back convert to PDB format with SHELXPRO print 'Running SHELXPRO' file = open('mi_shelxpro.inp','w') if test_platform.find('win') > -1: file.write('mi_shelxpro\n') file.write('G\n') file.write('\n') file.write('S\n') file.write(filename_res) file.write('\n') file.write('N\n') file.write('Y\n') file.write('Y\n') file.write('N\n') file.write('K\n') file.write(number_molecules) file.write('\n') file.write('\n') file.write(filename_pdb) file.write('\n') file.write('Written by a MIFit application\n') # Loop over chains to put back correct chain-number pairs if number_chain_list > 0: count = 0 while count < number_chain_list: file.write('$\n') chain_id = aList_chains[count] nterm = aList_nterm[count] cterm = aList_cterm[count] nterm_current = int(nterm) + (count + 1) * 1000 cterm_current = int(cterm) + (count + 1) * 1000 nterm_current = str(nterm_current) cterm_current = str(cterm_current) file.write(chain_id) file.write('\n') file.write('\n') file.write(nterm_current) file.write(' ') file.write(cterm_current) file.write('\n') file.write(nterm) 
file.write('\n') count = count + 1 file.write('\n') file.write('Q\n') file.write('\n') file.close() # Execute SHELXPRO runshelxpro = '"' + shelxpro + '"' + ' < mi_shelxpro.inp > mi_shelxpro.log' os.system(runshelxpro) fileexists = os.path.exists(filename_pdb) if fileexists == 0: print 'SHELXPRO job failed to generate PDB file' time.sleep(4) return 1 # Back convert the fcf file phased data information into mtz format for easy MIFit load print 'Converting output data to mtz format for MIFit input' print 'Note:FWT contains pre computed 2Fo-Fc map coefficients' print ' and DELFWT contains precomputed Fo-Fc map coefficients' file = open(filename_fcf,'r') allLines = file.readlines() file.close() file = open(hkl_file,'w') for eachLine in allLines: tag = eachLine[0:1] tag = tag.strip() if tag != ' ' and tag != '_' and tag != '#': parseLine = eachLine.split() num_args = len(parseLine) if num_args == 7: h = parseLine[0] k = parseLine[1] l = parseLine[2] fobs_sq = parseLine[3] fcalc = parseLine[5] phase = parseLine[6] h = int(h) k = int(k) l = int(l) fobs_sq = float(fobs_sq) fcalc = float(fcalc) phase = float(phase) fobs = math.sqrt(fobs_sq) twofofc = 2.0*fobs - fcalc twofofc = round(twofofc,3) fofc = fobs - fcalc fofc = round(fofc,3) aLine = str(h) + ' ' + str(k) + ' ' + str(l) + \ ' ' + str(twofofc) + ' ' + str(fofc) + ' ' + str(phase) + ' ' + str(phase) file.write(aLine) file.write('\n') file.close() # Step 2, convert ascii to mtz aLine = str(acell_mtz) + ' ' + str(bcell_mtz) + ' ' + str(ccell_mtz)\ + ' ' + str(alpha_mtz) + ' ' + str(beta_mtz) + ' ' + str(gamma_mtz) file = open('mi_f2mtz.inp','w') file.write('NAME PROJECT Shelx_map_coeffs CRYSTAL 1 DATASET 1\n') file.write('CELL ') file.write(aLine) file.write('\n') file.write('SYMMETRY ') file.write(space_group) file.write('\n') file.write('LABOUT H K L FWT DELFWT PHWT PHDELFWT\n') file.write('CTYPOUT H H H F F P P\n') file.write('END\n') file.close() runf2mtz = 'f2mtz HKLIN ' + hkl_file + ' HKLOUT ' + filename_mtz + ' < 
mi_f2mtz.inp > mi_f2mtz.log' os.system(runf2mtz) fileexists = os.path.exists(filename_mtz) if fileexists == 0: print 'F2MTZ failed to convert to output mtz file' time.sleep(4) return 1 else: os.remove('mi_f2mtz.inp') os.remove('mi_f2mtz.log') # Clean-up various intermediate files fileexists = os.path.exists(ins_file_full) if fileexists != 0: os.remove(ins_file_full) fileexists = os.path.exists(hkl_file_full) if fileexists != 0: os.remove(hkl_file_full) fileexists = os.path.exists(filename_fcf_full) if fileexists != 0: os.remove(filename_fcf_full) fileexists = os.path.exists(filename_res_full) if fileexists != 0: os.remove(filename_res_full) fileexists = os.path.exists('mi_refine.pdb') if fileexists != 0: os.remove('mi_refine.pdb') fileexists = os.path.exists('mi_refine.mtz') if fileexists != 0: os.remove('mi_refine.mtz') fileexists = os.path.exists('mi_shelxpro.pro') if fileexists != 0: os.remove('mi_shelxpro.pro') fileexists = os.path.exists('mi_shelxpro.ps') if fileexists != 0: os.remove('mi_shelxpro.ps') fileexists = os.path.exists('shelxpro.pro') if fileexists != 0: os.remove('shelxpro.pro') fileexists = os.path.exists('shelxpro.ps') if fileexists != 0: os.remove('shelxpro.ps') fileexists = os.path.exists('mi_shelxpro.inp') if fileexists != 0: os.remove('mi_shelxpro.inp') fileexists = os.path.exists('mi_shelxpro.log') if fileexists != 0: os.remove('mi_shelxpro.log') ######################## # End of SHELX section # ######################## ######################## # Structure validation # ######################## if validate == 'yes' and ref_engine == 'refmac5': print 'Checking structure' # Get entity list of current model file = open(filename_pdb,'r') allLines = file.readlines() file.close() chain_id_prev = '?' res_number_prev = '?' 
for eachLine in allLines: tag = eachLine[0:6] tag = tag.strip() if tag == 'ATOM' or tag == 'HETATM': chain_id = eachLine[21:22] res_number = eachLine[22:26] res_number = res_number.strip() res_name = eachLine[17:20] res_name = res_name.strip() atom_name = eachLine[12:16] atom_name = atom_name.strip() disorder_id = eachLine[16:17] disorder_id = disorder_id.strip() if disorder_id != '': aList_disorder_chain.append(chain_id) aList_disorder_resno.append(res_number) aList_disorder_resname.append(res_name) # Form all atom list aList_allatoms_chain.append(chain_id) aList_allatoms_res_number.append(res_number) aList_allatoms_res_name.append(res_name) aList_allatoms_atom_name.append(atom_name) # Form residue list if res_name != 'HOH': if chain_id != chain_id_prev or res_number != res_number_prev: aList_chain_store.append(chain_id) aList_res_number_store.append(res_number) aList_res_name_store.append(res_name) chain_id_prev = chain_id res_number_prev = res_number # Identify any non-PRO cis peptide links if tag == 'CISPEP': chain = eachLine[29:30] resnumber = eachLine[32:35] resname = eachLine[25:28] resnumber = resnumber.strip() resname = resname.strip() if resname != 'PRO': aList_cis_chain.append(chain) aList_cis_resno.append(resnumber) aList_cis_resname.append(resname) ####################################### # Parse refmac stereochemical scores # ####################################### file = open(filename_log,'r') allLines = file.readlines() file.close() for eachLine in allLines: # Parse section limits if eachLine.find('****') > -1 or eachLine.find('----') > -1: bond_list = 'no' angle_list = 'no' chiral_list = 'no' contact_list = 'no' chiral_list = 'no' # Start logging on finding final iteration number if eachLine.find('CGMAT cycle number') > -1 and eachLine.find(cycles) > -1: iteration_final = 'yes' # get abnormal bond list if bond_list == 'yes' and iteration_final == 'yes': chain = eachLine[0:1] chain = chain.strip() if chain != '': aList_split = eachLine.split() 
num_aLine = len(aList_split) if num_aLine > 2: resnumber = aList_split[1] resname = aList_split[2] aList_bonds_chain.append(chain) aList_bonds_resno.append(resnumber) aList_bonds_resname.append(resname) if eachLine.find('****') or eachLine.find('Limits'): bond_list == 'no' if eachLine.find('Bond distance deviations ') > -1: bond_list = 'yes' # get abnormal bond angle list if angle_list == 'yes' and iteration_final == 'yes': chain = eachLine[0:1] chain = chain.strip() if chain != '': aList_split = eachLine.split() num_aLine = len(aList_split) if num_aLine > 2: resnumber = aList_split[1] resname = aList_split[2] aList_angles_chain.append(chain) aList_angles_resno.append(resnumber) aList_angles_resname.append(resname) if eachLine.find('****') or eachLine.find('Limits'): angle_list == 'no' if eachLine.find('Bond angle deviations ') > -1: angle_list = 'yes' # get abnormal contacts list if contact_list == 'yes' and iteration_final == 'yes': chain = eachLine[0:1] chain = chain.strip() if chain != '': aList_split = eachLine.split() num_aLine = len(aList_split) if num_aLine > 10: resnumber = aList_split[1] resname = aList_split[2] disorder1 = aList_split[4] chain2 = aList_split[6] resname2 = aList_split[8] resnumber2 = aList_split[7] disorder2 = aList_split[10] # Skip intra-residue interactions if chain != chain2 or resnumber != resnumber2: if disorder1 == '.' 
and disorder2 == '.': aList_contacts_chain.append(chain) aList_contacts_resno.append(resnumber) aList_contacts_resname.append(resname) aList_contacts_chain.append(chain2) aList_contacts_resno.append(resnumber2) aList_contacts_resname.append(resname2) if eachLine.find('****') or eachLine.find('Limits'): contact_list == 'no' if eachLine.find('VDW deviations ') > -1: contact_list = 'yes' # get severe chiral center violations if chiral_list == 'yes' and iteration_final == 'yes': chain = eachLine[0:1] chain = chain.strip() if chain != '': resname = resname.strip() aList_split = eachLine.split() num_aLine = len(aList_split) if num_aLine > 2: resnumber = aList_split[1] resname = aList_split[2] aList_chiral_chain.append(chain) aList_chiral_resno.append(resnumber) aList_chiral_resname.append(resname) if eachLine.find('****') or eachLine.find('Limits'): chiral_list == 'no' if eachLine.find('Chiral volume deviations') > -1: chiral_list = 'yes' ########################################################### # Run omega check and phi-psi check using Richardson data # ########################################################### # Read Richardson data # General (non-GLY, non-PRO) data fileexists = os.path.exists(phipsi_gen_datafile) if fileexists == 0: print 'WARNING - Unable to locate general phi-psi validation data' else: file = open(phipsi_gen_datafile,'r') allLines = file.readlines() file.close for eachLine in allLines: tag = eachLine[0:1] if tag != '#': aLine = eachLine.split() phi = aLine[0] psi = aLine[1] phipsi_prob = aLine[2] phi = float(phi) psi = float(psi) phipsi_prob = float(phipsi_prob) aList_phi_all.append(phi) aList_psi_all.append(psi) aList_phipsi_prob_all.append(phipsi_prob) number_phipsi_gen_table = len(aList_phi_all) # GLY data fileexists = os.path.exists(phipsi_gly_datafile) if fileexists == 0: print 'WARNING - Unable to locate GLY phi-psi validation data' else: file = open(phipsi_gly_datafile,'r') allLines = file.readlines() file.close for eachLine in allLines: 
tag = eachLine[0:1] if tag != '#': aLine = eachLine.split() phi = aLine[0] psi = aLine[1] phipsi_prob = aLine[2] phi = float(phi) psi = float(psi) phipsi_prob = float(phipsi_prob) aList_phi_gly.append(phi) aList_psi_gly.append(psi) aList_phipsi_prob_gly.append(phipsi_prob) number_phipsi_gly_table = len(aList_phi_gly) # PRO data fileexists = os.path.exists(phipsi_pro_datafile) if fileexists == 0: print 'WARNING - Unable to locate PRO phi-psi validation data' else: file = open(phipsi_pro_datafile,'r') allLines = file.readlines() file.close for eachLine in allLines: tag = eachLine[0:1] if tag != '#': aLine = eachLine.split() phi = aLine[0] psi = aLine[1] phipsi_prob = aLine[2] phi = float(phi) psi = float(psi) phipsi_prob = float(phipsi_prob) aList_phi_pro.append(phi) aList_psi_pro.append(psi) aList_phipsi_prob_pro.append(phipsi_prob) number_phipsi_pro_table = len(aList_phi_pro) # run SECSTR to compute phi,psi,omega file = open(filename_pdb) allLines = file.readlines() file.close() file = open('mi_secstr.new','w') for eachLine in allLines: tag = eachLine[0:6] tag = tag.strip() if tag == 'ATOM' or tag == 'HETATM': file.write(eachLine) file.close() file = open('mi_secstr.inp','w') file.write('mi_secstr.new\n') file.close() runsecstr = 'secstr < mi_secstr.inp > mi_secstr.log' os.system(runsecstr) fileexists = os.path.exists('mi_secstr.rin') if fileexists == 0: print 'Phi-psi calculation failed' time.sleep(4) return 1 else: file = open('mi_secstr.rin','r') allLines = file.readlines() file.close() os.remove('mi_secstr.new') os.remove('mi_secstr.inp') os.remove('mi_secstr.log') os.remove('mi_secstr.rin') for eachLine in allLines: res_name = eachLine[4:7] res_number = eachLine[9:13] chain_id = eachLine[8:9] phi = eachLine[15:22] psi = eachLine[22:29] omega = eachLine[29:36] res_name = res_name.strip() res_number = res_number.strip() chain_id = chain_id.strip() phi = phi.strip() psi = psi.strip() omega = omega.strip() omega = float(omega) phi = float(phi) psi = float(psi) 
amino_acid_count = amino_acid_count + 1.0 ####################################################### # Search for outliers versus phi-psi probability data # ####################################################### if phi < 180 and psi < 180: lookup = 'yes' else: lookup = 'no' # Jump to useful region of table (minus safety margin) phi_point = phi + 180 phi_point = phi_point * 9.0 phi_point = math.floor(phi_point) - 19 phi_point = int(phi_point) if phi_point > 0: count = phi_point else: count = 0 if res_name != 'GLY' and res_name != 'PRO': while count < number_phipsi_gen_table and lookup == 'yes': phi_table = aList_phi_all[count] phi_diff = phi - phi_table phi_diff = abs(phi_diff) if phi_diff < 2.0: psi_table = aList_psi_all[count] psi_diff = psi - psi_table psi_diff = abs(psi_diff) if psi_diff < 2.0: phipsi_prob = aList_phipsi_prob_all[count] if phipsi_prob < phipsi_thresh_gen: aList_rama_chain.append(chain_id) aList_rama_resno.append(res_number) aList_rama_resname.append(res_name) lookup = 'no' count = count + 1 if res_name == 'GLY': while count < number_phipsi_gly_table and lookup == 'yes': phi_table = aList_phi_gly[count] phi_diff = phi - phi_table phi_diff = abs(phi_diff) if phi_diff < 2.0: psi_table = aList_psi_gly[count] psi_diff = psi - psi_table psi_diff = abs(psi_diff) if psi_diff < 2.0: phipsi_prob = aList_phipsi_prob_gly[count] if phipsi_prob < phipsi_thresh_gly: aList_rama_chain.append(chain_id) aList_rama_resno.append(res_number) aList_rama_resname.append(res_name) lookup = 'no' count = count + 1 if res_name == 'PRO': while count < number_phipsi_pro_table and lookup == 'yes': phi_table = aList_phi_pro[count] phi_diff = phi - phi_table phi_diff = abs(phi_diff) if phi_diff < 2.0: psi_table = aList_psi_pro[count] psi_diff = psi - psi_table psi_diff = abs(psi_diff) if psi_diff < 2.0: phipsi_prob = aList_phipsi_prob_pro[count] if phipsi_prob < phipsi_thresh_pro: aList_rama_chain.append(chain_id) aList_rama_resno.append(res_number) 
aList_rama_resname.append(res_name) lookup = 'no' count = count + 1 ############################# # Search for omega outliers # ############################# omega = float(omega) if omega < 180.0: if omega < 0.0: omega = -omega omega_deviation = omega_peak - omega omega_deviation = abs(omega_deviation) if omega_deviation > omega_thresh: aList_omega_chain.append(chain_id) aList_omega_resno.append(res_name) aList_omega_resname.append(res_number) #################################### # Run sidechain check with ROTAMER # #################################### file = open('mi_rotamer.inp','w') file.write('DELT 45\n') file.write('END\n') file.close() runrotamer = 'rotamer XYZIN ' + filename_pdb + ' < mi_rotamer.inp > mi_rotamer.log' os.system(runrotamer) fileexists = os.path.exists('mi_rotamer.log') if fileexists == 0: print 'Rotamer validation check failed' time.sleep(4) return 1 else: # Parse for chi-1 deviations (greater than 45 degrees) file = open('mi_rotamer.log','r') allLines = file.readlines() file.close() for eachLine in allLines: if eachLine.find(')') > -1 and eachLine.find('(') > -1: if eachLine[11:12] == '*': chain_id = eachLine[0:1] residue_id = eachLine[1:5] residue_name = eachLine[6:9] chain_id = chain_id.strip() residue_id = residue_id.strip() residue_name = residue_name.strip() aList_rotamer_chain.append(chain_id) aList_rotamer_resno.append(residue_id) aList_rotamer_resname.append(residue_name) os.remove('mi_rotamer.log') os.remove('mi_rotamer.inp') ################################################# # Locate difference density features on protein # ################################################# # Calculate 1FF map file = open('mi_fft.inp','w') file.write('LABIN F1=DELFWT PHI=PHDELFWT\n') file.write('END\n') file.close() runfft = 'fft HKLIN ' + filename_mtz + ' MAPOUT mi_1ff.map < mi_fft.inp > mi_fft.log' os.system(runfft) fileexists = os.path.exists('mi_1ff.map') if fileexists == 0: print 'FFT for density test failed' time.sleep(4) return 1 else: 
os.remove('mi_fft.inp') os.remove('mi_fft.log') # Build density around protein file = open('mi_mapmask.inp','w') file.write('EXTEND XTAL\n') file.write('BORDER 2.0\n') file.write('END\n') file.close() runmapmask = 'mapmask MAPIN mi_1ff.map XYZIN ' + filename_pdb + ' MAPOUT mi_1ff_masked.map < mi_mapmask.inp > mi_mapmask.log' os.system(runmapmask) fileexists = os.path.exists('mi_1ff_masked.map') if fileexists == 0: print 'MAPMASK for density test failed' return 1 else: os.remove('mi_mapmask.inp') os.remove('mi_mapmask.log') os.remove('mi_1ff.map') # Peak/hole pick near protein file = open('mi_peakmax.inp','w') file.write('THRESHOLD RMS 4.0 NEGATIVE\n') file.write('END\n') file.close() runpeakmax = 'peakmax MAPIN mi_1ff_masked.map XYZOUT mi_peakmax.pdb < mi_peakmax.inp > mi_peakmax.log 2> mi_peakmax_err.log' os.system(runpeakmax) fileexists = os.path.exists('mi_peakmax_err.log') if fileexists != 0: os.remove('mi_peakmax_err.log') fileexists = os.path.exists('mi_peakmax.log') if fileexists == 0: print 'PEAKMAX for density test failed' time.sleep(4) return 1 else: os.remove('mi_peakmax.inp') os.remove('mi_1ff_masked.map') os.remove('mi_peakmax.log') # Identify amino acids within 2.0A of any 4 sigma peaks/holes fileexists = os.path.exists('mi_peakmax.pdb') if fileexists != 0: file = open('mi_peakmax.pdb','r') allLines = file.readlines() file.close() os.remove('mi_peakmax.pdb') for eachLine in allLines: tag = eachLine[0:6] tag = tag.strip() if tag == 'ATOM' or tag == 'HETATM': x = eachLine[30:38] y = eachLine[38:46] z = eachLine[46:54] x = float(x) y = float(y) z = float(z) aList_peak_x.append(x) aList_peak_y.append(y) aList_peak_z.append(z) number_peaks = len(aList_peak_x) file = open(filename_pdb,'r') allLines = file.readlines() file.close() count = 0 while count < number_peaks: xp = aList_peak_x[count] yp = aList_peak_y[count] zp = aList_peak_z[count] for eachLine in allLines: tag = eachLine[0:6] tag = tag.strip() if tag == 'ATOM' or tag == 'HETATM': x = 
eachLine[30:38] y = eachLine[38:46] z = eachLine[46:54] x = float(x) y = float(y) z = float(z) dist = (xp - x) ** 2 + (yp - y)**2 + (zp - z)** 2 if dist < 4.0: chain = eachLine[21:22] res_number = eachLine[22:26] res_number = res_number.strip() res_name = eachLine[17:20] res_name = res_name.strip() aList_density_chain.append(chain) aList_density_resno.append(res_number) aList_density_resname.append(res_name) count = count + 1 ################################################################### # Build tidy error lists by combining error types for each entity # ################################################################### entity_count = len(aList_chain_store) bond_count = len(aList_bonds_chain) angles_count = len(aList_angles_chain) chiral_count = len(aList_chiral_chain) contacts_count = len(aList_contacts_chain) cis_count = len(aList_cis_chain) rotamer_count = len(aList_rotamer_chain) omega_count = len(aList_omega_chain) rama_count = len(aList_rama_chain) density_count = len(aList_density_chain) count = 0 while count < entity_count: geom_error = '.' contacts_error = '.' omega_error = '.' phipsi_error = '.' rotamer_error = '.' cis_error = '.' density_error = '.' 
error_flag = 'no' chain_store = aList_chain_store[count] res_number_store = aList_res_number_store[count] res_name_store = aList_res_name_store[count] # bonds count1 = 0 while count1 < bond_count: chain = aList_bonds_chain[count1] res_number = aList_bonds_resno[count1] res_name = aList_bonds_resname[count1] if chain == chain_store and res_number == res_number_store: geom_error = 'G' error_flag = 'yes' count1 = count1 + 1 # angles count1 = 0 while count1 < angles_count: chain = aList_angles_chain[count1] res_number = aList_angles_resno[count1] res_name = aList_angles_resname[count1] if chain == chain_store and res_number == res_number_store: geom_error = 'G' error_flag = 'yes' count1 = count1 + 1 # contacts count1 = 0 while count1 < contacts_count: chain = aList_contacts_chain[count1] res_number = aList_contacts_resno[count1] res_name = aList_contacts_resname[count1] if chain == chain_store and res_number == res_number_store: contacts_error = 'V' error_flag = 'yes' count1 = count1 + 1 # chiral count1 = 0 while count1 < chiral_count: chain = aList_chiral_chain[count1] res_number = aList_chiral_resno[count1] res_name = aList_chiral_resname[count1] if chain == chain_store and res_number == res_number_store: geom_error = 'G' error_flag = 'yes' count1 = count1 + 1 # cis peptide count1 = 0 while count1 < cis_count: chain = aList_cis_chain[count1] res_number = aList_cis_resno[count1] res_name = aList_cis_resname[count1] if chain == chain_store and res_number == res_number_store: cis_error = 'C' error_flag = 'yes' count1 = count1 + 1 # rotamer count1 = 0 while count1 < rotamer_count: chain = aList_rotamer_chain[count1] res_number = aList_rotamer_resno[count1] res_name = aList_rotamer_resname[count1] if chain == chain_store and res_number == res_number_store: rotamer_error = 'R' error_flag = 'yes' count1 = count1 + 1 # omega angles count1 = 0 while count1 < omega_count: chain = aList_omega_chain[count1] res_number = aList_omega_resno[count1] res_name = 
aList_omega_resname[count1] if chain == chain_store and res_number == res_number_store: omega_error = 'O' error_flag = 'yes' count1 = count1 + 1 # phi-psi count1 = 0 while count1 < rama_count: chain = aList_rama_chain[count1] res_number = aList_rama_resno[count1] res_name = aList_rama_resname[count1] if chain == chain_store and res_number == res_number_store: phipsi_error = 'P' error_flag = 'yes' count1 = count1 + 1 # Density count1 = 0 while count1 < density_count: chain = aList_density_chain[count1] res_number = aList_density_resno[count1] res_name = aList_density_resname[count1] if chain == chain_store and res_number == res_number_store: density_error = 'D' error_flag = 'yes' count1 = count1 + 1 # Write all error types for this residue if error_flag == 'yes': # count phi-psi and sidechain errors if phipsi_error == 'P': count_phipsi = count_phipsi + 1.0 if rotamer_error == 'R': count_rotamer = count_rotamer + 1.0 # Tidy output res_number_field = len(res_number_store) print_res_number = res_number_store if res_number_field == 1: print_res_number = ' ' + res_number_store if res_number_field == 2: print_res_number = ' ' + res_number_store if res_number_field == 3: print_res_number = ' ' + res_number_store aLine = ' ' + chain_store + ' ' + print_res_number + ' ' + res_name_store + ' ' + geom_error + ' ' \ + contacts_error + ' ' + omega_error + ' ' + phipsi_error + ' ' + cis_error + ' ' \ + rotamer_error + ' ' + density_error aList_errors.append(aLine) count = count + 1 # Local error counts percent_phi_psi = 100.0 * count_phipsi / amino_acid_count percent_phi_psi = round(percent_phi_psi,2) percent_phi_psi = str(percent_phi_psi) percent_rotamer = 100.0 * count_rotamer / amino_acid_count percent_rotamer = round(percent_rotamer,2) percent_rotamer = str(percent_rotamer) # Write error list number_errors = len(aList_errors) percent_errors = 100.0 * float(number_errors)/entity_count percent_errors = round(percent_errors,1) percent_errors = str(percent_errors) print 'Output 
putative error list:',errorfile print 'Percentage of residues in error list:', percent_errors file = open(errorfile,'w') file.write('#\n') file.write('# Working directory: ') file.write(workingdir) file.write('\n# Coordinates: ') file.write(filename_pdb) file.write('\n# Data: ') file.write(filename_mtz) file.write('\n#\n') file.write('# Rwork: ') file.write(rwork) file.write('\n# Rfree: ') file.write(rfree) file.write('\n# Percentage of residues outside Richardson phi-psi core: ') file.write(percent_phi_psi) file.write('\n# Percentage of residues with abnormal rotamers: ') file.write(percent_rotamer) file.write('\n# Percentage of residues flagged: ') file.write(percent_errors) file.write('\n#\n') file.write('# Residue list codes for severe abnormality types:\n') file.write('# (G)eometry, (V)an der Waals, (O)mega, (P)hi-psi, (C)is peptide,\n') file.write('# (R)otamer chi-1, (D)ensity\n') file.write('#\n') count = 0 while count < number_errors: aLine = aList_errors[count] file.write(aLine) file.write('\n') count = count + 1 file.write('#\n') file.close() ##################################################################### # Establish records diagnostics in PDB REMARK 465,470,500, format # ##################################################################### # Determination of missing amino acids from SEQRES records if there were any number_sequence = len(aList_sequence_resname) number_chains = len(aList_sequence_chain_id) print '\nNumber amino acids in SEQRES:',number_sequence,'over',number_chains,'chains\n' if number_sequence > 0: # Load data for a particular chain count_chains = 0 while count_chains < number_chains: # initialize lists that will be used for this sequence/structure comparison aList_sequence_resname_temp = [] aList_sequence_resnumber_temp = [] aList_structure_resname_temp = [] aList_structure_resnumber_temp = [] working_seq_chain = aList_sequence_chain_id[count_chains] sequence_match = 'no' # Load sequence data for the current chain count1 = 0 while 
count1 < number_sequence: seq_chain = aList_sequence_chain[count1] if working_seq_chain == seq_chain: resname = aList_sequence_resname[count1] aList_sequence_resname_temp.append(resname) aList_sequence_resnumber_temp.append('?') count1 = count1 + 1 # Load structure data for the current chain count1 = 0 while count1 < entity_count: structure_chain = aList_chain_store[count1] if working_seq_chain == structure_chain: resname = aList_res_name_store[count1] resnumber = aList_res_number_store[count1] aList_structure_resname_temp.append(resname) aList_structure_resnumber_temp.append(resnumber) count1 = count1 + 1 # Algorithm for establishing numbering in sequence-structure comparison # match to leading pentamer in this structure along sequence number_structure_temp = len(aList_structure_resname_temp) number_sequence_temp = len(aList_sequence_resname_temp) number_sequence_search = number_sequence_temp - 6 test_structure_resname_1 = aList_structure_resname_temp[0] test_structure_resname_2 = aList_structure_resname_temp[1] test_structure_resname_3 = aList_structure_resname_temp[2] test_structure_resname_4 = aList_structure_resname_temp[3] test_structure_resname_5 = aList_structure_resname_temp[4] structure_resnumber_start = aList_structure_resnumber_temp[0] structure_resnumber_start = int(structure_resnumber_start) count1 = 0 while count1 < number_sequence_search: count2 = count1 + 1 count3 = count1 + 2 count4 = count1 + 3 count5 = count1 + 4 test_sequence_resname_1 = aList_sequence_resname_temp[count1] test_sequence_resname_2 = aList_sequence_resname_temp[count2] test_sequence_resname_3 = aList_sequence_resname_temp[count3] test_sequence_resname_4 = aList_sequence_resname_temp[count4] test_sequence_resname_5 = aList_sequence_resname_temp[count5] if test_structure_resname_1 == test_sequence_resname_1: if test_structure_resname_2 == test_sequence_resname_2: if test_structure_resname_3 == test_sequence_resname_3: if test_structure_resname_4 == test_sequence_resname_4: if 
test_structure_resname_5 == test_sequence_resname_5: sequence_resnumber_start = structure_resnumber_start - count1 count1 = number_sequence_search sequence_match = 'yes' count1 = count1 + 1 # Now setup sequence numbering List if sequence_match == 'yes': count1=0 while count1 < number_sequence_temp: sequence_resnumber_put = sequence_resnumber_start + count1 sequence_resnumber_put = str(sequence_resnumber_put) aList_sequence_resnumber_temp[count1] = sequence_resnumber_put count1 = count1 + 1 # Now analyse and catch missing residues count1=0 while count1 < number_sequence_temp: sequence_resname_put = aList_sequence_resname_temp[count1] sequence_resnumber_put = aList_sequence_resnumber_temp[count1] count2=0 while count2 < number_structure_temp: structure_resnumber_put = aList_structure_resnumber_temp[count2] find_error = 'yes' if sequence_resnumber_put == structure_resnumber_put: find_error = 'no' count2 = number_structure_temp count2= count2 + 1 if find_error == 'yes': out_line = 'REMARK 465 1 ' + sequence_resname_put + ' ' + working_seq_chain + ' ' + sequence_resnumber_put aList_missing_residues.append(out_line) count1 = count1 + 1 # End of loop over chains count_chains = count_chains + 1 # Write missing residues number_missing_residues = len(aList_missing_residues) if number_missing_residues > 0: pdb_annotate.append('REMARK 465') pdb_annotate.append('REMARK 465 MISSING RESIDUES') pdb_annotate.append('REMARK 465 THE FOLLOWING RESIDUES WERE NOT LOCATED IN THE') pdb_annotate.append('REMARK 465 EXPERIMENT. 
(M=MODEL NUMBER; RES=RESIDUE NAME; C=CHAIN') pdb_annotate.append('REMARK 465 IDENTIFIER; SSSEQ=SEQUENCE NUMBER; I=INSERTION CODE.)') pdb_annotate.append('REMARK 465') pdb_annotate.append('REMARK 465 M RES C SSSEQI') count1 = 0 while count1 < number_missing_residues: out_line = aList_missing_residues[count1] pdb_annotate.append(out_line) count1 = count1 + 1 ################################################################ # Obtain a list of missing atoms in each residue (REMARK 470) # ################################################################ pdb_annotate.append('REMARK 470') pdb_annotate.append('REMARK 470 MISSING ATOM') pdb_annotate.append('REMARK 470 THE FOLLOWING RESIDUES HAVE MISSING ATOMS (M=MODEL NUMBER;') pdb_annotate.append('REMARK 470 RES=RESIDUE NAME; C=CHAIN IDENTIFIER; SSEQ=SEQUENCE NUMBER;') pdb_annotate.append('REMARK 470 I=INSERTION CODE):') pdb_annotate.append('REMARK 470 M RES CSSEQI ATOMS') number_allatoms = len(aList_allatoms_chain) count1 = 0 while count1 < entity_count: chain_store = aList_chain_store[count1] res_number_store = aList_res_number_store[count1] res_name_store = aList_res_name_store[count1] # Get all atoms for this residue aList_current_residue_atoms = [] count = 0 while count < number_allatoms: chain_all = aList_allatoms_chain[count] res_number_all = aList_allatoms_res_number[count] res_name_all = aList_allatoms_res_name[count] atom_name_all = aList_allatoms_atom_name[count] if chain_store == chain_all and res_number_store == res_number_all: aList_current_residue_atoms.append(atom_name_all) count = count + 1 # Process to find missing atoms in this residue number_residue_atoms = len(aList_current_residue_atoms) aList_atoms_expected = [] if res_name_store == 'GLY': aList_atoms_expected = aList_GLY_atoms if res_name_store == 'ALA': aList_atoms_expected = aList_ALA_atoms if res_name_store == 'VAL': aList_atoms_expected = aList_VAL_atoms if res_name_store == 'ILE': aList_atoms_expected = aList_ILE_atoms if res_name_store == 'LEU': 
aList_atoms_expected = aList_LEU_atoms if res_name_store == 'PHE': aList_atoms_expected = aList_PHE_atoms if res_name_store == 'PRO': aList_atoms_expected = aList_PRO_atoms if res_name_store == 'MET': aList_atoms_expected = aList_MET_atoms if res_name_store == 'TRP': aList_atoms_expected = aList_TRP_atoms if res_name_store == 'CYS': aList_atoms_expected = aList_CYS_atoms if res_name_store == 'SER': aList_atoms_expected = aList_SER_atoms if res_name_store == 'THR': aList_atoms_expected = aList_THR_atoms if res_name_store == 'ASN': aList_atoms_expected = aList_ASN_atoms if res_name_store == 'GLN': aList_atoms_expected = aList_GLN_atoms if res_name_store == 'TYR': aList_atoms_expected = aList_TYR_atoms if res_name_store == 'HIS': aList_atoms_expected = aList_HIS_atoms if res_name_store == 'ASP': aList_atoms_expected = aList_ASP_atoms if res_name_store == 'GLU': aList_atoms_expected = aList_GLU_atoms if res_name_store == 'LYS': aList_atoms_expected = aList_LYS_atoms if res_name_store == 'ARG': aList_atoms_expected = aList_ARG_atoms number_atoms_expected = len(aList_atoms_expected) if number_atoms_expected > 0: # Check each expected atomname to see if it is found aList_atoms_expected_flag = [] count2 = 0 while count2 < number_atoms_expected: atom_name_expected = aList_atoms_expected[count2] found = 'no' count_current_atoms = 0 while count_current_atoms < number_residue_atoms: atom_name = aList_current_residue_atoms[count_current_atoms] if atom_name_expected == atom_name: found = 'yes' count_current_atoms = count_current_atoms + 1 if found == 'yes': aList_atoms_expected_flag.append('yes') else: aList_atoms_expected_flag.append('no') count2 = count2 + 1 # Collect missing atoms for this residue into a list and create formatted REMARK 470 write_flag = 'no' out_list = '' count2 = 0 while count2 < number_atoms_expected: found = aList_atoms_expected_flag[count2] if found == 'no': atom_name = aList_atoms_expected[count2] number_chars = len(atom_name) if number_chars == 1: 
atom_name = atom_name + ' ' if number_chars == 2: atom_name = atom_name + ' ' out_list = out_list + ' ' + atom_name write_flag = 'yes' count2 = count2 + 1 if write_flag == 'yes': str_res_number_store = str(res_number_store) number_chars = len(str_res_number_store) if number_chars == 1: str_res_number_store = str_res_number_store + ' ' if number_chars == 2: str_res_number_store = str_res_number_store + ' ' if number_chars == 3: str_res_number_store = str_res_number_store + ' ' out_line = 'REMARK 470 1 ' + res_name_store + ' ' + chain_store + ' ' + str_res_number_store + ' ' + out_list pdb_annotate.append(out_line) # End of loop over entities count1 = count1 + 1 ########################################################################## # MI stereochemistry subtopic to identify discrete and errors REMARK 500 # ########################################################################## pdb_annotate.append('REMARK 500') pdb_annotate.append('REMARK 500 GEOMETRY AND STEREOCHEMISTRY') pdb_annotate.append('REMARK 500 SUBTOPIC: DISCRETE DISORDER') pdb_annotate.append('REMARK 500') pdb_annotate.append('REMARK 500 RESIDUES IN MULTIPLE CONFORMATIONS') pdb_annotate.append('REMARK 500 THE FOLLOWING RESIDUES WERE DESCRIBED BY MULTIPLE') pdb_annotate.append('REMARK 500 CONFORMATIONS.(M=MODEL NUMBER; RES=RESIDUE NAME; C=CHAIN') pdb_annotate.append('REMARK 500 IDENTIFIER; SSSEQ=SEQUENCE NUMBER; I=INSERTION CODE.)') pdb_annotate.append('REMARK 500') pdb_annotate.append('REMARK 500 M RES C SSSEQI') pdb_annotate.append('REMARK 500') # Reduce disorder atom list to residue list disorder_count = len(aList_disorder_chain) write_error = 'no' count = 0 while count < entity_count: chain_store = aList_chain_store[count] res_number_store = aList_res_number_store[count] count1 = 0 found_error = 'no' while count1 < disorder_count: chain = aList_disorder_chain[count1] res_number = aList_disorder_resno[count1] res_name = aList_disorder_resname[count1] if chain == chain_store and res_number == 
res_number_store and found_error == 'no': out_line = 'REMARK 500 1 ' + res_name + ' ' + chain + ' ' + res_number pdb_annotate.append(out_line) found_error = 'yes' write_error = 'yes' count1 = count1 + 1 count = count + 1 if write_error == 'no': pdb_annotate.append('REMARK 500 THERE WERE NO RESIDUES IN MULTIPLE CONFORMATIONS') # Covalent bond lengths pdb_annotate.append('REMARK 500') pdb_annotate.append('REMARK 500 GEOMETRY AND STEREOCHEMISTRY') pdb_annotate.append('REMARK 500 SUBTOPIC: COVALENT BOND LENGTHS (MI ERROR LIST)') pdb_annotate.append('REMARK 500') pdb_annotate.append('REMARK 500 THE FOLLOWING RESIDUES CONTAINED ABNORMAL BOND LENGTHS') pdb_annotate.append('REMARK 500 (M=MODEL NUMBER; RES=RESIDUE NAME; C=CHAIN') pdb_annotate.append('REMARK 500 IDENTIFIER; SSSEQ=SEQUENCE NUMBER; I=INSERTION CODE.)') pdb_annotate.append('REMARK 500') pdb_annotate.append('REMARK 500 M RES C SSSEQI') pdb_annotate.append('REMARK 500') write_error = 'no' count = 0 while count < entity_count: chain_store = aList_chain_store[count] res_number_store = aList_res_number_store[count] count1 = 0 found_error = 'no' while count1 < bond_count: chain = aList_bonds_chain[count1] res_number = aList_bonds_resno[count1] res_name = aList_bonds_resname[count1] if chain == chain_store and res_number == res_number_store and found_error == 'no': out_line = 'REMARK 500 1 ' + res_name + ' ' + chain + ' ' + res_number pdb_annotate.append(out_line) found_error = 'yes' write_error = 'yes' count1 = count1 + 1 count = count + 1 if write_error == 'no': pdb_annotate.append('REMARK 500 THERE WERE NO SEVERE ABNORMALITIES IN THIS CATEGORY') # Covalent bond angles pdb_annotate.append('REMARK 500') pdb_annotate.append('REMARK 500 GEOMETRY AND STEREOCHEMISTRY') pdb_annotate.append('REMARK 500 SUBTOPIC: COVALENT BOND ANGLES (MI ERROR LIST)') pdb_annotate.append('REMARK 500') pdb_annotate.append('REMARK 500 THE FOLLOWING RESIDUES CONTAINED ABNORMAL BOND ANGLES') pdb_annotate.append('REMARK 500 (M=MODEL NUMBER; 
RES=RESIDUE NAME; C=CHAIN') pdb_annotate.append('REMARK 500 IDENTIFIER; SSSEQ=SEQUENCE NUMBER; I=INSERTION CODE.)') pdb_annotate.append('REMARK 500') pdb_annotate.append('REMARK 500 M RES C SSSEQI') pdb_annotate.append('REMARK 500') write_error = 'no' count = 0 while count < entity_count: chain_store = aList_chain_store[count] res_number_store = aList_res_number_store[count] count1 = 0 found_error = 'no' while count1 < angles_count: chain = aList_angles_chain[count1] res_number = aList_angles_resno[count1] res_name = aList_angles_resname[count1] if chain == chain_store and res_number == res_number_store and found_error == 'no': out_line = 'REMARK 500 1 ' + res_name + ' ' + chain + ' ' + res_number pdb_annotate.append(out_line) found_error = 'yes' write_error = 'yes' count1 = count1 + 1 count = count + 1 if write_error == 'no': pdb_annotate.append('REMARK 500 THERE WERE NO SEVERE ABNORMALITIES IN THIS CATEGORY') # Chiral centers pdb_annotate.append('REMARK 500') pdb_annotate.append('REMARK 500 GEOMETRY AND STEREOCHEMISTRY') pdb_annotate.append('REMARK 500 SUBTOPIC: CHIRAL CENTERS (MI ERROR LIST)') pdb_annotate.append('REMARK 500') pdb_annotate.append('REMARK 500 THE FOLLOWING RESIDUES CONTAINED ABNORMAL CHIRAL CENTERS') pdb_annotate.append('REMARK 500 (M=MODEL NUMBER; RES=RESIDUE NAME; C=CHAIN') pdb_annotate.append('REMARK 500 IDENTIFIER; SSSEQ=SEQUENCE NUMBER; I=INSERTION CODE.)') pdb_annotate.append('REMARK 500') pdb_annotate.append('REMARK 500 M RES C SSSEQI') pdb_annotate.append('REMARK 500') write_error = 'no' count = 0 while count < entity_count: chain_store = aList_chain_store[count] res_number_store = aList_res_number_store[count] count1 = 0 found_error = 'no' while count1 < chiral_count: chain = aList_chiral_chain[count1] res_number = aList_chiral_resno[count1] res_name = aList_chiral_resname[count1] if chain == chain_store and res_number == res_number_store and found_error == 'no': out_line = 'REMARK 500 1 ' + res_name + ' ' + chain + ' ' + res_number 
pdb_annotate.append(out_line) found_error = 'yes' write_error = 'yes' count1 = count1 + 1 count = count + 1 if write_error == 'no': pdb_annotate.append('REMARK 500 THERE WERE NO SEVERE ABNORMALITIES IN THIS CATEGORY') # Abnormal omega pdb_annotate.append('REMARK 500') pdb_annotate.append('REMARK 500 GEOMETRY AND STEREOCHEMISTRY') pdb_annotate.append('REMARK 500 SUBTOPIC: NON-CIS, NON-TRANS (MI ERROR LIST)') pdb_annotate.append('REMARK 500') pdb_annotate.append('REMARK 500 THE FOLLOWING RESIDUES CONTAINED ABNORMAL OMEGA ANGLES') pdb_annotate.append('REMARK 500 (M=MODEL NUMBER; RES=RESIDUE NAME; C=CHAIN') pdb_annotate.append('REMARK 500 IDENTIFIER; SSSEQ=SEQUENCE NUMBER; I=INSERTION CODE.)') pdb_annotate.append('REMARK 500') pdb_annotate.append('REMARK 500 M RES C SSSEQI') pdb_annotate.append('REMARK 500') write_error = 'no' count = 0 while count < entity_count: chain_store = aList_chain_store[count] res_number_store = aList_res_number_store[count] count1 = 0 found_error = 'no' while count1 < omega_count: chain = aList_omega_chain[count1] res_number = aList_omega_resno[count1] res_name = aList_omega_resname[count1] if chain == chain_store and res_number == res_number_store and found_error == 'no': out_line = 'REMARK 500 1 ' + res_name + ' ' + chain + ' ' + res_number pdb_annotate.append(out_line) found_error = 'yes' write_error = 'yes' count1 = count1 + 1 count = count + 1 if write_error == 'no': pdb_annotate.append('REMARK 500 THERE WERE NO SEVERE ABNORMALITIES IN THIS CATEGORY') # Close contacts (note - all) pdb_annotate.append('REMARK 500') pdb_annotate.append('REMARK 500 GEOMETRY AND STEREOCHEMISTRY') pdb_annotate.append('REMARK 500 SUBTOPIC: CLOSE CONTACTS (MI ERROR LIST)') pdb_annotate.append('REMARK 500') pdb_annotate.append('REMARK 500 THE FOLLOWING RESIDUES CONTAINED ABNORMAL CONTACT DISTANCES') pdb_annotate.append('REMARK 500 (M=MODEL NUMBER; RES=RESIDUE NAME; C=CHAIN') pdb_annotate.append('REMARK 500 IDENTIFIER; SSSEQ=SEQUENCE NUMBER; I=INSERTION CODE.)') 
pdb_annotate.append('REMARK 500') pdb_annotate.append('REMARK 500 M RES C SSSEQI') pdb_annotate.append('REMARK 500') write_error = 'no' count = 0 while count < entity_count: chain_store = aList_chain_store[count] res_number_store = aList_res_number_store[count] count1 = 0 found_error = 'no' while count1 < contacts_count: chain = aList_contacts_chain[count1] res_number = aList_contacts_resno[count1] res_name = aList_contacts_resname[count1] if chain == chain_store and res_number == res_number_store and found_error == 'no': out_line = 'REMARK 500 1 ' + res_name + ' ' + chain + ' ' + res_number pdb_annotate.append(out_line) found_error = 'yes' write_error = 'yes' count1 = count1 + 1 count = count + 1 if write_error == 'no': pdb_annotate.append('REMARK 500 THERE WERE NO SEVERE ABNORMALITIES IN THIS CATEGORY') # PHI-PSI data pdb_annotate.append('REMARK 500') pdb_annotate.append('REMARK 500 GEOMETRY AND STEREOCHEMISTRY') pdb_annotate.append('REMARK 500 SUBTOPIC: TORSION ANGLES (MI ERROR LIST)') pdb_annotate.append('REMARK 500') pdb_annotate.append('REMARK 500 THE FOLLOWING RESIDUES CONTAINED ABNORMAL PHI-PSI ANGLES') pdb_annotate.append('REMARK 500 (M=MODEL NUMBER; RES=RESIDUE NAME; C=CHAIN') pdb_annotate.append('REMARK 500 IDENTIFIER; SSSEQ=SEQUENCE NUMBER; I=INSERTION CODE.)') pdb_annotate.append('REMARK 500') pdb_annotate.append('REMARK 500 M RES C SSSEQI') pdb_annotate.append('REMARK 500') write_error = 'no' count = 0 while count < entity_count: chain_store = aList_chain_store[count] res_number_store = aList_res_number_store[count] count1 = 0 found_error = 'no' while count1 < rama_count: chain = aList_rama_chain[count1] res_number = aList_rama_resno[count1] res_name = aList_rama_resname[count1] if chain == chain_store and res_number == res_number_store and found_error == 'no': out_line = 'REMARK 500 1 ' + res_name + ' ' + chain + ' ' + res_number pdb_annotate.append(out_line) found_error = 'yes' write_error = 'yes' count1 = count1 + 1 count = count + 1 if write_error 
== 'no': pdb_annotate.append('REMARK 500 THERE WERE NO SEVERE ABNORMALITIES IN THIS CATEGORY') # invented MI remark to identify density issues pdb_annotate.append('REMARK 501') pdb_annotate.append('REMARK 501 OTHER VALIDATION') pdb_annotate.append('REMARK 501 SUBTOPIC: ELECTRON DENSITY (MI ERROR LIST)') pdb_annotate.append('REMARK 501') pdb_annotate.append('REMARK 501 THE FOLLOWING RESIDUES ARE NEAR DENSITY DIFFERENCE FEATURES') pdb_annotate.append('REMARK 501 (M=MODEL NUMBER; RES=RESIDUE NAME; C=CHAIN') pdb_annotate.append('REMARK 501 IDENTIFIER; SSSEQ=SEQUENCE NUMBER; I=INSERTION CODE.)') pdb_annotate.append('REMARK 501') pdb_annotate.append('REMARK 501 M RES C SSSEQI') pdb_annotate.append('REMARK 501') write_error = 'no' count = 0 while count < entity_count: chain_store = aList_chain_store[count] res_number_store = aList_res_number_store[count] count1 = 0 found_error = 'no' while count1 < density_count: chain = aList_density_chain[count1] res_number = aList_density_resno[count1] res_name = aList_density_resname[count1] if chain == chain_store and res_number == res_number_store and found_error == 'no': out_line = 'REMARK 501 1 ' + res_name + ' ' + chain + ' ' + res_number pdb_annotate.append(out_line) found_error = 'yes' write_error = 'yes' count1 = count1 + 1 count = count + 1 if write_error == 'no': pdb_annotate.append('REMARK 501 THERE WERE NO SEVERE ABNORMALITIES IN THIS CATEGORY') # invented MI remark to identify cis-pep pdb_annotate.append('REMARK 501') pdb_annotate.append('REMARK 501 OTHER VALIDATION') pdb_annotate.append('REMARK 501 SUBTOPIC: CIS PEPTIDE (MI ERROR LIST)') pdb_annotate.append('REMARK 501') pdb_annotate.append('REMARK 501 THE FOLLOWING RESIDUES HAVE CIS PEPTIDE BONDS') pdb_annotate.append('REMARK 501 (M=MODEL NUMBER; RES=RESIDUE NAME; C=CHAIN') pdb_annotate.append('REMARK 501 IDENTIFIER; SSSEQ=SEQUENCE NUMBER; I=INSERTION CODE.)') pdb_annotate.append('REMARK 501') pdb_annotate.append('REMARK 501 M RES C SSSEQI') pdb_annotate.append('REMARK 
501') write_error = 'no' count = 0 while count < entity_count: chain_store = aList_chain_store[count] res_number_store = aList_res_number_store[count] count1 = 0 found_error = 'no' while count1 < cis_count: chain = aList_cis_chain[count1] res_number = aList_cis_resno[count1] res_name = aList_cis_resname[count1] if chain == chain_store and res_number == res_number_store and found_error == 'no': out_line = 'REMARK 501 1 ' + res_name + ' ' + chain + ' ' + res_number pdb_annotate.append(out_line) found_error = 'yes' write_error = 'yes' count1 = count1 + 1 count = count + 1 if write_error == 'no': pdb_annotate.append('REMARK 501 THERE WERE NO SEVERE ABNORMALITIES IN THIS CATEGORY') # invented MI remark to identify rotamer (chi-1) pdb_annotate.append('REMARK 501') pdb_annotate.append('REMARK 501 OTHER VALIDATION') pdb_annotate.append('REMARK 501 SUBTOPIC: ROTAMER (MI ERROR LIST)') pdb_annotate.append('REMARK 501') pdb_annotate.append('REMARK 501 THE FOLLOWING RESIDUES HAVE CHI-1 ANGLES WHICH DEVIATE MORE') pdb_annotate.append('REMARK 501 THAN 45 DEGREES FROM A KNOWN ROTAMER') pdb_annotate.append('REMARK 501 (M=MODEL NUMBER; RES=RESIDUE NAME; C=CHAIN') pdb_annotate.append('REMARK 501 IDENTIFIER; SSSEQ=SEQUENCE NUMBER; I=INSERTION CODE.)') pdb_annotate.append('REMARK 501') pdb_annotate.append('REMARK 501 M RES C SSSEQI') pdb_annotate.append('REMARK 501') write_error = 'no' count = 0 while count < entity_count: chain_store = aList_chain_store[count] res_number_store = aList_res_number_store[count] count1 = 0 found_error = 'no' while count1 < rotamer_count: chain = aList_rotamer_chain[count1] res_number = aList_rotamer_resno[count1] res_name = aList_rotamer_resname[count1] if chain == chain_store and res_number == res_number_store and found_error == 'no': out_line = 'REMARK 501 1 ' + res_name + ' ' + chain + ' ' + res_number pdb_annotate.append(out_line) found_error = 'yes' write_error = 'yes' count1 = count1 + 1 count = count + 1 if write_error == 'no': 
pdb_annotate.append('REMARK 501 THERE WERE NO SEVERE ABNORMALITIES IN THIS CATEGORY') ################################ # Insert annotation into PDB # ################################ num_lines = len(pdb_annotate) num_SEQRES = len(aList_SEQRES) file = open(filename_pdb_full,'r') allLines = file.readlines() file.close() os.remove(filename_pdb_full) file = open(filename_pdb_full,'w') tag_prev = '?' for eachLine in allLines: tag = eachLine[0:6] tag = tag.strip() if tag != 'REMARK' and tag_prev == 'REMARK': count = 0 while count < num_lines: out_line = pdb_annotate[count] file.write(out_line) file.write('\n') count = count + 1 count = 0 while count < num_SEQRES: out_line = aList_SEQRES[count] file.write(out_line) file.write('\n') count = count + 1 file.write(eachLine) tag_prev = tag file.close() else: print 'Structure checking only enabled for REFMAC5 refinement' file = open(errorfile,'w') file.write('#\n') file.write('# Generation of error lists requires REFMAC5 refinement\n') file.write('#\n') file.close() ############################################################ # Standard file for nomalous difference map when available # ############################################################ if anomlabel != 'none' and siganomlabel != 'none' and ref_engine == 'refmac5': fileexists = os.path.exists('anom_diffmap') if fileexists != 0: os.remove('anom_diffmap.map') fileexists = os.path.exists('mi_anommap_out.mtz') if fileexists != 0: os.remove('mi_anommap_out.mtz') # Combine anomalous coefficients with refined phases back onto the refined mtz file = open('mi_cad.inp','w') file.write('LABIN FILE_NUMBER 1 ALL\n') file.write('LABIN FILE_NUMBER 2 ALL\n') file.write('END\n') file.close() runcad = 'cad HKLIN1 ' + filename_mtz + ' HKLIN2 mi_anommap.mtz HKLOUT mi_anommap_out.mtz < mi_cad.inp > mi_cad.log' os.system(runcad) fileexists = os.path.exists('mi_anommap_out.mtz') if fileexists != 0: os.remove('mi_cad.log') os.remove('mi_cad.inp') os.remove('mi_anommap.mtz') else: print 'The 
CAD run to reattach anomalous difference data seems to have failed' time.sleep(4) return 1 # Use special CCP4/FFT condition that rotates phases for anomalous difference maps file = open('mi_fft.inp','w') file.write('LABIN DANO=') file.write(anomlabel) file.write(' PHI=PHIC\n') file.write('END\n') file.close() runfft = 'fft HKLIN mi_anommap_out.mtz MAPOUT mi_1ff.map < mi_fft.inp 1> mi_fft.log 2>mi_fft_err.log' os.system(runfft) os.remove('mi_fft.inp') fileexists = os.path.exists('mi_fft.log') if fileexists != 0: os.remove('mi_fft.log') fileexists = os.path.exists('mi_fft_err.log') if fileexists != 0: os.remove('mi_fft_err.log') # Note that FFT may fail if anom columns are present but unfilled so not a stop fileexists = os.path.exists('mi_1ff.map') if fileexists != 0: print 'Creating anomalous difference map file: anom_diffmap.map' # Build cell around the protein in the anomalous difference map with CCP4/MAPMASK file = open('mi_mapmask.inp','w') file.write('BORDER 5.0\n') file.write('EXTEND XTAL\n') file.write('END\n') file.close() runmapmask = 'mapmask XYZIN ' + filename_pdb + ' MAPIN mi_1ff.map MAPOUT anom_diffmap.map < mi_mapmask.inp > mi_mapmask.log' os.system(runmapmask) fileexists = os.path.exists('anom_diffmap.map') if fileexists == 0: print 'MAPMASK for anomalous difference map failed' time.sleep(4) return 1 else: os.remove('mi_mapmask.inp') os.remove('mi_mapmask.log') os.remove('mi_1ff.map') # Rename mtz carrying anomalous data to standard refinement output os.remove(filename_mtz) os.rename('mi_anommap_out.mtz',filename_mtz) filename_anom_full = os.path.join(workingdir,'anom_diffmap.map') ###################### # Append project log # ###################### print 'Writing project log' runtime = time.ctime(time.time()) file = open(projectlog,'a') file.seek(0,2) file.write('Job ID: ') file.write(job_id) file.write('\nDate: ') file.write(runtime) file.write('\nInput atoms: ') file.write(pdbfile) file.write('\nInput data: ') file.write(mtzfile) 
file.write('\nInput library: ') file.write(libfile) if ref_engine == 'rigid': file.write('\nOutput atoms: ') file.write(filename_pdb_full) file.write('\nOutput phased data: ') file.write(filename_mtz_full) file.write('\nOutput log: ') file.write(filename_log_full) file.write('\nOutput CIF log: ') file.write(filename_refmac_full) file.write('\nOptions: none\n') file.write('Summary: REFMAC5 rigid-body Rwork=') file.write(rwork) file.write(' Rfree=') file.write(rfree) file.write(' Resolution=') file.write(resolution_output) if ref_engine == 'refmac5': file.write('\nOutput atoms: ') file.write(filename_pdb_full) file.write('\nOutput phased data: ') file.write(filename_mtz_full) file.write('\nOutput log: ') file.write(filename_log_full) file.write('\nOutput CIF log: ') file.write(filename_refmac_full) file.write('\nOutput error list: ') file.write(filename_errors_full) file.write('\nOutput anomalous difference map: ') file.write(filename_anom_full) if water_pick == 'yes': file.write('\nOptions: water-pick\n') else: file.write('\nOptions: none\n') file.write('Summary: REFMAC5 Rwork=') file.write(rwork) file.write(' Rfree=') file.write(rfree) file.write(' RMSD(bonds)=') file.write(rmsd_bonds) file.write(' Resolution=') file.write(resolution_output) if ref_engine == 'shelx': file.write('\nOutput pdb file: ') file.write(filename_pdb_full) file.write('\nOutput precomputed map data file: ') file.write(filename_mtz_full) file.write('\nOutput log file: ') file.write(filename_log_full) file.write('\nOutput lst file: ') file.write(filename_lst_full) file.write('\nOptions: none\n') file.write('Summary: SHELXH Rwork=') file.write(rwork) file.write(' Resolution=') file.write(resolution_output) if ref_engine == 'refmac5' or ref_engine == 'shelx': file.write('\nParameters: Weight=') file.write(weight) file.write(' Cycles=') file.write(cycles) file.write(' Bfactor=') file.write(bref_type) file.write(' TLS_input_file=') file.write(tlsfile) file.write('\n---------------\n') file.close() 
# Clean-up job-specific temporary CCP4_SCR space fileexists = os.path.exists(temp_lib) if fileexists != 0: os.remove(temp_lib) fileexists = os.path.exists(working_ccp4_scratch) if fileexists != 0: dir_list = os.listdir(working_ccp4_scratch) number_files = len(dir_list) count = 0 while count < number_files: target_file = dir_list[count] target_file_full_path = os.path.join(working_ccp4_scratch,target_file) os.remove(target_file_full_path) count = count + 1 os.rmdir(working_ccp4_scratch) time.sleep(4) # return 0 if __name__ == "__main__": sys.exit(Run())
codeparrot/github-code-clean
# This file is part of HamsiManager.
#
# Copyright (c) 2010 - 2015 Murat Demir <mopened@gmail.com>
#
# Hamsi Manager is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Hamsi Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HamsiManager; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

import os
import sys
import shutil
import stat
import re
import tempfile
import mimetypes

from Core.MyObjects import *
from Core import Universals as uni
from Core import Records
from Core import Organizer
from Core import Dialogs

# Directories the user has already agreed to merge files into (consulted by
# checkDestination so the same question is not asked twice in one batch).
appendingDirectories = []
# Encoding reported by the OS, and the (validated) encoding actually used for
# every trEncode/trDecode fallback below. Both are filled by
# initFileSystemEncoding(); until then they are None.
defaultFileSystemEncoding = None
fileSystemEncoding = None
willCheckIconDirectories = []
willCheckEmptyDirectories = []
willCheckEmptyDirectoriesSubDirectoryStatus = []
isSmartCheckIcon = False
isSmartCheckEmptyDirectories = False
sep = os.sep
# Application/user paths; resolved by initStartupVariables().
executableAppPath, userDirectoryPath, HamsiManagerDirectory = None, None, None
themePath, pathOfSettingsDirectory, recordFilePath, oldRecordsDirectoryPath = None, None, None, None


def initStartupVariables():
    """Resolve the executable, user-home and settings paths at startup.

    Must run before any other helper in this module: it also initializes the
    file-system encoding that every trEncode/trDecode fallback relies on.
    """
    global executableAppPath, userDirectoryPath, HamsiManagerDirectory
    global themePath, pathOfSettingsDirectory, recordFilePath, oldRecordsDirectoryPath
    initFileSystemEncoding()
    executableAppPath = str(os.path.abspath(sys.argv[0]))
    if isLink(executableAppPath):
        # Follow a symlinked launcher so resources resolve next to the real app.
        executableAppPath = readLink(executableAppPath)
    HamsiManagerDirectory = getDirName(executableAppPath)
    userDirectoryPath = os.path.expanduser("~")
    try:
        userDirectoryPath = uni.trDecode(userDirectoryPath, fileSystemEncoding)
    except:
        pass
    themePath = joinPath(HamsiManagerDirectory, "Themes", "Default")
    pathOfSettingsDirectory = joinPath(userDirectoryPath, ".HamsiApps", "HamsiManager")
    recordFilePath = joinPath(pathOfSettingsDirectory, "logs.txt")
    oldRecordsDirectoryPath = joinPath(pathOfSettingsDirectory, "OldRecords")


def initFileSystemEncoding():
    """Detect and sanity-check the file-system encoding used by this module."""
    global defaultFileSystemEncoding, fileSystemEncoding
    defaultFileSystemEncoding = sys.getfilesystemencoding()
    if defaultFileSystemEncoding is None:
        defaultFileSystemEncoding = sys.getdefaultencoding()
    defaultFileSystemEncoding = defaultFileSystemEncoding.lower()
    from encodings import aliases
    if defaultFileSystemEncoding == "iso-8859-1":
        defaultFileSystemEncoding = "latin-1"
    # If the normalized name is not a known codec alias, fall back to the raw
    # value the interpreter reported.
    if [str(v).lower().replace("_", "-") for k, v in aliases.aliases.items()].count(defaultFileSystemEncoding) == 0:
        defaultFileSystemEncoding = sys.getfilesystemencoding().lower()
    fileSystemEncoding = str(defaultFileSystemEncoding)


def joinPath(_a, *_b):
    """os.path.join with encode/decode fallbacks for the file-system encoding."""
    _a = str(_a)
    c = []
    for x in _b:
        try:
            c.append(uni.trEncode(str(x), fileSystemEncoding))
        except:
            c.append(str(x))
    c = tuple(c)
    try:
        returnValue = os.path.join(uni.trEncode(_a, fileSystemEncoding), *c)
    except:
        returnValue = os.path.join(_a, *c)
    try:
        return uni.trDecode(returnValue, fileSystemEncoding)
    except:
        return returnValue


def splitPath(_a):
    """os.path.split with encoding fallbacks; returns a list [head, tail]."""
    _a = str(_a)
    try:
        returnValue = os.path.split(uni.trEncode(_a, fileSystemEncoding))
    except:
        returnValue = os.path.split(_a)
    c = []
    for x in returnValue:
        try:
            c.append(uni.trDecode(x, fileSystemEncoding))
        except:
            c.append(x)
    return c


def isFile(_oldPath):
    """os.path.isfile with the usual encoding fallback."""
    _oldPath = str(_oldPath)
    try:
        return os.path.isfile(uni.trEncode(_oldPath, fileSystemEncoding))
    except:
        return os.path.isfile(_oldPath)


def isDir(_oldPath):
    """os.path.isdir with the usual encoding fallback."""
    _oldPath = str(_oldPath)
    try:
        return os.path.isdir(uni.trEncode(_oldPath, fileSystemEncoding))
    except:
        return os.path.isdir(_oldPath)


def isLink(_oldPath):
    """os.path.islink with the usual encoding fallback."""
    _oldPath = str(_oldPath)
    try:
        return os.path.islink(uni.trEncode(_oldPath, fileSystemEncoding))
    except:
        return os.path.islink(_oldPath)


def isDirEmpty(_oldPath):
    """Return True only for an existing directory with no entries."""
    _oldPath = str(_oldPath)
    if isDir(_oldPath):
        if len(listDir(_oldPath)) == 0:
            return True
    return False


def isExist(_oldPath):
    """Return True if the path is a file, directory or (possibly broken) link."""
    if isFile(_oldPath):
        return True
    elif isDir(_oldPath):
        return True
    elif isLink(_oldPath):
        return True
    return False


def isHidden(_path, _name=None):
    """Return True for dot-files/dot-directories and, on Windows, for paths
    carrying the FILE_ATTRIBUTE_HIDDEN flag (best-effort via pywin32)."""
    if _name is None:
        _name = getBaseName(_path)
    if _name.startswith('.'):
        return True
    # Any component in the path starting with '.' makes the whole path hidden.
    if _path.count(sep + '.') > 0:
        return True
    if uni.isWindows:
        try:
            import win32api, win32con
            try:
                attr = win32api.GetFileAttributes(uni.trEncode(_path, fileSystemEncoding))
            except:
                attr = win32api.GetFileAttributes(_path)
            if attr & win32con.FILE_ATTRIBUTE_HIDDEN:
                return True
        except:
            # pywin32 missing or call failed: treat as not hidden.
            return False
    return False


def isBinary(_path):
    """Heuristically decide whether the file at _path is binary.

    Reads the first 1 KiB; a NUL byte, or more than 30% non-printable bytes,
    counts as binary. Returns False on any read error or for an empty file.
    """
    _path = str(_path)
    try:
        f = open(uni.trEncode(_path, fileSystemEncoding), 'rb')
    except:
        f = open(_path, 'rb')
    try:
        chunk = f.read(1024)
        f.close()
        if not chunk:
            # Empty file: nothing to classify (previously this fell through to
            # a ZeroDivisionError swallowed by the bare except — same result).
            return False
        # BUGFIX: must compare bytes with bytes. The old "'\0' in chunk"
        # raised TypeError on Python 3, was silently swallowed below, and made
        # every file with a NUL byte report as non-binary.
        if b'\0' in chunk:
            # found null byte
            return True
        else:
            printableExtendedAscii = b'\n\r\t\f\b'
            if bytes is str:
                # Python 2: build the printable range from chr().
                printableExtendedAscii += b''.join(map(chr, range(32, 256)))
            else:
                # Python 3: bytes(iterable of ints).
                printableExtendedAscii += bytes(range(32, 256))
            control_chars = chunk.translate(None, printableExtendedAscii)
            nontext_ratio = float(len(control_chars)) / float(len(chunk))
            return nontext_ratio > 0.3
    except:
        pass
    return False


def isAvailableNameForEncoding(_newPath):
    """Return True if _newPath can be represented in the file-system encoding."""
    try:
        _newPath = str(_newPath)
        t = uni.trEncode(_newPath, fileSystemEncoding)
        return True
    except:
        return False


def getAvailablePathByPath(_newPath):
    """Sanitize a full path component-by-component into a legal path.

    On Windows the first component (drive) keeps its separator; every other
    component has illegal characters replaced with '_' and reserved device
    names (aux, con, com1-9, lpt1-9, prn) prefixed with '_'.
    """
    _newPath = getRealPath(str(_newPath))
    newPath = ""
    isFirstPart = True
    for pathPart in _newPath.split(sep):
        if pathPart != "":
            badchars = re.compile(r'[/]')
            pathPart = badchars.sub('_', pathPart)
            if uni.isWindows:
                if isFirstPart:
                    pathPart += sep
                else:
                    badchars = re.compile(r'[^A-Za-z0-9_.\- \w\s]+|\.$|^ | $|^$', re.U)
                    # BUGFIX: the old call was re.sub(badchars, '_', ..., re.U)
                    # which passed re.U (== 32) as the *count* argument, capping
                    # substitutions at 32. The pattern is already compiled with
                    # re.U, so use the compiled pattern's sub() directly.
                    pathPart = badchars.sub('_', uni.trUnicode(pathPart))
                    badnames = re.compile(r'(aux|com[1-9]|con|lpt[1-9]|prn)(\.|$)')
                    if badnames.match(pathPart):
                        pathPart = "_" + pathPart
            newPath = joinPath(newPath, pathPart)
        else:
            # Empty component (leading separator on POSIX): keep the separator.
            newPath += sep
        isFirstPart = False
    return newPath


def getAvailableNameByName(_newPath):
    """Like getAvailablePathByPath but without resolving the path first."""
    _newPath = str(_newPath)
    newPath = ""
    pathParts = _newPath.split(sep)
    for pathPart in pathParts:
        if pathPart != "":
            badchars = re.compile(r'[/]')
            pathPart = badchars.sub('_', pathPart)
            if uni.isWindows:
                badchars = re.compile(r'[^A-Za-z0-9_.\- \w\s]+|\.$|^ | $|^$', re.U)
                # BUGFIX: same flags-passed-as-count defect as in
                # getAvailablePathByPath; use the compiled pattern's sub().
                pathPart = badchars.sub('_', uni.trUnicode(pathPart))
                badnames = re.compile(r'(aux|com[1-9]|con|lpt[1-9]|prn)(\.|$)')
                if badnames.match(pathPart):
                    pathPart = "_" + pathPart
            newPath = joinPath(newPath, pathPart)
        else:
            newPath += sep
    return newPath


def getSize(_oldPath):
    """os.path.getsize with the usual encoding fallback."""
    try:
        return os.path.getsize(uni.trEncode(_oldPath, fileSystemEncoding))
    except:
        return os.path.getsize(_oldPath)


def getMimeType(_oldPath):
    """mimetypes.guess_type with encoding fallback; returns (type, encoding)."""
    try:
        return mimetypes.guess_type(uni.trEncode(_oldPath, fileSystemEncoding))
    except:
        return mimetypes.guess_type(_oldPath)


def getDirectorySize(_oldPath):
    """Sum of file sizes under _oldPath, walked recursively."""
    total_size = 0
    names = walk(_oldPath)
    if names is not None:
        for dirpath, dirnames, filenames in names:
            for f in filenames:
                total_size += getSize(joinPath(dirpath, f))
    return total_size


def getDetails(_oldPath):
    """os.stat with encoding fallback; None when stat fails entirely."""
    try:
        return os.stat(uni.trEncode(_oldPath, fileSystemEncoding))
    except:
        try:
            return os.stat(_oldPath)
        except:
            return None


def getExtendedDetails(_oldPath):
    """Return a dict of human-usable stat fields (empty strings when unknown)."""
    details = getDetails(_oldPath)
    extendedDetails = {}
    if details is not None:
        extendedDetails["accessRights"] = oct(stat.S_IMODE(details[stat.ST_MODE]))
        extendedDetails["numberOfHardLinks"] = details[stat.ST_NLINK]
        extendedDetails["userIDOfOwner"] = details[stat.ST_UID]
        extendedDetails["groupIDOfOwner"] = details[stat.ST_GID]
        extendedDetails["size"] = details[stat.ST_SIZE]
        extendedDetails["lastAccessed"] = details[stat.ST_ATIME]
        extendedDetails["lastModified"] = details[stat.ST_MTIME]
        extendedDetails["lastMetadataChanged"] = details[stat.ST_CTIME]
        extendedDetails["userNameOfOwner"] = uni.getUserNameByID(details[stat.ST_UID])
        extendedDetails["longUserNameOfOwner"] = uni.getUserLongNameByID(details[stat.ST_UID])
        extendedDetails["groupNameOfOwner"] = uni.getGroupNameByID(details[stat.ST_GID])
    else:
        # stat failed: return placeholder values so callers can render blanks.
        extendedDetails["accessRights"] = ""
        extendedDetails["numberOfHardLinks"] = ""
        extendedDetails["userIDOfOwner"] = ""
        extendedDetails["groupIDOfOwner"] = ""
        extendedDetails["size"] = "0"
        extendedDetails["lastAccessed"] = None
        extendedDetails["lastModified"] = None
        extendedDetails["lastMetadataChanged"] = None
        extendedDetails["userNameOfOwner"] = ""
        extendedDetails["longUserNameOfOwner"] = ""
        extendedDetails["groupNameOfOwner"] = ""
    return extendedDetails


def getObjectType(_oldPath):
    """Return "directory" for directories, otherwise "file"."""
    objectType = "file"
    if isDir(_oldPath):
        objectType = "directory"
    return objectType


def getDirName(_oldPath):
    """os.path.dirname with encode/decode fallbacks for the fs encoding."""
    _oldPath = str(_oldPath)
    try:
        returnValue = os.path.dirname(uni.trEncode(_oldPath, fileSystemEncoding))
    except:
        returnValue = os.path.dirname(_oldPath)
    try:
        return uni.trDecode(returnValue, fileSystemEncoding)
    except:
        return returnValue


def readLink(_oldPath):
    """os.readlink with encode/decode fallbacks for the fs encoding."""
    _oldPath = str(_oldPath)
    try:
        returnValue = os.readlink(uni.trEncode(_oldPath, fileSystemEncoding))
    except:
        returnValue = os.readlink(_oldPath)
    try:
        return uni.trDecode(returnValue, fileSystemEncoding)
    except:
        return returnValue


def getRealDirName(_oldPath, isGetParent=False):
    """Walk up from _oldPath until an existing directory is found.

    Falls back to the root ("C:" + sep on Windows, sep elsewhere) when no
    ancestor exists. With isGetParent=True the search starts at the parent.
    """
    _oldPath = str(_oldPath)
    if len(_oldPath) == 0:
        if uni.isWindows:
            return "C:" + sep
        return sep
    if _oldPath[-1] == sep:
        # Drop a trailing separator so dirname behaves predictably.
        _oldPath = _oldPath[:-1]
    if isGetParent:
        realDirName = getDirName(str(_oldPath))
    else:
        realDirName = str(_oldPath)
    while 1:
        if isDir(realDirName):
            break
        if realDirName == "":
            # Exhausted all ancestors: fall back to the filesystem root.
            if uni.isWindows:
                realDirName = "C:" + sep
            else:
                realDirName = sep
            break
        realDirName = getDirName(realDirName)
    return realDirName


def getRealPath(_path, _parentPath=None):
    """Return an absolute path, resolving "./" and "../" against _parentPath."""
    _path = str(_path)
    if uni.isWindows:
        # Normalize both slash styles to the native separator.
        _path = _path.replace("\\", sep).replace("/", sep)
    if len(_path) == 0:
        if uni.isWindows:
            return "C:" + sep
        return sep
    if _parentPath is not None:
        _parentPath = getRealPath(_parentPath)
        if _path[:2] == "." + sep:
            _path = _parentPath + _path[1:]
        if _path[:3] == ".." + sep:
            _path = getDirName(_parentPath) + _path[2:]
    return os.path.abspath(_path)


def getShortPath(_path, _parentPath):
    """Rewrite _path relative to _parentPath (simple textual replacement)."""
    _path = str(_path)
    _parentPath = str(_parentPath)
    _path = _path.replace(_parentPath, ".")
    return _path


def getBaseName(_oldPath):
    """os.path.basename with encode/decode fallbacks for the fs encoding."""
    _oldPath = str(_oldPath)
    try:
        returnValue = os.path.basename(uni.trEncode(_oldPath, fileSystemEncoding))
    except:
        returnValue = os.path.basename(_oldPath)
    try:
        return uni.trDecode(returnValue, fileSystemEncoding)
    except:
        return returnValue


def getTempDir():
    """tempfile.gettempdir decoded to the file-system encoding when possible."""
    returnValue = tempfile.gettempdir()
    try:
        return uni.trDecode(returnValue, fileSystemEncoding)
    except:
        return returnValue


def checkExtension(_oldPath, _extension):
    """Case-insensitively check whether _oldPath ends with "." + _extension."""
    _oldPath = str(_oldPath).lower()
    _extension = str(_extension).lower()
    if _extension.strip() != "":
        if _extension[0] == ".":
            _extension = _extension[1:]
        extIndex = _oldPath.find("." + _extension)
        if extIndex != -1:
            # Match only when the extension is at the very end of the path.
            if _oldPath[extIndex:] == "." + _extension:
                return True
    return False


def getFileExtension(_fileName):
    """Return the lower-cased extension per the "fileExtensionIs" setting.

    Modes (in uni.fileExtensionIsKeys order): everything after the first dot;
    after the last dot; or a "smart" mode that keeps compound extensions like
    tar.gz / tar.bz2. Returns "" when there is no dot at all.
    """
    _fileName = str(_fileName).lower()
    if _fileName.find(".") != -1:
        if uni.MySettings["fileExtensionIs"] == uni.fileExtensionIsKeys[0]:
            return _fileName.split(".", 1)[1]
        elif uni.MySettings["fileExtensionIs"] == uni.fileExtensionIsKeys[1]:
            return _fileName.rsplit(".", 1)[1]
        elif uni.MySettings["fileExtensionIs"] == uni.fileExtensionIsKeys[2]:
            try:
                m = re.compile(r'^.*?[.](?P<ext>tar\.gz|tar\.bz2|\w+)$').match(_fileName)
                if m is not None:
                    return m.group('ext')
            except:
                return _fileName.rsplit(".", 1)[1]
            # Regex did not match: fall back to the text after the last dot.
            return _fileName.rsplit(".", 1)[1]
    return ""


def getFileNameParts(_fileNameOrPath):
    """Split a file name (or path) into (stem, lower-cased extension),
    honoring the same "fileExtensionIs" setting as getFileExtension."""
    _fileName = getBaseName(str(_fileNameOrPath))
    fileName, fileExtension = "", ""
    if _fileName.find(".") != -1:
        fParts = [_fileName, fileExtension]
        if uni.MySettings["fileExtensionIs"] == uni.fileExtensionIsKeys[0]:
            fParts = _fileName.split(".", 1)
        elif uni.MySettings["fileExtensionIs"] == uni.fileExtensionIsKeys[1]:
            fParts = _fileName.rsplit(".", 1)
elif uni.MySettings["fileExtensionIs"] == uni.fileExtensionIsKeys[2]: try: m = re.compile(r'^.*?[.](?P<ext>tar\.gz|tar\.bz2|\w+)$').match(_fileName) if m is not None: ext = m.group('ext') else: ext = _fileName.rsplit(".", 1)[1] fParts = [_fileName.replace("." + ext, ""), ext] except: fParts = _fileName.rsplit(".", 1) fileName = fParts[0] fileExtension = fParts[1] else: fileName = _fileName return fileName, fileExtension.lower() def moveFileOrDir(_oldPath, _newPath, _isQuiet=True): _oldPath, _newPath = str(_oldPath), str(_newPath) if uni.isWindows: _oldPath = _oldPath.replace("\\", sep).replace("/", sep) _newPath = _newPath.replace("\\", sep).replace("/", sep) try: if getDirName(_oldPath) == getDirName(_newPath) or ( uni.isWindows and Organizer.makeCorrectCaseSensitive(_oldPath, uni.validSentenceStructureKeys[ 1]) == Organizer.makeCorrectCaseSensitive(_newPath, uni.validSentenceStructureKeys[1])): try: os.rename(uni.trEncode(_oldPath, fileSystemEncoding), uni.trEncode(_newPath, fileSystemEncoding)) except: os.rename(_oldPath, _newPath) else: if isDir(getDirName(_newPath)) is False: makeDirs(getDirName(_newPath)) try: shutil.move(uni.trEncode(_oldPath, fileSystemEncoding), uni.trEncode(_newPath, fileSystemEncoding)) except: shutil.move(_oldPath, _newPath) Records.add("Moved", _oldPath, _newPath) except: if _isQuiet is False: answer = Dialogs.askSpecial(translate("FileUtils", "An Error Has Occurred"), str(translate("FileUtils", "\"%s\" > \"%s\" : an unknown error has occurred.<br>Please check it and try again.")) % ( Organizer.getLink(_oldPath), Organizer.getLink(_newPath)), translate("Dialogs", "Cancel"), translate("Dialogs", "Show Error Details"), translate("Dialogs", "Retry")) if answer == translate("Dialogs", "Retry"): moveFileOrDir(_oldPath, _newPath, _isQuiet) if answer == translate("Dialogs", "Show Error Details"): from Core import ReportBug ReportBug.ReportBug() else: from Core import ReportBug ReportBug.ReportBug() def copyFileOrDir(_oldPath, _newPath): 
_oldPath, _newPath = str(_oldPath), str(_newPath) if isDir(getDirName(_newPath)) is False: makeDirs(getDirName(_newPath)) if isFile(_oldPath): try: shutil.copy(uni.trEncode(_oldPath, fileSystemEncoding), uni.trEncode(_newPath, fileSystemEncoding)) except: shutil.copy(_oldPath, _newPath) else: copyDirTree(_oldPath, _newPath) Records.add("Copied", _oldPath, _newPath) def copyDirTree(_oldPath, _newPath): _oldPath, _newPath = str(_oldPath), str(_newPath) try: shutil.copytree(uni.trEncode(_oldPath, fileSystemEncoding), uni.trEncode(_newPath, fileSystemEncoding)) except: shutil.copytree(_oldPath, _newPath) Records.add("Copied", _oldPath, _newPath) def copyDirContent(_oldPath, _newPath): _oldPath, _newPath = str(_oldPath), str(_newPath) if isDir(_newPath) is False: makeDirs(_newPath) for contentPath in listDir(_oldPath): if isDir(joinPath(_oldPath, contentPath)): copyDirContent(joinPath(_oldPath, contentPath), joinPath(_newPath, contentPath)) else: copyFileOrDir(joinPath(_oldPath, contentPath), joinPath(_newPath, contentPath)) def createSymLink(_oldPath, _newPath): _oldPath, _newPath = str(_oldPath), str(_newPath) if uni.isAvailableSymLink(): from os import symlink if isExist(_newPath): removeFileOrDir(_newPath) try: symlink(uni.trEncode(_oldPath, fileSystemEncoding), uni.trEncode(_newPath, fileSystemEncoding)) except: symlink(_oldPath, _newPath) Records.add("Created Link", _oldPath, _newPath) return True else: Records.add("Can Not Created Link", _oldPath, _newPath) copyOrChange(_oldPath, _newPath, getObjectType(_oldPath)) return False def listDir(_oldPath): names = [] _oldPath = checkSource(_oldPath, "directory") if _oldPath is not None: try: names = os.listdir(uni.trEncode(_oldPath, fileSystemEncoding)) except: names = os.listdir(_oldPath) names.sort(key=trSort) return names def walk(_oldPath): names = None _oldPath = checkSource(_oldPath, "directory") if _oldPath is not None: try: names = os.walk(uni.trEncode(_oldPath, fileSystemEncoding)) except: names = 
os.walk(_oldPath) return names def makeDirs(_newPath): if isWritableFileOrDir(getRealDirName(_newPath)): try: os.makedirs(uni.trEncode(_newPath, fileSystemEncoding)) except: os.makedirs(_newPath) Records.add("Created", _newPath) return True return False def onRMTreeError(_func, _path, _excInfo): try: os.chmod(uni.trEncode(getDirName(_path), fileSystemEncoding), stat.S_IWRITE | stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) except: os.chmod(getDirName(_path), stat.S_IWRITE | stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) Records.add("CHmod Changed To Remove", getDirName(_path)) try: os.chmod(uni.trEncode(_path, fileSystemEncoding), stat.S_IWRITE | stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) except: os.chmod(_path, stat.S_IWRITE | stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) Records.add("CHmod Changed To Remove", _path) try: os.unlink(uni.trEncode(_path, fileSystemEncoding)) except: os.unlink(_path) Records.add("Removed", _path) def removeDir(_oldPath): if uni.getBoolValue("isDontDeleteFileAndDirectory"): moveToPathOfDeleted(_oldPath) else: try: shutil.rmtree(uni.trEncode(_oldPath, fileSystemEncoding), ignore_errors=False, onerror=onRMTreeError) except: shutil.rmtree(_oldPath, ignore_errors=False, onerror=onRMTreeError) Records.add("Removed", _oldPath) return True def removeFile(_oldPath): if uni.getBoolValue("isDontDeleteFileAndDirectory"): moveToPathOfDeleted(_oldPath) else: try: os.remove(uni.trEncode(_oldPath, fileSystemEncoding)) except: os.remove(_oldPath) Records.add("Removed", _oldPath) return True def moveToPathOfDeleted(_oldPath): from time import strftime import random moveFileOrDir(_oldPath, joinPath(uni.MySettings["pathOfDeletedFilesAndDirectories"], strftime("%Y%m%d_%H%M%S") + "_" + str( random.randrange(0, 9999999)) + "_" + getBaseName(_oldPath))) def trSort(_info): import locale if uni.isPython3k: _info = str(_info) try: return locale.strxfrm(uni.trEncode(_info, fileSystemEncoding)) except: return locale.strxfrm(_info) def isReadableFileOrDir(_newPath, 
_isOnlyCheck=False, _isInLoop=False): realPath = _newPath if isFile(realPath) is False: realPath = getRealDirName(realPath) try: if os.access(uni.trEncode(realPath, fileSystemEncoding), os.R_OK): return True except: if os.access(realPath, os.R_OK): return True if _isOnlyCheck is False: if _isInLoop: okButtonLabel = translate("Dialogs", "Continue") else: okButtonLabel = translate("Dialogs", "OK") if isDir(realPath): answer = Dialogs.askSpecial(translate("FileUtils", "Access Denied"), str(translate("FileUtils", "\"%s\" : you do not have the necessary permissions to read this directory.<br>Please check your access controls and retry.")) % Organizer.getLink( realPath), okButtonLabel, translate("Dialogs", "Retry")) if answer == translate("Dialogs", "Retry"): return isReadableFileOrDir(_newPath, _isOnlyCheck, _isInLoop) else: answer = Dialogs.askSpecial(translate("FileUtils", "Access Denied"), str(translate("FileUtils", "\"%s\" : you do not have the necessary permissions to read this file.<br>Please check your access controls and retry.")) % Organizer.getLink( realPath), okButtonLabel, translate("Dialogs", "Retry")) if answer == translate("Dialogs", "Retry"): return isReadableFileOrDir(_newPath, _isOnlyCheck, _isInLoop) return False def isWritableFileOrDir(_newPath, _isOnlyCheck=False, _isInLoop=False): realPath = _newPath if isFile(realPath) is False: realPath = getRealDirName(realPath) try: if os.access(uni.trEncode(realPath, fileSystemEncoding), os.W_OK): return True except: if os.access(realPath, os.W_OK): return True if _isOnlyCheck is False: if _isInLoop: okButtonLabel = translate("Dialogs", "Continue") else: okButtonLabel = translate("Dialogs", "OK") if isDir(realPath): answer = Dialogs.askSpecial(translate("FileUtils", "Access Denied"), str(translate("FileUtils", "\"%s\" : you do not have the necessary permissions to change this directory.<br>Please check your access controls and retry.")) % Organizer.getLink( realPath), okButtonLabel, translate("Dialogs", 
"Retry")) if answer == translate("Dialogs", "Retry"): return isWritableFileOrDir(_newPath, _isOnlyCheck, _isInLoop) else: answer = Dialogs.askSpecial(translate("FileUtils", "Access Denied"), str(translate("FileUtils", "\"%s\" : you do not have the necessary permissions to change this file.<br>Please check your access controls and retry.")) % Organizer.getLink( realPath), okButtonLabel, translate("Dialogs", "Retry")) if answer == translate("Dialogs", "Retry"): return isWritableFileOrDir(_newPath, _isOnlyCheck, _isInLoop) return False def checkSource(_oldPath, _objectType="fileAndDirectory", _isShowAlert=True): oldPath = str(_oldPath) if uni.isWindows: _oldPath = _oldPath.replace("\\", sep).replace("/", sep) if _objectType == "file" and isFile(oldPath): return oldPath elif _objectType == "directory" and isDir(oldPath): return oldPath elif _objectType == "fileAndDirectory" and (isDir(oldPath) or isFile(oldPath)): return oldPath if uni.isWindows: oldPath = "\\\\?\\" + oldPath # for wrong name such as "C:\Temp \test.txt", "C:\Temp\test.txt " if _objectType == "file" and isFile(oldPath): return oldPath elif _objectType == "directory" and isDir(oldPath): return oldPath elif _objectType == "fileAndDirectory" and (isDir(oldPath) or isFile(oldPath)): return oldPath _rPath = getRealPath(str(oldPath)) if _rPath != oldPath: oldPath = _rPath if _objectType == "file" and isFile(oldPath): return oldPath elif _objectType == "directory" and isDir(oldPath): return oldPath elif _objectType == "fileAndDirectory" and (isDir(oldPath) or isFile(oldPath)): return oldPath if _isShowAlert: if _objectType == "file": Dialogs.showError(translate("FileUtils", "Cannot Find File"), str(translate("FileUtils", "\"%s\" : cannot find a file with this name.<br>Please make sure that it exists and retry.")) % Organizer.getLink( _oldPath)) elif _objectType == "directory": Dialogs.showError(translate("FileUtils", "Cannot Find Directory"), str(translate("FileUtils", "\"%s\" : cannot find a folder with this 
name.<br>Please make sure that it exists and retry.")) % Organizer.getLink( _oldPath)) else: Dialogs.showError(translate("FileUtils", "Cannot Find File Or Directory"), str(translate("FileUtils", "\"%s\" : cannot find a file or directory with this name.<br>Please make sure that it exists and retry.")) % Organizer.getLink( _oldPath)) return None def checkDestination(_oldPath, _newPath, _isQuiet=False): _oldPath, _newPath = str(_oldPath), str(_newPath) if uni.isWindows: _oldPath = _oldPath.replace("\\", sep).replace("/", sep) _newPath = _newPath.replace("\\", sep).replace("/", sep) while isAvailableNameForEncoding(_newPath) is False: _newPath = Dialogs.getText(translate("FileUtils", "Unavailable Name"), str(translate("FileUtils", "\"%s\" : can not encoded by %s.<br>Please review and correct the name!<br>You can correct your file system encoding name in Options/Advanced, If you want.<br>You can click cancel to cancel this action.")) % ( _newPath, fileSystemEncoding), _newPath) if _newPath is None: return False availableNameByName = getAvailablePathByPath(_newPath) while _newPath != availableNameByName: _newPath = Dialogs.getText(translate("FileUtils", "Unavailable Name"), str(translate("FileUtils", "\"%s\" : this file path is not valid.<br>Please review and correct the path of file!<br>You can click cancel to cancel this action.")) % ( _newPath), availableNameByName) if _newPath is None: return False availableNameByName = getAvailablePathByPath(_newPath) if isExist(_newPath): if isWritableFileOrDir(_newPath): if uni.isWindows and Organizer.makeCorrectCaseSensitive(_oldPath, uni.validSentenceStructureKeys[ 1]) == Organizer.makeCorrectCaseSensitive(_newPath, uni.validSentenceStructureKeys[1]): return _newPath else: if isFile(_newPath): if _isQuiet: return _newPath else: answer = Dialogs.askSpecial(translate("FileUtils", "Current File Name"), str(translate("FileUtils", "\"%s\" : there already exists a file with the same name.<br>Replace it with the current one?")) % 
Organizer.getLink( _newPath), translate("Dialogs", "Replace"), translate("Dialogs", "Rename"), translate("Dialogs", "Cancel")) if answer == translate("Dialogs", "Replace"): removeFile(_newPath) return _newPath elif answer == translate("Dialogs", "Rename"): newPath = Dialogs.getSaveFileName(translate("FileUtils", "Select A New Name For File"), _newPath, translate("FileUtils", "All Files") + " (*)", 0) if newPath is not None: return checkDestination(_oldPath, newPath, _isQuiet) return False else: return False elif isDir(_newPath): if isFile(_oldPath): answer = Dialogs.askSpecial(translate("FileUtils", "Current Directory Name"), str(translate("FileUtils", "\"%s\" : there already exists a folder with the same name.<br>\"%s\" Add this file to the current folder?")) % ( Organizer.getLink(_newPath), Organizer.getLink(_newPath)), translate("Dialogs", "Yes, Add Into"), translate("Dialogs", "Rename"), translate("Dialogs", "Cancel")) if answer == translate("Dialogs", "Yes, Add Into"): return joinPath(_newPath, getBaseName(_newPath)) elif answer == translate("Dialogs", "Rename"): newPath = Dialogs.getSaveFileName(translate("FileUtils", "Select A New Name For File"), _newPath, translate("FileUtils", "All Files") + " (*)", 0) if newPath is not None: return checkDestination(_oldPath, newPath, _isQuiet) return False else: return False else: isAllowed = False for tDir in appendingDirectories: if _newPath == tDir: isAllowed = True return _newPath if isAllowed is False: if _isQuiet: appendingDirectories.append(_newPath) return _newPath else: answer = Dialogs.askSpecial(translate("FileUtils", "Current Directory Name"), str(translate("FileUtils", "\"%s\" : there already exists a directory with the same name.<br>Add your files to the current directory?")) % Organizer.getLink( _newPath), translate("Dialogs", "Yes, Add Into"), translate("Dialogs", "Rename"), translate("Dialogs", "Cancel")) if answer == translate("Dialogs", "Yes, Add Into"): appendingDirectories.append(_newPath) return 
_newPath elif answer == translate("Dialogs", "Rename"): newPath = Dialogs.getExistingDirectory(translate("FileUtils", "Select A Directory"), _newPath, 0) if newPath is not None: return checkDestination(_oldPath, newPath, _isQuiet) return False else: return False else: return False else: return False else: if isWritableFileOrDir(getDirName(_newPath)): return _newPath else: return False return False def checkNewDestination(_newPath, _isQuiet=False): _newPath = str(_newPath) if uni.isWindows: _newPath = _newPath.replace("\\", sep).replace("/", sep) while isAvailableNameForEncoding(_newPath) is False: _newPath = Dialogs.getText(translate("FileUtils", "Unavailable Name"), str(translate("FileUtils", "\"%s\" : can not encoded by %s.<br>Please review and correct the name!<br>You can correct your file system encoding name in Options/Advanced, If you want.<br>You can click cancel to cancel this action.")) % ( _newPath, fileSystemEncoding), _newPath) if _newPath is None: return False availableNameByName = getAvailablePathByPath(_newPath) while _newPath != availableNameByName: _newPath = Dialogs.getText(translate("FileUtils", "Unavailable Name"), str(translate("FileUtils", "\"%s\" : this file path is not valid.<br>Please review and correct the path of file!<br>You can click cancel to cancel this action.")) % ( _newPath), availableNameByName) if _newPath is None: return False availableNameByName = getAvailablePathByPath(_newPath) if isExist(_newPath): if isWritableFileOrDir(_newPath): if isFile(_newPath): if _isQuiet: return _newPath else: answer = Dialogs.askSpecial(translate("FileUtils", "Current File Name"), str(translate("FileUtils", "\"%s\" : there already exists a file with the same name.<br>Replace it with the current one?")) % Organizer.getLink( _newPath), translate("Dialogs", "Replace"), translate("Dialogs", "Rename"), translate("Dialogs", "Cancel")) if answer == translate("Dialogs", "Replace"): removeFile(_newPath) return _newPath elif answer == translate("Dialogs", 
"Rename"): newPath = Dialogs.getSaveFileName(translate("FileUtils", "Select A New Name For File"), _newPath, translate("FileUtils", "All Files") + " (*)", 0) if newPath is not None: return checkNewDestination(newPath, _isQuiet) elif isDir(_newPath): if not _isQuiet: answer = Dialogs.ask(translate("FileUtils", "Current Directory Name"), str(translate("FileUtils", "\"%s\" : there already exists a directory with the same name.<br>Are you want to choose another name?")) % Organizer.getLink( _newPath)) if answer == Dialogs.Yes: newPath = Dialogs.getText(translate("FileUtils", "Choose Another Name"), translate("FileUtils", "Choose Another Name"), _newPath) if newPath is not None: return checkNewDestination(newPath, _isQuiet) else: if isWritableFileOrDir(getDirName(_newPath)): return _newPath return False def readDirectory(_path, _objectType="fileAndDirectory", _isShowHiddens=True): global appendingDirectories appendingDirectories = [] musicExtensions = [] musicFileNames = [] fileAndDirectoryNames, fileNames, directoryNames = [], [], [] if _objectType == "music": musicFileNames = [] musicExtensions = uni.getListValue("musicExtensions") for name in listDir(_path): if _isShowHiddens or isHidden(joinPath(_path, name), name) is False: try: fileAndDirectoryNames.append(uni.trDecode(name, fileSystemEncoding)) except: fileAndDirectoryNames.append(name) for name in fileAndDirectoryNames: if isDir(joinPath(_path, name)): directoryNames.append(name) else: fileNames.append(name) if _objectType == "music": for ext in musicExtensions: try: if name.split(".")[-1].lower() == str(ext).lower(): musicFileNames.append(name) except: pass if _objectType == "file": return fileNames elif _objectType == "directory": return directoryNames elif _objectType == "fileAndDirectory": return fileAndDirectoryNames elif _objectType == "music": return musicFileNames else: return [] def readDirectoryAll(_path): tFileAndDirs = [] for name in listDir(_path): try: tFileAndDirs.append(str(uni.trDecode(name, 
fileSystemEncoding))) except: try: tFileAndDirs.append(str(name)) except: tFileAndDirs.append(name) return tFileAndDirs def readDirectoryWithSubDirectories(_path, _subDirectoryDeep=-1, _objectType="fileAndDirectory", _isShowHiddens=True, _currentSubDeep=0): global appendingDirectories _subDirectoryDeep = int(_subDirectoryDeep) allFilesAndDirectories, files, directories, appendingDirectories = [], [], [], [] try: namesList = readDirectoryAll(_path) except: return [] for name in namesList: if _isShowHiddens or isHidden(joinPath(_path, name), name) is False: if isDir(joinPath(_path, name)): directories.append(name) else: files.append(name) for name in directories: if _subDirectoryDeep == -1 or _subDirectoryDeep > _currentSubDeep: if _objectType == "fileAndDirectory" or _objectType == "directory": allFilesAndDirectories.append(joinPath(_path, name)) for dd in readDirectoryWithSubDirectories(joinPath(_path, name), _subDirectoryDeep, _objectType, _isShowHiddens, _currentSubDeep + 1): allFilesAndDirectories.append(dd) if _objectType != "directory": if _objectType == "file" or _objectType == "fileAndDirectory": for name in files: allFilesAndDirectories.append(joinPath(_path, name)) elif _objectType == "music": musicExtensions = uni.getListValue("musicExtensions") for name in files: for ext in musicExtensions: try: if name.split(".")[-1].lower() == str(ext).lower(): allFilesAndDirectories.append(joinPath(_path, name)) except: pass return allFilesAndDirectories def readDirectoryWithSubDirectoriesThread(_path, _subDirectoryDeep=-1, _objectType="fileAndDirectory", _isShowHiddens=True, _currentSubDeep=0): from Core import MyThread global appendingDirectories allFilesAndDirectories, appendingDirectories = [], [] infoProcess = MyThread.MyWaitThread(translate("FileUtils", "Reading Directory...")) myProcs = MyThread.MyThread(readDirectoryWithSubDirectories, infoProcess.finish, args=[_path, _subDirectoryDeep, _objectType, _isShowHiddens, _currentSubDeep]) myProcs.start() 
infoProcess.run() allFilesAndDirectories = myProcs.data return allFilesAndDirectories def readFromFile(_path, _contentEncoding=fileSystemEncoding): _path = str(_path) if _contentEncoding is not None: if uni.isPython3k: try: f = open(uni.trEncode(_path, fileSystemEncoding), encoding=_contentEncoding) except: f = open(_path, encoding=_contentEncoding) else: import codecs try: f = codecs.open(uni.trEncode(_path, fileSystemEncoding), encoding=_contentEncoding) except: f = codecs.open(_path, encoding=_contentEncoding) try: info = f.read() f.close() except: info = readFromFile(_path, None) else: try: f = open(uni.trEncode(_path, fileSystemEncoding)) except: f = open(_path) info = f.read() f.close() return info def readLinesFromFile(_path, _contentEncoding=fileSystemEncoding): _path = str(_path) if _contentEncoding is not None: if uni.isPython3k: try: f = open(uni.trEncode(_path, fileSystemEncoding), encoding=_contentEncoding) except: f = open(_path, encoding=_contentEncoding) else: import codecs try: f = codecs.open(uni.trEncode(_path, fileSystemEncoding), encoding=_contentEncoding) except: f = codecs.open(_path, encoding=_contentEncoding) try: info = f.readlines() f.close() except: info = readLinesFromFile(_path, None) else: try: f = open(uni.trEncode(_path, fileSystemEncoding)) except: f = open(_path) info = f.readlines() f.close() return info def readFromBinaryFile(_path): _path = str(_path) try: f = open(uni.trEncode(_path, fileSystemEncoding), "rb") except: f = open(_path, "rb") info = f.read() f.close() return info def writeToFile(_path, _contents=""): _path = str(_path) if isDir(getDirName(_path)) is False: makeDirs(getDirName(_path)) try: f = open(uni.trEncode(_path, fileSystemEncoding), "w") except: f = open(_path, "w") f.write(_contents) f.close() Records.add("Writed", _path) def writeToBinaryFile(_path, _contents=""): _path = str(_path) if isDir(getDirName(_path)) is False: makeDirs(getDirName(_path)) try: f = open(uni.trEncode(_path, fileSystemEncoding), 
"wb") except: f = open(_path, "wb") f.write(_contents) f.flush() f.close() Records.add("Writed", _path) def addToFile(_path, _contents=""): _path = str(_path) try: f = open(uni.trEncode(_path, fileSystemEncoding), "a") except: f = open(_path, "a") f.write(_contents) f.close() Records.add("Added", _path) def readTextFile(_path, _contentEncoding=fileSystemEncoding): fileDetails = {} fileDetails["path"] = _path fileDetails["content"] = readFromFile(_path, _contentEncoding) #return [getDirName(_path), getBaseName(_path), readFromFile(_path)] return fileDetails def writeTextFile(_oldFileValues, _newFileValues, _charSet="utf-8"): if _oldFileValues["content"] != _newFileValues["content"] or _charSet != "utf-8": writeToFile(_oldFileValues["path"], uni.trEncode(_newFileValues["content"], _charSet)) if getRealPath(_oldFileValues["path"]) != getRealPath(_newFileValues["path"]): return moveOrChange(_oldFileValues["path"], _newFileValues["path"]) return _oldFileValues["path"] def clearEmptyDirectories(_path, _isShowState=False, _isCloseState=False, _isAutoCleanSubFolder=True): #If directory will deleted : returns True #If directory will cleaned : returns False clearUnneededs(_path) dontRemovingFilesCount = 0 filesAndDirectories = readDirectoryAll(_path) filesAndDirectoriesCount = len(filesAndDirectories) if _isShowState and _isCloseState: uni.startThreadAction() for nameNo, name in enumerate(filesAndDirectories): if _isShowState: isContinueThreadAction = uni.isContinueThreadAction() else: isContinueThreadAction = True if isContinueThreadAction: if _isShowState: Dialogs.showState(translate("FileUtils", "Checking Empty Directories"), nameNo, filesAndDirectoriesCount, True) if isFile(joinPath(_path, name)): dontRemovingFilesCount += 1 if uni.getBoolValue("isDeleteEmptyDirectories"): for f in uni.getListValue("ignoredFiles"): try: if str(f) == name: dontRemovingFilesCount -= 1 break except: pass for ext in uni.getListValue("ignoredFileExtensions"): try: if checkExtension(name, 
ext): dontRemovingFilesCount -= 1 break except: pass if isDir(joinPath(_path, name)): dontRemovingFilesCount += 1 if _isAutoCleanSubFolder is False: break if uni.getBoolValue("isDeleteEmptyDirectories"): for f in uni.getListValue("ignoredDirectories"): try: if str(f) == name: dontRemovingFilesCount -= 1 break except: pass if clearEmptyDirectories(joinPath(_path, name), _isShowState, False, _isAutoCleanSubFolder): dontRemovingFilesCount -= 1 else: if _isShowState: Dialogs.showState(translate("FileUtils", "Checked Empty Directories"), filesAndDirectoriesCount, filesAndDirectoriesCount, True) if _isShowState and _isCloseState: uni.finishThreadAction() if dontRemovingFilesCount == 0 and uni.getBoolValue("isDeleteEmptyDirectories"): if _isShowState: Dialogs.showState(translate("FileUtils", "Cleaning Empty Directories"), 0, 1, True) clearIgnoreds(_path) removeDir(_path) if _isCloseState: Dialogs.showState(translate("FileUtils", "Directory Deleted"), 1, 1, True) Dialogs.show(translate("FileUtils", "Directory Deleted"), str( translate("FileUtils", "\"%s\" deleted.Because this directory is empty.")) % Organizer.getLink(_path)) return True if _isCloseState: Dialogs.showState(translate("FileUtils", "Directories Cleaned"), 1, 1, True) return False def clearUnneededs(_path): _path = checkSource(_path, "directory", False) if _path is not None: for f in uni.getListValue("unneededFiles"): try: if isFile(joinPath(_path, str(f))): removeFile(joinPath(_path, str(f))) except: pass for f in uni.getListValue("unneededDirectoriesIfIsEmpty"): try: if isDirEmpty(joinPath(_path, str(f))) and f.strip() != "": removeDir(joinPath(_path, str(f))) except: pass for f in uni.getListValue("unneededDirectories"): try: if isDir(joinPath(_path, str(f))) and f.strip() != "": removeFileOrDir(joinPath(_path, str(f))) except: pass for name in readDirectoryAll(_path): if isFile(joinPath(_path, name)): for ext in uni.getListValue("unneededFileExtensions"): try: if checkExtension(name, ext): 
removeFile(joinPath(_path, name)) except: pass def clearIgnoreds(_path): _path = checkSource(_path, "directory", False) if _path is not None: for f in uni.getListValue("ignoredFiles"): try: if isFile(joinPath(_path, str(f))): removeFile(joinPath(_path, str(f))) except: pass for f in uni.getListValue("ignoredDirectories"): try: if isDir(joinPath(_path, str(f))) and f.strip() != "": removeFileOrDir(joinPath(_path, str(f))) except: pass for name in readDirectoryAll(_path): if isFile(joinPath(_path, name)): for ext in uni.getListValue("ignoredFileExtensions"): try: if checkExtension(name, ext): removeFile(joinPath(_path, name)) except: pass def removeFileOrDir(_path): if isWritableFileOrDir(getDirName(_path)): if isFile(_path): removeFile(_path) else: if isWritableFileOrDir(_path): removeDir(_path) def removeOnlySubFiles(_path): if isWritableFileOrDir(_path): for f in readDirectoryAll(_path): if isFile(joinPath(_path, f)): removeFile(joinPath(_path, f)) elif isDir(joinPath(_path, f)): removeOnlySubFiles(joinPath(_path, f)) def moveOrChange(_oldPath, _newPath, _objectType="file", _actionType="auto", _isQuiet=False): _oldPath, _newPath = str(_oldPath), str(_newPath) isChange = False _oldPath = checkSource(_oldPath, _objectType) if _oldPath is not None: isChange = True _newPath = checkDestination(_oldPath, _newPath, _isQuiet) if isChange and _newPath: if _objectType == "directory" and _actionType == "auto": if uni.getBoolValue("isClearEmptyDirectoriesWhenMoveOrChange"): if checkEmptyDirectories(_oldPath, True, True, uni.getBoolValue("isAutoCleanSubFolderWhenMoveOrChange")): return _oldPath for tDir in appendingDirectories: if _newPath == tDir: for name in readDirectoryAll(_oldPath): moveOrChange(joinPath(_oldPath, name), joinPath(_newPath, name), getObjectType(joinPath(_oldPath, name)), _actionType, _isQuiet) isChange = False if isChange: moveFileOrDir(_oldPath, _newPath, _isQuiet) if _objectType == "directory" and _actionType == "auto": if 
uni.getBoolValue("isClearEmptyDirectoriesWhenMoveOrChange"): if checkEmptyDirectories(_newPath, True, True, uni.getBoolValue("isAutoCleanSubFolderWhenMoveOrChange")): return _newPath if isDir(_newPath) and _actionType == "auto": if uni.isActiveDirectoryCover and uni.getBoolValue("isActiveAutoMakeIconToDirectory") and uni.getBoolValue( "isAutoMakeIconToDirectoryWhenMoveOrChange"): checkIcon(_newPath) elif _actionType == "auto": if uni.isActiveDirectoryCover and uni.getBoolValue("isActiveAutoMakeIconToDirectory") and uni.getBoolValue( "isAutoMakeIconToDirectoryWhenFileMove"): if isDir(getDirName(_oldPath)): checkIcon(getDirName(_oldPath)) if isDir(getDirName(_newPath)): checkIcon(getDirName(_newPath)) return _newPath else: return _oldPath def copyOrChange(_oldPath, _newPath, _objectType="file", _actionType="auto", _isQuiet=False): _oldPath, _newPath = str(_oldPath), str(_newPath) isChange = False _oldPath = checkSource(_oldPath, _objectType) if _oldPath is not None: isChange = True _newPath = checkDestination(_oldPath, _newPath, _isQuiet) if isChange and _newPath: if _objectType == "directory" and _actionType == "auto": if uni.getBoolValue("isClearEmptyDirectoriesWhenCopyOrChange"): if checkEmptyDirectories(_oldPath, True, True, uni.getBoolValue("isAutoCleanSubFolderWhenCopyOrChange")): return _oldPath for tDir in appendingDirectories: if _newPath == tDir: for name in readDirectoryAll(_oldPath): copyOrChange(joinPath(_oldPath, name), joinPath(_newPath, name), getObjectType(joinPath(_oldPath, name)), _actionType, _isQuiet) isChange = False if isChange: copyFileOrDir(_oldPath, _newPath) if isDir(_newPath) and _actionType == "auto": if uni.isActiveDirectoryCover and uni.getBoolValue("isActiveAutoMakeIconToDirectory") and uni.getBoolValue( "isAutoMakeIconToDirectoryWhenCopyOrChange"): checkIcon(_newPath) return _newPath else: return _oldPath def activateSmartCheckEmptyDirectories(): global isSmartCheckEmptyDirectories, willCheckEmptyDirectories, 
willCheckEmptyDirectoriesSubDirectoryStatus isSmartCheckEmptyDirectories = True willCheckEmptyDirectories = [] willCheckEmptyDirectoriesSubDirectoryStatus = [] def completeSmartCheckEmptyDirectories(_isShowState=False, _isCloseState=False): global isSmartCheckEmptyDirectories, willCheckEmptyDirectories, willCheckEmptyDirectoriesSubDirectoryStatus isSmartCheckEmptyDirectories = False for x in range(0, len(willCheckEmptyDirectories)): clearEmptyDirectories(willCheckEmptyDirectories[x], _isShowState, _isCloseState, willCheckEmptyDirectoriesSubDirectoryStatus[x]) willCheckEmptyDirectories = [] willCheckEmptyDirectoriesSubDirectoryStatus = [] def checkEmptyDirectories(_path, _isShowState=False, _isCloseState=False, _isAutoCleanSubFolder=True, _isClear=False): global isSmartCheckEmptyDirectories, willCheckEmptyDirectories, willCheckEmptyDirectoriesSubDirectoryStatus if uni.getBoolValue("isActiveClearGeneral") or _isClear: if isSmartCheckEmptyDirectories: if willCheckEmptyDirectories.count(_path) == 0: willCheckEmptyDirectories.append(_path) willCheckEmptyDirectoriesSubDirectoryStatus.append(_isAutoCleanSubFolder) else: _path = checkSource(_path, "directory", False) if _path is not None: return clearEmptyDirectories(_path, _isShowState, _isCloseState, _isAutoCleanSubFolder) def activateSmartCheckIcon(): global isSmartCheckIcon, willCheckIconDirectories isSmartCheckIcon = True willCheckIconDirectories = [] def completeSmartCheckIcon(): global isSmartCheckIcon, willCheckIconDirectories isSmartCheckIcon = False for iconDir in willCheckIconDirectories: iconDir = checkSource(iconDir, "directory", False) if iconDir is not None: checkIcon(iconDir) willCheckIconDirectories = [] def checkIcon(_path, _isClear=False): global isSmartCheckIcon, willCheckIconDirectories if isSmartCheckIcon and _isClear is False: if willCheckIconDirectories.count(_path) == 0: willCheckIconDirectories.append(_path) else: if _isClear is False: coverPath = "" coverName = getFirstImageInDirectory(_path) if 
coverName is not None: coverPath = joinPath(_path, coverName) return setIconToDirectory(_path, coverPath) elif _isClear: return setIconToDirectory(_path) def getFirstImageInDirectory(_path, _coverNameIfExist=None, _isCheckDelete=False, _isAsk=True): _path = str(_path) cover = None imageFiles = [] if isReadableFileOrDir(_path, True): for fileName in readDirectoryAll(_path): if isFile(joinPath(_path, fileName)): if str(fileName.split(".")[0]).lower() == str(_coverNameIfExist).lower(): cover = fileName if uni.getListValue("imageExtensions").count((fileName.split(".")[-1]).lower()) != 0: imageFiles.append(fileName) if cover is None: for coverName in uni.getListValue("priorityIconNames"): if str(fileName.split(".")[0]).lower() == str(coverName).lower(): cover = fileName break if _isAsk and eval(uni.MySettings["isAskIfHasManyImagesInAlbumDirectory"].title()) and len(imageFiles) > 1: selectedIndex = 0 if cover is not None: selectedIndex = imageFiles.index(cover) cover = Dialogs.getItem(translate("FileUtils", "Select A Cover"), str(translate("FileUtils", "Please select a cover for \"%s\".")) % ( Organizer.getLink(_path)), imageFiles, selectedIndex) else: if cover is None and len(imageFiles) > 0: for imgFile in imageFiles: cover = imgFile break if _isCheckDelete and cover is not None: if isWritableFileOrDir(_path): if eval(uni.MySettings["isDeleteOtherImages"].title()): for imgFile in imageFiles: if cover != imgFile: removeFile(joinPath(_path, imgFile)) return cover def setIconToDirectory(_path, _iconName=""): _path = str(_path) if isDir(_path): if isWritableFileOrDir(joinPath(_path, ".directory")): if _iconName is None: return False _iconName = str(_iconName).strip() returnValue, isChanging, isChange, isCorrectFileContent, rows = False, False, True, False, [] if isFile(_iconName): if str(_path) == str(getDirName(_iconName)): _iconName = "." 
+ sep + getBaseName(_iconName) try: info = readFromFile(joinPath(_path, ".directory")) if info.find("[Desktop Entry]") == -1: info = "[Desktop Entry]\n" + info isCorrectFileContent = True except: info = "[Desktop Entry]" rows = info.split("\n") for rowNo in range(len(rows)): if rows[rowNo][:5] == "Icon=": if len(rows[rowNo]) > 5: isFileExist = False if rows[rowNo][5] == "." and isFile(_path + str(rows[rowNo][6:])): isFileExist = True elif rows[rowNo][5] != "." and isFile(rows[rowNo][5:]): isFileExist = True if isFileExist: if uni.getBoolValue("isChangeExistIcon") is False: isChange = False isChanging = True rows[rowNo] = "Icon=" + _iconName returnValue = True if isChange: if isChanging is False: rows.append("Icon=" + _iconName) returnValue = True if isCorrectFileContent: rowNoStrDesktopEntry = -1 rowNoStrIcon = -1 for rowNo in range(len(rows)): if rows[rowNo].find("[Desktop Entry]") != -1: rowNoStrDesktopEntry = rowNo elif rows[rowNo].find("Icon=") != -1: rowNoStrIcon = rowNo if rowNoStrDesktopEntry != rowNoStrIcon - 1: rows[rowNoStrDesktopEntry] += "\n" + rows[rowNoStrIcon] rows[rowNoStrIcon] = "" else: if isFile(joinPath(_path, ".directory")): info = readFromFile(joinPath(_path, ".directory")) rows = info.split("\n") for rowNo in range(len(rows)): if len(rows[rowNo]) > 4: if rows[rowNo][:5] == "Icon=": rows[rowNo] = "" break info = "" for row in rows: if row.strip() != "": info += row + "\n" writeToFile(joinPath(_path, ".directory"), info) return returnValue else: return False else: return False def getIconFromDirectory(_path): iconPath, isCorrectedFileContent = None, True if isFile(joinPath(_path, ".directory")): info = readFromFile(joinPath(_path, ".directory")) if info.find("[Desktop Entry]") == -1 and len(info) > 0: isCorrectedFileContent = False if info.find("[Desktop Entry]") > info.find("Icon=") > -1: isCorrectedFileContent = False rows = info.split("\n") for rowNo in range(len(rows)): if rows[rowNo][:5] == "Icon=": if len(rows[rowNo]) > 5: if 
rows[rowNo][5] == "." and isFile(_path + str(rows[rowNo][6:])): iconPath = _path + str(rows[rowNo][6:]) elif rows[rowNo][5] != "." and isFile(rows[rowNo][5:]): iconPath = rows[rowNo][5:] elif rows[rowNo][5] == ".": iconPath = _path + str(rows[rowNo][6:]) isCorrectedFileContent = False else: iconPath = rows[rowNo][5:] isCorrectedFileContent = False return iconPath, isCorrectedFileContent def clearPackagingDirectory(_path, _isShowState=False, _isCloseState=False): _path = checkSource(_path, "directory", False) if _path is not None: if uni.getBoolValue("isClearEmptyDirectoriesWhenPath"): checkEmptyDirectories(_path, _isShowState, _isShowState, uni.getBoolValue("isAutoCleanSubFolderWhenPath")) for f in uni.getListValue("packagerUnneededFiles"): if isFile(joinPath(_path, f)): removeFile(joinPath(_path, f)) for d in uni.getListValue("packagerUnneededDirectories"): if isExist(joinPath(_path, d)): removeFileOrDir(joinPath(_path, d)) dontRemovingFilesCount = 0 filesAndDirectories = readDirectoryAll(_path) for nameNo, name in enumerate(filesAndDirectories): if _isShowState: Dialogs.showState(translate("FileUtils", "Checking Empty Directories"), nameNo, len(filesAndDirectories)) if isFile(joinPath(_path, name)): dontRemovingFilesCount += 1 isDeleted = False for ext in uni.getListValue("packagerUnneededFileExtensions"): if checkExtension(name, ext): removeFile(joinPath(_path, name)) dontRemovingFilesCount -= 1 isDeleted = True break if isDeleted is False: if name[-1:] == "~": removeFile(joinPath(_path, name)) dontRemovingFilesCount -= 1 continue if isDir(joinPath(_path, name)): dontRemovingFilesCount += 1 if clearPackagingDirectory(joinPath(_path, name)) is False: dontRemovingFilesCount -= 1 if dontRemovingFilesCount == 0 and uni.getBoolValue("isPackagerDeleteEmptyDirectories"): if _isShowState: Dialogs.showState(translate("FileUtils", "Deleting Empty Directories"), 0, 1) removeDir(_path) if _isCloseState: Dialogs.showState(translate("FileUtils", "Empty Directories Deleted"), 
1, 1) Dialogs.show(translate("FileUtils", "Project Directory Deleted"), str(translate("FileUtils", "\"%s\" deleted.Because this directory is empty.")) % Organizer.getLink(_path)) if _isCloseState: Dialogs.showState(translate("FileUtils", "Empty Directories Deleted"), 1, 1) return True return False def clearCleaningDirectory(_path, _isShowState=False, _isCloseState=False): _path = checkSource(_path, "directory", False) if _path is not None: if uni.getBoolValue("isClearEmptyDirectoriesWhenClear"): checkEmptyDirectories(_path, _isShowState, _isShowState, uni.getBoolValue("isAutoCleanSubFolderWhenClear")) for f in uni.getListValue("cleanerUnneededFiles"): if isFile(joinPath(_path, f)): removeFile(joinPath(_path, f)) for d in uni.getListValue("cleanerUnneededDirectories"): if isExist(joinPath(_path, d)): removeFileOrDir(joinPath(_path, d)) dontRemovingFilesCount = 0 filesAndDirectories = readDirectoryAll(_path) for nameNo, name in enumerate(filesAndDirectories): if _isShowState: Dialogs.showState(translate("FileUtils", "Checking Empty Directories"), nameNo, len(filesAndDirectories)) if isFile(joinPath(_path, name)): dontRemovingFilesCount += 1 for ext in uni.getListValue("cleanerUnneededFileExtensions"): try: if checkExtension(name, ext): removeFile(joinPath(_path, name)) dontRemovingFilesCount -= 1 continue except: pass try: if name[-1:] == "~": removeFile(joinPath(_path, name)) dontRemovingFilesCount -= 1 continue except: pass if isDir(joinPath(_path, name)): dontRemovingFilesCount += 1 if clearCleaningDirectory(joinPath(_path, name)) is False: dontRemovingFilesCount -= 1 if dontRemovingFilesCount == 0 and uni.getBoolValue("isCleanerDeleteEmptyDirectories"): if _isShowState: Dialogs.showState(translate("FileUtils", "Deleting Empty Directories"), 0, 1) removeDir(_path) if _isCloseState: Dialogs.showState(translate("FileUtils", "Empty Directories Deleted"), 1, 1) Dialogs.show(translate("FileUtils", "Project Directory Deleted"), str( translate("FileUtils", "\"%s\" 
deleted.Because this directory is empty.")) % Organizer.getLink( _path)) if _isCloseState: Dialogs.showState(translate("FileUtils", "Project Directory Cleaned"), 1, 1) return True return False def makePack(_filePath, _packageType, _sourcePath, _realSourceBaseName): from Core import MyThread _filePath, _sourcePath = str(_filePath), str(_sourcePath) if isDir(_filePath): Dialogs.showError(translate("FileUtils", "Current Directory Name"), str(translate("FileUtils", "\"%s\" : there already exists a folder with the same name.<br>Please choose another file name!")) % Organizer.getLink( _filePath)) return False import tarfile try: tar = tarfile.open(uni.trEncode(_filePath, fileSystemEncoding), "w:" + _packageType) except: tar = tarfile.open(_filePath, "w:" + _packageType) maxMembers = len(readDirectoryWithSubDirectoriesThread(_sourcePath, -1, "fileAndDirectory", True)) + 1 infoProcess = MyThread.MyTarPackStateThread(translate("FileUtils", "Creating Tar File"), tar, maxMembers) try: myProcs = MyThread.MyThread(tar.add, infoProcess.finish, args=[uni.trEncode(_sourcePath, fileSystemEncoding)], kwargs={"arcname": uni.trEncode(_realSourceBaseName, fileSystemEncoding)}) myProcs.start() except: myProcs = MyThread.MyThread(tar.add, infoProcess.finish, args=[_sourcePath], kwargs={"arcname": _realSourceBaseName}) myProcs.start() infoProcess.run() tar.close() Records.add("Packed", _filePath) return True def extractPack(_oldPath, _newPath): _oldPath, _newPath = str(_oldPath), str(_newPath) import tarfile try: tar = tarfile.open(uni.trEncode(_oldPath, fileSystemEncoding), "r:gz") except: tar = tarfile.open(_oldPath, "r:gz") try: tar.extractall(uni.trEncode(_newPath, fileSystemEncoding), members=tar.getmembers()) except: tar.extractall(_newPath, members=tar.getmembers()) tar.close() Records.add("Extracted", _oldPath, _newPath) def clearTempFiles(): tempDirPath = getTempDir() for fileName in readDirectoryAll(tempDirPath): if fileName[:15] == "HamsiManager": if isDir(joinPath(tempDirPath, 
fileName)): removeFileOrDir(joinPath(tempDirPath, fileName)) else: removeFileOrDir(joinPath(tempDirPath, fileName)) def getFileTree(_path, _subDirectoryDeep=-1, _outputTarget="return", _outputType="html", _contentType="fileTree", _extInfo="no"): _path = str(_path) files = readDirectoryWithSubDirectories(_path, _subDirectoryDeep, "fileAndDirectory", uni.getBoolValue("isShowHiddensInFileTree")) info = "" if _contentType == "fileTree": if _outputType == "html": if _extInfo == "no": pass elif _extInfo == "title": info += " \n <h3>%s </h3> \n" % (str(translate("FileUtils", "File Tree"))) info += " %s<br> \n" % (_path) dirNumber = _path.count(sep) findStrings, replaceStrings = [], [] for x, tFile in enumerate(files): if isDir(tFile): findStrings.append(tFile) replaceStrings.append((uni.getUtf8Data("upright") + "&nbsp;&nbsp;&nbsp;" * ( tFile.count(sep) - dirNumber)) + uni.getUtf8Data("upright+right") + "&nbsp;") findStrings.reverse() replaceStrings.reverse() fileList = list(files) for x, tFile in enumerate(files): fileList[x] = tFile for y, fstr in enumerate(findStrings): if tFile != fstr: fileList[x] = fileList[x].replace(fstr + sep, replaceStrings[y]) if x > 0: tin = fileList[x - 1].find(uni.getUtf8Data("upright+right")) tin2 = fileList[x].find(uni.getUtf8Data("upright+right")) if tin > tin2: fileList[x - 1] = fileList[x - 1].replace(uni.getUtf8Data("upright+right"), uni.getUtf8Data("up+right")) for x, fileName in enumerate(fileList): if x != len(fileList) - 1: info += fileName.replace(_path + sep, uni.getUtf8Data("upright+right") + "&nbsp;") else: info += fileName.replace(_path + sep, uni.getUtf8Data("up+right") + "&nbsp;") if uni.getBoolValue("isAppendFileSizeToFileTree") or uni.getBoolValue("isAppendLastModifiedToFileTree"): details = getDetails(files[x]) if details is not None: info += " ( " if uni.getBoolValue("isAppendFileSizeToFileTree"): info += Organizer.getCorrectedFileSize(details[stat.ST_SIZE]) if uni.getBoolValue("isAppendLastModifiedToFileTree"): info += 
", " if uni.getBoolValue("isAppendLastModifiedToFileTree"): info += str(translate("FileUtils", "Last Modified : ")) + Organizer.getCorrectedTime( details[stat.ST_MTIME]) info += " )" else: info += " ( " + str(translate("FileUtils", "inaccessible")) + " ) " info += "<br> \n" elif _outputType == "plainText": if _extInfo == "no": pass elif _extInfo == "title": info += " %s \n" % (str(translate("FileUtils", "File Tree"))) info += _path + "\n" dirNumber = _path.count(sep) findStrings, replaceStrings = [], [] for x, tFile in enumerate(files): if isDir(tFile): findStrings.append(tFile) replaceStrings.append( (uni.getUtf8Data("upright") + " " * (tFile.count(sep) - dirNumber)) + uni.getUtf8Data( "upright+right") + " ") findStrings.reverse() replaceStrings.reverse() fileList = list(files) for x, tFile in enumerate(files): fileList[x] = tFile for y, fstr in enumerate(findStrings): if tFile != fstr: fileList[x] = fileList[x].replace(fstr + sep, replaceStrings[y]) if x > 0: tin = fileList[x - 1].find(uni.getUtf8Data("upright+right")) tin2 = fileList[x].find(uni.getUtf8Data("upright+right")) if tin > tin2: fileList[x - 1] = fileList[x - 1].replace(uni.getUtf8Data("upright+right"), uni.getUtf8Data("up+right")) for x, fileName in enumerate(fileList): if x != len(fileList) - 1: info += fileName.replace(_path + sep, uni.getUtf8Data("upright+right") + " ") else: info += fileName.replace(_path + sep, uni.getUtf8Data("up+right") + " ") if uni.getBoolValue("isAppendFileSizeToFileTree") or uni.getBoolValue("isAppendLastModifiedToFileTree"): details = getDetails(files[x]) if details is not None: info += " ( " if uni.getBoolValue("isAppendFileSizeToFileTree"): info += Organizer.getCorrectedFileSize(details[stat.ST_SIZE]) if uni.getBoolValue("isAppendLastModifiedToFileTree"): info += ", " if uni.getBoolValue("isAppendLastModifiedToFileTree"): info += str(translate("FileUtils", "Last Modified : ")) + Organizer.getCorrectedTime( details[stat.ST_MTIME]) info += " )" else: info += " ( " + 
str(translate("FileUtils", "inaccessible")) + " ) " info += "\n" elif _contentType == "fileList": if _outputType == "html": if _extInfo == "no": pass elif _extInfo == "title": info += " \n <h3>%s </h3> \n" % (str(translate("FileUtils", "File List"))) info += " %s<br> \n" % (_path) for x, fileName in enumerate(files): info += fileName if uni.getBoolValue("isAppendFileSizeToFileTree") or uni.getBoolValue("isAppendLastModifiedToFileTree"): details = getDetails(files[x]) if details is not None: info += " ( " if uni.getBoolValue("isAppendFileSizeToFileTree"): info += Organizer.getCorrectedFileSize(details[stat.ST_SIZE]) if uni.getBoolValue("isAppendLastModifiedToFileTree"): info += ", " if uni.getBoolValue("isAppendLastModifiedToFileTree"): info += str(translate("FileUtils", "Last Modified : ")) + Organizer.getCorrectedTime( details[stat.ST_MTIME]) info += " )" else: info += " ( " + str(translate("FileUtils", "inaccessible")) + " ) " info += "<br> \n" elif _outputType == "plainText": if _extInfo == "no": pass elif _extInfo == "title": info += " %s \n" % (str(translate("FileUtils", "File Tree"))) info += _path + "\n" for x, fileName in enumerate(files): info += fileName if uni.getBoolValue("isAppendFileSizeToFileTree") or uni.getBoolValue("isAppendLastModifiedToFileTree"): details = getDetails(files[x]) if details is not None: info += " ( " if uni.getBoolValue("isAppendFileSizeToFileTree"): info += Organizer.getCorrectedFileSize(details[stat.ST_SIZE]) if uni.getBoolValue("isAppendLastModifiedToFileTree"): info += ", " if uni.getBoolValue("isAppendLastModifiedToFileTree"): info += str(translate("FileUtils", "Last Modified : ")) + Organizer.getCorrectedTime( details[stat.ST_MTIME]) info += " )" else: info += " ( " + str(translate("FileUtils", "inaccessible")) + " ) " info += "\n" info = str(info) if _outputTarget == "return": return info elif _outputTarget == "file": fileExt = None formatTypeName = None if _outputType == "html": if _extInfo != "no": strHeader = ("<!DOCTYPE 
html PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\" \n" + "\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\"> \n" + "<html xmlns=\"http://www.w3.org/1999/xhtml\" xml:lang=\"tr\" lang=\"tr\" dir=\"ltr\"> \n" + "<head> \n<meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\" /> \n</head> \n<body> \n") strFooter = " \n</body> \n</html>" info = strHeader + info + strFooter formatTypeName = translate("FileUtils", "HTML") fileExt = "html" elif _outputType == "plainText": formatTypeName = translate("FileUtils", "Plain Text") fileExt = "txt" filePath = Dialogs.getSaveFileName(translate("FileUtils", "Save As"), joinPath(userDirectoryPath, getBaseName(_path) + "." + fileExt), formatTypeName + " (*." + fileExt + ")", 2) if filePath is not None: if _outputType == "html" and filePath[-5:] != ".html": filePath += ".html" elif _outputType == "plainText" and filePath[-4:] != ".txt": filePath += ".txt" writeToFile(filePath, info) Dialogs.show(translate("FileUtils", "File Tree Created"), str(translate("FileUtils", "File tree created in file: \"%s\".")) % Organizer.getLink( filePath)) elif _outputTarget == "dialog": dDialog = MDialog(getMainWindow()) if isActivePyKDE4: dDialog.setButtons(MDialog.NoDefault) dDialog.setWindowTitle(translate("FileUtils", "File Tree")) mainPanel = MWidget(dDialog) vblMain = MVBoxLayout(mainPanel) if _outputType == "html": QtWebKit = getMyObject("QtWebKit") wvWeb = QtWebKit.QWebView() wvWeb.setHtml(str(info)) vblMain.addWidget(wvWeb) elif _outputType == "plainText": teContent = MTextEdit() teContent.setPlainText(str(info)) vblMain.addWidget(teContent) pbtnClose = MPushButton(translate("FileUtils", "OK")) MObject.connect(pbtnClose, SIGNAL("clicked()"), dDialog.close) vblMain.addWidget(pbtnClose) if isActivePyKDE4: dDialog.setMainWidget(mainPanel) else: dDialog.setLayout(vblMain) dDialog.setMinimumWidth(600) dDialog.setMinimumHeight(400) dDialog.show() elif _outputTarget == "clipboard": MApplication.clipboard().setText(str(info)) def 
fixToSize(_path, _size, _clearFrom="head"):
    # (the `def` keyword for this function sits at the end of the previous
    # physical line in this mangled source)
    """Truncate the file at _path until it is no larger than _size bytes.

    _clearFrom == "head" drops bytes from the start of the file, anything
    else drops from the end. Each pass tries to cut 500 chars, falling back
    to 200 and then 20 if the read/slice fails (bare except keeps the
    original best-effort behavior).
    """
    if isFile(_path):
        while getSize(_path) > _size:
            if _clearFrom == "head":
                try:
                    contents = readFromFile(_path)[500:]
                except:
                    try:
                        contents = readFromFile(_path)[200:]
                    except:
                        contents = readFromFile(_path)[20:]
            else:
                try:
                    contents = readFromFile(_path)[:-500]
                except:
                    try:
                        contents = readFromFile(_path)[:-200]
                    except:
                        contents = readFromFile(_path)[:-20]
            # rewrite the shrunk contents and re-check the size on the next pass
            writeToFile(_path, contents)


def getHashDigest(_filePath, _hashType="MD5"):
    """Return the hex digest of the file's bytes using the named hash.

    Supported _hashType values: MD5, SHA1, SHA224, SHA256, SHA384, SHA512.
    NOTE(review): an unrecognized _hashType leaves m as None and this will
    raise AttributeError on m.update() — confirm callers only pass the
    values above.
    """
    import hashlib
    m = None
    if _hashType == "MD5":
        m = hashlib.md5()
    elif _hashType == "SHA1":
        m = hashlib.sha1()
    elif _hashType == "SHA224":
        m = hashlib.sha224()
    elif _hashType == "SHA256":
        m = hashlib.sha256()
    elif _hashType == "SHA384":
        m = hashlib.sha384()
    elif _hashType == "SHA512":
        m = hashlib.sha512()
    # readFromBinaryFile is a project helper; whole file is hashed in one call
    m.update(readFromBinaryFile(_filePath))
    return m.hexdigest()


def createHashDigestFile(_filePath, _digestFilePath=None, _hashType="MD5", _isAddFileExtension=True, _digestContent=None):
    """Write the hash digest of _filePath to a sidecar file.

    If _digestContent is not supplied it is computed via getHashDigest().
    If _digestFilePath is None the digest file path is derived from
    _filePath itself. Returns True.

    NOTE(review): when _isAddFileExtension is true the extension is appended
    without a leading "." (e.g. "file.txtmd5") — looks like a missing dot;
    confirm against callers before changing.
    """
    if _digestContent is None:
        _digestContent = getHashDigest(_filePath, _hashType)
    fileExtension = ""
    if _isAddFileExtension:
        fileExtension = _hashType.lower()
    if _digestFilePath is None:
        _digestFilePath = _filePath
    writeToFile(_digestFilePath + fileExtension, _digestContent)
    return True


def checkSizeOfDeletedFiles():
    """Warn the user when the "deleted files" trash directory grows too big.

    Reads the directory path and the size limit (MiB) from uni.MySettings,
    and if the directory exceeds the limit shows a three-button dialog:
    open it in the file manager, close, or remove everything. Removal
    temporarily flips the "isDontDeleteFileAndDirectory" setting to "false"
    so removeDir() really deletes instead of re-trashing.
    """
    pathOfDeletedFilesAndDirectories = uni.MySettings["pathOfDeletedFilesAndDirectories"]
    pathOfDeletedFilesAndDirectories = checkSource(pathOfDeletedFilesAndDirectories, "directory", False)
    if pathOfDeletedFilesAndDirectories is not None:
        deletedDirectorySize = getDirectorySize(pathOfDeletedFilesAndDirectories)
        # maxDeletedDirectorySize is stored in MiB; compare in bytes
        if deletedDirectorySize > (int(uni.MySettings["maxDeletedDirectorySize"]) * 1024 * 1024):
            answer = Dialogs.askSpecial(
                translate("FileUtils", "Size Of Directory Of Deleted Is Over"),
                str(translate("FileUtils",
                              "Size of directory of deleted is over. You can check and remove them. <br> Directory Of Deleted : \"%s\" ( %s )")) % (
                    Organizer.getLink(pathOfDeletedFilesAndDirectories),
                    Organizer.getCorrectedFileSize(deletedDirectorySize)),
                translate("FileUtils", "Open With Default File Manager"),
                translate("FileUtils", "Close"),
                translate("FileUtils", "Remove All Files"))
            if answer == translate("FileUtils", "Open With Default File Manager"):
                from Core import Execute
                Execute.openWith([getRealDirName(pathOfDeletedFilesAndDirectories)])
            if answer == translate("FileUtils", "Remove All Files"):
                # disable the trash-instead-of-delete guard so removal is real
                uni.MySettings["isDontDeleteFileAndDirectory"] = "false"
                removeDir(pathOfDeletedFilesAndDirectories)
                uni.MySettings["isDontDeleteFileAndDirectory"] = "true"
                Dialogs.show(translate("FileUtils", "Directory Of Deleted Has Been Removed"),
                             translate("FileUtils", "Directory of deleted has been removed successfully."))
codeparrot/github-code-clean
# -*- coding: utf-8 -*-
################################
######## Red - Discord bot #####
################################
#         made by Twentysix
#
import discord
import logging
import time
import datetime
import requests
import aiohttp
import traceback
import re
import youtube_dl
import os
import asyncio
import glob
from os import path
from random import choice, randint, shuffle
import dataIO  # IO settings, proverbs, etc
import economy  # Credits
import youtubeparser
from sys import modules

#settings = {"PREFIX" : "!"} #prevents boot error


def loadHelp():
    """(Re)build the global command-help strings.

    Must be called after `settings` is loaded, because each help text is
    rendered with the configured command prefix via .format().
    NOTE(review): `help` shadows the builtin of the same name — kept as-is
    because the rest of the file references this global.
    The string literals below were recovered from a whitespace-mangled dump:
    their original internal line breaks are preserved as single spaces —
    do not reflow them without checking the upstream file.
    """
    global help, audio_help, meme_help, admin_help, trivia_help
    # General command list, DM'd to users on {prefix}help.
    help = """**Commands list:** {0}flip - Flip a coin {0}rps [rock or paper o scissors] - Play rock paper scissors {0}proverb {0}choose option1 or option2 or option3 (...) - Random choice {0}8 [question] - Ask 8 ball {0}sw - Start/stop the stopwatch {0}avatar [name or mention] - Shows user's avatar {0}trivia start - Start a trivia session {0}trivia stop - Stop a trivia session {0}twitch [stream] - Check if stream is online {0}twitchalert [stream] - Whenever the stream is online the bot will send an alert in the channel (admin only) {0}stoptwitchalert [stream] - Stop sending alerts about the specified stream in the channel (admin only) {0}roll [number] - Random number between 0 and [number] {0}gif [text] - GIF search {0}imdb - Retrieves a movie's information from IMDB using its title {0}urban [text] - Search definitions in the urban dictionary {0}meme [ID;Text1;Text2] - Create a meme {0}imdb [search terms] - Search on IMDB {0}customcommands - Custom commands' list {0}addcom [command] [text] - Add a custom command {0}editcom [command] [text] - Edit a custom command {0}delcom [command] - Delete a custom command {0}meme help - Memes help {0}audio help - Audio related commands {0}economy - Economy explanation, if available {0}trivia - Trivia commands and lists """.format(settings["PREFIX"])
    # Audio/music subsystem help ({prefix}audio help).
    audio_help = """ **General audio help commands:** {0}next or {0}skip - Next song {0}prev - Previous song {0}pause - Pause song {0}resume - Resume song {0}repeat or {0}replay - Replay current song {0}title or {0}song - Current song's title + link {0}youtube [link] - Play a youtube video in a voice channel {0}sing - Make Red sing {0}stop - Stop any voice channel activity {0}volume [0-1] - Sets the volume {0}downloadmode - Disables/enables download mode (admin only) **Playlist commands:** {0}play [playlist_name] - Play chosen playlist {0}playlists - Playlists' list {0}shuffle - Mix music list {0}addplaylist [name] [link] - Add a youtube playlist. Link format example: https://www.youtube.com/playlist?list=PLe8jmEHFkvsaDOOWcREvkgFoj6MD0pXXX {0}delplaylist [name] - Delete a youtube playlist. Limited to author and admins. {0}getplaylist - Receive the current playlist through DM. This also works with favorites. **Local commands:** {0}local [playlist_name] - Play chosen local playlist {0}locallist or {0}local or {0}locals - Local playlists' list **Favorites:** {0}addfavorite - Add song to your favorites {0}delfavorite - Remove song from your favorites {0}playfavorites - Play your favorites **You can submit your own playlist by doing the following:** 1) Make a txt file. Name must be only letters, numbers and underscores. It will be your playlist's name, so choose wisely. 2) One youtube link each line. 3) Send me the txt. If any line is incorrect I will reject it. 4) Listen to it with {0}play [playlist_name]! """.format(settings["PREFIX"])
    # imgflip template-ID cheat sheet ({prefix}meme help).
    meme_help = """ Usage example: One-Does-Not-Simply Template ID: 61579 {0}meme 61579;Test;Test Memes list: ID Name 61579 One Does Not Simply 438680 Batman Slapping Robin 61532 The Most Interesting Man In The World 101470 Ancient Aliens 61520 Futurama Fry 347390 X, X Everywhere 5496396 Leonardo Dicaprio Cheers 61539 First World Problems 61546 Brace Yourselves X is Coming 16464531 But Thats None Of My Business 61582 Creepy Condescending Wonka 61585 Bad Luck Brian 563423 That Would Be Great 61544 Success Kid 405658 Grumpy Cat 101288 Third World Skeptical Kid 8072285 Doge 100947 Matrix Morpheus 1509839 Captain Picard Facepalm 61533 X All The Y 1035805 Boardroom Meeting Suggestion 245898 Picard Wtf 21735 The Rock Driving 259680 Am I The Only One Around Here 14230520 Black Girl Wat 40945639 Dr Evil Laser 235589 Evil Toddler 61580 Too Damn High 61516 Philosoraptor 6235864 Finding Neverland 9440985 Face You Make Robert Downey Jr 101287 Third World Success Kid 100955 Confession Bear 444501 The lie detector determined that was a lie. The fact that you X determined that was a lie. Maury Povich. 97984 Disaster Girl 442575 Aint Nobody Got Time For That 109765 Ill Just Wait Here 124212 Say That Again I Dare You 28251713 Oprah You Get A 61556 Grandma Finds The Internet 101440 10 Guy 101711 Skeptical Baby 101716 Yo Dawg Heard You 101511 Dont You Squidward For more memes: `https://imgflip.com/memetemplates` Choose a meme, click on "Blank Template" then add the ID """.format(settings["PREFIX"])
    # Admin-only command list ({prefix}admin help).
    admin_help = """ **Admin commands:** {0}addwords [word1 word2 (...)] [phrase/with/many/words] - Add words to message filter {0}removewords [word1 word2 (...)] [phrase/with/many/words] - Remove words from message filter {0}addregex [regex] - Add regular expression to message filter {0}removeregex [regex] - Remove regular expression from message filter {0}shutdown - Shutdown the bot {0}join [invite] - Join another server {0}leaveserver - Leave server {0}shush - Ignore the current channel {0}talk - Stop ignoring the current channel {0}reload - Reload most files. Useful in case of manual edits {0}name [name] - Change the bot's name {0}cleanup [number] - Delete the last [number] messages {0}cleanup [name/mention] [number] - Delete the last [number] of messages by [name] {0}blacklist [name/mention] - Add user to Red's blacklist {0}forgive [name/mention] - Removes user from Red's blacklist {0}setting [setting] [value] - Modify setting """.format(settings["PREFIX"])
    # Trivia command summary ({prefix}trivia).
    trivia_help = """ **Trivia commands:** {0}trivia - Trivia questions lists and help {0}trivia [name] - Starts trivia session with specified list {0}trivia random - Starts trivia session with random list {0}trivia stop - Stop trivia session """.format(settings["PREFIX"])


# Options handed to youtube_dl for both download and stream modes.
# NOTE(review): 'outtmpl' appears twice — in a dict literal the later value
# ("cache/%(id)s") silently wins; the first entry is dead and looks like a
# leftover from an earlier version.
youtube_dl_options = {
    'format': 'bestaudio/best',
    'extractaudio': True,
    'audioformat': "mp3",
    'outtmpl': '%(id)s',
    'noplaylist': True,
    'nocheckcertificate': True,
    'ignoreerrors': True,
    'quiet': True,
    'no_warnings': True,
    'outtmpl': "cache/%(id)s"}

client = discord.Client()

# Voice support needs libopus; this path is Windows-specific.
if not discord.opus.is_loaded():
    discord.opus.load_opus('libopus-0.dll')
@client.async_event
async def on_message(message):
    """Main dispatcher: routes every incoming message to a command handler.

    Flow: refresh the rotating "playing" status, drop messages from
    blacklisted users, accept playlist uploads via DM attachments, run the
    word/regex filter on server messages, then walk a long prefix-match
    elif chain (general commands, music, trivia, admin). Finally, feed the
    message to any ongoing trivia/poll session and to custom commands.

    NOTE(review): indentation of this block was reconstructed from a
    whitespace-mangled dump — the nesting below follows the chain's
    structure, but verify against the upstream file before relying on it.
    """
    global trivia_sessions
    p = settings["PREFIX"]
    await gameSwitcher.changeGame()
    if message.author.id in blacklisted_users and not isMemberAdmin(message):
        return False
    # DM'd attachments are treated as user-submitted playlist files
    if message.channel.is_private and message.attachments != []:
        await transferPlaylist(message)
    if not message.channel.is_private and message.author.id != client.user.id:
        if settings["FILTER"] and not isMemberAdmin(message):
            if await checkFilter(message) or await checkRegex(message):
                return False #exits without checking for commands
        # {prefix}talk is the only command honored in a shushed channel
        if message.channel.id in shush_list and message.content == p + "talk":
            await talk(message)
        if message.channel.id not in shush_list:
            # --- greetings when the bot is addressed by name/mention ---
            if message.content == client.user.name.upper() or message.content == client.user.name.upper() + "?":
                await client.send_message(message.channel, "`" + choice(greetings_caps) + "`")
            elif message.content.lower() == client.user.name.lower() + "?":
                await client.send_message(message.channel, "`" + choice(greetings) + "`")
            elif message.content == client.user.mention + " ?" or message.content == client.user.mention + "?":
                await client.send_message(message.channel, "`" + choice(greetings) + "`")
            # --- general commands ---
            elif message.content == p + "flip":
                await client.send_message(message.channel, "*flips a coin and... " + choice(["HEADS!*", "TAILS!*"]))
            elif message.content.startswith(p + "rps"):
                await rpsgame(message)
            elif message.content == p + "proverb":
                await client.send_message(message.channel, "`" + choice(proverbs) + "`")
            elif message.content == p + "help":
                await client.send_message(message.author, help)
                await client.send_message(message.channel, "{} `Check your DMs for the command list.`".format(message.author.mention))
            elif message.content.startswith(p + 'choose'):
                await randomchoice(message)
            elif message.content.startswith(p + '8 ') and message.content.endswith("?") and len(message.content) > 5:
                await client.send_message(message.channel, "{}: ".format(message.author.mention) + "`" + choice(ball) + "`")
            elif message.content.startswith(p + 'roll'):
                await roll(message)
            elif message.content.startswith(p + 'addcom'):
                await addcom(message)
            elif message.content.startswith(p + 'editcom'):
                await editcom(message)
            elif message.content.startswith(p + 'delcom'):
                await delcom(message)
            elif message.content == p + "customcommands":
                await listCustomCommands(message)
            elif message.content.startswith(p + 'sw'):
                await stopwatch(message)
            elif message.content.startswith(p + 'id'):
                await client.send_message(message.channel, "{} `Your id is {}`".format(message.author.mention, message.author.id))
            # twitchalert/stoptwitchalert must be tested before the plain
            # 'twitch' prefix, which would otherwise shadow them
            elif message.content.startswith(p + 'twitchalert'):
                await addTwitchAlert(message)
            elif message.content.startswith(p + 'stoptwitchalert'):
                await removeTwitchAlert(message)
            elif message.content.startswith(p + 'twitch'):
                await twitchCheck(message)
            elif message.content.startswith(p + 'image'):
                #image(message)
                pass
            elif message.content.startswith(p + 'gif'):
                await gif(message)
            elif message.content.startswith(p + 'imdb'):
                await imdb(message)
            elif message.content.startswith(p + 'urban'):
                await urban(message)
            elif message.content.startswith(p + 'uptime'):
                await uptime(message)
            elif message.content.startswith(p + 'avatar'):
                await avatar(message)
            elif message.content == p + 'meme help' or message.content == p + 'memes':
                await client.send_message(message.author, meme_help)
                # NOTE(review): .format() binds only to the last literal here,
                # so the leading "{}" is never substituted — looks like a bug
                # (should wrap the whole concatenation); kept as-is.
                await client.send_message(message.channel, "{} `Check your DMs for " + p +"meme help.`".format(message.author.mention))
            elif message.content.startswith (p + 'meme'):
                await memes(message)
            elif message.content.startswith (p + 'lmgtfy'):
                await lmgtfy(message)
            ################## music #######################
            elif message.content == p + "sing":
                await playPlaylist(message, sing=True)
            elif message.content.startswith(p + 'playyoutube'):
                await playVideo(message)
            elif message.content.startswith(p + 'play '):
                await playPlaylist(message)
            elif message.content.startswith(p + 'local '):
                await playLocal(message)
            elif message.content == p + "local" or message.content == p + "locallist" or message.content == p + "locals":
                await listLocal(message)
                await client.send_message(message.channel, "{} `Check your DMs for the local playlists list.`".format(message.author.mention))
            elif message.content == p + "stop":
                await leaveVoice()
            elif message.content == p + "playlist" or message.content == p + "playlists":
                await listPlaylists(message)
                await client.send_message(message.channel, "{} `Check your DMs for the playlists list.`".format(message.author.mention))
            # playback controls are no-ops when nothing is playing
            elif message.content == p + "skip" or message.content == p + "next":
                if currentPlaylist:
                    currentPlaylist.nextSong(currentPlaylist.getNextSong())
            elif message.content == p + "prev" or message.content == p + "previous":
                if currentPlaylist:
                    currentPlaylist.nextSong(currentPlaylist.getPreviousSong())
            elif message.content == p + "repeat" or message.content == p + "replay":
                if currentPlaylist:
                    currentPlaylist.nextSong(currentPlaylist.current)
            elif message.content == p + "pause":
                if currentPlaylist:
                    currentPlaylist.pause()
            elif message.content == p + "resume":
                if currentPlaylist:
                    currentPlaylist.resume()
            elif message.content == p + "shuffle":
                if currentPlaylist:
                    currentPlaylist.shuffle()
            elif message.content == p + "song" or message.content == p + "title" :
                if currentPlaylist:
                    await getSongTitle(message)
            elif message.content == p + "audio help":
                await client.send_message(message.author, audio_help)
                await client.send_message(message.channel, "{} `Check your DMs for the audio help.`".format(message.author.mention))
            elif message.content.startswith(p + "addplaylist"):
                await addPlaylist(message)
            elif message.content.startswith(p + "delplaylist"):
                await delPlaylist(message)
            elif message.content == p + "addfavorite":
                await addToFavorites(message)
            elif message.content == p + "delfavorite":
                await removeFromFavorites(message)
            elif message.content == p + "playfavorites":
                await playFavorites(message)
            elif message.content == p + "getplaylist":
                await sendPlaylist(message)
            elif message.content.startswith(p + "volume"):
                await setVolume(message)
            elif message.content == p + "downloadmode":
                await downloadMode(message)
            elif message.content == p + "endpoll":
                await endPoll(message)
            elif message.content.startswith(p + "poll"):
                await startPoll(message)
            ################################################
            elif message.content == p + "trivia":
                await triviaList(message)
            elif message.content.startswith(p + "trivia"):
                if checkAuth("Trivia", message, settings):
                    if message.content == p + "trivia stop":
                        if getTriviabyChannel(message.channel):
                            await getTriviabyChannel(message.channel).endGame()
                            await client.send_message(message.channel, "`Trivia stopped.`")
                        else:
                            await client.send_message(message.channel, "`There's no trivia session ongoing in this channel.`")
                    elif not getTriviabyChannel(message.channel):
                        t = Trivia(message)
                        trivia_sessions.append(t)
                        await t.loadQuestions(message.content)
                    else:
                        await client.send_message(message.channel, "`A trivia session is already ongoing in this channel.`")
                else:
                    await client.send_message(message.channel, "`Trivia is currently admin-only.`")
            ######## Admin commands #######################
            elif message.content.startswith(p + 'addwords'):
                await addBadWords(message)
            elif message.content.startswith(p + 'removewords'):
                await removeBadWords(message)
            elif message.content.startswith(p + 'addregex ') and len(message.content) > 11:
                await addRegex(message)
            elif message.content.startswith(p + 'removeregex ') and len(message.content) > 14:
                await removeRegex(message)
            elif message.content == p + "shutdown":
                await shutdown(message)
            elif message.content.startswith(p + 'join'):
                await join(message)
            elif message.content == p + "leaveserver":
                await leave(message)
            elif message.content == p + "shush":
                await shush(message)
            elif message.content == p + "talk":
                #prevents !talk custom command
                pass
            elif message.content == p + "reload":
                await reloadSettings(message)
            elif message.content.startswith(p + "name"):
                await changeName(message)
            elif message.content.startswith(p + "cleanup"):
                await cleanup(message)
            elif message.content == p + "admin help":
                if isMemberAdmin(message):
                    await client.send_message(message.author, admin_help)
                else:
                    await client.send_message(message.channel, "`Admin status required.`")
            elif message.content.startswith(p + "debug"):
                await debug(message)
            elif message.content.startswith(p + "exec"):
                await execFunc(message)
            elif message.content.startswith(p + "blacklist"):
                await blacklist(message, "add")
            elif message.content.startswith(p + "forgive"):
                await blacklist(message, "remove")
            elif message.content.startswith(p + "setting"):
                await modifySettings(message)
            ###################################
            elif getTriviabyChannel(message.channel): #check if trivia is ongoing in the channel
                trvsession = getTriviabyChannel(message.channel)
                await trvsession.checkAnswer(message)
            elif "economy" in modules:
                await economy.checkCommands(message)
            # polls and custom commands run regardless of the chain above
            if getPollByChannel(message):
                getPollByChannel(message).checkAnswer(message)
            if message.content.startswith(p) and len(message.content) > 2 and settings["CUSTOMCOMMANDS"]:
                await customCommand(message)


@client.async_event
async def on_ready():
    """Log the bot's id and set the initial rotating "playing" status."""
    logger.info("I'm online " + "(" + client.user.id + ")")
    await gameSwitcher.changeGame(now=True)
#	cns = threading.Thread(target=console, args=[])
#	cns.start()
# console, WIP @client.async_event def on_message_delete(message): # WIP. Need to check for permissions #await client.send_message(message.channel, "{} `I have deleted your message.`".format(message.author.mention)) pass @client.async_event async def on_message_edit(before, message): if message.author.id != client.user.id and settings["FILTER"] and not isMemberAdmin(message) and not message.channel.is_private: await checkFilter(message) await checkRegex(message) def loggerSetup(): #api wrapper logger = logging.getLogger('discord') logger.setLevel(logging.WARNING) handler = logging.FileHandler(filename='wrapper.log', encoding='utf-8', mode='a') handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s', datefmt="[%d/%m/%Y %H:%M]")) logger.addHandler(handler) #Red logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) handler = logging.StreamHandler() handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s', datefmt="[%d/%m/%Y %H:%M]")) logger.addHandler(handler) file_handler = logging.FileHandler(filename="red.log", mode='a') file_formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s', datefmt="[%d/%m/%Y %H:%M]") file_handler.setFormatter(file_formatter) logger.addHandler(file_handler) return logger class Trivia(): def __init__(self, message): self.gaveAnswer = ["I know this one! {}!", "Easy: {}.", "Oh really? 
It's {} of course."] self.currentQ = None # {"QUESTION" : "String", "ANSWERS" : []} self.questionList = "" self.channel = message.channel logger.info("Trivia started in channel " + self.channel.id) self.scoreList = {} self.status = None self.timer = None self.count = 0 async def loadQuestions(self, msg): msg = msg.split(" ") if len(msg) == 2: _, qlist = msg if qlist == "random": chosenList = choice(glob.glob("trivia/*.txt")) self.questionList = self.loadList(chosenList) self.status = "new question" self.timeout = time.perf_counter() if self.questionList: await self.newQuestion() else: if os.path.isfile("trivia/" + qlist + ".txt"): self.questionList = self.loadList("trivia/" + qlist + ".txt") self.status = "new question" self.timeout = time.perf_counter() if self.questionList: await self.newQuestion() else: await client.send_message(self.channel, "`There is no list with that name.`") await self.stopTrivia() else: await client.send_message(self.channel, "`" + settings["PREFIX"] + "trivia [list name]`") async def stopTrivia(self): global trivia_sessions self.status = "stop" trivia_sessions.remove(self) logger.info("Trivia stopped in channel " + self.channel.id) async def endGame(self): global trivia_sessions self.status = "stop" if self.scoreList: await self.sendTable() trivia_sessions.remove(self) logger.info("Trivia stopped in channel " + self.channel.id) def loadList(self, qlist): with open(qlist, "r", encoding="utf-8") as f: qlist = f.readlines() parsedList = [] for line in qlist: if "`" in line and len(line) > 4: line = line.replace("\n", "") line = line.split("`") question = line[0] answers = [] for l in line[1:]: answers.append(l.lower()) if len(line) >= 2: line = {"QUESTION" : question, "ANSWERS": answers} #string, list parsedList.append(line) if parsedList != []: return parsedList else: self.stopTrivia() return None async def newQuestion(self): for score in self.scoreList.values(): if score == settings["TRIVIA_MAX_SCORE"]: await self.endGame() return True if 
self.questionList == []: await self.endGame() return True self.currentQ = choice(self.questionList) self.questionList.remove(self.currentQ) self.status = "waiting for answer" self.count += 1 self.timer = int(time.perf_counter()) await client.send_message(self.channel, "**Question number {}!**\n\n{}".format(str(self.count), self.currentQ["QUESTION"])) while self.status != "correct answer" and abs(self.timer - int(time.perf_counter())) <= settings["TRIVIA_DELAY"]: if abs(self.timeout - int(time.perf_counter())) >= settings["TRIVIA_TIMEOUT"]: await client.send_message(self.channel, "Guys...? Well, I guess I'll stop then.") await self.stopTrivia() return True await asyncio.sleep(1) #Waiting for an answer or for the time limit if self.status == "correct answer": self.status = "new question" await asyncio.sleep(3) if not self.status == "stop": await self.newQuestion() elif self.status == "stop": return True else: msg = choice(self.gaveAnswer).format(self.currentQ["ANSWERS"][0]) if settings["TRIVIA_BOT_PLAYS"]: msg += " **+1** for me!" self.addPoint(client.user.name) self.currentQ["ANSWERS"] = [] await client.send_message(self.channel, msg) await client.send_typing(self.channel) await asyncio.sleep(3) if not self.status == "stop": await self.newQuestion() async def sendTable(self): self.scoreList = sorted(self.scoreList.items(), reverse=True, key=lambda x: x[1]) # orders score from lower to higher t = "```Scores: \n\n" for score in self.scoreList: t += score[0] # name t += "\t" t += str(score[1]) # score t += "\n" t += "```" await client.send_message(self.channel, t) async def checkAnswer(self, message): self.timeout = time.perf_counter() for answer in self.currentQ["ANSWERS"]: if answer in message.content.lower(): self.currentQ["ANSWERS"] = [] self.status = "correct answer" self.addPoint(message.author.name) await client.send_message(self.channel, "You got it {}! 
**+1** to you!".format(message.author.name)) await client.send_typing(self.channel) return True def addPoint(self, user): if user in self.scoreList: self.scoreList[user] += 1 else: self.scoreList[user] = 1 def getTriviaQuestion(self): q = choice(list(trivia_questions.keys())) return q, trivia_questions[q] # question, answer class botPlays(): def __init__(self): self.games = dataIO.fileIO("json/games.json", "load") self.lastChanged = int(time.perf_counter()) self.delay = 300 async def changeGame(self, now=False): if abs(self.lastChanged - int(time.perf_counter())) >= self.delay or now: self.lastChanged = int(time.perf_counter()) await client.change_status(discord.Game(name=choice(self.games))) class Playlist(): def __init__(self, filename=None): #a playlist with a single song is just there to make !addfavorite work with !youtube command self.filename = filename self.current = 0 self.stop = False self.lastAction = 999 self.currentTitle = "" self.type = filename["type"] if filename["type"] == "playlist": self.playlist = dataIO.fileIO("playlists/" + filename["filename"] + ".txt", "load")["playlist"] elif filename["type"] == "favorites": self.playlist = dataIO.fileIO("favorites/" + filename["filename"] + ".txt", "load") elif filename["type"] == "local": self.playlist = filename["filename"] elif filename["type"] == "singleSong": self.playlist = [filename["filename"]] self.playSingleSong(self.playlist[0]) else: raise("Invalid playlist call.") if filename["type"] != "singleSong": self.nextSong(0) def nextSong(self, nextTrack, lastError=False): global musicPlayer if not self.passedTime() < 1 and not self.stop: #direct control if musicPlayer: musicPlayer.stop() self.lastAction = int(time.perf_counter()) try: if isPlaylistValid([self.playlist[nextTrack]]): #Checks if it's a valid youtube link if settings["DOWNLOADMODE"]: path = self.getVideo(self.playlist[nextTrack]) try: logger.info("Starting track...") musicPlayer = client.voice.create_ffmpeg_player("cache/" + path, 
options='''-filter:a "volume={}"'''.format(settings["VOLUME"])) musicPlayer.start() except: logger.warning("Something went wrong with track " + self.playlist[self.current]) if not lastError: #prevents error loop self.lastAction = 999 self.nextSong(self.getNextSong(), lastError=True) else: #Stream mode. Buggy. musicPlayer = client.voice.create_ytdl_player(self.playlist[nextTrack], options=youtube_dl_options) musicPlayer.start() else: # must be a local playlist then musicPlayer = client.voice.create_ffmpeg_player(self.playlist[nextTrack], options='''-filter:a "volume={}"'''.format(settings["VOLUME"])) musicPlayer.start() except Exception as e: logger.warning("Something went wrong with track " + self.playlist[self.current]) if not lastError: #prevents error loop self.lastAction = 999 self.nextSong(self.getNextSong(), lastError=True) def getVideo(self, url): try: yt = youtube_dl.YoutubeDL(youtube_dl_options) v = yt.extract_info(url, download=False) if not os.path.isfile("cache/" + v["id"]): logger.info("Track not in cache, downloading...") v = yt.extract_info(url, download=True) self.currentTitle = v["title"] return v["id"] except Exception as e: logger.error(e) return False def playSingleSong(self, url): global musicPlayer if settings["DOWNLOADMODE"]: v = self.getVideo(url) if musicPlayer: if musicPlayer.is_playing(): musicPlayer.stop() if v: musicPlayer = client.voice.create_ffmpeg_player("cache/" + v, options='''-filter:a "volume={}"'''.format(settings["VOLUME"])) musicPlayer.start() else: if musicPlayer: if musicPlayer.is_playing(): musicPlayer.stop() musicPlayer = client.voice.create_ytdl_player(self.playlist[0], options=youtube_dl_options) musicPlayer.start() async def songSwitcher(self): while not self.stop: if musicPlayer.is_done() and not self.stop: self.nextSong(self.getNextSong()) await asyncio.sleep(0.5) def passedTime(self): return abs(self.lastAction - int(time.perf_counter())) def getPreviousSong(self): try: song = self.playlist[self.current-1] 
self.current -= 1 return self.current except: #if the current song was the first song, returns the last in the playlist song = self.playlist[len(self.current)-1] self.current -= 1 return self.current def getNextSong(self): try: song = self.playlist[self.current+1] self.current += 1 return self.current except: #if the current song was the last song, returns the first in the playlist song = self.playlist[0] self.current = 0 return self.current def pause(self): if musicPlayer.is_playing() and not self.stop: musicPlayer.pause() def resume(self): if not self.stop: musicPlayer.resume() def shuffle(self): if not self.stop: shuffle(self.playlist) class Poll(): def __init__(self, message): self.channel = message.channel self.author = message.author.id msg = message.content[6:] msg = msg.split(";") if len(msg) < 2: # Needs at least one question and 2 choices self.valid = False return None else: self.valid = True self.already_voted = [] self.question = msg[0] msg.remove(self.question) self.answers = {} i = 1 for answer in msg: # {id : {answer, votes}} self.answers[i] = {"ANSWER" : answer, "VOTES" : 0} i += 1 async def start(self): msg = "**POLL STARTED!**\n\n{}\n\n".format(self.question) for id, data in self.answers.items(): msg += "{}. *{}*\n".format(id, data["ANSWER"]) msg += "\nType the number to vote!" 
await client.send_message(self.channel, msg) await asyncio.sleep(settings["POLL_DURATION"]) if self.valid: await self.endPoll() async def endPoll(self): global poll_sessions self.valid = False msg = "**POLL ENDED!**\n\n{}\n\n".format(self.question) for data in self.answers.values(): msg += "*{}* - {} votes\n".format(data["ANSWER"], str(data["VOTES"])) await client.send_message(self.channel, msg) poll_sessions.remove(self) def checkAnswer(self, message): try: i = int(message.content) if i in self.answers.keys(): if message.author.id not in self.already_voted: data = self.answers[i] data["VOTES"] += 1 self.answers[i] = data self.already_voted.append(message.author.id) except ValueError: pass async def startPoll(message): global poll_sessions if not getPollByChannel(message): p = Poll(message) if p.valid: poll_sessions.append(p) await p.start() else: await client.send_message(message.channel, "`" + settings["PREFIX"] + "poll question;option1;option2 (...)`") else: await client.send_message(message.channel, "`A poll is already ongoing in this channel.`") async def endPoll(message): global poll_sessions if getPollByChannel(message): p = getPollByChannel(message) if p.author == message.author.id or isMemberAdmin(message): await getPollByChannel(message).endPoll() else: await client.send_message(message.channel, "`Only admins and the author can stop the poll.`") else: await client.send_message(message.channel, "`There's no poll ongoing in this channel.`") def getPollByChannel(message): for poll in poll_sessions: if poll.channel == message.channel: return poll return False async def addcom(message): if checkAuth("ModifyCommands", message, settings): msg = message.content.split() if len(msg) > 2: msg = message.content[8:] # removes !addcom newcmd = msg[:msg.find(" ")] # extracts custom command customtext = msg[msg.find(" ") + 1:] # extracts [text] if len(newcmd) > 1 and newcmd.find(" ") == -1: if not message.channel.server.id in commands: commands[message.channel.server.id] 
# NOTE(review): this chunk arrived whitespace-mangled (original newlines were
# collapsed to spaces and the text re-wrapped at arbitrary positions). The code
# below is the same logic reconstructed with canonical formatting. The head of
# addcom() and the tail of modifySettings() fall outside the visible chunk and
# were reconstructed from their visible remainder and the uniform pattern of
# their sibling commands -- confirm both against the full file.

async def addcom(message):
    """!addcom [command] [text] -- add a per-server custom command."""
    # NOTE(review): the first lines of this function are outside the visible
    # chunk; reconstructed from the visible tail and editcom's structure.
    if checkAuth("ModifyCommands", message, settings):
        msg = message.content.split()
        if len(msg) > 2:
            msg = message.content[8:]  # removes !addcom
            newcmd = msg[:msg.find(" ")]  # extracts custom command
            customtext = msg[msg.find(" ") + 1:]  # extracts [text]
            if message.channel.server.id not in commands:
                commands[message.channel.server.id] = {}
            cmdlist = commands[message.channel.server.id]
            if newcmd not in cmdlist:
                cmdlist[newcmd] = customtext
                commands[message.channel.server.id] = cmdlist
                dataIO.fileIO("json/commands.json", "save", commands)
                logger.info("Saved commands database.")
                await client.send_message(message.channel, "`Custom command successfully added.`")
            else:
                await client.send_message(message.channel, "`This command already exists. Use " + settings["PREFIX"] + "editcom [command] [text]`")
        else:
            await client.send_message(message.channel, "`" + settings["PREFIX"] + "addcom [command] [text]`")
    else:
        await client.send_message(message.channel, "`You don't have permissions to edit custom commands.`")


async def editcom(message):
    """!editcom [command] [text] -- replace the text of an existing custom command."""
    if checkAuth("ModifyCommands", message, settings):
        msg = message.content.split()
        if len(msg) > 2:
            msg = message.content[9:]  # removes !editcom
            cmd = msg[:msg.find(" ")]  # extracts custom command
            customtext = msg[msg.find(" ") + 1:]  # extracts [text]
            if message.channel.server.id in commands:
                cmdlist = commands[message.channel.server.id]
                if cmd in cmdlist:
                    cmdlist[cmd] = customtext
                    commands[message.channel.server.id] = cmdlist
                    dataIO.fileIO("json/commands.json", "save", commands)
                    logger.info("Saved commands database.")
                    await client.send_message(message.channel, "`Custom command successfully edited.`")
                else:
                    await client.send_message(message.channel, "`That command doesn't exist. Use " + settings["PREFIX"] + "addcom [command] [text]`")
            else:
                await client.send_message(message.channel, "`There are no custom commands in this server. Use " + settings["PREFIX"] + "addcom [command] [text]`")
        else:
            await client.send_message(message.channel, "`" + settings["PREFIX"] + "editcom [command] [text]`")
    else:
        await client.send_message(message.channel, "`You don't have permissions to edit custom commands.`")


async def delcom(message):
    """!delcom [command] -- delete a custom command from this server."""
    if checkAuth("ModifyCommands", message, settings):
        msg = message.content.split()
        if len(msg) == 2:
            if message.channel.server.id in commands:
                cmdlist = commands[message.channel.server.id]
                if msg[1] in cmdlist:
                    cmdlist.pop(msg[1], None)
                    commands[message.channel.server.id] = cmdlist
                    dataIO.fileIO("json/commands.json", "save", commands)
                    logger.info("Saved commands database.")
                    await client.send_message(message.channel, "`Custom command successfully deleted.`")
                else:
                    await client.send_message(message.channel, "`That command doesn't exist.`")
            else:
                await client.send_message(message.channel, "`There are no custom commands in this server. Use " + settings["PREFIX"] + "addcom [command] [text]`")
        else:
            await client.send_message(message.channel, "`" + settings["PREFIX"] + "delcom [command]`")
    else:
        await client.send_message(message.channel, "`You don't have permissions to edit custom commands.`")


async def listCustomCommands(message):
    """DM the author the list of this server's custom commands, 4 per row."""
    msg = "Custom commands: \n\n```"
    if message.channel.server.id in commands:
        cmds = commands[message.channel.server.id].keys()
        if cmds:
            for i, d in enumerate(cmds):
                if i % 4 == 0 and i != 0:
                    msg = msg + d + "\n"
                else:
                    msg = msg + d + "\t"
            msg += "```"
            await client.send_message(message.author, msg)
        else:
            await client.send_message(message.author, "There are no custom commands.")
    else:
        await client.send_message(message.author, "There are no custom commands.")


def checkAuth(cmd, message, settings):
    # Checks if the admin-only settings are on. If they are, it checks whether
    # the user is an admin; otherwise everyone is authorized.
    if cmd == "ModifyCommands":
        if settings["EDIT_CC_ADMIN_ONLY"]:
            if isMemberAdmin(message):
                return True
            else:
                return False
        else:
            return True
    elif cmd == "Trivia":
        if settings["TRIVIA_ADMIN_ONLY"]:
            if isMemberAdmin(message):
                return True
            else:
                return False
        else:
            return True
    else:
        logger.error("Invalid call to checkAuth")
        return False


async def rpsgame(message):
    """!rps [rock|paper|scissors] -- play rock-paper-scissors against the bot."""
    rps = {"rock" : ":moyai:",
           "paper": ":page_facing_up:",
           "scissors":":scissors:"
    }
    msg = message.content.lower().split(" ")
    if len(msg) == 2:
        _, userchoice = msg
        if userchoice in rps.keys():
            botchoice = choice(list(rps.keys()))
            msgs = {
                "win": " You win {}!".format(message.author.mention),
                "square": " We're square {}!".format(message.author.mention),
                "lose": " You lose {}!".format(message.author.mention)
            }
            if userchoice == botchoice:
                await client.send_message(message.channel, rps[botchoice] + msgs["square"])
            elif userchoice == "rock" and botchoice == "paper":
                await client.send_message(message.channel, rps[botchoice] + msgs["lose"])
            elif userchoice == "rock" and botchoice == "scissors":
                await client.send_message(message.channel, rps[botchoice] + msgs["win"])
            elif userchoice == "paper" and botchoice == "rock":
                await client.send_message(message.channel, rps[botchoice] + msgs["win"])
            elif userchoice == "paper" and botchoice == "scissors":
                await client.send_message(message.channel, rps[botchoice] + msgs["lose"])
            elif userchoice == "scissors" and botchoice == "rock":
                await client.send_message(message.channel, rps[botchoice] + msgs["lose"])
            elif userchoice == "scissors" and botchoice == "paper":
                await client.send_message(message.channel, rps[botchoice] + msgs["win"])
        else:
            await client.send_message(message.channel, "`" + settings["PREFIX"] + "rps [rock or paper or scissors]`")
    else:
        await client.send_message(message.channel, "`" + settings["PREFIX"] + "rps [rock or paper or scissors]`")


async def randomchoice(message):
    """!choose a or b or c -- pick one option at random."""
    sentences = ["Mmm... I think I'll choose ", "I choose ", "I prefer ", "This one is best: ", "This: "]
    msg = message.content[8:]  # removes !choose
    msg = msg.split(" or ")
    if len(msg) == 1:
        await client.send_message(message.channel, "`" + settings["PREFIX"] + "choose option1 or option2 or option3 (...)`")
    elif len(msg) >= 2:
        await client.send_message(message.channel, "`" + choice(sentences) + choice(msg) + "`")
    else:
        await client.send_message(message.channel, "`The options must be at least two.`")


async def stopwatch(message):
    """!sw -- per-user stopwatch; first call starts it, second stops and reports."""
    global stopwatches
    if message.author.id in stopwatches:
        tmp = abs(stopwatches[message.author.id] - int(time.perf_counter()))
        tmp = str(datetime.timedelta(seconds=tmp))
        await client.send_message(message.channel, "`Stopwatch stopped! Time: " + str(tmp) + " `")
        stopwatches.pop(message.author.id, None)
    else:
        stopwatches[message.author.id] = int(time.perf_counter())
        await client.send_message(message.channel, "`Stopwatch started! Use " + settings["PREFIX"] + "sw to stop it.`")


"""
async def image(message): # API's dead.
    msg = message.content.split()
    if len(msg) > 1:
        if len(msg[1]) > 1 and len([msg[1]]) < 20:
            try:
                msg.remove(msg[0])
                msg = "+".join(msg)
                search = "http://ajax.googleapis.com/ajax/services/search/images?v=1.0&q=" + msg + "&start=0"
                result = requests.get(search).json()
                url = result["responseData"]["results"][0]["url"]
                await client.send_message(message.channel, url)
            except:
                await client.send_message(message.channel, "Error.")
        else:
            await client.send_message(message.channel, "Invalid search.")
    else:
        await client.send_message(message.channel, "!image [text]")
"""


async def imdb(message):  # Method added by BananaWaffles.
    """!imdb [text] -- look a title up on IMDB via the myapifilms API."""
    msg = message.content.split()
    if apis["MYAPIFILMS_TOKEN"] == "TOKENHERE":
        await client.send_message(message.channel, "`This command wasn't configured properly. If you're the owner, edit json/apis.json`")
        return False
    if len(msg) > 1:
        # NOTE(review): len([msg[1]]) is always 1 -- the length guard never
        # triggers; the author likely meant len(msg[1]) < 20. Same pattern
        # recurs in memes/urban/gif below. Left as-is to preserve behavior.
        if len(msg[1]) > 1 and len([msg[1]]) < 20:
            try:
                msg.remove(msg[0])
                msg = "+".join(msg)
                search = "http://api.myapifilms.com/imdb/title?format=json&title=" + msg + "&token=" + apis["MYAPIFILMS_TOKEN"]
                async with aiohttp.get(search) as r:
                    result = await r.json()
                    title = result['data']['movies'][0]['title']
                    year = result['data']['movies'][0]['year']
                    rating = result['data']['movies'][0]['rating']
                    url = result['data']['movies'][0]['urlIMDB']
                    msg = "Title: " + title + " | Released on: " + year + " | IMDB Rating: " + rating + ".\n" + url
                    await client.send_message(message.channel, msg)
            except:
                await client.send_message(message.channel, "`Error.`")
        else:
            await client.send_message(message.channel, "`Invalid search.`")
    else:
        await client.send_message(message.channel, "`" + settings["PREFIX"] + "imdb [text]`")


async def memes(message):
    """!meme id;text1;text2 -- caption an imgflip meme template."""
    msg = message.content[6:]
    msg = msg.split(";")
    if apis["IMGFLIP_USERNAME"] == "USERNAMEHERE" or apis["IMGFLIP_PASSWORD"] == "PASSWORDHERE":
        await client.send_message(message.channel, "`This command wasn't configured properly. If you're the owner, edit json/apis.json`")
        return False
    if len(msg) == 3:
        if len(msg[0]) > 1 and len([msg[1]]) < 20 and len([msg[2]]) < 20:
            try:
                search = "https://api.imgflip.com/caption_image?template_id=" + msg[0] + "&username=" + apis["IMGFLIP_USERNAME"] + "&password=" + apis["IMGFLIP_PASSWORD"] + "&text0=" + msg[1] + "&text1=" + msg[2]
                async with aiohttp.get(search) as r:
                    result = await r.json()
                    if result["data"] != []:
                        url = result["data"]["url"]
                        await client.send_message(message.channel, url)
            except:
                error = result["error_message"]
                await client.send_message(message.channel, error)
        else:
            await client.send_message(message.channel, "`" + settings["PREFIX"] + "meme id;text1;text2 | " + settings["PREFIX"] + "meme help for full list`")
    else:
        await client.send_message(message.channel, "`" + settings["PREFIX"] + "meme id;text1;text2 | " + settings["PREFIX"] + "meme help for full list`")


async def urban(message):
    """!urban [text] -- fetch a definition from Urban Dictionary."""
    msg = message.content.split()
    if len(msg) > 1:
        if len(msg[1]) > 1 and len([msg[1]]) < 20:
            try:
                msg.remove(msg[0])
                msg = "+".join(msg)
                search = "http://api.urbandictionary.com/v0/define?term=" + msg
                async with aiohttp.get(search) as r:
                    result = await r.json()
                    if result["list"] != []:
                        definition = result['list'][0]['definition']
                        example = result['list'][0]['example']
                        await client.send_message(message.channel, "Definition: " + definition + "\n\n" + "Example: " + example )
                    else:
                        await client.send_message(message.channel, "`Your search terms gave no results.`")
            except:
                await client.send_message(message.channel, "`Error.`")
        else:
            await client.send_message(message.channel, "`Invalid search.`")
    else:
        await client.send_message(message.channel, "`" + settings["PREFIX"] + "urban [text]`")


async def gif(message):
    """!gif [text] -- search giphy and post the first result."""
    msg = message.content.split()
    if len(msg) > 1:
        if len(msg[1]) > 1 and len([msg[1]]) < 20:
            try:
                msg.remove(msg[0])
                msg = "+".join(msg)
                search = "http://api.giphy.com/v1/gifs/search?q=" + msg + "&api_key=dc6zaTOxFJmzC"
                async with aiohttp.get(search) as r:
                    result = await r.json()
                    if result["data"] != []:
                        url = result["data"][0]["url"]
                        await client.send_message(message.channel, url)
                    else:
                        await client.send_message(message.channel, "`Your search terms gave no results.`")
            except:
                await client.send_message(message.channel, "`Error.`")
        else:
            await client.send_message(message.channel, "`Invalid search.`")
    else:
        await client.send_message(message.channel, "`" + settings["PREFIX"] + "gif [text]`")


async def avatar(message):
    """!avatar [name or mention] -- post a member's avatar URL."""
    if message.mentions:
        m = message.mentions[0]
        await client.send_message(message.channel, "{}'s avatar: {}".format(m.name, m.avatar_url))
    else:
        if len(message.content.split(" ")) >= 2:
            name = message.content[8:]
            member = discord.utils.get(message.server.members, name=name)
            if member != None:
                await client.send_message(message.channel, "{}'s avatar: {}".format(member.name, member.avatar_url))
            else:
                await client.send_message(message.channel, "`User not found.`")
        else:
            await client.send_message(message.channel, "`" + settings["PREFIX"] + "avatar [name or mention]`")


async def lmgtfy(message):
    """!lmgtfy [search terms] -- post a Let Me Google That For You link."""
    msg = message.content.split()
    if len(msg) >= 2:
        msg = "+".join(msg[1:])
        await client.send_message(message.channel, "http://lmgtfy.com/?q=" + msg)
    else:
        await client.send_message(message.channel, "`" + settings["PREFIX"] + "lmgtfy [search terms]`")


def getTriviabyChannel(channel):
    """Return the running trivia session bound to *channel*, or False."""
    for t in trivia_sessions:
        if t.channel == channel:
            return t
    return False


async def roll(message):
    """!roll [number] -- roll a die between 1 and the given number (< 99999)."""
    msg = message.content.split()
    if len(msg) == 2:
        if msg[1].isdigit():
            msg[1] = int(msg[1])
            if msg[1] < 99999 and msg[1] > 1:
                await client.send_message(message.channel, "{} :game_die: `{}` :game_die:".format(message.author.mention, str(randint(1, msg[1]))))
            else:
                await client.send_message(message.channel, "{} `A number between 1 and 99999, maybe? :)`".format(message.author.mention))
        else:
            await client.send_message(message.channel, "`" + settings["PREFIX"] + "roll [number]`")
    else:
        await client.send_message(message.channel, "`" + settings["PREFIX"] + "roll [number]`")


async def checkFilter(message):  # WIP
    """Delete *message* if it contains a filtered word for this server."""
    msg = message.content.lower()
    if message.server.id in badwords:
        for word in badwords[message.server.id]:
            if msg.find(word.lower()) != -1:
                if canDeleteMessages(message):
                    await client.delete_message(message)
                    logger.info("Message eliminated.")
                    return True
                else:
                    logger.info("Couldn't delete message. I need permissions.")
                    return False
    return False


async def checkRegex(message):  # WIP
    """Delete *message* if it matches a regex filter for this server."""
    msg = message.content  # .lower()?
    if message.server.id in badwords_regex:
        for pattern in badwords_regex[message.server.id]:
            rr = re.search(pattern, msg, re.I | re.U)
            if rr != None:
                if canDeleteMessages(message):
                    await client.delete_message(message)
                    logger.info("Message eliminated. Regex: " + pattern)
                    return True
                else:
                    logger.info("Couldn't delete message. I need permissions.")
                    return False
    return False


async def twitchCheck(message):
    """!twitch [name] -- report whether a Twitch streamer is online."""
    msg = message.content.split()
    if len(msg) == 2:
        try:
            url = "https://api.twitch.tv/kraken/streams/" + msg[1]
            async with aiohttp.get(url) as r:
                data = await r.json()
                if "error" in data:
                    await client.send_message(message.channel, "{} `There is no streamer named {}`".format(message.author.mention, msg[1]))
                elif "stream" in data:
                    if data["stream"] != None:
                        await client.send_message(message.channel, "{} `{} is online!` {}".format(message.author.mention, msg[1], "http://www.twitch.tv/" + msg[1]))
                    else:
                        await client.send_message(message.channel, "{} `{} is offline.`".format(message.author.mention, msg[1]))
                else:
                    await client.send_message(message.channel, "{} `There is no streamer named {}`".format(message.author.mention, msg[1]))
        except:
            await client.send_message(message.channel, "{} `Error.`".format(message.author.mention))
    else:
        await client.send_message(message.channel, "{} `".format(message.author.mention) + settings["PREFIX"] + "twitch [name]`")


async def triviaList(message):
    """DM the author trivia help plus the available trivia lists, 4 per row."""
    await client.send_message(message.author, trivia_help)
    msg = "**Available trivia lists:** \n\n```"
    lists = os.listdir("trivia/")
    if lists:
        clean_list = []
        for txt in lists:
            if txt.endswith(".txt") and " " not in txt:
                txt = txt.replace(".txt", "")
                clean_list.append(txt)
        if clean_list:
            for i, d in enumerate(clean_list):
                if i % 4 == 0 and i != 0:
                    msg = msg + d + "\n"
                else:
                    msg = msg + d + "\t"
            msg += "```"
            await client.send_message(message.author, msg)
        else:
            await client.send_message(message.author, "There are no trivia lists available.")
    else:
        await client.send_message(message.author, "There are no trivia lists available.")


async def uptime(message):
    """!uptime -- report how long the bot has been running."""
    up = abs(uptime_timer - int(time.perf_counter()))
    up = str(datetime.timedelta(seconds=up))
    await client.send_message(message.channel, "`Uptime: {}`".format(up))


async def checkVoice(message):
    """Join the author's voice channel if needed; return True when connected."""
    if not client.is_voice_connected():
        if message.author.voice_channel:
            if message.author.voice_channel.permissions_for(message.server.me).connect:
                await client.join_voice_channel(message.author.voice_channel)
            else:
                await client.send_message(message.channel, "{} `I need permissions to join that channel.`".format(message.author.mention))
                return False
        else:
            await client.send_message(message.channel, "{} `You need to join a voice channel first.`".format(message.author.mention))
            return False
    return True


async def playVideo(message):
    """!youtube [link] -- play a single youtube video in voice chat."""
    global musicPlayer, currentPlaylist
    toDelete = None
    if await checkVoice(message):
        pattern = "(?:youtube\.com\/watch\?v=)(.*)|(?:youtu.be/)(.*)"
        rr = re.search(pattern, message.content, re.I | re.U)
        if rr.group(1) != None:
            id = rr.group(1)
        elif rr.group(2) != None:
            id = rr.group(2)
        else:
            await client.send_message(message.channel, "{} `Invalid link.`".format(message.author.mention))
            return False
        stopMusic()
        if settings["DOWNLOADMODE"]:
            # NOTE(review): .format() here has no placeholders to fill; harmless.
            toDelete = await client.send_message(message.channel, "`I'm in download mode. It might take a bit for me to start. I'll delete this message as soon as I'm ready.`".format(id, message.author.name))
        data = {"filename" : 'https://www.youtube.com/watch?v=' + id, "type" : "singleSong"}
        currentPlaylist = Playlist(data)
        if canDeleteMessages(message):
            await client.send_message(message.channel, "`Playing youtube video {} requested by {}`".format(await youtubeparser.getTitle(currentPlaylist.playlist[currentPlaylist.current]), message.author.name))
            await client.delete_message(message)
        if toDelete:
            await client.delete_message(toDelete)
        # currentPlaylist.playlist = ['https://www.youtube.com/watch?v=' + id]
        # musicPlayer = client.voice.create_ytdl_player('https://www.youtube.com/watch?v=' + id, options=youtube_dl_options)
        # musicPlayer.start() #!addfavorite compatibility stuff


async def playPlaylist(message, sing=False):
    """!play [name] -- play a saved playlist; sing=True plays the easter-egg song."""
    global musicPlayer, currentPlaylist
    p = settings["PREFIX"]
    msg = message.content
    toDelete = None
    if not sing:
        # NOTE(review): `!=  ... or != ...` is always True; author likely meant
        # `and`. Left as-is to preserve behavior.
        if msg != p + "play" or msg != "play ":
            if await checkVoice(message):
                msg = message.content[6:]
                if dataIO.fileIO("playlists/" + msg + ".txt", "check"):
                    stopMusic()
                    data = {"filename" : msg, "type" : "playlist"}
                    if settings["DOWNLOADMODE"]:
                        toDelete = await client.send_message(message.channel, "`I'm in download mode. It might take a bit for me to start and switch between tracks. I'll delete this message as soon as the current playlist stops.`".format(id, message.author.name))
                    currentPlaylist = Playlist(data)
                    await asyncio.sleep(2)
                    await currentPlaylist.songSwitcher()
                    if toDelete:
                        await client.delete_message(toDelete)
                else:
                    await client.send_message(message.channel, "{} `That playlist doesn't exist.`".format(message.author.mention))
    else:
        if await checkVoice(message):
            stopMusic()
            msg = ["*Oh Daisy..*"]
            playlist = ["https://www.youtube.com/watch?v=E7WQ1tdxSqI"]
            song = choice(playlist)
            data = {"filename" : song, "type" : "singleSong"}
            if settings["DOWNLOADMODE"]:
                toDelete = await client.send_message(message.channel, "`I'm in download mode. It might take a bit for me to start. I'll delete this message as soon as I'm ready.`".format(id, message.author.name))
            currentPlaylist = Playlist(data)
            # currentPlaylist.playlist = [song]
            # musicPlayer = client.voice.create_ytdl_player(song, options=youtube_dl_options)
            # musicPlayer.start()
            if toDelete:
                await client.delete_message(toDelete)
            await client.send_message(message.channel, choice(msg))


async def playLocal(message):
    """!local [playlist] -- play a folder of mp3/flac files from localtracks/."""
    global currentPlaylist
    p = settings["PREFIX"]
    msg = message.content.split(" ")
    if await checkVoice(message):
        if len(msg) == 2:
            localplaylists = getLocalPlaylists()
            if localplaylists and ("/" not in msg[1] and "\\" not in msg[1]):
                if msg[1] in localplaylists:
                    files = []
                    if glob.glob("localtracks/" + msg[1] + "/*.mp3"):
                        files.extend(glob.glob("localtracks/" + msg[1] + "/*.mp3"))
                    if glob.glob("localtracks/" + msg[1] + "/*.flac"):
                        files.extend(glob.glob("localtracks/" + msg[1] + "/*.flac"))
                    stopMusic()
                    data = {"filename" : files, "type" : "local"}
                    currentPlaylist = Playlist(data)
                    await asyncio.sleep(2)
                    await currentPlaylist.songSwitcher()
                else:
                    # BUGFIX: .format() previously bound only to the last string
                    # literal of the concatenation, so {} was never filled.
                    await client.send_message(message.channel, ("`There is no local playlist called {}. " + p + "local or " + p + "locallist to receive the list.`").format(msg[1]))
            else:
                await client.send_message(message.channel, "`There are no valid playlists in the localtracks folder.`")
        else:
            await client.send_message(message.channel, "`" + settings["PREFIX"] + "local [playlist]`")


def getLocalPlaylists():
    """Return folder names under localtracks/ that contain mp3/flac, or False."""
    dirs = []
    files = os.listdir("localtracks/")
    for f in files:
        if os.path.isdir("localtracks/" + f) and " " not in f:
            if glob.glob("localtracks/" + f + "/*.mp3") != []:
                dirs.append(f)
            elif glob.glob("localtracks/" + f + "/*.flac") != []:
                dirs.append(f)
    if dirs != []:
        return dirs
    else:
        return False


async def leaveVoice():
    """Stop playback and disconnect from the current voice channel."""
    if client.is_voice_connected():
        stopMusic()
        await client.voice.disconnect()


async def listPlaylists(message):
    """DM the author every saved playlist name, 4 per row."""
    msg = "Available playlists: \n\n```"
    files = os.listdir("playlists/")
    if files:
        for i, f in enumerate(files):
            if f.endswith(".txt"):
                if i % 4 == 0 and i != 0:
                    msg = msg + f.replace(".txt", "") + "\n"
                else:
                    msg = msg + f.replace(".txt", "") + "\t"
        msg += "```"
        await client.send_message(message.author, msg)
    else:
        await client.send_message(message.author, "There are no playlists.")


async def listLocal(message):
    """DM the author every local playlist folder name, 4 per row."""
    msg = "Available local playlists: \n\n```"
    dirs = getLocalPlaylists()
    if dirs:
        for i, d in enumerate(dirs):
            if i % 4 == 0 and i != 0:
                msg = msg + d + "\n"
            else:
                msg = msg + d + "\t"
        msg += "```"
        await client.send_message(message.author, msg)
    else:
        await client.send_message(message.author, "There are no local playlists.")


def stopMusic():
    """Signal the current playlist to stop and halt the active player."""
    global musicPlayer, currentPlaylist
    if currentPlaylist != None:
        currentPlaylist.stop = True
    if musicPlayer != None:
        musicPlayer.stop()


async def transferPlaylist(message):
    """Import a .txt playlist uploaded as a message attachment."""
    msg = message.attachments[0]
    if msg["filename"].endswith(".txt"):
        if not dataIO.fileIO("playlists/" + msg["filename"], "check"):  # returns false if file already exists
            r = await aiohttp.get(msg["url"])
            r = await r.text()
            data = r.replace("\r", "")
            data = data.split()
            if isPlaylistValid(data) and isPlaylistNameValid(msg["filename"].replace(".txt", "")):
                data = { "author" : message.author.id,
                         "playlist": data}
                dataIO.fileIO("playlists/" + msg["filename"], "save", data)
                await client.send_message(message.channel, "`Playlist added. Name: {}`".format(msg["filename"].replace(".txt", "")))
            else:
                await client.send_message(message.channel, "`Something is wrong with the playlist or its filename. Type " + settings["PREFIX"] + "audio help to read how to format it properly.`")
        else:
            await client.send_message(message.channel, "`A playlist with that name already exists. Change the filename and resubmit it.`")


def isPlaylistValid(data):
    """True if every non-empty line of *data* is a recognizable youtube link."""
    data = [y for y in data if y != ""]  # removes all empty elements
    data = [y for y in data if y != "\n"]
    for link in data:
        pattern = "^(https:\/\/www\.youtube\.com\/watch\?v=...........*)|^(https:\/\/youtu.be\/...........*)|^(https:\/\/youtube\.com\/watch\?v=...........*)"
        rr = re.search(pattern, link, re.I | re.U)
        if rr == None:
            return False
    return True


def isPlaylistNameValid(name):
    """True if *name* contains only letters, digits and underscores."""
    for l in name:
        if l.isdigit() or l.isalpha() or l == "_":
            pass
        else:
            return False
    return True


def isPlaylistLinkValid(link):
    """Return the youtube playlist ID embedded in *link*, or False."""
    pattern = "^https:\/\/www.youtube.com\/playlist\?list=(.[^:/]*)"
    rr = re.search(pattern, link, re.I | re.U)
    if not rr == None:
        return rr.group(1)
    else:
        return False


async def addPlaylist(message):
    """!addplaylist [name] [link] -- save a youtube playlist under *name*."""
    msg = message.content.split(" ")
    if len(msg) == 3:
        _, name, link = msg
        if isPlaylistNameValid(name) and len(name) < 25 and isPlaylistLinkValid(link):
            if dataIO.fileIO("playlists/" + name + ".txt", "check"):
                await client.send_message(message.channel, "`A playlist with that name already exists.`")
                return False
            links = await youtubeparser.parsePlaylist(link)
            if links:
                data = { "author" : message.author.id,
                         "playlist": links}
                dataIO.fileIO("playlists/" + name + ".txt", "save", data)
                await client.send_message(message.channel, "`Playlist added. Name: {}`".format(name))
            else:
                await client.send_message(message.channel, "`Something went wrong. Either the link was incorrect or I was unable to retrieve the page.`")
        else:
            await client.send_message(message.channel, "`Something is wrong with the playlist's link or its filename. Remember, the name must be with only numbers, letters and underscores. Link must be this format: https://www.youtube.com/playlist?list=PLe8jmEHFkvsaDOOWcREvkgFoj6MD0pXXX`")
    else:
        await client.send_message(message.channel, "`" + settings["PREFIX"] + "addplaylist [name] [link]`")


async def delPlaylist(message):
    """!delplaylist [name] -- delete a playlist (author or admin only)."""
    msg = message.content.split(" ")
    if len(msg) == 2:
        _, filename = msg
        if dataIO.fileIO("playlists/" + filename + ".txt", "check"):
            authorid = dataIO.fileIO("playlists/" + filename + ".txt", "load")["author"]
            if message.author.id == authorid or isMemberAdmin(message):
                os.remove("playlists/" + filename + ".txt")
                await client.send_message(message.channel, "`Playlist {} removed.`".format(filename))
            else:
                await client.send_message(message.channel, "`Only the playlist's author and admins can do that.`")
        else:
            await client.send_message(message.channel, "`There is no playlist with that name.`")
    else:
        await client.send_message(message.channel, "`" + settings["PREFIX"] + "delplaylist [name]`")


async def getSongTitle(message):
    """!song -- post the title and link of the track currently playing."""
    title = await youtubeparser.getTitle(currentPlaylist.playlist[currentPlaylist.current])
    if title:
        await client.send_message(message.channel, "`Current song: {}\n{}`".format(title, currentPlaylist.playlist[currentPlaylist.current]))
    else:
        await client.send_message(message.channel, "`I couldn't retrieve the current song's title.`")


async def addToFavorites(message):
    """!addfavorite -- add the currently playing song to the author's favorites."""
    if currentPlaylist:
        if dataIO.fileIO("favorites/" + message.author.id + ".txt", "check"):
            data = dataIO.fileIO("favorites/" + message.author.id + ".txt", "load")
        else:
            data = []
        data.append(currentPlaylist.playlist[currentPlaylist.current])
        dataIO.fileIO("favorites/" + message.author.id + ".txt", "save", data)
        await client.send_message(message.channel, "{} `This song has been added to your favorites.`".format(message.author.mention))
    else:
        await client.send_message(message.channel, "{} `No song is being played`".format(message.author.mention))


async def removeFromFavorites(message):
    """!removefavorite -- drop the currently playing song from the author's favorites."""
    if currentPlaylist:
        if dataIO.fileIO("favorites/" + message.author.id + ".txt", "check"):
            data = dataIO.fileIO("favorites/" + message.author.id + ".txt", "load")
            if currentPlaylist.playlist[currentPlaylist.current] in data:
                data.remove(currentPlaylist.playlist[currentPlaylist.current])
                dataIO.fileIO("favorites/" + message.author.id + ".txt", "save", data)
                await client.send_message(message.channel, "{} `This song has been removed from your favorites.`".format(message.author.mention))
            else:
                await client.send_message(message.channel, "{} `This song isn't in your favorites.`".format(message.author.mention))
        else:
            # BUGFIX: .format() previously bound only to the last string literal
            # of the concatenation, so the mention {} was never filled.
            await client.send_message(message.channel, ("{} `You don't have any favorites yet. Start adding them with " + settings["PREFIX"] + "addfavorite`").format(message.author.mention))
    else:
        await client.send_message(message.channel, "{} `No song is being played`".format(message.author.mention))


async def playFavorites(message):
    """!favorites -- play the author's saved favorites as a playlist."""
    global musicPlayer, currentPlaylist
    if await checkVoice(message):
        if dataIO.fileIO("favorites/" + message.author.id + ".txt", "check"):
            data = {"filename" : message.author.id, "type" : "favorites"}
            stopMusic()
            currentPlaylist = Playlist(data)
            await asyncio.sleep(2)
            await currentPlaylist.songSwitcher()
        else:
            await client.send_message(message.channel, "{} `You don't have any favorites yet. Start adding them with !addfavorite`".format(message.author.mention))


async def sendPlaylist(message):
    """DM the author the tracks of the current playlist, split under the 2000-char limit."""
    if currentPlaylist:
        msg = "Here's the current playlist:\n```"
        for track in currentPlaylist.playlist:
            msg += track
            msg += "\n"
            if len(msg) >= 1900:
                msg += "```"
                await client.send_message(message.author, msg)
                msg = "```"
        if msg != "```":
            msg += "```"
            await client.send_message(message.author, msg)


async def setVolume(message):
    """!volume [0..1] -- set playback volume for the next track."""
    global settings
    p = settings["PREFIX"]
    msg = message.content
    if len(msg.split(" ")) == 2:
        msg = msg.split(" ")
        try:
            vol = float(msg[1])
            if vol >= 0 and vol <= 1:
                settings["VOLUME"] = vol
                await(client.send_message(message.channel, "`Volume set. Next track will have the desired volume.`"))
                dataIO.fileIO("json/settings.json", "save", settings)
            else:
                await(client.send_message(message.channel, "`Volume must be between 0 and 1. Example: " + p + "volume 0.50`"))
        except:
            await(client.send_message(message.channel, "`Volume must be between 0 and 1. Example: " + p + "volume 0.15`"))
    else:
        await(client.send_message(message.channel, "`Volume must be between 0 and 1. Example: " + p + "volume 0.15`"))


async def downloadMode(message):
    """!downloadmode -- toggle download mode (admin only)."""
    if isMemberAdmin(message):
        if settings["DOWNLOADMODE"]:
            settings["DOWNLOADMODE"] = False
            await(client.send_message(message.channel, "`Download mode disabled. This mode is unstable and tracks might interrupt. Also, the volume settings will not have any effect.`"))
        else:
            settings["DOWNLOADMODE"] = True
            await(client.send_message(message.channel, "`Download mode enabled.`"))
        dataIO.fileIO("json/settings.json", "save", settings)
    else:
        await(client.send_message(message.channel, "`Im sorry {} I'm afraid I can't do that.`".format(message.author.name)))


############## ADMIN COMMANDS ###################


async def shutdown(message):
    """!shutdown -- log out and terminate the bot (admin only)."""
    if isMemberAdmin(message):
        await client.send_message(message.channel, "Daisy, Daisy, give me *your answer do...* ***Shutting down*** ")
        await client.logout()
        try:
            exit(1)
        except SystemExit:  # clean exit
            logger.info("Shutting down as requested by " + message.author.id + "...")
            pass
    else:
        await client.send_message(message.channel, "`Im sorry {} I'm afraid I can't do that.`".format(message.author.name))


async def join(message):
    """!join [invite] -- accept a server invite (admin only)."""
    if isMemberAdmin(message):
        msg = message.content.split()
        if len(msg) > 1:
            await client.accept_invite(msg[1])
        else:
            logger.warning("Join: missing parameters")
    else:
        await client.send_message(message.channel, "`Im sorry {} I'm afraid I can't do that.`".format(message.author.name))


async def leave(message):
    """!leaveserver -- leave the current server (admin only)."""
    if isMemberAdmin(message):
        await client.send_message(message.channel, "`Bye.`")
        await client.leave_server(message.channel.server)
    else:
        await client.send_message(message.channel, "`Im sorry {} I'm afraid I can't do that.`".format(message.author.name))


async def shush(message):
    """!shush -- make the bot ignore the current channel (admin only)."""
    global shush_list
    if isMemberAdmin(message):
        await client.send_message(message.channel, "`Ok, I'll ignore this channel.`")
        shush_list.append(message.channel.id)
        dataIO.fileIO("json/shushlist.json", "save", shush_list)
        logger.info("Saved silenced channels database.")
    else:
        await client.send_message(message.channel, "`Im sorry {} I'm afraid I can't do that.`".format(message.author.name))


async def talk(message):
    """!talk -- un-shush the current channel (admin only)."""
    if isMemberAdmin(message):
        if message.channel.id in shush_list:
            shush_list.remove(message.channel.id)
            dataIO.fileIO("json/shushlist.json", "save", shush_list)
            logger.info("Saved silenced channels database.")
            await client.send_message(message.channel, "`Aaand I'm back.`")
    else:
        await client.send_message(message.channel, "`Im sorry {} I'm afraid I can't do that.`".format(message.author.name))


async def addBadWords(message):
    """!addwords [w1] [w2] ... -- add words/phrases to this server's filter (admin only)."""
    global badwords
    if isMemberAdmin(message):
        msg = message.content.split()
        if len(msg) >= 2:
            del msg[0]
            if not message.server.id in badwords:
                badwords[message.server.id] = []
            for word in msg:
                if word.find("/") != -1:
                    word = word.replace("/", " ")  # "/" stands for a space inside a phrase
                badwords[message.server.id].append(word)
            await client.send_message(message.channel, "`Updated banned words database.`")
            dataIO.fileIO("json/filter.json", "save", badwords)
            logger.info("Saved filter words.")
        else:
            await client.send_message(message.channel, "`" + settings["PREFIX"] + "addwords [word1] [word2] [phrase/with/many/words] (...)`")
    else:
        await client.send_message(message.channel, "`Im sorry {} I'm afraid I can't do that.`".format(message.author.name))


async def removeBadWords(message):
    """!removewords [w1] [w2] ... -- remove words/phrases from the filter (admin only)."""
    global badwords
    if isMemberAdmin(message):
        msg = message.content.split()
        if len(msg) >= 2:
            del msg[0]
            if message.server.id in badwords:
                for w in msg:
                    try:
                        if w.find("/") != -1:
                            w = w.replace("/", " ")
                        badwords[message.server.id].remove(w)
                    except:
                        pass  # best-effort: silently skip words not in the list
                await client.send_message(message.channel, "`Updated banned words database.`")
                dataIO.fileIO("json/filter.json", "save", badwords)
                logger.info("Saved filter words.")
        else:
            await client.send_message(message.channel, "`" + settings["PREFIX"] + "removewords [word1] [word2] [phrase/with/many/words](...)`")
    else:
        await client.send_message(message.channel, "`Im sorry {} I'm afraid I can't do that.`".format(message.author.name))


async def changeName(message):
    """!name [new name] -- change the bot's username (admin only)."""
    global settings
    if isMemberAdmin(message):
        msg = message.content.split()
        if len(msg) == 2:
            try:
                await client.edit_profile(settings["PASSWORD"], username=msg[1])
            except Exception as e:
                logger.error(e)
        else:
            await client.send_message(message.channel, "`" + settings["PREFIX"] + "name [new name]`")
    else:
        await client.send_message(message.channel, "`Im sorry {} I'm afraid I can't do that.`".format(message.author.name))


async def addRegex(message):
    """!addregex [pattern] -- add a regex to this server's filter (admin only)."""
    global badwords_regex
    if isMemberAdmin(message):
        msg = message.content
        msg = msg[10:]
        if not message.server.id in badwords_regex:
            badwords_regex[message.server.id] = []
        badwords_regex[message.server.id].append(msg)
        await client.send_message(message.channel, "`Updated regex filter database.`")
        dataIO.fileIO("json/regex_filter.json", "save", badwords_regex)
        logger.info("Saved regex filter database.")
    else:
        await client.send_message(message.channel, "`Im sorry {} I'm afraid I can't do that.`".format(message.author.name))


async def removeRegex(message):
    """!removeregex [pattern] -- remove a regex from the filter (admin only)."""
    global badwords_regex
    if isMemberAdmin(message):
        msg = message.content
        msg = msg[13:]
        if message.server.id in badwords_regex:
            if msg in badwords_regex[message.server.id]:
                badwords_regex[message.server.id].remove(msg)
                await client.send_message(message.channel, "`Updated regex filter database.`")
                dataIO.fileIO("json/regex_filter.json", "save", badwords_regex)
                logger.info("Saved regex filter database.")
            else:
                await client.send_message(message.channel, "`No match.`")
    else:
        await client.send_message(message.channel, "`Im sorry {} I'm afraid I can't do that.`".format(message.author.name))


async def reloadSettings(message):
    """!reload -- reload settings and data files from disk (admin only)."""
    if isMemberAdmin(message):
        loadDataFromFiles(True)
        await client.send_message(message.channel, "`Settings and files reloaded.`")
    else:
        await client.send_message(message.channel, "`Im sorry {} I'm afraid I can't do that.`".format(message.author.name))


async def cleanup(message):
    """!cleanup [number] or !cleanup [name/mention] [number] -- bulk delete messages (admin only)."""
    errorMsg = "`" + settings["PREFIX"] + "cleanup [number] " + settings["PREFIX"] + "cleanup [name/mention] [number]`"
    if isMemberAdmin(message):
        if canDeleteMessages(message):
            # Probe logs_from once to detect an outdated discord.py.
            try:
                async for x in client.logs_from(message.channel, limit=1):
                    pass
            except TypeError:
                logger.error("Your discord.py is outdated. Update it to use cleanup.")
                return False
            msg = message.content.split()
            if len(msg) == 2:
                if msg[1].isdigit():
                    n = int(msg[1])
                    async for x in client.logs_from(message.channel, limit=n+1):
                        await client.delete_message(x)
                else:
                    await client.send_message(message.channel, errorMsg)
            elif len(msg) == 3:
                _, name, limit = msg
                try:
                    limit = int(limit)
                except:
                    await client.send_message(message.channel, errorMsg)
                    return False
                if message.mentions:
                    m = message.mentions[0]
                else:
                    m = discord.utils.get(message.server.members, name=name)
                if m and limit != 0:
                    checksLeft = 5  # scan at most 5 batches of 100 messages
                    await client.delete_message(message)
                    while checksLeft != 0 and limit != 0:
                        async for x in client.logs_from(message.channel, limit=100):
                            if x.author == m and limit != 0:
                                await client.delete_message(x)
                                limit -= 1
                        checksLeft -= 1
                else:
                    await client.send_message(message.channel, errorMsg)
            else:
                await client.send_message(message.channel, errorMsg)
        else:
            await client.send_message(message.channel, "`I need permissions to delete messages.`")
    else:
        await client.send_message(message.channel, "`Im sorry {} I'm afraid I can't do that.`".format(message.author.name))


def isMemberAdmin(message):
    """True if the author holds the configured admin role (never in DMs)."""
    if not message.channel.is_private:
        if discord.utils.get(message.author.roles, name=settings["ADMINROLE"]) != None:
            return True
        else:
            return False
    else:
        return False


def canDeleteMessages(message):
    """True if the bot may delete messages in this channel."""
    return message.channel.permissions_for(message.server.me).manage_messages


async def addTwitchAlert(message):
    """!twitchalert [name] -- announce in this channel when the stream goes live (admin only)."""
    global twitchStreams
    added = False
    if isMemberAdmin(message):
        msg = message.content.split(" ")
        if len(msg) == 2:
            if "twitch.tv/" in msg[1]:
                await client.send_message(message.channel, "`Enter the name of the stream, not the URL.`")
                return False
            for i, stream in enumerate(twitchStreams):
                if stream["NAME"] == msg[1] and message.channel.id in stream["CHANNELS"]:
                    await client.send_message(message.channel, "`I'm already monitoring that stream in this channel.`")
                    return False
            # BUGFIX: this loop previously did `for stream in twitchStreams:`
            # while still indexing twitchStreams[i] with the stale `i` left over
            # from the loop above, updating the wrong entry.
            for i, stream in enumerate(twitchStreams):
                if stream["NAME"] == msg[1] and message.channel.id not in stream["CHANNELS"]:
                    # already monitoring this streamer but not in this channel
                    twitchStreams[i]["CHANNELS"].append(message.channel.id)
                    added = True
            if not added:  # wasn't monitoring this streamer at all
                twitchStreams.append({"CHANNELS" : [message.channel.id], "NAME" : msg[1], "ALREADY_ONLINE" : False})
            dataIO.fileIO("json/twitch.json", "save", twitchStreams)
            await client.send_message(message.channel, "`I will always send an alert in this channel whenever {}'s stream is online. Use !stoptwitchalert [name] to stop it.`".format(msg[1]))
        else:
            await client.send_message(message.channel, "`" + settings["PREFIX"] + "twitchalert [name]`")
    else:
        await client.send_message(message.channel, "`Im sorry {} I'm afraid I can't do that.`".format(message.author.name))


async def removeTwitchAlert(message):
    """!stoptwitchalert [name] -- stop announcing a stream in this channel (admin only)."""
    global twitchStreams
    if isMemberAdmin(message):
        msg = message.content.split(" ")
        if len(msg) == 2:
            for i, stream in enumerate(twitchStreams):
                if stream["NAME"] == msg[1] and message.channel.id in stream["CHANNELS"]:
                    if len(stream["CHANNELS"]) == 1:
                        twitchStreams.remove(stream)
                    else:
                        twitchStreams[i]["CHANNELS"].remove(message.channel.id)
                    dataIO.fileIO("json/twitch.json", "save", twitchStreams)
                    await client.send_message(message.channel, "`I will stop sending alerts about {}'s stream in this channel.`".format(msg[1]))
                    return True
            await client.send_message(message.channel, "`There's no alert for {}'s stream in this channel.`".format(msg[1]))
        else:
            await client.send_message(message.channel, "`" + settings["PREFIX"] + "stoptwitchalert [name]`")
    else:
        await client.send_message(message.channel, "`Im sorry {} I'm afraid I can't do that.`".format(message.author.name))


async def blacklist(message, mode):
    """Add ("add") or remove (other mode) a user from the blacklist (admin only)."""
    global blacklisted_users
    p = settings["PREFIX"]
    if isMemberAdmin(message):
        if message.mentions:
            m = message.mentions[0]
        else:
            if len(message.content.split(" ")) >= 2:
                if message.content.startswith(p + "blacklist"):
                    name = message.content[11:]
                else:
                    name = message.content[9:]
                m = discord.utils.get(message.server.members, name=name)
                if m == None:
                    await client.send_message(message.channel, "`User not found.`")
                    return False
            else:
                return False
        if mode == "add":
            blacklisted_users.append(m.id)
            await client.send_message(message.channel, "`{} is now in blacklist.`".format(m.name))
        else:
            if m.id in blacklisted_users:
                blacklisted_users.remove(m.id)
                await client.send_message(message.channel, "`{} has been removed from blacklist.`".format(m.name))
            else:
                await client.send_message(message.channel, "`User not in blacklist.`")
                return False
        dataIO.fileIO("json/blacklist.json", "save", blacklisted_users)
    else:
        await client.send_message(message.channel, "`Im sorry {} I'm afraid I can't do that.`".format(message.author.name))


async def modifySettings(message):
    """!setting [setting] [value] -- change a setting, or list them all (admin only)."""
    global settings
    if isMemberAdmin(message):
        msg = message.content.split(" ")
        if len(msg) == 3:
            _, key, value = msg
            if key.lower() == "password" or key.lower() == "email" or key.lower() == "debug_id":
                await client.send_message(message.channel, "`You cannot modify EMAIL, PASSWORD or DEBUG_ID`")
                return False
            if key.lower() == "prefix" and len(value) != 1:
                await client.send_message(message.channel, "`Prefix cannot be more than one character.`")
                return False
            if key in settings.keys():
                # Coerce booleans and ints; anything else stays a string.
                if value.lower() == "true":
                    value = True
                elif value.lower() == "false":
                    value = False
                else:
                    try:
                        value = int(value)
                    except:
                        pass
                settings[key] = value
                dataIO.fileIO("json/settings.json", "save", settings)
                loadHelp()
                if "economy" in modules:
                    economy.settings = settings
                    economy.loadHelp()
                await client.send_message(message.channel, "`'{}' set to '{}'`".format(key, str(value)))
            else:
                await client.send_message(message.channel, "`That setting doesn't exist`")
        else:
            msg = "```"
            for k, v in settings.items():
                if k != "EMAIL" and k != "PASSWORD":
                    msg += k + ": " + str(v) + "\n"
            msg += "```\n"
            msg += settings["PREFIX"] + "setting [setting] [value]"
            await client.send_message(message.channel, msg)
    else:
        # NOTE(review): the visible chunk is truncated right after this `else:`;
        # body completed with the refusal message every sibling admin command
        # uses -- confirm against the full file.
        await client.send_message(message.channel, "`Im sorry {} I'm afraid I can't do that.`".format(message.author.name))
await client.send_message(message.channel, "`Im sorry {} I'm afraid I can't do that.`".format(message.author.name)) ################################################ @asyncio.coroutine async def twitchAlert(): global twitchStreams CHECK_DELAY = 10 while True: if twitchStreams and client.is_logged_in: to_delete = [] save = False consistency_check = twitchStreams for i, stream in enumerate(twitchStreams): if twitchStreams == consistency_check: #prevents buggy behavior if twitchStreams gets modified during the iteration try: url = "https://api.twitch.tv/kraken/streams/" + stream["NAME"] async with aiohttp.get(url) as r: data = await r.json() if "status" in data: if data["status"] == 404: #Stream doesn't exist, remove from list to_delete.append(stream) elif "stream" in data: if data["stream"] != None: if not stream["ALREADY_ONLINE"]: for channel in stream["CHANNELS"]: try: await client.send_message(client.get_channel(channel), "`{} is online!` {}".format(stream["NAME"], "http://www.twitch.tv/" + stream["NAME"])) except: #In case of missing permissions pass twitchStreams[i]["ALREADY_ONLINE"] = True save = True else: if stream["ALREADY_ONLINE"]: twitchStreams[i]["ALREADY_ONLINE"] = False save = True except Exception as e: logger.warning(e) if save: #Saves online status, in case the bot needs to be restarted it can prevent message spam dataIO.fileIO("json/twitch.json", "save", twitchStreams) save = False await asyncio.sleep(CHECK_DELAY) else: break if to_delete: for invalid_stream in to_delete: twitchStreams.remove(invalid_stream) dataIO.fileIO("json/twitch.json", "save", twitchStreams) else: await asyncio.sleep(5) async def customCommand(message): msg = message.content[1:] if message.channel.server.id in commands: cmdlist = commands[message.channel.server.id] if msg in cmdlist: await client.send_message(message.channel, cmdlist[msg] ) async def debug(message): # If you don't know what this is, *leave it alone* if message.author.id == settings["DEBUG_ID"]: # Never assign 
DEBUG_ID to someone other than you msg = message.content.split("`") # Example: !debug `message.author.id` if len(msg) == 3: _, cmd, _ = msg try: result = str(eval(cmd)) if settings["PASSWORD"].lower() not in result.lower() and settings["EMAIL"].lower() not in result.lower(): await client.send_message(message.channel, "```" + result + "```") else: await client.send_message(message.author, "`Are you trying to send my credentials in chat? Because that's how you send my credentials in chat.`") except Exception as e: await client.send_message(message.channel, "```" + str(e) + "```") async def execFunc(message): #same warning as the other function ^ if message.author.id == settings["DEBUG_ID"]: msg = message.content.split("`") # Example: !exec `import this` if len(msg) == 3: _, cmd, _ = msg try: result = exec(cmd) #await client.send_message(message.channel, "```" + str(result) + "```") except Exception as e: await client.send_message(message.channel, "```" + str(e) + "```") def console(): while True: try: exec(input("")) except Exception: traceback.print_exc() print("\n") def loadDataFromFiles(loadsettings=False): global proverbs, commands, trivia_questions, badwords, badwords_regex, shush_list, twitchStreams, blacklisted_users, apis proverbs = dataIO.loadProverbs() logger.info("Loaded " + str(len(proverbs)) + " proverbs.") commands = dataIO.fileIO("json/commands.json", "load") logger.info("Loaded " + str(len(commands)) + " lists of custom commands.") badwords = dataIO.fileIO("json/filter.json", "load") logger.info("Loaded " + str(len(badwords)) + " lists of filtered words.") blacklisted_users = dataIO.fileIO("json/blacklist.json", "load") logger.info("Loaded " + str(len(blacklisted_users)) + " blacklisted users.") badwords_regex = dataIO.fileIO("json/regex_filter.json", "load") logger.info("Loaded " + str(len(badwords_regex)) + " regex lists.") shush_list = dataIO.fileIO("json/shushlist.json", "load") logger.info("Loaded " + str(len(shush_list)) + " silenced channels.") 
twitchStreams = dataIO.fileIO("json/twitch.json", "load") logger.info("Loaded " + str(len(twitchStreams)) + " streams to monitor.") apis = dataIO.fileIO("json/apis.json", "load") logger.info("Loaded APIs configuration.") if loadsettings: global settings settings = dataIO.fileIO("json/settings.json", "load") loadHelp() if "economy" in modules: economy.settings = settings economy.loadHelp() def main(): global ball, greetings, greetings_caps, stopwatches, trivia_sessions, message, gameSwitcher, uptime_timer, musicPlayer, currentPlaylist global logger, settings, poll_sessions logger = loggerSetup() dataIO.logger = logger dataIO.migration() dataIO.createEmptyFiles() settings = dataIO.loadAndCheckSettings() loadDataFromFiles() ball = ["As I see it, yes", "It is certain", "It is decidedly so", "Most likely", "Outlook good", "Signs point to yes", "Without a doubt", "Yes", "Yes – definitely", "You may rely on it", "Reply hazy, try again", "Ask again later", "Better not tell you now", "Cannot predict now", "Concentrate and ask again", "Don't count on it", "My reply is no", "My sources say no", "Outlook not so good", "Very doubtful"] greetings = ["Hey.", "Yes?", "Hi.", "I'm listening.", "Hello.", "I'm here."] greetings_caps = ["DON'T SCREAM", "WHAT", "WHAT IS IT?!", "ì_ì", "NO CAPS LOCK"] stopwatches = {} trivia_sessions = [] poll_sessions = [] message = "" gameSwitcher = botPlays() if "economy" in modules: economy.client = client economy.initialize() uptime_timer = int(time.perf_counter()) musicPlayer = None currentPlaylist = None loop.create_task(twitchAlert()) #client.run(settings["EMAIL"], settings["PASSWORD"]) yield from client.login(settings["EMAIL"], settings["PASSWORD"]) yield from client.connect() if __name__ == '__main__': loop = asyncio.get_event_loop() try: loop.run_until_complete(main()) except discord.LoginFailure: logger.error("The credentials you put in settings.json are wrong. 
Take a look.") except Exception as e: logger.error(e) loop.run_until_complete(client.logout()) finally: loop.close()
codeparrot/github-code-clean
# -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Deleting field 'VCMBirthRecord.SettlementCode' db.delete_column(u'source_data_vcmbirthrecord', 'SettlementCode') # Deleting field 'VCMBirthRecord.HouseHoldNumber' db.delete_column(u'source_data_vcmbirthrecord', 'HouseHoldNumber') # Deleting field 'VCMBirthRecord.VCMRILink' db.delete_column(u'source_data_vcmbirthrecord', 'VCMRILink') # Deleting field 'VCMBirthRecord.NameOfChild' db.delete_column(u'source_data_vcmbirthrecord', 'NameOfChild') # Deleting field 'VCMBirthRecord.DOB' db.delete_column(u'source_data_vcmbirthrecord', 'DOB') # Deleting field 'VCMBirthRecord.VCMNameCAttended' db.delete_column(u'source_data_vcmbirthrecord', 'VCMNameCAttended') # Deleting field 'VCMBirthRecord.meta_instanceID' db.delete_column(u'source_data_vcmbirthrecord', 'meta_instanceID') # Deleting field 'VCMBirthRecord.VCM0Dose' db.delete_column(u'source_data_vcmbirthrecord', 'VCM0Dose') # Deleting field 'VCMBirthRecord.SubmissionDate' db.delete_column(u'source_data_vcmbirthrecord', 'SubmissionDate') # Deleting field 'VCMBirthRecord.DateOfReport' db.delete_column(u'source_data_vcmbirthrecord', 'DateOfReport') # Deleting field 'VCMBirthRecord.KEY' db.delete_column(u'source_data_vcmbirthrecord', 'KEY') # Deleting field 'VCMBirthRecord.DateReport' db.delete_column(u'source_data_vcmbirthrecord', 'DateReport') # Adding field 'VCMBirthRecord.submissiondate' db.add_column(u'source_data_vcmbirthrecord', 'submissiondate', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMBirthRecord.dateofreport' db.add_column(u'source_data_vcmbirthrecord', 'dateofreport', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMBirthRecord.datereport' 
db.add_column(u'source_data_vcmbirthrecord', 'datereport', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMBirthRecord.settlementcode' db.add_column(u'source_data_vcmbirthrecord', 'settlementcode', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMBirthRecord.householdnumber' db.add_column(u'source_data_vcmbirthrecord', 'householdnumber', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMBirthRecord.dob' db.add_column(u'source_data_vcmbirthrecord', 'dob', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMBirthRecord.nameofchild' db.add_column(u'source_data_vcmbirthrecord', 'nameofchild', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMBirthRecord.vcm0dose' db.add_column(u'source_data_vcmbirthrecord', 'vcm0dose', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMBirthRecord.vcmrilink' db.add_column(u'source_data_vcmbirthrecord', 'vcmrilink', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMBirthRecord.vcmnamecattended' db.add_column(u'source_data_vcmbirthrecord', 'vcmnamecattended', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMBirthRecord.meta_instanceid' db.add_column(u'source_data_vcmbirthrecord', 'meta_instanceid', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMBirthRecord.key' db.add_column(u'source_data_vcmbirthrecord', 'key', self.gf('django.db.models.fields.CharField')(default=1, unique=True, max_length=255), keep_default=False) # Deleting field 'VCMSummaryNew.Tot_2_11Months' 
db.delete_column(u'source_data_vcmsummarynew', 'Tot_2_11Months') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_AgedOutM' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_AgedOutM') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_RelBeliefsF' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_RelBeliefsF') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_AgedOutF' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_AgedOutF') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_RelBeliefsM' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_RelBeliefsM') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_SideEffectsF' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_SideEffectsF') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_SideEffectsM' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_SideEffectsM') # Deleting field 'VCMSummaryNew.CensusNewBornsF' db.delete_column(u'source_data_vcmsummarynew', 'CensusNewBornsF') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_SocEventM' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_SocEventM') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_UnhappyWTeamM' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_UnhappyWTeamM') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_NoPlusesF' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_NoPlusesF') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_NoFeltNeedF' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_NoFeltNeedF') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_SocEventF' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_SocEventF') # Deleting field 'VCMSummaryNew.CensusNewBornsM' db.delete_column(u'source_data_vcmsummarynew', 'CensusNewBornsM') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_NoFeltNeedM' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_NoFeltNeedM') # 
Deleting field 'VCMSummaryNew.group_msd_chd_Msd_UnhappyWTeamF' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_UnhappyWTeamF') # Deleting field 'VCMSummaryNew.group_spec_events_Spec_AFPCase' db.delete_column(u'source_data_vcmsummarynew', 'group_spec_events_Spec_AFPCase') # Deleting field 'VCMSummaryNew.Census2_11MoM' db.delete_column(u'source_data_vcmsummarynew', 'Census2_11MoM') # Deleting field 'VCMSummaryNew.Census2_11MoF' db.delete_column(u'source_data_vcmsummarynew', 'Census2_11MoF') # Deleting field 'VCMSummaryNew.group_spec_events_Spec_OtherDisease' db.delete_column(u'source_data_vcmsummarynew', 'group_spec_events_Spec_OtherDisease') # Deleting field 'VCMSummaryNew.Date_Implement' db.delete_column(u'source_data_vcmsummarynew', 'Date_Implement') # Deleting field 'VCMSummaryNew.meta_instanceID' db.delete_column(u'source_data_vcmsummarynew', 'meta_instanceID') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_NoGovtServicesM' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_NoGovtServicesM') # Deleting field 'VCMSummaryNew.group_msd_chd_Tot_Missed_Check' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Tot_Missed_Check') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_NoGovtServicesF' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_NoGovtServicesF') # Deleting field 'VCMSummaryNew.Tot_Newborns' db.delete_column(u'source_data_vcmsummarynew', 'Tot_Newborns') # Deleting field 'VCMSummaryNew.Tot_Vax12_59Mo' db.delete_column(u'source_data_vcmsummarynew', 'Tot_Vax12_59Mo') # Deleting field 'VCMSummaryNew.Vax12_59MoM' db.delete_column(u'source_data_vcmsummarynew', 'Vax12_59MoM') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_SchoolF' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_SchoolF') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_SchoolM' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_SchoolM') # Deleting field 
'VCMSummaryNew.group_spec_events_Spec_VCMAttendedNCer' db.delete_column(u'source_data_vcmsummarynew', 'group_spec_events_Spec_VCMAttendedNCer') # Deleting field 'VCMSummaryNew.Vax2_11MoF' db.delete_column(u'source_data_vcmsummarynew', 'Vax2_11MoF') # Deleting field 'VCMSummaryNew.group_spec_events_Spec_PregnantMother' db.delete_column(u'source_data_vcmsummarynew', 'group_spec_events_Spec_PregnantMother') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_PlaygroundM' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_PlaygroundM') # Deleting field 'VCMSummaryNew.Vax2_11MoM' db.delete_column(u'source_data_vcmsummarynew', 'Vax2_11MoM') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_PlaygroundF' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_PlaygroundF') # Deleting field 'VCMSummaryNew.group_spec_events_Spec_RIReferral' db.delete_column(u'source_data_vcmsummarynew', 'group_spec_events_Spec_RIReferral') # Deleting field 'VCMSummaryNew.SubmissionDate' db.delete_column(u'source_data_vcmsummarynew', 'SubmissionDate') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_OtherProtectionF' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_OtherProtectionF') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_OtherProtectionM' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_OtherProtectionM') # Deleting field 'VCMSummaryNew.group_spec_events_Spec_Newborn' db.delete_column(u'source_data_vcmsummarynew', 'group_spec_events_Spec_Newborn') # Deleting field 'VCMSummaryNew.group_spec_events_Spec_ZeroDose' db.delete_column(u'source_data_vcmsummarynew', 'group_spec_events_Spec_ZeroDose') # Deleting field 'VCMSummaryNew.Tot_Vax2_11Mo' db.delete_column(u'source_data_vcmsummarynew', 'Tot_Vax2_11Mo') # Deleting field 'VCMSummaryNew.group_spec_events_Spec_CMAMReferral' db.delete_column(u'source_data_vcmsummarynew', 'group_spec_events_Spec_CMAMReferral') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_SecurityM' 
db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_SecurityM') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_SecurityF' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_SecurityF') # Deleting field 'VCMSummaryNew.Tot_Missed' db.delete_column(u'source_data_vcmsummarynew', 'Tot_Missed') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_TooManyRoundsF' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_TooManyRoundsF') # Deleting field 'VCMSummaryNew.Tot_Vax' db.delete_column(u'source_data_vcmsummarynew', 'Tot_Vax') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_TooManyRoundsM' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_TooManyRoundsM') # Deleting field 'VCMSummaryNew.KEY' db.delete_column(u'source_data_vcmsummarynew', 'KEY') # Deleting field 'VCMSummaryNew.group_spec_events_Spec_FIC' db.delete_column(u'source_data_vcmsummarynew', 'group_spec_events_Spec_FIC') # Deleting field 'VCMSummaryNew.Vax12_59MoF' db.delete_column(u'source_data_vcmsummarynew', 'Vax12_59MoF') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_MarketM' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_MarketM') # Deleting field 'VCMSummaryNew.Tot_Census' db.delete_column(u'source_data_vcmsummarynew', 'Tot_Census') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_MarketF' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_MarketF') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_HHNotVisitedF' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_HHNotVisitedF') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_HHNotVisitedM' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_HHNotVisitedM') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_NoConsentM' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_NoConsentM') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_PolioUncommonM' db.delete_column(u'source_data_vcmsummarynew', 
'group_msd_chd_Msd_PolioUncommonM') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_ChildDiedF' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_ChildDiedF') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_PolDiffsM' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_PolDiffsM') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_PolioUncommonF' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_PolioUncommonF') # Deleting field 'VCMSummaryNew.Tot_VaxNewBorn' db.delete_column(u'source_data_vcmsummarynew', 'Tot_VaxNewBorn') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_PolDiffsF' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_PolDiffsF') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_ChildDiedM' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_ChildDiedM') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_NoPlusesM' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_NoPlusesM') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_FarmM' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_FarmM') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_FarmF' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_FarmF') # Deleting field 'VCMSummaryNew.SettlementCode' db.delete_column(u'source_data_vcmsummarynew', 'SettlementCode') # Deleting field 'VCMSummaryNew.VaxNewBornsM' db.delete_column(u'source_data_vcmsummarynew', 'VaxNewBornsM') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_FamilyMovedF' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_FamilyMovedF') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_FamilyMovedM' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_FamilyMovedM') # Deleting field 'VCMSummaryNew.VaxNewBornsF' db.delete_column(u'source_data_vcmsummarynew', 'VaxNewBornsF') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_NoConsentF' db.delete_column(u'source_data_vcmsummarynew', 
'group_msd_chd_Msd_NoConsentF') # Deleting field 'VCMSummaryNew.Tot_12_59Months' db.delete_column(u'source_data_vcmsummarynew', 'Tot_12_59Months') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_PolioHasCureM' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_PolioHasCureM') # Deleting field 'VCMSummaryNew.Census12_59MoM' db.delete_column(u'source_data_vcmsummarynew', 'Census12_59MoM') # Deleting field 'VCMSummaryNew.DateOfReport' db.delete_column(u'source_data_vcmsummarynew', 'DateOfReport') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_ChildSickM' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_ChildSickM') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_PolioHasCureF' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_PolioHasCureF') # Deleting field 'VCMSummaryNew.group_msd_chd_Msd_ChildSickF' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_ChildSickF') # Deleting field 'VCMSummaryNew.group_spec_events_Spec_MslsCase' db.delete_column(u'source_data_vcmsummarynew', 'group_spec_events_Spec_MslsCase') # Deleting field 'VCMSummaryNew.Census12_59MoF' db.delete_column(u'source_data_vcmsummarynew', 'Census12_59MoF') # Adding field 'VCMSummaryNew.submissiondate' db.add_column(u'source_data_vcmsummarynew', 'submissiondate', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.dateofreport' db.add_column(u'source_data_vcmsummarynew', 'dateofreport', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.date_implement' db.add_column(u'source_data_vcmsummarynew', 'date_implement', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.settlementcode' db.add_column(u'source_data_vcmsummarynew', 'settlementcode', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), 
keep_default=False) # Adding field 'VCMSummaryNew.censusnewbornsf' db.add_column(u'source_data_vcmsummarynew', 'censusnewbornsf', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.censusnewbornsm' db.add_column(u'source_data_vcmsummarynew', 'censusnewbornsm', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.tot_newborns' db.add_column(u'source_data_vcmsummarynew', 'tot_newborns', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.census2_11mof' db.add_column(u'source_data_vcmsummarynew', 'census2_11mof', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.census2_11mom' db.add_column(u'source_data_vcmsummarynew', 'census2_11mom', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.tot_2_11months' db.add_column(u'source_data_vcmsummarynew', 'tot_2_11months', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.census12_59mof' db.add_column(u'source_data_vcmsummarynew', 'census12_59mof', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.census12_59mom' db.add_column(u'source_data_vcmsummarynew', 'census12_59mom', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.tot_12_59months' db.add_column(u'source_data_vcmsummarynew', 'tot_12_59months', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.tot_census' db.add_column(u'source_data_vcmsummarynew', 'tot_census', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), 
keep_default=False) # Adding field 'VCMSummaryNew.vaxnewbornsf' db.add_column(u'source_data_vcmsummarynew', 'vaxnewbornsf', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.vaxnewbornsm' db.add_column(u'source_data_vcmsummarynew', 'vaxnewbornsm', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.tot_vaxnewborn' db.add_column(u'source_data_vcmsummarynew', 'tot_vaxnewborn', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.vax2_11mof' db.add_column(u'source_data_vcmsummarynew', 'vax2_11mof', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.vax2_11mom' db.add_column(u'source_data_vcmsummarynew', 'vax2_11mom', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.tot_vax2_11mo' db.add_column(u'source_data_vcmsummarynew', 'tot_vax2_11mo', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.vax12_59mof' db.add_column(u'source_data_vcmsummarynew', 'vax12_59mof', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.vax12_59mom' db.add_column(u'source_data_vcmsummarynew', 'vax12_59mom', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.tot_vax12_59mo' db.add_column(u'source_data_vcmsummarynew', 'tot_vax12_59mo', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.tot_vax' db.add_column(u'source_data_vcmsummarynew', 'tot_vax', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 
'VCMSummaryNew.tot_missed' db.add_column(u'source_data_vcmsummarynew', 'tot_missed', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_playgroundf' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_playgroundf', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_playgroundm' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_playgroundm', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_soceventf' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_soceventf', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_soceventm' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_soceventm', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_marketf' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_marketf', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_marketm' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_marketm', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_farmf' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_farmf', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_farmm' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_farmm', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 
'VCMSummaryNew.group_msd_chd_msd_schoolf' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_schoolf', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_schoolm' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_schoolm', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_childsickf' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_childsickf', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_childsickm' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_childsickm', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_sideeffectsf' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_sideeffectsf', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_sideeffectsm' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_sideeffectsm', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_nofeltneedf' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_nofeltneedf', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_nofeltneedm' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_nofeltneedm', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_toomanyroundsf' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_toomanyroundsf', 
self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_toomanyroundsm' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_toomanyroundsm', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_relbeliefsf' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_relbeliefsf', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_relbeliefsm' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_relbeliefsm', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_poldiffsf' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_poldiffsf', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_poldiffsm' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_poldiffsm', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_unhappywteamf' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_unhappywteamf', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_unhappywteamm' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_unhappywteamm', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_noplusesf' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_noplusesf', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 
'VCMSummaryNew.group_msd_chd_msd_noplusesm' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_noplusesm', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_nogovtservicesf' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_nogovtservicesf', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_nogovtservicesm' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_nogovtservicesm', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_poliouncommonf' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_poliouncommonf', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_poliouncommonm' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_poliouncommonm', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_poliohascuref' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_poliohascuref', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_poliohascurem' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_poliohascurem', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_otherprotectionf' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_otherprotectionf', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_otherprotectionm' db.add_column(u'source_data_vcmsummarynew', 
'group_msd_chd_msd_otherprotectionm', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_noconsentf' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_noconsentf', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_noconsentm' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_noconsentm', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_hhnotvisitedf' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_hhnotvisitedf', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_hhnotvisitedm' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_hhnotvisitedm', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_securityf' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_securityf', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_securitym' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_securitym', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_agedoutf' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_agedoutf', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_agedoutm' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_agedoutm', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 
'VCMSummaryNew.group_msd_chd_msd_familymovedf' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_familymovedf', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_familymovedm' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_familymovedm', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_childdiedf' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_childdiedf', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_msd_childdiedm' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_childdiedm', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_tot_missed_check' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_tot_missed_check', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_spec_events_spec_zerodose' db.add_column(u'source_data_vcmsummarynew', 'group_spec_events_spec_zerodose', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_spec_events_spec_pregnantmother' db.add_column(u'source_data_vcmsummarynew', 'group_spec_events_spec_pregnantmother', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_spec_events_spec_newborn' db.add_column(u'source_data_vcmsummarynew', 'group_spec_events_spec_newborn', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_spec_events_spec_vcmattendedncer' db.add_column(u'source_data_vcmsummarynew', 
'group_spec_events_spec_vcmattendedncer', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_spec_events_spec_cmamreferral' db.add_column(u'source_data_vcmsummarynew', 'group_spec_events_spec_cmamreferral', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_spec_events_spec_rireferral' db.add_column(u'source_data_vcmsummarynew', 'group_spec_events_spec_rireferral', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_spec_events_spec_afpcase' db.add_column(u'source_data_vcmsummarynew', 'group_spec_events_spec_afpcase', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_spec_events_spec_mslscase' db.add_column(u'source_data_vcmsummarynew', 'group_spec_events_spec_mslscase', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_spec_events_spec_otherdisease' db.add_column(u'source_data_vcmsummarynew', 'group_spec_events_spec_otherdisease', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_spec_events_spec_fic' db.add_column(u'source_data_vcmsummarynew', 'group_spec_events_spec_fic', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.meta_instanceid' db.add_column(u'source_data_vcmsummarynew', 'meta_instanceid', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.key' db.add_column(u'source_data_vcmsummarynew', 'key', self.gf('django.db.models.fields.CharField')(default=1, unique=True, max_length=255), keep_default=False) def backwards(self, orm): # User chose to not deal 
with backwards NULL issues for 'VCMBirthRecord.SettlementCode' raise RuntimeError("Cannot reverse this migration. 'VCMBirthRecord.SettlementCode' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMBirthRecord.SettlementCode' db.add_column(u'source_data_vcmbirthrecord', 'SettlementCode', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMBirthRecord.HouseHoldNumber' raise RuntimeError("Cannot reverse this migration. 'VCMBirthRecord.HouseHoldNumber' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMBirthRecord.HouseHoldNumber' db.add_column(u'source_data_vcmbirthrecord', 'HouseHoldNumber', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMBirthRecord.VCMRILink' raise RuntimeError("Cannot reverse this migration. 'VCMBirthRecord.VCMRILink' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMBirthRecord.VCMRILink' db.add_column(u'source_data_vcmbirthrecord', 'VCMRILink', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMBirthRecord.NameOfChild' raise RuntimeError("Cannot reverse this migration. 'VCMBirthRecord.NameOfChild' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMBirthRecord.NameOfChild' db.add_column(u'source_data_vcmbirthrecord', 'NameOfChild', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMBirthRecord.DOB' raise RuntimeError("Cannot reverse this migration. 
'VCMBirthRecord.DOB' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMBirthRecord.DOB' db.add_column(u'source_data_vcmbirthrecord', 'DOB', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMBirthRecord.VCMNameCAttended' raise RuntimeError("Cannot reverse this migration. 'VCMBirthRecord.VCMNameCAttended' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMBirthRecord.VCMNameCAttended' db.add_column(u'source_data_vcmbirthrecord', 'VCMNameCAttended', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMBirthRecord.meta_instanceID' raise RuntimeError("Cannot reverse this migration. 'VCMBirthRecord.meta_instanceID' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMBirthRecord.meta_instanceID' db.add_column(u'source_data_vcmbirthrecord', 'meta_instanceID', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMBirthRecord.VCM0Dose' raise RuntimeError("Cannot reverse this migration. 'VCMBirthRecord.VCM0Dose' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMBirthRecord.VCM0Dose' db.add_column(u'source_data_vcmbirthrecord', 'VCM0Dose', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMBirthRecord.SubmissionDate' raise RuntimeError("Cannot reverse this migration. 
'VCMBirthRecord.SubmissionDate' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMBirthRecord.SubmissionDate' db.add_column(u'source_data_vcmbirthrecord', 'SubmissionDate', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMBirthRecord.DateOfReport' raise RuntimeError("Cannot reverse this migration. 'VCMBirthRecord.DateOfReport' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMBirthRecord.DateOfReport' db.add_column(u'source_data_vcmbirthrecord', 'DateOfReport', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMBirthRecord.KEY' raise RuntimeError("Cannot reverse this migration. 'VCMBirthRecord.KEY' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMBirthRecord.KEY' db.add_column(u'source_data_vcmbirthrecord', 'KEY', self.gf('django.db.models.fields.CharField')(max_length=255, unique=True), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMBirthRecord.DateReport' raise RuntimeError("Cannot reverse this migration. 
'VCMBirthRecord.DateReport' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMBirthRecord.DateReport' db.add_column(u'source_data_vcmbirthrecord', 'DateReport', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # Deleting field 'VCMBirthRecord.submissiondate' db.delete_column(u'source_data_vcmbirthrecord', 'submissiondate') # Deleting field 'VCMBirthRecord.dateofreport' db.delete_column(u'source_data_vcmbirthrecord', 'dateofreport') # Deleting field 'VCMBirthRecord.datereport' db.delete_column(u'source_data_vcmbirthrecord', 'datereport') # Deleting field 'VCMBirthRecord.settlementcode' db.delete_column(u'source_data_vcmbirthrecord', 'settlementcode') # Deleting field 'VCMBirthRecord.householdnumber' db.delete_column(u'source_data_vcmbirthrecord', 'householdnumber') # Deleting field 'VCMBirthRecord.dob' db.delete_column(u'source_data_vcmbirthrecord', 'dob') # Deleting field 'VCMBirthRecord.nameofchild' db.delete_column(u'source_data_vcmbirthrecord', 'nameofchild') # Deleting field 'VCMBirthRecord.vcm0dose' db.delete_column(u'source_data_vcmbirthrecord', 'vcm0dose') # Deleting field 'VCMBirthRecord.vcmrilink' db.delete_column(u'source_data_vcmbirthrecord', 'vcmrilink') # Deleting field 'VCMBirthRecord.vcmnamecattended' db.delete_column(u'source_data_vcmbirthrecord', 'vcmnamecattended') # Deleting field 'VCMBirthRecord.meta_instanceid' db.delete_column(u'source_data_vcmbirthrecord', 'meta_instanceid') # Deleting field 'VCMBirthRecord.key' db.delete_column(u'source_data_vcmbirthrecord', 'key') # Adding field 'VCMSummaryNew.Tot_2_11Months' db.add_column(u'source_data_vcmsummarynew', 'Tot_2_11Months', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_Msd_AgedOutM' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_AgedOutM', 
self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_Msd_RelBeliefsF' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_RelBeliefsF', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_Msd_AgedOutF' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_AgedOutF', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_Msd_RelBeliefsM' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_RelBeliefsM', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_Msd_SideEffectsF' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_SideEffectsF', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_Msd_SideEffectsM' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_SideEffectsM', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.CensusNewBornsF' db.add_column(u'source_data_vcmsummarynew', 'CensusNewBornsF', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_Msd_SocEventM' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_SocEventM', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_Msd_UnhappyWTeamM' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_UnhappyWTeamM', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_Msd_NoPlusesF' 
db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_NoPlusesF', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_Msd_NoFeltNeedF' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_NoFeltNeedF', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_Msd_SocEventF' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_SocEventF', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.CensusNewBornsM' db.add_column(u'source_data_vcmsummarynew', 'CensusNewBornsM', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # Adding field 'VCMSummaryNew.group_msd_chd_Msd_NoFeltNeedM' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_NoFeltNeedM', self.gf('django.db.models.fields.CharField')(default=1, max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_msd_chd_Msd_UnhappyWTeamF' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.group_msd_chd_Msd_UnhappyWTeamF' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_msd_chd_Msd_UnhappyWTeamF' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_UnhappyWTeamF', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_spec_events_Spec_AFPCase' raise RuntimeError("Cannot reverse this migration. 
'VCMSummaryNew.group_spec_events_Spec_AFPCase' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_spec_events_Spec_AFPCase' db.add_column(u'source_data_vcmsummarynew', 'group_spec_events_Spec_AFPCase', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.Census2_11MoM' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.Census2_11MoM' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.Census2_11MoM' db.add_column(u'source_data_vcmsummarynew', 'Census2_11MoM', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.Census2_11MoF' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.Census2_11MoF' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.Census2_11MoF' db.add_column(u'source_data_vcmsummarynew', 'Census2_11MoF', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_spec_events_Spec_OtherDisease' raise RuntimeError("Cannot reverse this migration. 
'VCMSummaryNew.group_spec_events_Spec_OtherDisease' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_spec_events_Spec_OtherDisease' db.add_column(u'source_data_vcmsummarynew', 'group_spec_events_Spec_OtherDisease', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.Date_Implement' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.Date_Implement' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.Date_Implement' db.add_column(u'source_data_vcmsummarynew', 'Date_Implement', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.meta_instanceID' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.meta_instanceID' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.meta_instanceID' db.add_column(u'source_data_vcmsummarynew', 'meta_instanceID', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_msd_chd_Msd_NoGovtServicesM' raise RuntimeError("Cannot reverse this migration. 
'VCMSummaryNew.group_msd_chd_Msd_NoGovtServicesM' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_msd_chd_Msd_NoGovtServicesM' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_NoGovtServicesM', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_msd_chd_Tot_Missed_Check' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.group_msd_chd_Tot_Missed_Check' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_msd_chd_Tot_Missed_Check' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Tot_Missed_Check', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_msd_chd_Msd_NoGovtServicesF' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.group_msd_chd_Msd_NoGovtServicesF' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_msd_chd_Msd_NoGovtServicesF' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_NoGovtServicesF', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.Tot_Newborns' raise RuntimeError("Cannot reverse this migration. 
'VCMSummaryNew.Tot_Newborns' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.Tot_Newborns' db.add_column(u'source_data_vcmsummarynew', 'Tot_Newborns', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.Tot_Vax12_59Mo' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.Tot_Vax12_59Mo' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.Tot_Vax12_59Mo' db.add_column(u'source_data_vcmsummarynew', 'Tot_Vax12_59Mo', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.Vax12_59MoM' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.Vax12_59MoM' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.Vax12_59MoM' db.add_column(u'source_data_vcmsummarynew', 'Vax12_59MoM', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_msd_chd_Msd_SchoolF' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.group_msd_chd_Msd_SchoolF' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_msd_chd_Msd_SchoolF' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_SchoolF', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_msd_chd_Msd_SchoolM' raise RuntimeError("Cannot reverse this migration. 
'VCMSummaryNew.group_msd_chd_Msd_SchoolM' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_msd_chd_Msd_SchoolM' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_SchoolM', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_spec_events_Spec_VCMAttendedNCer' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.group_spec_events_Spec_VCMAttendedNCer' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_spec_events_Spec_VCMAttendedNCer' db.add_column(u'source_data_vcmsummarynew', 'group_spec_events_Spec_VCMAttendedNCer', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.Vax2_11MoF' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.Vax2_11MoF' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.Vax2_11MoF' db.add_column(u'source_data_vcmsummarynew', 'Vax2_11MoF', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_spec_events_Spec_PregnantMother' raise RuntimeError("Cannot reverse this migration. 
'VCMSummaryNew.group_spec_events_Spec_PregnantMother' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_spec_events_Spec_PregnantMother' db.add_column(u'source_data_vcmsummarynew', 'group_spec_events_Spec_PregnantMother', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_msd_chd_Msd_PlaygroundM' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.group_msd_chd_Msd_PlaygroundM' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_msd_chd_Msd_PlaygroundM' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_PlaygroundM', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.Vax2_11MoM' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.Vax2_11MoM' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.Vax2_11MoM' db.add_column(u'source_data_vcmsummarynew', 'Vax2_11MoM', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_msd_chd_Msd_PlaygroundF' raise RuntimeError("Cannot reverse this migration. 
'VCMSummaryNew.group_msd_chd_Msd_PlaygroundF' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_msd_chd_Msd_PlaygroundF' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_PlaygroundF', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_spec_events_Spec_RIReferral' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.group_spec_events_Spec_RIReferral' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_spec_events_Spec_RIReferral' db.add_column(u'source_data_vcmsummarynew', 'group_spec_events_Spec_RIReferral', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.SubmissionDate' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.SubmissionDate' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.SubmissionDate' db.add_column(u'source_data_vcmsummarynew', 'SubmissionDate', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_msd_chd_Msd_OtherProtectionF' raise RuntimeError("Cannot reverse this migration. 
'VCMSummaryNew.group_msd_chd_Msd_OtherProtectionF' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_msd_chd_Msd_OtherProtectionF' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_OtherProtectionF', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_msd_chd_Msd_OtherProtectionM' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.group_msd_chd_Msd_OtherProtectionM' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_msd_chd_Msd_OtherProtectionM' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_OtherProtectionM', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_spec_events_Spec_Newborn' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.group_spec_events_Spec_Newborn' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_spec_events_Spec_Newborn' db.add_column(u'source_data_vcmsummarynew', 'group_spec_events_Spec_Newborn', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_spec_events_Spec_ZeroDose' raise RuntimeError("Cannot reverse this migration. 
'VCMSummaryNew.group_spec_events_Spec_ZeroDose' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_spec_events_Spec_ZeroDose' db.add_column(u'source_data_vcmsummarynew', 'group_spec_events_Spec_ZeroDose', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.Tot_Vax2_11Mo' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.Tot_Vax2_11Mo' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.Tot_Vax2_11Mo' db.add_column(u'source_data_vcmsummarynew', 'Tot_Vax2_11Mo', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_spec_events_Spec_CMAMReferral' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.group_spec_events_Spec_CMAMReferral' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_spec_events_Spec_CMAMReferral' db.add_column(u'source_data_vcmsummarynew', 'group_spec_events_Spec_CMAMReferral', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_msd_chd_Msd_SecurityM' raise RuntimeError("Cannot reverse this migration. 
'VCMSummaryNew.group_msd_chd_Msd_SecurityM' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_msd_chd_Msd_SecurityM' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_SecurityM', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_msd_chd_Msd_SecurityF' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.group_msd_chd_Msd_SecurityF' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_msd_chd_Msd_SecurityF' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_SecurityF', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.Tot_Missed' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.Tot_Missed' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.Tot_Missed' db.add_column(u'source_data_vcmsummarynew', 'Tot_Missed', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_msd_chd_Msd_TooManyRoundsF' raise RuntimeError("Cannot reverse this migration. 
'VCMSummaryNew.group_msd_chd_Msd_TooManyRoundsF' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_msd_chd_Msd_TooManyRoundsF' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_TooManyRoundsF', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.Tot_Vax' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.Tot_Vax' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.Tot_Vax' db.add_column(u'source_data_vcmsummarynew', 'Tot_Vax', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_msd_chd_Msd_TooManyRoundsM' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.group_msd_chd_Msd_TooManyRoundsM' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_msd_chd_Msd_TooManyRoundsM' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_TooManyRoundsM', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.KEY' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.KEY' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.KEY' db.add_column(u'source_data_vcmsummarynew', 'KEY', self.gf('django.db.models.fields.CharField')(max_length=255, unique=True), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_spec_events_Spec_FIC' raise RuntimeError("Cannot reverse this migration. 
'VCMSummaryNew.group_spec_events_Spec_FIC' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_spec_events_Spec_FIC' db.add_column(u'source_data_vcmsummarynew', 'group_spec_events_Spec_FIC', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.Vax12_59MoF' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.Vax12_59MoF' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.Vax12_59MoF' db.add_column(u'source_data_vcmsummarynew', 'Vax12_59MoF', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_msd_chd_Msd_MarketM' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.group_msd_chd_Msd_MarketM' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_msd_chd_Msd_MarketM' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_MarketM', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.Tot_Census' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.Tot_Census' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.Tot_Census' db.add_column(u'source_data_vcmsummarynew', 'Tot_Census', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_msd_chd_Msd_MarketF' raise RuntimeError("Cannot reverse this migration. 
'VCMSummaryNew.group_msd_chd_Msd_MarketF' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_msd_chd_Msd_MarketF' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_MarketF', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_msd_chd_Msd_HHNotVisitedF' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.group_msd_chd_Msd_HHNotVisitedF' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_msd_chd_Msd_HHNotVisitedF' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_HHNotVisitedF', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_msd_chd_Msd_HHNotVisitedM' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.group_msd_chd_Msd_HHNotVisitedM' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_msd_chd_Msd_HHNotVisitedM' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_HHNotVisitedM', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_msd_chd_Msd_NoConsentM' raise RuntimeError("Cannot reverse this migration. 
'VCMSummaryNew.group_msd_chd_Msd_NoConsentM' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_msd_chd_Msd_NoConsentM' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_NoConsentM', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_msd_chd_Msd_PolioUncommonM' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.group_msd_chd_Msd_PolioUncommonM' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_msd_chd_Msd_PolioUncommonM' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_PolioUncommonM', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_msd_chd_Msd_ChildDiedF' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.group_msd_chd_Msd_ChildDiedF' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_msd_chd_Msd_ChildDiedF' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_ChildDiedF', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_msd_chd_Msd_PolDiffsM' raise RuntimeError("Cannot reverse this migration. 
'VCMSummaryNew.group_msd_chd_Msd_PolDiffsM' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_msd_chd_Msd_PolDiffsM' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_PolDiffsM', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_msd_chd_Msd_PolioUncommonF' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.group_msd_chd_Msd_PolioUncommonF' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_msd_chd_Msd_PolioUncommonF' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_PolioUncommonF', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.Tot_VaxNewBorn' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.Tot_VaxNewBorn' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.Tot_VaxNewBorn' db.add_column(u'source_data_vcmsummarynew', 'Tot_VaxNewBorn', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_msd_chd_Msd_PolDiffsF' raise RuntimeError("Cannot reverse this migration. 
'VCMSummaryNew.group_msd_chd_Msd_PolDiffsF' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_msd_chd_Msd_PolDiffsF' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_PolDiffsF', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_msd_chd_Msd_ChildDiedM' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.group_msd_chd_Msd_ChildDiedM' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_msd_chd_Msd_ChildDiedM' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_ChildDiedM', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_msd_chd_Msd_NoPlusesM' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.group_msd_chd_Msd_NoPlusesM' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_msd_chd_Msd_NoPlusesM' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_NoPlusesM', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_msd_chd_Msd_FarmM' raise RuntimeError("Cannot reverse this migration. 
'VCMSummaryNew.group_msd_chd_Msd_FarmM' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_msd_chd_Msd_FarmM' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_FarmM', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_msd_chd_Msd_FarmF' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.group_msd_chd_Msd_FarmF' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_msd_chd_Msd_FarmF' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_FarmF', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.SettlementCode' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.SettlementCode' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.SettlementCode' db.add_column(u'source_data_vcmsummarynew', 'SettlementCode', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.VaxNewBornsM' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.VaxNewBornsM' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.VaxNewBornsM' db.add_column(u'source_data_vcmsummarynew', 'VaxNewBornsM', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_msd_chd_Msd_FamilyMovedF' raise RuntimeError("Cannot reverse this migration. 
'VCMSummaryNew.group_msd_chd_Msd_FamilyMovedF' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_msd_chd_Msd_FamilyMovedF' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_FamilyMovedF', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_msd_chd_Msd_FamilyMovedM' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.group_msd_chd_Msd_FamilyMovedM' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_msd_chd_Msd_FamilyMovedM' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_FamilyMovedM', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.VaxNewBornsF' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.VaxNewBornsF' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.VaxNewBornsF' db.add_column(u'source_data_vcmsummarynew', 'VaxNewBornsF', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_msd_chd_Msd_NoConsentF' raise RuntimeError("Cannot reverse this migration. 
'VCMSummaryNew.group_msd_chd_Msd_NoConsentF' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_msd_chd_Msd_NoConsentF' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_NoConsentF', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.Tot_12_59Months' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.Tot_12_59Months' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.Tot_12_59Months' db.add_column(u'source_data_vcmsummarynew', 'Tot_12_59Months', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_msd_chd_Msd_PolioHasCureM' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.group_msd_chd_Msd_PolioHasCureM' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_msd_chd_Msd_PolioHasCureM' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_PolioHasCureM', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.Census12_59MoM' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.Census12_59MoM' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.Census12_59MoM' db.add_column(u'source_data_vcmsummarynew', 'Census12_59MoM', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.DateOfReport' raise RuntimeError("Cannot reverse this migration. 
'VCMSummaryNew.DateOfReport' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.DateOfReport' db.add_column(u'source_data_vcmsummarynew', 'DateOfReport', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_msd_chd_Msd_ChildSickM' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.group_msd_chd_Msd_ChildSickM' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_msd_chd_Msd_ChildSickM' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_ChildSickM', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_msd_chd_Msd_PolioHasCureF' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.group_msd_chd_Msd_PolioHasCureF' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_msd_chd_Msd_PolioHasCureF' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_PolioHasCureF', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_msd_chd_Msd_ChildSickF' raise RuntimeError("Cannot reverse this migration. 
'VCMSummaryNew.group_msd_chd_Msd_ChildSickF' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_msd_chd_Msd_ChildSickF' db.add_column(u'source_data_vcmsummarynew', 'group_msd_chd_Msd_ChildSickF', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.group_spec_events_Spec_MslsCase' raise RuntimeError("Cannot reverse this migration. 'VCMSummaryNew.group_spec_events_Spec_MslsCase' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.group_spec_events_Spec_MslsCase' db.add_column(u'source_data_vcmsummarynew', 'group_spec_events_Spec_MslsCase', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # User chose to not deal with backwards NULL issues for 'VCMSummaryNew.Census12_59MoF' raise RuntimeError("Cannot reverse this migration. 
'VCMSummaryNew.Census12_59MoF' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Adding field 'VCMSummaryNew.Census12_59MoF' db.add_column(u'source_data_vcmsummarynew', 'Census12_59MoF', self.gf('django.db.models.fields.CharField')(max_length=255), keep_default=False) # Deleting field 'VCMSummaryNew.submissiondate' db.delete_column(u'source_data_vcmsummarynew', 'submissiondate') # Deleting field 'VCMSummaryNew.dateofreport' db.delete_column(u'source_data_vcmsummarynew', 'dateofreport') # Deleting field 'VCMSummaryNew.date_implement' db.delete_column(u'source_data_vcmsummarynew', 'date_implement') # Deleting field 'VCMSummaryNew.settlementcode' db.delete_column(u'source_data_vcmsummarynew', 'settlementcode') # Deleting field 'VCMSummaryNew.censusnewbornsf' db.delete_column(u'source_data_vcmsummarynew', 'censusnewbornsf') # Deleting field 'VCMSummaryNew.censusnewbornsm' db.delete_column(u'source_data_vcmsummarynew', 'censusnewbornsm') # Deleting field 'VCMSummaryNew.tot_newborns' db.delete_column(u'source_data_vcmsummarynew', 'tot_newborns') # Deleting field 'VCMSummaryNew.census2_11mof' db.delete_column(u'source_data_vcmsummarynew', 'census2_11mof') # Deleting field 'VCMSummaryNew.census2_11mom' db.delete_column(u'source_data_vcmsummarynew', 'census2_11mom') # Deleting field 'VCMSummaryNew.tot_2_11months' db.delete_column(u'source_data_vcmsummarynew', 'tot_2_11months') # Deleting field 'VCMSummaryNew.census12_59mof' db.delete_column(u'source_data_vcmsummarynew', 'census12_59mof') # Deleting field 'VCMSummaryNew.census12_59mom' db.delete_column(u'source_data_vcmsummarynew', 'census12_59mom') # Deleting field 'VCMSummaryNew.tot_12_59months' db.delete_column(u'source_data_vcmsummarynew', 'tot_12_59months') # Deleting field 'VCMSummaryNew.tot_census' db.delete_column(u'source_data_vcmsummarynew', 'tot_census') # Deleting field 'VCMSummaryNew.vaxnewbornsf' db.delete_column(u'source_data_vcmsummarynew', 
'vaxnewbornsf') # Deleting field 'VCMSummaryNew.vaxnewbornsm' db.delete_column(u'source_data_vcmsummarynew', 'vaxnewbornsm') # Deleting field 'VCMSummaryNew.tot_vaxnewborn' db.delete_column(u'source_data_vcmsummarynew', 'tot_vaxnewborn') # Deleting field 'VCMSummaryNew.vax2_11mof' db.delete_column(u'source_data_vcmsummarynew', 'vax2_11mof') # Deleting field 'VCMSummaryNew.vax2_11mom' db.delete_column(u'source_data_vcmsummarynew', 'vax2_11mom') # Deleting field 'VCMSummaryNew.tot_vax2_11mo' db.delete_column(u'source_data_vcmsummarynew', 'tot_vax2_11mo') # Deleting field 'VCMSummaryNew.vax12_59mof' db.delete_column(u'source_data_vcmsummarynew', 'vax12_59mof') # Deleting field 'VCMSummaryNew.vax12_59mom' db.delete_column(u'source_data_vcmsummarynew', 'vax12_59mom') # Deleting field 'VCMSummaryNew.tot_vax12_59mo' db.delete_column(u'source_data_vcmsummarynew', 'tot_vax12_59mo') # Deleting field 'VCMSummaryNew.tot_vax' db.delete_column(u'source_data_vcmsummarynew', 'tot_vax') # Deleting field 'VCMSummaryNew.tot_missed' db.delete_column(u'source_data_vcmsummarynew', 'tot_missed') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_playgroundf' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_playgroundf') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_playgroundm' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_playgroundm') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_soceventf' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_soceventf') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_soceventm' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_soceventm') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_marketf' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_marketf') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_marketm' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_marketm') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_farmf' 
db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_farmf') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_farmm' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_farmm') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_schoolf' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_schoolf') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_schoolm' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_schoolm') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_childsickf' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_childsickf') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_childsickm' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_childsickm') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_sideeffectsf' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_sideeffectsf') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_sideeffectsm' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_sideeffectsm') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_nofeltneedf' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_nofeltneedf') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_nofeltneedm' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_nofeltneedm') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_toomanyroundsf' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_toomanyroundsf') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_toomanyroundsm' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_toomanyroundsm') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_relbeliefsf' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_relbeliefsf') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_relbeliefsm' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_relbeliefsm') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_poldiffsf' 
db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_poldiffsf') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_poldiffsm' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_poldiffsm') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_unhappywteamf' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_unhappywteamf') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_unhappywteamm' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_unhappywteamm') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_noplusesf' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_noplusesf') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_noplusesm' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_noplusesm') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_nogovtservicesf' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_nogovtservicesf') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_nogovtservicesm' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_nogovtservicesm') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_poliouncommonf' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_poliouncommonf') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_poliouncommonm' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_poliouncommonm') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_poliohascuref' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_poliohascuref') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_poliohascurem' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_poliohascurem') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_otherprotectionf' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_otherprotectionf') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_otherprotectionm' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_otherprotectionm') # Deleting 
field 'VCMSummaryNew.group_msd_chd_msd_noconsentf' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_noconsentf') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_noconsentm' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_noconsentm') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_hhnotvisitedf' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_hhnotvisitedf') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_hhnotvisitedm' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_hhnotvisitedm') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_securityf' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_securityf') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_securitym' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_securitym') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_agedoutf' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_agedoutf') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_agedoutm' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_agedoutm') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_familymovedf' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_familymovedf') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_familymovedm' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_familymovedm') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_childdiedf' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_childdiedf') # Deleting field 'VCMSummaryNew.group_msd_chd_msd_childdiedm' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_msd_childdiedm') # Deleting field 'VCMSummaryNew.group_msd_chd_tot_missed_check' db.delete_column(u'source_data_vcmsummarynew', 'group_msd_chd_tot_missed_check') # Deleting field 'VCMSummaryNew.group_spec_events_spec_zerodose' db.delete_column(u'source_data_vcmsummarynew', 'group_spec_events_spec_zerodose') # Deleting field 
'VCMSummaryNew.group_spec_events_spec_pregnantmother' db.delete_column(u'source_data_vcmsummarynew', 'group_spec_events_spec_pregnantmother') # Deleting field 'VCMSummaryNew.group_spec_events_spec_newborn' db.delete_column(u'source_data_vcmsummarynew', 'group_spec_events_spec_newborn') # Deleting field 'VCMSummaryNew.group_spec_events_spec_vcmattendedncer' db.delete_column(u'source_data_vcmsummarynew', 'group_spec_events_spec_vcmattendedncer') # Deleting field 'VCMSummaryNew.group_spec_events_spec_cmamreferral' db.delete_column(u'source_data_vcmsummarynew', 'group_spec_events_spec_cmamreferral') # Deleting field 'VCMSummaryNew.group_spec_events_spec_rireferral' db.delete_column(u'source_data_vcmsummarynew', 'group_spec_events_spec_rireferral') # Deleting field 'VCMSummaryNew.group_spec_events_spec_afpcase' db.delete_column(u'source_data_vcmsummarynew', 'group_spec_events_spec_afpcase') # Deleting field 'VCMSummaryNew.group_spec_events_spec_mslscase' db.delete_column(u'source_data_vcmsummarynew', 'group_spec_events_spec_mslscase') # Deleting field 'VCMSummaryNew.group_spec_events_spec_otherdisease' db.delete_column(u'source_data_vcmsummarynew', 'group_spec_events_spec_otherdisease') # Deleting field 'VCMSummaryNew.group_spec_events_spec_fic' db.delete_column(u'source_data_vcmsummarynew', 'group_spec_events_spec_fic') # Deleting field 'VCMSummaryNew.meta_instanceid' db.delete_column(u'source_data_vcmsummarynew', 'meta_instanceid') # Deleting field 'VCMSummaryNew.key' db.delete_column(u'source_data_vcmsummarynew', 'key') models = { 'source_data.activityreport': { 'DateRecorded': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'KEY': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'Meta': {'object_name': 'ActivityReport'}, 'SettlementGPS_Accuracy': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'SettlementGPS_Altitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 
'SettlementGPS_Latitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'SettlementGPS_Longitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'SubmissionDate': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'activity': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'cd_attendance': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'cd_hh_pending_issues': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'cd_iec': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'cd_local_leadership_present': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'cd_num_hh_affected': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'cd_num_vaccinated': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'cd_pro_opv_cd': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'cd_resolved': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'cm_VCM_present': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'cm_VCM_sett': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'cm_attendance': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'cm_iec': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'cm_num_caregiver_issues': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'cm_num_husband_issues': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'cm_num_positive': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'cm_num_vaccinated': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 11, 0, 0)'}), 'endtime': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'hc_appropriate_location': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 
'hc_clinician1': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'hc_clinician2': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'hc_crowdcontroller': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'hc_nc_location': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'hc_num_measles': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'hc_num_opv': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'hc_num_patients': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'hc_num_penta': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'hc_opvvaccinator': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'hc_recorder_opv': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'hc_recorder_ri': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'hc_separatetally': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'hc_stockout': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'hc_team_allowances': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'hc_townannouncer': ('django.db.models.fields.CharField', [], {'max_length': '255'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ipds_community_leader_present': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'ipds_issue_reported': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'ipds_issue_resolved': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'ipds_num_children': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'ipds_num_hh': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'ipds_other_issue': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'ipds_team': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'ipds_team_allowances': 
('django.db.models.fields.CharField', [], {'max_length': '255'}), 'lga': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'meta_instanceID': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'names': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}), 'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'settlementname': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'start_time': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'ward': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, 'source_data.clustersupervisor': { 'DateRecorded': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'KEY': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'Meta': {'object_name': 'ClusterSupervisor'}, 'SubmissionDate': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'coord_rfp_meeting': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'coord_smwg_meetings': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'coord_vcm_meeting': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 11, 0, 0)'}), 'end_time': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'fund_transparency': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'hrop_activities_conducted': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'hrop_activities_planned': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'hrop_endorsed': 
('django.db.models.fields.CharField', [], {'max_length': '255'}), 'hrop_implementation': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'hrop_socialdata': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'hrop_special_pop': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'hrop_workplan_aligned': ('django.db.models.fields.CharField', [], {'max_length': '255'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'instruction': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'lga': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'meta_instanceID': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'num_LGAC': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}), 'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'ri_supervision': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'start_time': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'supervisee_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'supervision_location_Accuracy': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'supervision_location_Altitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'supervision_location_Latitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'supervision_location_Longitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'supervisor_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'supervisor_title': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'vcm_birthtracking': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'vcm_data': 
('django.db.models.fields.CharField', [], {'max_length': '255'}), 'vcm_supervision': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, u'source_data.etljob': { 'Meta': {'object_name': 'EtlJob'}, 'date_attempted': ('django.db.models.fields.DateTimeField', [], {}), 'date_completed': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'guid': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}), 'status': ('django.db.models.fields.CharField', [], {'max_length': '10'}), 'task_name': ('django.db.models.fields.CharField', [], {'max_length': '55'}) }, 'source_data.healthcamp': { 'DateRecorded': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'KEY': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'Meta': {'object_name': 'HealthCamp'}, 'SettlementGPS_Accuracy': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'SettlementGPS_Altitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'SettlementGPS_Latitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'SettlementGPS_Longitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'SubmissionDate': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'agencyname': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'appropriate_location': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'clinician1': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'clinician2': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 11, 0, 0)'}), 'crowdcontroller': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'endtime': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'formhub_uuid': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 
'hc_photo': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'hc_stockout': ('django.db.models.fields.CharField', [], {'max_length': '255'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'lga': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'megaphone': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'meta_instanceID': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'names': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'nc_location': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'num_measles': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'num_opv': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'num_patients': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'num_penta': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'opvvaccinator': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}), 'recorder_opv': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'recorder_ri': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'region': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'separatetally': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'settlementname': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'start_time': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'townannouncer': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'userid': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'ward': ('django.db.models.fields.CharField', 
[], {'max_length': '255'}) }, 'source_data.knowthepeople': { 'Brothers': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'CitiesVisited': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'DOB': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'KEY': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'Meta': {'object_name': 'KnowThePeople'}, 'NameOfPax': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'PrefferedCity': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'Sisters': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'State_country': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'SubmissionDate': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 11, 0, 0)'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'meta_instanceID': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}), 'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, 'source_data.paxlistreporttraining': { 'EmailAddr': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'KEY': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'Meta': {'object_name': 'PaxListReportTraining'}, 'NameOfParticipant': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'PhoneNumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'State': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'SubmissionDate': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'TimeStamp': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'Title': 
('django.db.models.fields.CharField', [], {'max_length': '255'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 11, 0, 0)'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'meta_instanceID': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}), 'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, 'source_data.phoneinventory': { 'Asset_number': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'Colour_phone': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'DeviceID': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'KEY': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'LGA': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'Meta': {'object_name': 'PhoneInventory'}, 'Name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'State': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'SubmissionDate': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 11, 0, 0)'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'meta_instanceID': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}), 'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'telephone_no': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, 'source_data.practicevcmbirthrecord': { 'DOB': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'DateOfReport': ('django.db.models.fields.CharField', [], {'max_length': 
'255'}), 'DateReport': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'HouseHoldNumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'KEY': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'Meta': {'object_name': 'PracticeVCMBirthRecord'}, 'NameOfChild': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'SettlementCode': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'SubmissionDate': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'VCM0Dose': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'VCMNameCAttended': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'VCMRILink': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 11, 0, 0)'}), 'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'meta_instanceID': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}), 'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'simserial': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, 'source_data.practicevcmsettcoordinates': { 'DateRecorded': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'KEY': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'Meta': {'object_name': 'PracticeVCMSettCoordinates'}, 'SettlementCode': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'SettlementGPS_Accuracy': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'SettlementGPS_Altitude': 
('django.db.models.fields.CharField', [], {'max_length': '255'}), 'SettlementGPS_Latitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'SettlementGPS_Longitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'SettlementName': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'SubmissionDate': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'VCMName': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'VCMPhone': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 11, 0, 0)'}), 'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'meta_instanceID': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}), 'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'simserial': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, 'source_data.practicevcmsummary': { 'Census12_59MoF': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'Census12_59MoM': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
# codeparrot/github-code-clean  (stray dataset-separator line; commented out -- as a bare expression it would raise NameError)
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Variable class.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import enum # pylint: disable=g-bad-import-order import itertools import functools import os import six from tensorflow.core.framework import attr_value_pb2 from tensorflow.core.framework import variable_pb2 from tensorflow.python import pywrap_tensorflow # pylint: disable=unused-import from tensorflow.python import _pywrap_utils from tensorflow.python.compat import compat as fwd_compat from tensorflow.python.eager import context from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gen_array_ops from tensorflow.python.ops import gen_state_ops from tensorflow.python.ops import gen_math_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import state_ops from tensorflow.python.platform import tf_logging as logging from tensorflow.python.training.tracking import base as trackable from tensorflow.python.util import compat from tensorflow.python.util import object_identity from tensorflow.python.util import tf_should_use from 
tensorflow.python.util.deprecation import deprecated from tensorflow.python.util.tf_export import tf_export def default_variable_creator(_, **kwds): del kwds raise NotImplementedError("variable_scope needs to be imported") def default_variable_creator_v2(_, **kwds): del kwds raise NotImplementedError("variable_scope needs to be imported") def _make_getter(captured_getter, captured_previous): """To avoid capturing loop variables.""" def getter(**kwargs): return captured_getter(captured_previous, **kwargs) return getter @tf_export("VariableSynchronization") class VariableSynchronization(enum.Enum): """Indicates when a distributed variable will be synced. * `AUTO`: Indicates that the synchronization will be determined by the current `DistributionStrategy` (eg. With `MirroredStrategy` this would be `ON_WRITE`). * `NONE`: Indicates that there will only be one copy of the variable, so there is no need to sync. * `ON_WRITE`: Indicates that the variable will be updated across devices every time it is written. * `ON_READ`: Indicates that the variable will be aggregated across devices when it is read (eg. when checkpointing or when evaluating an op that uses the variable). """ AUTO = 0 NONE = 1 ON_WRITE = 2 ON_READ = 3 # LINT.IfChange @tf_export("VariableAggregation", v1=[]) class VariableAggregationV2(enum.Enum): """Indicates how a distributed variable will be aggregated. `tf.distribute.Strategy` distributes a model by making multiple copies (called "replicas") acting data-parallel on different elements of the input batch. When performing some variable-update operation, say `var.assign_add(x)`, in a model, we need to resolve how to combine the different values for `x` computed in the different replicas. * `NONE`: This is the default, giving an error if you use a variable-update operation with multiple replicas. * `SUM`: Add the updates across replicas. * `MEAN`: Take the arithmetic mean ("average") of the updates across replicas. 
* `ONLY_FIRST_REPLICA`: This is for when every replica is performing the same update, but we only want to perform the update once. Used, e.g., for the global step counter. """ NONE = 0 SUM = 1 MEAN = 2 ONLY_FIRST_REPLICA = 3 def __hash__(self): return hash(self.value) def __eq__(self, other): if self is other: return True elif isinstance(other, VariableAggregation): return int(self.value) == int(other.value) else: return False @tf_export(v1=["VariableAggregation"]) class VariableAggregation(enum.Enum): NONE = 0 SUM = 1 MEAN = 2 ONLY_FIRST_REPLICA = 3 ONLY_FIRST_TOWER = 3 # DEPRECATED def __hash__(self): return hash(self.value) # LINT.ThenChange(//tensorflow/core/framework/variable.proto) # # Note that we are currently relying on the integer values of the Python enums # matching the integer values of the proto enums. VariableAggregation.__doc__ = ( VariableAggregationV2.__doc__ + "* `ONLY_FIRST_TOWER`: Deprecated alias for `ONLY_FIRST_REPLICA`.\n ") def validate_synchronization_aggregation_trainable(synchronization, aggregation, trainable, name): """Given user-provided variable properties, sets defaults and validates.""" if aggregation is None: aggregation = VariableAggregation.NONE else: if not isinstance(aggregation, (VariableAggregation, VariableAggregationV2)): try: aggregation = VariableAggregationV2(aggregation) except ValueError: raise ValueError( "Invalid variable aggregation mode: {} for variable: {}".format( aggregation, name)) if synchronization is None: synchronization = VariableSynchronization.AUTO else: try: synchronization = VariableSynchronization(synchronization) except ValueError: raise ValueError( "Invalid variable synchronization mode: {} for variable: {}".format( synchronization, name)) if trainable is None: trainable = synchronization != VariableSynchronization.ON_READ return synchronization, aggregation, trainable class VariableMetaclass(type): """Metaclass to allow construction of tf.Variable to be overridden.""" def _variable_v1_call(cls, 
initial_value=None, trainable=None, collections=None, validate_shape=True, caching_device=None, name=None, variable_def=None, dtype=None, expected_shape=None, import_scope=None, constraint=None, use_resource=None, synchronization=VariableSynchronization.AUTO, aggregation=VariableAggregation.NONE, shape=None): """Call on Variable class. Useful to force the signature.""" previous_getter = lambda **kwargs: default_variable_creator(None, **kwargs) for _, getter in ops.get_default_graph()._variable_creator_stack: # pylint: disable=protected-access previous_getter = _make_getter(getter, previous_getter) # Reset `aggregation` that is explicitly set as `None` to the enum NONE. if aggregation is None: aggregation = VariableAggregation.NONE return previous_getter( initial_value=initial_value, trainable=trainable, collections=collections, validate_shape=validate_shape, caching_device=caching_device, name=name, variable_def=variable_def, dtype=dtype, expected_shape=expected_shape, import_scope=import_scope, constraint=constraint, use_resource=use_resource, synchronization=synchronization, aggregation=aggregation, shape=shape) def _variable_v2_call(cls, initial_value=None, trainable=None, validate_shape=True, caching_device=None, name=None, variable_def=None, dtype=None, import_scope=None, constraint=None, synchronization=VariableSynchronization.AUTO, aggregation=VariableAggregation.NONE, shape=None): """Call on Variable class. Useful to force the signature.""" previous_getter = lambda **kws: default_variable_creator_v2(None, **kws) for _, getter in ops.get_default_graph()._variable_creator_stack: # pylint: disable=protected-access previous_getter = _make_getter(getter, previous_getter) # Reset `aggregation` that is explicitly set as `None` to the enum NONE. 
if aggregation is None: aggregation = VariableAggregation.NONE return previous_getter( initial_value=initial_value, trainable=trainable, validate_shape=validate_shape, caching_device=caching_device, name=name, variable_def=variable_def, dtype=dtype, import_scope=import_scope, constraint=constraint, synchronization=synchronization, aggregation=aggregation, shape=shape) def __call__(cls, *args, **kwargs): if cls is VariableV1: return cls._variable_v1_call(*args, **kwargs) elif cls is Variable: return cls._variable_v2_call(*args, **kwargs) else: return super(VariableMetaclass, cls).__call__(*args, **kwargs) @tf_export("Variable", v1=[]) class Variable(six.with_metaclass(VariableMetaclass, trackable.Trackable)): """See the [Variables Guide](https://tensorflow.org/guide/variables). A variable maintains state in the graph across calls to `run()`. You add a variable to the graph by constructing an instance of the class `Variable`. The `Variable()` constructor requires an initial value for the variable, which can be a `Tensor` of any type and shape. The initial value defines the type and shape of the variable. After construction, the type and shape of the variable are fixed. The value can be changed using one of the assign methods. If you want to change the shape of a variable later you have to use an `assign` Op with `validate_shape=False`. Just like any `Tensor`, variables created with `Variable()` can be used as inputs for other Ops in the graph. Additionally, all the operators overloaded for the `Tensor` class are carried over to variables, so you can also add nodes to the graph by just doing arithmetic on variables. ```python import tensorflow as tf # Create a variable. w = tf.Variable(<initial-value>, name=<optional-name>) # Use the variable in the graph like any Tensor. y = tf.matmul(w, ...another variable or tensor...) # The overloaded operators are available too. z = tf.sigmoid(w + y) # Assign a new value to the variable with `assign()` or a related method. 
w.assign(w + 1.0) w.assign_add(1.0) ``` When you launch the graph, variables have to be explicitly initialized before you can run Ops that use their value. You can initialize a variable by running its *initializer op*, restoring the variable from a save file, or simply running an `assign` Op that assigns a value to the variable. In fact, the variable *initializer op* is just an `assign` Op that assigns the variable's initial value to the variable itself. ```python # Launch the graph in a session. with tf.compat.v1.Session() as sess: # Run the variable initializer. sess.run(w.initializer) # ...you now can run ops that use the value of 'w'... ``` The most common initialization pattern is to use the convenience function `global_variables_initializer()` to add an Op to the graph that initializes all the variables. You then run that Op after launching the graph. ```python # Add an Op to initialize global variables. init_op = tf.compat.v1.global_variables_initializer() # Launch the graph in a session. with tf.compat.v1.Session() as sess: # Run the Op that initializes global variables. sess.run(init_op) # ...you can now run any Op that uses variable values... ``` If you need to create a variable with an initial value dependent on another variable, use the other variable's `initialized_value()`. This ensures that variables are initialized in the right order. All variables are automatically collected in the graph where they are created. By default, the constructor adds the new variable to the graph collection `GraphKeys.GLOBAL_VARIABLES`. The convenience function `global_variables()` returns the contents of that collection. When building a machine learning model it is often convenient to distinguish between variables holding the trainable model parameters and other variables such as a `global step` variable used to count training steps. To make this easier, the variable constructor supports a `trainable=<bool>` parameter. 
If `True`, the new variable is also added to the graph collection `GraphKeys.TRAINABLE_VARIABLES`. The convenience function `trainable_variables()` returns the contents of this collection. The various `Optimizer` classes use this collection as the default list of variables to optimize. """ def __init__(self, initial_value=None, trainable=None, validate_shape=True, caching_device=None, name=None, variable_def=None, dtype=None, import_scope=None, constraint=None, synchronization=VariableSynchronization.AUTO, aggregation=VariableAggregation.NONE, shape=None): """Creates a new variable with value `initial_value`. The new variable is added to the graph collections listed in `collections`, which defaults to `[GraphKeys.GLOBAL_VARIABLES]`. If `trainable` is `True` the variable is also added to the graph collection `GraphKeys.TRAINABLE_VARIABLES`. This constructor creates both a `variable` Op and an `assign` Op to set the variable to its initial value. Args: initial_value: A `Tensor`, or Python object convertible to a `Tensor`, which is the initial value for the Variable. The initial value must have a shape specified unless `validate_shape` is set to False. Can also be a callable with no argument that returns the initial value when called. In that case, `dtype` must be specified. (Note that initializer functions from init_ops.py must first be bound to a shape before being used here.) trainable: If `True`, GradientTapes automatically watch uses of this variable. Defaults to `True`, unless `synchronization` is set to `ON_READ`, in which case it defaults to `False`. validate_shape: If `False`, allows the variable to be initialized with a value of unknown shape. If `True`, the default, the shape of `initial_value` must be known. caching_device: Optional device string describing where the Variable should be cached for reading. Defaults to the Variable's device. If not `None`, caches on another device. 
        Typical use is to cache on the device where the Ops using the
        Variable reside, to deduplicate copying through `Switch` and other
        conditional statements.
      name: Optional name for the variable. Defaults to `'Variable'` and gets
        uniquified automatically.
      variable_def: `VariableDef` protocol buffer. If not `None`, recreates
        the Variable object with its contents, referencing the variable's
        nodes in the graph, which must already exist. The graph is not
        changed. `variable_def` and the other arguments are mutually
        exclusive.
      dtype: If set, initial_value will be converted to the given type. If
        `None`, either the datatype will be kept (if `initial_value` is a
        Tensor), or `convert_to_tensor` will decide.
      import_scope: Optional `string`. Name scope to add to the `Variable`.
        Only used when initializing from protocol buffer.
      constraint: An optional projection function to be applied to the
        variable after being updated by an `Optimizer` (e.g. used to
        implement norm constraints or value constraints for layer weights).
        The function must take as input the unprojected Tensor representing
        the value of the variable and return the Tensor for the projected
        value (which must have the same shape). Constraints are not safe to
        use when doing asynchronous distributed training.
      synchronization: Indicates when a distributed variable will be
        aggregated. Accepted values are constants defined in the class
        `tf.VariableSynchronization`. By default the synchronization is set
        to `AUTO` and the current `DistributionStrategy` chooses when to
        synchronize.
      aggregation: Indicates how a distributed variable will be aggregated.
        Accepted values are constants defined in the class
        `tf.VariableAggregation`.
      shape: (optional) The shape of this variable. If None, the shape of
        `initial_value` will be used. When setting this argument to
        `tf.TensorShape(None)` (representing an unspecified shape), the
        variable can be assigned with values of different shapes.
Raises: ValueError: If both `variable_def` and initial_value are specified. ValueError: If the initial value is not specified, or does not have a shape and `validate_shape` is `True`. RuntimeError: If eager execution is enabled. """ raise NotImplementedError def __repr__(self): raise NotImplementedError def value(self): """Returns the last snapshot of this variable. You usually do not need to call this method as all ops that need the value of the variable call it automatically through a `convert_to_tensor()` call. Returns a `Tensor` which holds the value of the variable. You can not assign a new value to this tensor as it is not a reference to the variable. To avoid copies, if the consumer of the returned value is on the same device as the variable, this actually returns the live value of the variable, not a copy. Updates to the variable are seen by the consumer. If the consumer is on a different device it will get a copy of the variable. Returns: A `Tensor` containing the value of the variable. """ raise NotImplementedError def read_value(self): """Returns the value of this variable, read in the current context. Can be different from value() if it's on another device, with control dependencies, etc. Returns: A `Tensor` containing the value of the variable. """ raise NotImplementedError def set_shape(self, shape): """Overrides the shape for this variable. Args: shape: the `TensorShape` representing the overridden shape. """ raise NotImplementedError @property def trainable(self): raise NotImplementedError @property def synchronization(self): raise NotImplementedError @property def aggregation(self): raise NotImplementedError def eval(self, session=None): """In a session, computes and returns the value of this variable. This is not a graph construction method, it does not add ops to the graph. This convenience method requires a session where the graph containing this variable has been launched. If no session is passed, the default session is used. 
See `tf.compat.v1.Session` for more information on launching a graph and on sessions. ```python v = tf.Variable([1, 2]) init = tf.compat.v1.global_variables_initializer() with tf.compat.v1.Session() as sess: sess.run(init) # Usage passing the session explicitly. print(v.eval(sess)) # Usage with the default session. The 'with' block # above makes 'sess' the default session. print(v.eval()) ``` Args: session: The session to use to evaluate this variable. If none, the default session is used. Returns: A numpy `ndarray` with a copy of the value of this variable. """ raise NotImplementedError @deprecated( None, "Use Variable.read_value. Variables in 2.X are initialized " "automatically both in eager and graph (inside tf.defun) contexts.") def initialized_value(self): """Returns the value of the initialized variable. You should use this instead of the variable itself to initialize another variable with a value that depends on the value of this variable. ```python # Initialize 'v' with a random tensor. v = tf.Variable(tf.random.truncated_normal([10, 40])) # Use `initialized_value` to guarantee that `v` has been # initialized before its value is used to initialize `w`. # The random values are picked only once. w = tf.Variable(v.initialized_value() * 2.0) ``` Returns: A `Tensor` holding the value of this variable after its initializer has run. """ with ops.init_scope(): return control_flow_ops.cond( is_variable_initialized(self), self.read_value, lambda: self.initial_value) @property def initial_value(self): """Returns the Tensor used as the initial value for the variable. Note that this is different from `initialized_value()` which runs the op that initializes the variable before returning its value. This method returns the tensor that is used by the op that initializes the variable. Returns: A `Tensor`. """ raise NotImplementedError @property def constraint(self): """Returns the constraint function associated with this variable. 
Returns: The constraint function that was passed to the variable constructor. Can be `None` if no constraint was passed. """ raise NotImplementedError def assign(self, value, use_locking=False, name=None, read_value=True): """Assigns a new value to the variable. This is essentially a shortcut for `assign(self, value)`. Args: value: A `Tensor`. The new value for this variable. use_locking: If `True`, use locking during the assignment. name: The name of the operation to be created read_value: if True, will return something which evaluates to the new value of the variable; if False will return the assign op. Returns: A `Tensor` that will hold the new value of this variable after the assignment has completed. """ raise NotImplementedError def assign_add(self, delta, use_locking=False, name=None, read_value=True): """Adds a value to this variable. This is essentially a shortcut for `assign_add(self, delta)`. Args: delta: A `Tensor`. The value to add to this variable. use_locking: If `True`, use locking during the operation. name: The name of the operation to be created read_value: if True, will return something which evaluates to the new value of the variable; if False will return the assign op. Returns: A `Tensor` that will hold the new value of this variable after the addition has completed. """ raise NotImplementedError def assign_sub(self, delta, use_locking=False, name=None, read_value=True): """Subtracts a value from this variable. This is essentially a shortcut for `assign_sub(self, delta)`. Args: delta: A `Tensor`. The value to subtract from this variable. use_locking: If `True`, use locking during the operation. name: The name of the operation to be created read_value: if True, will return something which evaluates to the new value of the variable; if False will return the assign op. Returns: A `Tensor` that will hold the new value of this variable after the subtraction has completed. 
""" raise NotImplementedError def scatter_sub(self, sparse_delta, use_locking=False, name=None): """Subtracts `tf.IndexedSlices` from this variable. Args: sparse_delta: `tf.IndexedSlices` to be subtracted from this variable. use_locking: If `True`, use locking during the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered subtraction has completed. Raises: TypeError: if `sparse_delta` is not an `IndexedSlices`. """ raise NotImplementedError def scatter_add(self, sparse_delta, use_locking=False, name=None): """Adds `tf.IndexedSlices` to this variable. Args: sparse_delta: `tf.IndexedSlices` to be added to this variable. use_locking: If `True`, use locking during the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered addition has completed. Raises: TypeError: if `sparse_delta` is not an `IndexedSlices`. """ raise NotImplementedError def scatter_max(self, sparse_delta, use_locking=False, name=None): """Updates this variable with the max of `tf.IndexedSlices` and itself. Args: sparse_delta: `tf.IndexedSlices` to use as an argument of max with this variable. use_locking: If `True`, use locking during the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered maximization has completed. Raises: TypeError: if `sparse_delta` is not an `IndexedSlices`. """ raise NotImplementedError def scatter_min(self, sparse_delta, use_locking=False, name=None): """Updates this variable with the min of `tf.IndexedSlices` and itself. Args: sparse_delta: `tf.IndexedSlices` to use as an argument of min with this variable. use_locking: If `True`, use locking during the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered minimization has completed. 
Raises: TypeError: if `sparse_delta` is not an `IndexedSlices`. """ raise NotImplementedError def scatter_mul(self, sparse_delta, use_locking=False, name=None): """Multiply this variable by `tf.IndexedSlices`. Args: sparse_delta: `tf.IndexedSlices` to multiply this variable by. use_locking: If `True`, use locking during the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered multiplication has completed. Raises: TypeError: if `sparse_delta` is not an `IndexedSlices`. """ raise NotImplementedError def scatter_div(self, sparse_delta, use_locking=False, name=None): """Divide this variable by `tf.IndexedSlices`. Args: sparse_delta: `tf.IndexedSlices` to divide this variable by. use_locking: If `True`, use locking during the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered division has completed. Raises: TypeError: if `sparse_delta` is not an `IndexedSlices`. """ raise NotImplementedError def scatter_update(self, sparse_delta, use_locking=False, name=None): """Assigns `tf.IndexedSlices` to this variable. Args: sparse_delta: `tf.IndexedSlices` to be assigned to this variable. use_locking: If `True`, use locking during the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered assignment has completed. Raises: TypeError: if `sparse_delta` is not an `IndexedSlices`. """ raise NotImplementedError def batch_scatter_update(self, sparse_delta, use_locking=False, name=None): """Assigns `tf.IndexedSlices` to this variable batch-wise. Analogous to `batch_gather`. This assumes that this variable and the sparse_delta IndexedSlices have a series of leading dimensions that are the same for all of them, and the updates are performed on the last dimension of indices. 
    In other words, the dimensions should be the following:

    `num_prefix_dims = sparse_delta.indices.ndims - 1`
    `batch_dim = num_prefix_dims + 1`
    `sparse_delta.updates.shape = sparse_delta.indices.shape + var.shape[
         batch_dim:]`

    where

    `sparse_delta.updates.shape[:num_prefix_dims]`
    `== sparse_delta.indices.shape[:num_prefix_dims]`
    `== var.shape[:num_prefix_dims]`

    And the operation performed can be expressed as:

    `var[i_1, ..., i_n,
         sparse_delta.indices[i_1, ..., i_n, j]] = sparse_delta.updates[
            i_1, ..., i_n, j]`

    When sparse_delta.indices is a 1D tensor, this operation is equivalent to
    `scatter_update`.

    To avoid this operation one can loop over the first `ndims` of the
    variable and use `scatter_update` on the subtensors that result from
    slicing the first dimension. This is a valid option for `ndims = 1`, but
    less efficient than this implementation.

    Args:
      sparse_delta: `tf.IndexedSlices` to be assigned to this variable.
      use_locking: If `True`, use locking during the operation.
      name: the name of the operation.

    Returns:
      A `Tensor` that will hold the new value of this variable after
      the scattered assignment has completed.

    Raises:
      TypeError: if `sparse_delta` is not an `IndexedSlices`.
    """
    raise NotImplementedError

  def scatter_nd_sub(self, indices, updates, name=None):
    """Applies sparse subtraction to individual values or slices in a Variable.

    Assuming the variable has rank `P` and `indices` is a `Tensor` of rank `Q`.

    `indices` must be an integer tensor, containing indices into self.
    It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

    The innermost dimension of `indices` (with length `K`) corresponds to
    indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
    dimension of self.

    `updates` is `Tensor` of rank `Q-1+P-K` with shape:

    ```
    [d_0, ..., d_{Q-2}, self.shape[K], ..., self.shape[P-1]].
    ```

    For example, say we want to subtract 4 scattered elements from a rank-1
    tensor with 8 elements.
    In Python, that update would look like this:

    ```python
        v = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
        indices = tf.constant([[4], [3], [1] ,[7]])
        updates = tf.constant([9, 10, 11, 12])
        op = v.scatter_nd_sub(indices, updates)
        with tf.compat.v1.Session() as sess:
          print(sess.run(op))
    ```

    The resulting update to v would look like this:

        [1, -9, 3, -6, -6, 6, 7, -4]

    See `tf.scatter_nd` for more details about how to make updates to
    slices.

    Args:
      indices: The indices to be used in the operation.
      updates: The values to be used in the operation.
      name: the name of the operation.

    Returns:
      A `Tensor` that will hold the new value of this variable after
      the scattered subtraction has completed.
    """
    raise NotImplementedError

  def scatter_nd_add(self, indices, updates, name=None):
    """Applies sparse addition to individual values or slices in a Variable.

    The Variable has rank `P` and `indices` is a `Tensor` of rank `Q`.

    `indices` must be an integer tensor, containing indices into self.
    It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

    The innermost dimension of `indices` (with length `K`) corresponds to
    indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
    dimension of self.

    `updates` is `Tensor` of rank `Q-1+P-K` with shape:

    ```
    [d_0, ..., d_{Q-2}, self.shape[K], ..., self.shape[P-1]].
    ```

    For example, say we want to add 4 scattered elements to a rank-1 tensor
    with 8 elements. In Python, that update would look like this:

    ```python
        v = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
        indices = tf.constant([[4], [3], [1] ,[7]])
        updates = tf.constant([9, 10, 11, 12])
        add = v.scatter_nd_add(indices, updates)
        with tf.compat.v1.Session() as sess:
          print(sess.run(add))
    ```

    The resulting update to v would look like this:

        [1, 13, 3, 14, 14, 6, 7, 20]

    See `tf.scatter_nd` for more details about how to make updates to
    slices.

    Args:
      indices: The indices to be used in the operation.
      updates: The values to be used in the operation.
      name: the name of the operation.
    Returns:
      A `Tensor` that will hold the new value of this variable after
      the scattered addition has completed.
    """
    raise NotImplementedError

  def scatter_nd_update(self, indices, updates, name=None):
    """Applies sparse assignment to individual values or slices in a Variable.

    The Variable has rank `P` and `indices` is a `Tensor` of rank `Q`.

    `indices` must be an integer tensor, containing indices into self.
    It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

    The innermost dimension of `indices` (with length `K`) corresponds to
    indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
    dimension of self.

    `updates` is `Tensor` of rank `Q-1+P-K` with shape:

    ```
    [d_0, ..., d_{Q-2}, self.shape[K], ..., self.shape[P-1]].
    ```

    For example, say we want to assign 4 scattered elements to a rank-1
    tensor with 8 elements. In Python, that update would look like this:

    ```python
        v = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
        indices = tf.constant([[4], [3], [1] ,[7]])
        updates = tf.constant([9, 10, 11, 12])
        op = v.scatter_nd_update(indices, updates)
        with tf.compat.v1.Session() as sess:
          print(sess.run(op))
    ```

    The resulting update to v would look like this:

        [1, 11, 3, 10, 9, 6, 7, 12]

    See `tf.scatter_nd` for more details about how to make updates to
    slices.

    Args:
      indices: The indices to be used in the operation.
      updates: The values to be used in the operation.
      name: the name of the operation.

    Returns:
      A `Tensor` that will hold the new value of this variable after
      the scattered assignment has completed.
    """
    raise NotImplementedError

  def sparse_read(self, indices, name=None):
    r"""Gather slices from `params` axis `axis` according to `indices`.

    This function supports a subset of tf.gather, see tf.gather for details on
    usage.

    Args:
      indices: The index `Tensor`. Must be one of the following types:
        `int32`, `int64`. Must be in range `[0, params.shape[axis])`.
      name: A name for the operation (optional).

    Returns:
      A `Tensor`. Has the same type as `params`.
""" raise AttributeError def gather_nd(self, indices, name=None): r"""Gather slices from `params` into a Tensor with shape specified by `indices`. See tf.gather_nd for details. Args: indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. Index tensor. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `params`. """ raise AttributeError @deprecated(None, "Prefer Dataset.range instead.") def count_up_to(self, limit): """Increments this variable until it reaches `limit`. When that Op is run it tries to increment the variable by `1`. If incrementing the variable would bring it above `limit` then the Op raises the exception `OutOfRangeError`. If no error is raised, the Op outputs the value of the variable before the increment. This is essentially a shortcut for `count_up_to(self, limit)`. Args: limit: value at which incrementing the variable raises an error. Returns: A `Tensor` that will hold the variable value before the increment. If no other Op modifies this variable, the values produced will all be distinct. """ raise NotImplementedError @deprecated(None, "Prefer Variable.assign which has equivalent behavior in 2.X.") def load(self, value, session=None): """Load new value into this variable. Writes new value to variable's memory. Doesn't add ops to the graph. This convenience method requires a session where the graph containing this variable has been launched. If no session is passed, the default session is used. See `tf.compat.v1.Session` for more information on launching a graph and on sessions. ```python v = tf.Variable([1, 2]) init = tf.compat.v1.global_variables_initializer() with tf.compat.v1.Session() as sess: sess.run(init) # Usage passing the session explicitly. v.load([2, 3], sess) print(v.eval(sess)) # prints [2 3] # Usage with the default session. The 'with' block # above makes 'sess' the default session. 
v.load([3, 4], sess) print(v.eval()) # prints [3 4] ``` Args: value: New variable value session: The session to use to evaluate this variable. If none, the default session is used. Raises: ValueError: Session is not passed and no default session """ if context.executing_eagerly(): self.assign(value) else: session = session or ops.get_default_session() if session is None: raise ValueError( "Either session argument should be provided or default session " "should be established") session.run(self.initializer, {self.initializer.inputs[1]: value}) # Conversion to tensor. @staticmethod def _TensorConversionFunction(v, dtype=None, name=None, as_ref=False): # pylint: disable=invalid-name """Utility function for converting a Variable to a Tensor.""" _ = name if dtype and not dtype.is_compatible_with(v.dtype): raise ValueError( "Incompatible type conversion requested to type '%s' for variable " "of type '%s'" % (dtype.name, v.dtype.name)) if as_ref: return v._ref() # pylint: disable=protected-access else: return v.value() @classmethod def _OverloadAllOperators(cls): # pylint: disable=invalid-name """Register overloads for all operators.""" for operator in ops.Tensor.OVERLOADABLE_OPERATORS: cls._OverloadOperator(operator) # For slicing, bind getitem differently than a tensor (use SliceHelperVar # instead) # pylint: disable=protected-access setattr(cls, "__getitem__", array_ops._SliceHelperVar) @classmethod def _OverloadOperator(cls, operator): # pylint: disable=invalid-name """Defer an operator overload to `ops.Tensor`. We pull the operator out of ops.Tensor dynamically to avoid ordering issues. Args: operator: string. The operator name. """ # We can't use the overload mechanism on __eq__ & __ne__ since __eq__ is # called when adding a variable to sets. 
As a result we call a.value() which # causes infinite recursion when operating within a GradientTape # TODO(gjn): Consider removing this if operator == "__eq__" or operator == "__ne__": return tensor_oper = getattr(ops.Tensor, operator) def _run_op(a, *args, **kwargs): # pylint: disable=protected-access return tensor_oper(a.value(), *args, **kwargs) functools.update_wrapper(_run_op, tensor_oper) setattr(cls, operator, _run_op) def __hash__(self): if ops.Tensor._USE_EQUALITY and ops.executing_eagerly_outside_functions(): # pylint: disable=protected-access raise TypeError("Variable is unhashable if Tensor equality is enabled. " "Instead, use tensor.experimental_ref() as the key.") else: return id(self) # TODO(gjn): duplicate of math_ops.tensor_equals, consider removing def __eq__(self, other): """Compares two variables element-wise for equality.""" if ops.Tensor._USE_EQUALITY and ops.executing_eagerly_outside_functions(): # pylint: disable=protected-access if fwd_compat.forward_compatible(2019, 9, 25): return gen_math_ops.equal(self, other, incompatible_shape_error=False) else: return gen_math_ops.equal(self, other) else: # In legacy graph mode, tensor equality is object equality return self is other # TODO(gjn): duplicate of math_ops.tensor_not_equals, consider removing def __ne__(self, other): """Compares two variables element-wise for equality.""" if ops.Tensor._USE_EQUALITY and ops.executing_eagerly_outside_functions(): # pylint: disable=protected-access if fwd_compat.forward_compatible(2019, 9, 25): return gen_math_ops.not_equal( self, other, incompatible_shape_error=False) else: return gen_math_ops.not_equal(self, other) else: # In legacy graph mode, tensor equality is object equality return self is not other def __iter__(self): """Dummy method to prevent iteration. Do not call. NOTE(mrry): If we register __getitem__ as an overloaded operator, Python will valiantly attempt to iterate over the variable's Tensor from 0 to infinity. 
Declaring this method prevents this unintended behavior. Raises: TypeError: when invoked. """ raise TypeError("'Variable' object is not iterable.") # NOTE(mrry): This enables the Variable's overloaded "right" binary # operators to run when the left operand is an ndarray, because it # accords the Variable class higher priority than an ndarray, or a # numpy matrix. # TODO(mrry): Convert this to using numpy's __numpy_ufunc__ # mechanism, which allows more control over how Variables interact # with ndarrays. __array_priority__ = 100 @property def name(self): """The name of this variable.""" raise NotImplementedError @property def _shared_name(self): """The shared name of the variable. Unlike name(), shared_name doesn't have ":0" suffix. It is user-specified name with name scope prefix. Returns: variable name. """ return self.name[:self.name.index(":")] @property def initializer(self): """The initializer operation for this variable.""" raise NotImplementedError @property def device(self): """The device of this variable.""" raise NotImplementedError @property def dtype(self): """The `DType` of this variable.""" raise NotImplementedError @property def op(self): """The `Operation` of this variable.""" raise NotImplementedError @property def graph(self): """The `Graph` of this variable.""" raise NotImplementedError @property def shape(self): """The `TensorShape` of this variable. Returns: A `TensorShape`. """ raise NotImplementedError def get_shape(self): """Alias of `Variable.shape`.""" return self.shape def _gather_saveables_for_checkpoint(self): """For implementing `Trackable`. This object is saveable on its own.""" return {trackable.VARIABLE_VALUE_KEY: self} def to_proto(self, export_scope=None): """Converts a `Variable` to a `VariableDef` protocol buffer. Args: export_scope: Optional `string`. Name scope to remove. Returns: A `VariableDef` protocol buffer, or `None` if the `Variable` is not in the specified name scope. 
    """
    raise NotImplementedError

  @staticmethod
  def from_proto(variable_def, import_scope=None):
    """Returns a `Variable` object created from `variable_def`."""
    return RefVariable(variable_def=variable_def, import_scope=import_scope)

  def _set_save_slice_info(self, save_slice_info):
    """Sets the slice info for this `Variable`.

    Args:
      save_slice_info: A `Variable.SaveSliceInfo` object.
    """
    self._save_slice_info = save_slice_info

  def _get_save_slice_info(self):
    return self._save_slice_info

  def experimental_ref(self):
    # tf.Tensor also has the same experimental_ref() API. If you update the
    # documentation here, please update tf.Tensor.experimental_ref() as well.
    """Returns a hashable reference object to this Variable.

    Warning: Experimental API that could be changed or removed.

    The primary use case for this API is to put variables in a
    set/dictionary. We can't put variables in a set/dictionary as
    `variable.__hash__()` is no longer available starting Tensorflow 2.0.

    ```python
    import tensorflow as tf

    x = tf.Variable(5)
    y = tf.Variable(10)
    z = tf.Variable(10)

    # The following will raise an exception starting 2.0
    # TypeError: Variable is unhashable if Variable equality is enabled.
    variable_set = {x, y, z}
    variable_dict = {x: 'five', y: 'ten'}
    ```

    Instead, we can use `variable.experimental_ref()`.

    ```python
    variable_set = {x.experimental_ref(),
                    y.experimental_ref(),
                    z.experimental_ref()}

    print(x.experimental_ref() in variable_set)
    ==> True

    variable_dict = {x.experimental_ref(): 'five',
                     y.experimental_ref(): 'ten',
                     z.experimental_ref(): 'ten'}

    print(variable_dict[y.experimental_ref()])
    ==> ten
    ```

    Also, the reference object provides `.deref()` function that returns the
    original Variable.

    ```python
    x = tf.Variable(5)
    print(x.experimental_ref().deref())
    ==> <tf.Variable 'Variable:0' shape=() dtype=int32, numpy=5>
    ```
    """
    return object_identity.Reference(self)

  class SaveSliceInfo(object):
    """Information on how to save this Variable as a slice.
    Provides internal support for saving variables as slices of a larger
    variable.  This API is not public and is subject to change.

    Available properties:

    * full_name
    * full_shape
    * var_offset
    * var_shape
    """

    def __init__(self,
                 full_name=None,
                 full_shape=None,
                 var_offset=None,
                 var_shape=None,
                 save_slice_info_def=None,
                 import_scope=None):
      """Create a `SaveSliceInfo`.

      Args:
        full_name: Name of the full variable of which this `Variable` is a
          slice.
        full_shape: Shape of the full variable, as a list of int.
        var_offset: Offset of this `Variable` into the full variable, as a
          list of int.
        var_shape: Shape of this `Variable`, as a list of int.
        save_slice_info_def: `SaveSliceInfoDef` protocol buffer. If not
          `None`, recreates the SaveSliceInfo object from its contents.
          `save_slice_info_def` and other arguments are mutually exclusive.
        import_scope: Optional `string`. Name scope to add. Only used when
          initializing from protocol buffer.
      """
      if save_slice_info_def:
        assert isinstance(save_slice_info_def, variable_pb2.SaveSliceInfoDef)
        self.full_name = ops.prepend_name_scope(
            save_slice_info_def.full_name, import_scope=import_scope)
        self.full_shape = [i for i in save_slice_info_def.full_shape]
        self.var_offset = [i for i in save_slice_info_def.var_offset]
        self.var_shape = [i for i in save_slice_info_def.var_shape]
      else:
        self.full_name = full_name
        self.full_shape = full_shape
        self.var_offset = var_offset
        self.var_shape = var_shape

    @property
    def spec(self):
      """Computes the spec string used for saving."""
      full_shape_str = " ".join(["%d" % d for d in self.full_shape]) + " "
      sl_spec = ":".join(
          ["%d,%d" % (o, s) for o, s in zip(self.var_offset, self.var_shape)])
      return full_shape_str + sl_spec

    def to_proto(self, export_scope=None):
      """Returns a SaveSliceInfoDef() proto.

      Args:
        export_scope: Optional `string`. Name scope to remove.

      Returns:
        A `SaveSliceInfoDef` protocol buffer, or None if the `Variable` is not
        in the specified name scope.
""" if (export_scope is None or self.full_name.startswith(export_scope)): save_slice_info_def = variable_pb2.SaveSliceInfoDef() save_slice_info_def.full_name = ops.strip_name_scope( self.full_name, export_scope) for i in self.full_shape: save_slice_info_def.full_shape.append(i) for i in self.var_offset: save_slice_info_def.var_offset.append(i) for i in self.var_shape: save_slice_info_def.var_shape.append(i) return save_slice_info_def else: return None Variable._OverloadAllOperators() # pylint: disable=protected-access _pywrap_utils.RegisterType("Variable", Variable) @tf_export(v1=["Variable"]) class VariableV1(Variable): """See the [Variables Guide](https://tensorflow.org/guide/variables). A variable maintains state in the graph across calls to `run()`. You add a variable to the graph by constructing an instance of the class `Variable`. The `Variable()` constructor requires an initial value for the variable, which can be a `Tensor` of any type and shape. The initial value defines the type and shape of the variable. After construction, the type and shape of the variable are fixed. The value can be changed using one of the assign methods. If you want to change the shape of a variable later you have to use an `assign` Op with `validate_shape=False`. Just like any `Tensor`, variables created with `Variable()` can be used as inputs for other Ops in the graph. Additionally, all the operators overloaded for the `Tensor` class are carried over to variables, so you can also add nodes to the graph by just doing arithmetic on variables. ```python import tensorflow as tf # Create a variable. w = tf.Variable(<initial-value>, name=<optional-name>) # Use the variable in the graph like any Tensor. y = tf.matmul(w, ...another variable or tensor...) # The overloaded operators are available too. z = tf.sigmoid(w + y) # Assign a new value to the variable with `assign()` or a related method. 
w.assign(w + 1.0) w.assign_add(1.0) ``` When you launch the graph, variables have to be explicitly initialized before you can run Ops that use their value. You can initialize a variable by running its *initializer op*, restoring the variable from a save file, or simply running an `assign` Op that assigns a value to the variable. In fact, the variable *initializer op* is just an `assign` Op that assigns the variable's initial value to the variable itself. ```python # Launch the graph in a session. with tf.compat.v1.Session() as sess: # Run the variable initializer. sess.run(w.initializer) # ...you now can run ops that use the value of 'w'... ``` The most common initialization pattern is to use the convenience function `global_variables_initializer()` to add an Op to the graph that initializes all the variables. You then run that Op after launching the graph. ```python # Add an Op to initialize global variables. init_op = tf.compat.v1.global_variables_initializer() # Launch the graph in a session. with tf.compat.v1.Session() as sess: # Run the Op that initializes global variables. sess.run(init_op) # ...you can now run any Op that uses variable values... ``` If you need to create a variable with an initial value dependent on another variable, use the other variable's `initialized_value()`. This ensures that variables are initialized in the right order. All variables are automatically collected in the graph where they are created. By default, the constructor adds the new variable to the graph collection `GraphKeys.GLOBAL_VARIABLES`. The convenience function `global_variables()` returns the contents of that collection. When building a machine learning model it is often convenient to distinguish between variables holding the trainable model parameters and other variables such as a `global step` variable used to count training steps. To make this easier, the variable constructor supports a `trainable=<bool>` parameter. 
If `True`, the new variable is also added to the graph collection `GraphKeys.TRAINABLE_VARIABLES`. The convenience function `trainable_variables()` returns the contents of this collection. The various `Optimizer` classes use this collection as the default list of variables to optimize. WARNING: tf.Variable objects by default have a non-intuitive memory model. A Variable is represented internally as a mutable Tensor which can non-deterministically alias other Tensors in a graph. The set of operations which consume a Variable and can lead to aliasing is undetermined and can change across TensorFlow versions. Avoid writing code which relies on the value of a Variable either changing or not changing as other operations happen. For example, using Variable objects or simple functions thereof as predicates in a `tf.cond` is dangerous and error-prone: ``` v = tf.Variable(True) tf.cond(v, lambda: v.assign(False), my_false_fn) # Note: this is broken. ``` Here, adding `use_resource=True` when constructing the variable will fix any nondeterminism issues: ``` v = tf.Variable(True, use_resource=True) tf.cond(v, lambda: v.assign(False), my_false_fn) ``` To use the replacement for variables which does not have these issues: * Add `use_resource=True` when constructing `tf.Variable`; * Call `tf.compat.v1.get_variable_scope().set_use_resource(True)` inside a `tf.compat.v1.variable_scope` before the `tf.compat.v1.get_variable()` call. """ def __init__( self, # pylint: disable=super-init-not-called initial_value=None, trainable=None, collections=None, validate_shape=True, caching_device=None, name=None, variable_def=None, dtype=None, expected_shape=None, import_scope=None, constraint=None, use_resource=None, synchronization=VariableSynchronization.AUTO, aggregation=VariableAggregation.NONE, shape=None): """Creates a new variable with value `initial_value`. The new variable is added to the graph collections listed in `collections`, which defaults to `[GraphKeys.GLOBAL_VARIABLES]`. 
If `trainable` is `True` the variable is also added to the graph collection `GraphKeys.TRAINABLE_VARIABLES`. This constructor creates both a `variable` Op and an `assign` Op to set the variable to its initial value. Args: initial_value: A `Tensor`, or Python object convertible to a `Tensor`, which is the initial value for the Variable. The initial value must have a shape specified unless `validate_shape` is set to False. Can also be a callable with no argument that returns the initial value when called. In that case, `dtype` must be specified. (Note that initializer functions from init_ops.py must first be bound to a shape before being used here.) trainable: If `True`, also adds the variable to the graph collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as the default list of variables to use by the `Optimizer` classes. Defaults to `True`, unless `synchronization` is set to `ON_READ`, in which case it defaults to `False`. collections: List of graph collections keys. The new variable is added to these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`. validate_shape: If `False`, allows the variable to be initialized with a value of unknown shape. If `True`, the default, the shape of `initial_value` must be known. caching_device: Optional device string describing where the Variable should be cached for reading. Defaults to the Variable's device. If not `None`, caches on another device. Typical use is to cache on the device where the Ops using the Variable reside, to deduplicate copying through `Switch` and other conditional statements. name: Optional name for the variable. Defaults to `'Variable'` and gets uniquified automatically. variable_def: `VariableDef` protocol buffer. If not `None`, recreates the Variable object with its contents, referencing the variable's nodes in the graph, which must already exist. The graph is not changed. `variable_def` and the other arguments are mutually exclusive. 
      dtype: If set, initial_value will be converted to the given type. If
        `None`, either the datatype will be kept (if `initial_value` is a
        Tensor), or `convert_to_tensor` will decide.
      expected_shape: A TensorShape. If set, initial_value is expected to have
        this shape.
      import_scope: Optional `string`. Name scope to add to the `Variable`.
        Only used when initializing from protocol buffer.
      constraint: An optional projection function to be applied to the variable
        after being updated by an `Optimizer` (e.g. used to implement norm
        constraints or value constraints for layer weights). The function must
        take as input the unprojected Tensor representing the value of the
        variable and return the Tensor for the projected value (which must
        have the same shape). Constraints are not safe to use when doing
        asynchronous distributed training.
      use_resource: whether to use resource variables.
      synchronization: Indicates when a distributed variable will be
        aggregated. Accepted values are constants defined in the class
        `tf.VariableSynchronization`. By default the synchronization is set to
        `AUTO` and the current `DistributionStrategy` chooses when to
        synchronize.
      aggregation: Indicates how a distributed variable will be aggregated.
        Accepted values are constants defined in the class
        `tf.VariableAggregation`.
      shape: (optional) The shape of this variable. If None, the shape of
        `initial_value` will be used. When setting this argument to
        `tf.TensorShape(None)` (representing an unspecified shape), the
        variable can be assigned with values of different shapes.

    Raises:
      ValueError: If both `variable_def` and initial_value are specified.
      ValueError: If the initial value is not specified, or does not have a
        shape and `validate_shape` is `True`.
      RuntimeError: If eager execution is enabled.
""" SaveSliceInfo = Variable.SaveSliceInfo # TODO(apassos): do not repeat all comments here class RefVariable(VariableV1): """Ref-based implementation of variables.""" def __init__( self, # pylint: disable=super-init-not-called initial_value=None, trainable=None, collections=None, validate_shape=True, caching_device=None, name=None, variable_def=None, dtype=None, expected_shape=None, import_scope=None, constraint=None, synchronization=None, aggregation=None, shape=None): """Creates a new variable with value `initial_value`. The new variable is added to the graph collections listed in `collections`, which defaults to `[GraphKeys.GLOBAL_VARIABLES]`. If `trainable` is `True` the variable is also added to the graph collection `GraphKeys.TRAINABLE_VARIABLES`. This constructor creates both a `variable` Op and an `assign` Op to set the variable to its initial value. Args: initial_value: A `Tensor`, or Python object convertible to a `Tensor`, which is the initial value for the Variable. The initial value must have a shape specified unless `validate_shape` is set to False. Can also be a callable with no argument that returns the initial value when called. In that case, `dtype` must be specified. (Note that initializer functions from init_ops.py must first be bound to a shape before being used here.) trainable: If `True`, also adds the variable to the graph collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as the default list of variables to use by the `Optimizer` classes. Defaults to `True`, unless `synchronization` is set to `ON_READ`, in which case it defaults to `False`. collections: List of graph collections keys. The new variable is added to these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`. validate_shape: If `False`, allows the variable to be initialized with a value of unknown shape. If `True`, the default, the shape of `initial_value` must be known. 
      caching_device: Optional device string describing where the Variable
        should be cached for reading.  Defaults to the Variable's device. If
        not `None`, caches on another device.  Typical use is to cache on the
        device where the Ops using the Variable reside, to deduplicate copying
        through `Switch` and other conditional statements.
      name: Optional name for the variable. Defaults to `'Variable'` and gets
        uniquified automatically.
      variable_def: `VariableDef` protocol buffer. If not `None`, recreates the
        Variable object with its contents, referencing the variable's nodes in
        the graph, which must already exist. The graph is not changed.
        `variable_def` and the other arguments are mutually exclusive.
      dtype: If set, initial_value will be converted to the given type. If
        `None`, either the datatype will be kept (if `initial_value` is a
        Tensor), or `convert_to_tensor` will decide.
      expected_shape: A TensorShape. If set, initial_value is expected to have
        this shape.
      import_scope: Optional `string`. Name scope to add to the `Variable`.
        Only used when initializing from protocol buffer.
      constraint: An optional projection function to be applied to the variable
        after being updated by an `Optimizer` (e.g. used to implement norm
        constraints or value constraints for layer weights). The function must
        take as input the unprojected Tensor representing the value of the
        variable and return the Tensor for the projected value (which must
        have the same shape). Constraints are not safe to use when doing
        asynchronous distributed training.
      synchronization: Indicates when a distributed variable will be
        aggregated. Accepted values are constants defined in the class
        `tf.VariableSynchronization`. By default the synchronization is set to
        `AUTO` and the current `DistributionStrategy` chooses when to
        synchronize.
      aggregation: Indicates how a distributed variable will be aggregated.
        Accepted values are constants defined in the class
        `tf.VariableAggregation`.
      shape: (optional) The shape of this variable.
If None, the shape of `initial_value` will be used. When setting this argument to `tf.TensorShape(None)` (representing an unspecified shape), the variable can be assigned with values of different shapes. Raises: ValueError: If both `variable_def` and initial_value are specified. ValueError: If the initial value is not specified, or does not have a shape and `validate_shape` is `True`. RuntimeError: If eager execution is enabled. """ self._in_graph_mode = True if variable_def: # If variable_def is provided, recreates the variable from its fields. if initial_value: raise ValueError("variable_def and initial_value are mutually " "exclusive.") self._init_from_proto(variable_def, import_scope=import_scope) else: # Create from initial_value. self._init_from_args( initial_value=initial_value, trainable=trainable, collections=collections, validate_shape=validate_shape, caching_device=caching_device, name=name, dtype=dtype, expected_shape=expected_shape, constraint=constraint, synchronization=synchronization, aggregation=aggregation, shape=shape) def __repr__(self): if context.executing_eagerly() and not self._in_graph_mode: return "<tf.Variable '%s' shape=%s dtype=%s, numpy=%s>" % ( self.name, self.get_shape(), self.dtype.name, ops.numpy_text(self.read_value(), is_repr=True)) else: return "<tf.Variable '%s' shape=%s dtype=%s>" % ( self.name, self.get_shape(), self.dtype.name) def _init_from_args(self, initial_value=None, trainable=None, collections=None, validate_shape=True, caching_device=None, name=None, dtype=None, expected_shape=None, constraint=None, synchronization=None, aggregation=None, shape=None): """Creates a new variable from arguments. Args: initial_value: A `Tensor`, or Python object convertible to a `Tensor`, which is the initial value for the Variable. The initial value must have a shape specified unless `validate_shape` is set to False. Can also be a callable with no argument that returns the initial value when called. 
        (Note that initializer functions from init_ops.py must first be bound
        to a shape before being used here.)
      trainable: If `True`, also adds the variable to the graph collection
        `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as the
        default list of variables to use by the `Optimizer` classes. Defaults
        to `True`, unless `synchronization` is set to `ON_READ`, in which case
        it defaults to `False`.
      collections: List of graph collections keys. The new variable is added
        to these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
      validate_shape: If `False`, allows the variable to be initialized with a
        value of unknown shape. If `True`, the default, the shape of
        `initial_value` must be known.
      caching_device: Optional device string or function describing where the
        Variable should be cached for reading.  Defaults to the Variable's
        device.  If not `None`, caches on another device.  Typical use is to
        cache on the device where the Ops using the Variable reside, to
        deduplicate copying through `Switch` and other conditional statements.
      name: Optional name for the variable. Defaults to `'Variable'` and gets
        uniquified automatically.
      dtype: If set, initial_value will be converted to the given type. If
        None, either the datatype will be kept (if initial_value is a Tensor)
        or float32 will be used (if it is a Python object convertible to a
        Tensor).
      expected_shape: Deprecated. Ignored.
      constraint: An optional projection function to be applied to the variable
        after being updated by an `Optimizer` (e.g. used to implement norm
        constraints or value constraints for layer weights). The function must
        take as input the unprojected Tensor representing the value of the
        variable and return the Tensor for the projected value (which must
        have the same shape). Constraints are not safe to use when doing
        asynchronous distributed training.
      synchronization: Indicates when a distributed variable will be
        aggregated. Accepted values are constants defined in the class
        `tf.VariableSynchronization`.
By default the synchronization is set to `AUTO` and the current `DistributionStrategy` chooses when to synchronize. aggregation: Indicates how a distributed variable will be aggregated. Accepted values are constants defined in the class `tf.VariableAggregation`. shape: (optional) The shape of this variable. If None, the shape of `initial_value` will be used. When setting this argument to `tf.TensorShape(None)` (representing an unspecified shape), the variable can be assigned with values of different shapes. Raises: ValueError: If the initial value is not specified, or does not have a shape and `validate_shape` is `True`. RuntimeError: If lifted into the eager context. """ _ = expected_shape if initial_value is None: raise ValueError("initial_value must be specified.") init_from_fn = callable(initial_value) if collections is None: collections = [ops.GraphKeys.GLOBAL_VARIABLES] if not isinstance(collections, (list, tuple, set)): raise ValueError( "collections argument to Variable constructor must be a list, tuple, " "or set. Got %s of type %s" % (collections, type(collections))) if constraint is not None and not callable(constraint): raise ValueError("The `constraint` argument must be a callable.") # Store the graph key so optimizers know how to only retrieve variables from # this graph. 
self._graph_key = ops.get_default_graph()._graph_key # pylint: disable=protected-access if isinstance(initial_value, trackable.CheckpointInitialValue): self._maybe_initialize_trackable() self._update_uid = initial_value.checkpoint_position.restore_uid initial_value = initial_value.wrapped_value synchronization, aggregation, trainable = ( validate_synchronization_aggregation_trainable(synchronization, aggregation, trainable, name)) self._synchronization = synchronization self._aggregation = aggregation self._trainable = trainable if trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections: collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES] with ops.init_scope(): # Ensure that we weren't lifted into the eager context. if context.executing_eagerly(): raise RuntimeError( "RefVariable not supported when eager execution is enabled. ") with ops.name_scope(name, "Variable", [] if init_from_fn else [initial_value]) as name: if init_from_fn: # Use attr_scope and device(None) to simulate the behavior of # colocate_with when the variable we want to colocate with doesn't # yet exist. true_name = ops.name_from_scope_name(name) # pylint: disable=protected-access attr = attr_value_pb2.AttrValue( list=attr_value_pb2.AttrValue.ListValue( s=[compat.as_bytes("loc:@%s" % true_name)])) # pylint: disable=protected-access with ops.get_default_graph()._attr_scope({"_class": attr}): with ops.name_scope("Initializer"), ops.device(None): self._initial_value = ops.convert_to_tensor( initial_value(), name="initial_value", dtype=dtype) if shape is None: shape = ( self._initial_value.get_shape() if validate_shape else tensor_shape.unknown_shape()) self._variable = state_ops.variable_op_v2( shape, self._initial_value.dtype.base_dtype, name=name) # pylint: enable=protected-access # Or get the initial value from a Tensor or Python object. 
else: self._initial_value = ops.convert_to_tensor( initial_value, name="initial_value", dtype=dtype) # pylint: disable=protected-access if self._initial_value.op._get_control_flow_context() is not None: raise ValueError( "Initializer for variable %s is from inside a control-flow " "construct, such as a loop or conditional. When creating a " "variable inside a loop or conditional, use a lambda as the " "initializer." % name) if shape is None: # pylint: enable=protected-access shape = ( self._initial_value.get_shape() if validate_shape else tensor_shape.unknown_shape()) # In this case, the variable op can't be created until after the # initial_value has been converted to a Tensor with a known type. self._variable = state_ops.variable_op_v2( shape, self._initial_value.dtype.base_dtype, name=name) # Cache the name in `self`, because some APIs call `Variable.name` in a # tight loop, and this halves the cost. self._name = self._variable.name # Manually overrides the variable's shape with the initial value's. if validate_shape: initial_value_shape = self._initial_value.get_shape() if not initial_value_shape.is_fully_defined(): raise ValueError("initial_value must have a shape specified: %s" % self._initial_value) # If 'initial_value' makes use of other variables, make sure we don't # have an issue if these other variables aren't initialized first by # using their initialized_value() method. self._initializer_op = state_ops.assign( self._variable, _try_guard_against_uninitialized_dependencies( name, self._initial_value), validate_shape=validate_shape).op # TODO(vrv): Change this class to not take caching_device, but # to take the op to colocate the snapshot with, so we can use # colocation rather than devices. 
if caching_device is not None: with ops.device(caching_device): self._snapshot = array_ops.identity(self._variable, name="read") else: with ops.colocate_with(self._variable.op): self._snapshot = array_ops.identity(self._variable, name="read") ops.add_to_collections(collections, self) self._caching_device = caching_device self._save_slice_info = None self._constraint = constraint def _init_from_proto(self, variable_def, import_scope=None): """Recreates the Variable object from a `VariableDef` protocol buffer. Args: variable_def: `VariableDef` protocol buffer, describing a variable whose nodes already exists in the graph. import_scope: Optional `string`. Name scope to add. """ assert isinstance(variable_def, variable_pb2.VariableDef) # Create from variable_def. g = ops.get_default_graph() self._variable = g.as_graph_element( ops.prepend_name_scope( variable_def.variable_name, import_scope=import_scope)) self._name = self._variable.name self._initializer_op = g.as_graph_element( ops.prepend_name_scope( variable_def.initializer_name, import_scope=import_scope)) # Tests whether initial_value_name exists first for backwards compatibility. 
if (hasattr(variable_def, "initial_value_name") and variable_def.initial_value_name): self._initial_value = g.as_graph_element( ops.prepend_name_scope( variable_def.initial_value_name, import_scope=import_scope)) else: self._initial_value = None synchronization, aggregation, trainable = ( validate_synchronization_aggregation_trainable( variable_def.synchronization, variable_def.aggregation, variable_def.trainable, variable_def.variable_name)) self._synchronization = synchronization self._aggregation = aggregation self._trainable = trainable self._snapshot = g.as_graph_element( ops.prepend_name_scope( variable_def.snapshot_name, import_scope=import_scope)) if variable_def.HasField("save_slice_info_def"): self._save_slice_info = Variable.SaveSliceInfo( save_slice_info_def=variable_def.save_slice_info_def, import_scope=import_scope) else: self._save_slice_info = None self._caching_device = None self._constraint = None def _as_graph_element(self): """Conversion function for Graph.as_graph_element().""" return self._variable def value(self): """Returns the last snapshot of this variable. You usually do not need to call this method as all ops that need the value of the variable call it automatically through a `convert_to_tensor()` call. Returns a `Tensor` which holds the value of the variable. You can not assign a new value to this tensor as it is not a reference to the variable. To avoid copies, if the consumer of the returned value is on the same device as the variable, this actually returns the live value of the variable, not a copy. Updates to the variable are seen by the consumer. If the consumer is on a different device it will get a copy of the variable. Returns: A `Tensor` containing the value of the variable. """ return self._snapshot def read_value(self): """Returns the value of this variable, read in the current context. Can be different from value() if it's on another device, with control dependencies, etc. 
    Returns:
      A `Tensor` containing the value of the variable.
    """
    return array_ops.identity(self._variable, name="read")

  def _ref(self):
    """Returns a reference to this variable.

    You usually do not need to call this method as all ops that need a
    reference to the variable call it automatically.

    Returns a `Tensor` which holds a reference to the variable.  You can
    assign a new value to the variable by passing the tensor to an assign op.
    See `tf.Variable.value` if you want to get the value of the
    variable.

    Returns:
      A `Tensor` that is a reference to the variable.
    """
    return self._variable

  def set_shape(self, shape):
    """Overrides the shape for this variable.

    Args:
      shape: the `TensorShape` representing the overridden shape.
    """
    self._ref().set_shape(shape)
    self.value().set_shape(shape)

  @property
  def trainable(self):
    return self._trainable

  @property
  def synchronization(self):
    return self._synchronization

  @property
  def aggregation(self):
    return self._aggregation

  def eval(self, session=None):
    """In a session, computes and returns the value of this variable.

    This is not a graph construction method, it does not add ops to the graph.

    This convenience method requires a session where the graph
    containing this variable has been launched. If no session is
    passed, the default session is used.  See `tf.compat.v1.Session` for more
    information on launching a graph and on sessions.

    ```python
    v = tf.Variable([1, 2])
    init = tf.compat.v1.global_variables_initializer()

    with tf.compat.v1.Session() as sess:
        sess.run(init)
        # Usage passing the session explicitly.
        print(v.eval(sess))
        # Usage with the default session.  The 'with' block
        # above makes 'sess' the default session.
        print(v.eval())
    ```

    Args:
      session: The session to use to evaluate this variable. If none, the
        default session is used.

    Returns:
      A numpy `ndarray` with a copy of the value of this variable.
""" return self._variable.eval(session=session) @property def initial_value(self): """Returns the Tensor used as the initial value for the variable. Note that this is different from `initialized_value()` which runs the op that initializes the variable before returning its value. This method returns the tensor that is used by the op that initializes the variable. Returns: A `Tensor`. """ return self._initial_value @property def constraint(self): """Returns the constraint function associated with this variable. Returns: The constraint function that was passed to the variable constructor. Can be `None` if no constraint was passed. """ return self._constraint def assign(self, value, use_locking=False, name=None, read_value=True): """Assigns a new value to the variable. This is essentially a shortcut for `assign(self, value)`. Args: value: A `Tensor`. The new value for this variable. use_locking: If `True`, use locking during the assignment. name: The name of the operation to be created read_value: if True, will return something which evaluates to the new value of the variable; if False will return the assign op. Returns: A `Tensor` that will hold the new value of this variable after the assignment has completed. """ assign = state_ops.assign( self._variable, value, use_locking=use_locking, name=name) if read_value: return assign return assign.op def assign_add(self, delta, use_locking=False, name=None, read_value=True): """Adds a value to this variable. This is essentially a shortcut for `assign_add(self, delta)`. Args: delta: A `Tensor`. The value to add to this variable. use_locking: If `True`, use locking during the operation. name: The name of the operation to be created read_value: if True, will return something which evaluates to the new value of the variable; if False will return the assign op. Returns: A `Tensor` that will hold the new value of this variable after the addition has completed. 
""" assign = state_ops.assign_add( self._variable, delta, use_locking=use_locking, name=name) if read_value: return assign return assign.op def assign_sub(self, delta, use_locking=False, name=None, read_value=True): """Subtracts a value from this variable. This is essentially a shortcut for `assign_sub(self, delta)`. Args: delta: A `Tensor`. The value to subtract from this variable. use_locking: If `True`, use locking during the operation. name: The name of the operation to be created read_value: if True, will return something which evaluates to the new value of the variable; if False will return the assign op. Returns: A `Tensor` that will hold the new value of this variable after the subtraction has completed. """ assign = state_ops.assign_sub( self._variable, delta, use_locking=use_locking, name=name) if read_value: return assign return assign.op def scatter_sub(self, sparse_delta, use_locking=False, name=None): """Subtracts `tf.IndexedSlices` from this variable. Args: sparse_delta: `tf.IndexedSlices` to be subtracted from this variable. use_locking: If `True`, use locking during the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered subtraction has completed. Raises: TypeError: if `sparse_delta` is not an `IndexedSlices`. """ if not isinstance(sparse_delta, ops.IndexedSlices): raise TypeError("sparse_delta is not IndexedSlices: %s" % sparse_delta) return gen_state_ops.scatter_sub( self._variable, sparse_delta.indices, sparse_delta.values, use_locking=use_locking, name=name) def scatter_add(self, sparse_delta, use_locking=False, name=None): """Adds `tf.IndexedSlices` to this variable. Args: sparse_delta: `tf.IndexedSlices` to be added to this variable. use_locking: If `True`, use locking during the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered addition has completed. 
Raises: TypeError: if `sparse_delta` is not an `IndexedSlices`. """ if not isinstance(sparse_delta, ops.IndexedSlices): raise TypeError("sparse_delta is not IndexedSlices: %s" % sparse_delta) return gen_state_ops.scatter_add( self._variable, sparse_delta.indices, sparse_delta.values, use_locking=use_locking, name=name) def scatter_max(self, sparse_delta, use_locking=False, name=None): """Updates this variable with the max of `tf.IndexedSlices` and itself. Args: sparse_delta: `tf.IndexedSlices` to use as an argument of max with this variable. use_locking: If `True`, use locking during the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered maximization has completed. Raises: TypeError: if `sparse_delta` is not an `IndexedSlices`. """ if not isinstance(sparse_delta, ops.IndexedSlices): raise TypeError("sparse_delta is not IndexedSlices: %s" % sparse_delta) return gen_state_ops.scatter_max( self._variable, sparse_delta.indices, sparse_delta.values, use_locking=use_locking, name=name) def scatter_min(self, sparse_delta, use_locking=False, name=None): """Updates this variable with the min of `tf.IndexedSlices` and itself. Args: sparse_delta: `tf.IndexedSlices` to use as an argument of min with this variable. use_locking: If `True`, use locking during the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered minimization has completed. Raises: TypeError: if `sparse_delta` is not an `IndexedSlices`. """ if not isinstance(sparse_delta, ops.IndexedSlices): raise TypeError("sparse_delta is not IndexedSlices: %s" % sparse_delta) return gen_state_ops.scatter_min( self._variable, sparse_delta.indices, sparse_delta.values, use_locking=use_locking, name=name) def scatter_mul(self, sparse_delta, use_locking=False, name=None): """Multiply this variable by `tf.IndexedSlices`. 
Args: sparse_delta: `tf.IndexedSlices` to multiply this variable by. use_locking: If `True`, use locking during the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered multiplication has completed. Raises: TypeError: if `sparse_delta` is not an `IndexedSlices`. """ if not isinstance(sparse_delta, ops.IndexedSlices): raise TypeError("sparse_delta is not IndexedSlices: %s" % sparse_delta) return gen_state_ops.scatter_mul( self._variable, sparse_delta.indices, sparse_delta.values, use_locking=use_locking, name=name) def scatter_div(self, sparse_delta, use_locking=False, name=None): """Divide this variable by `tf.IndexedSlices`. Args: sparse_delta: `tf.IndexedSlices` to divide this variable by. use_locking: If `True`, use locking during the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered division has completed. Raises: TypeError: if `sparse_delta` is not an `IndexedSlices`. """ if not isinstance(sparse_delta, ops.IndexedSlices): raise TypeError("sparse_delta is not IndexedSlices: %s" % sparse_delta) return gen_state_ops.scatter_div( self._variable, sparse_delta.indices, sparse_delta.values, use_locking=use_locking, name=name) def scatter_update(self, sparse_delta, use_locking=False, name=None): """Assigns `tf.IndexedSlices` to this variable. Args: sparse_delta: `tf.IndexedSlices` to be assigned to this variable. use_locking: If `True`, use locking during the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered assignment has completed. Raises: TypeError: if `sparse_delta` is not an `IndexedSlices`. 
""" if not isinstance(sparse_delta, ops.IndexedSlices): raise TypeError("sparse_delta is not IndexedSlices: %s" % sparse_delta) return gen_state_ops.scatter_update( self._variable, sparse_delta.indices, sparse_delta.values, use_locking=use_locking, name=name) def batch_scatter_update(self, sparse_delta, use_locking=False, name=None): """Assigns `tf.IndexedSlices` to this variable batch-wise. Analogous to `batch_gather`. This assumes that this variable and the sparse_delta IndexedSlices have a series of leading dimensions that are the same for all of them, and the updates are performed on the last dimension of indices. In other words, the dimensions should be the following: `num_prefix_dims = sparse_delta.indices.ndims - 1` `batch_dim = num_prefix_dims + 1` `sparse_delta.updates.shape = sparse_delta.indices.shape + var.shape[ batch_dim:]` where `sparse_delta.updates.shape[:num_prefix_dims]` `== sparse_delta.indices.shape[:num_prefix_dims]` `== var.shape[:num_prefix_dims]` And the operation performed can be expressed as: `var[i_1, ..., i_n, sparse_delta.indices[i_1, ..., i_n, j]] = sparse_delta.updates[ i_1, ..., i_n, j]` When sparse_delta.indices is a 1D tensor, this operation is equivalent to `scatter_update`. To avoid this operation one can looping over the first `ndims` of the variable and using `scatter_update` on the subtensors that result of slicing the first dimension. This is a valid option for `ndims = 1`, but less efficient than this implementation. Args: sparse_delta: `tf.IndexedSlices` to be assigned to this variable. use_locking: If `True`, use locking during the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered assignment has completed. Raises: TypeError: if `sparse_delta` is not an `IndexedSlices`. 
""" return state_ops.batch_scatter_update( self, sparse_delta.indices, sparse_delta.values, use_locking=use_locking, name=name) def scatter_nd_sub(self, indices, updates, name=None): """Applies sparse subtraction to individual values or slices in a Variable. `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. `indices` must be integer tensor, containing indices into `ref`. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. The innermost dimension of `indices` (with length `K`) corresponds to indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th dimension of `ref`. `updates` is `Tensor` of rank `Q-1+P-K` with shape: ``` [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. ``` For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 elements. In Python, that update would look like this: ```python ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) indices = tf.constant([[4], [3], [1] ,[7]]) updates = tf.constant([9, 10, 11, 12]) op = ref.scatter_nd_sub(indices, updates) with tf.compat.v1.Session() as sess: print sess.run(op) ``` The resulting update to ref would look like this: [1, -9, 3, -6, -6, 6, 7, -4] See `tf.scatter_nd` for more details about how to make updates to slices. Args: indices: The indices to be used in the operation. updates: The values to be used in the operation. name: the name of the operation. Returns: A `Tensor` that will hold the new value of this variable after the scattered subtraction has completed. """ return gen_state_ops.scatter_nd_sub( self._variable, indices, updates, use_locking=True, name=name) def scatter_nd_add(self, indices, updates, name=None): """Applies sparse addition to individual values or slices in a Variable. `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. `indices` must be integer tensor, containing indices into `ref`. It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. 
    The innermost dimension of `indices` (with length `K`) corresponds to
    indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
    dimension of `ref`.

    `updates` is `Tensor` of rank `Q-1+P-K` with shape:

    ```
    [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
    ```

    For example, say we want to add 4 scattered elements to a rank-1 tensor to
    8 elements. In Python, that update would look like this:

    ```python
        ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
        indices = tf.constant([[4], [3], [1] ,[7]])
        updates = tf.constant([9, 10, 11, 12])
        add = ref.scatter_nd_add(indices, updates)
        with tf.compat.v1.Session() as sess:
          print(sess.run(add))
    ```

    The resulting update to ref would look like this:

        [1, 13, 3, 14, 14, 6, 7, 20]

    See `tf.scatter_nd` for more details about how to make updates to
    slices.

    Args:
      indices: The indices to be used in the operation.
      updates: The values to be used in the operation.
      name: the name of the operation.

    Returns:
      A `Tensor` that will hold the new value of this variable after
      the scattered addition has completed.
    """
    # scatter_nd_* ops on ref variables always lock the update.
    return gen_state_ops.scatter_nd_add(
        self._variable, indices, updates, use_locking=True, name=name)

  def scatter_nd_update(self, indices, updates, name=None):
    """Applies sparse assignment to individual values or slices in a Variable.

    `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.

    `indices` must be integer tensor, containing indices into `ref`. It must be
    shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

    The innermost dimension of `indices` (with length `K`) corresponds to
    indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
    dimension of `ref`.

    `updates` is `Tensor` of rank `Q-1+P-K` with shape:

    ```
    [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
    ```

    For example, say we want to add 4 scattered elements to a rank-1 tensor to
    8 elements. In Python, that update would look like this:

    ```python
        ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
        indices = tf.constant([[4], [3], [1] ,[7]])
        updates = tf.constant([9, 10, 11, 12])
        op = ref.scatter_nd_update(indices, updates)
        with tf.compat.v1.Session() as sess:
          print(sess.run(op))
    ```

    The resulting update to ref would look like this:

        [1, 11, 3, 10, 9, 6, 7, 12]

    See `tf.scatter_nd` for more details about how to make updates to
    slices.

    Args:
      indices: The indices to be used in the operation.
      updates: The values to be used in the operation.
      name: the name of the operation.

    Returns:
      A `Tensor` that will hold the new value of this variable after
      the scattered assignment has completed.
    """
    # scatter_nd_* ops on ref variables always lock the update.
    return gen_state_ops.scatter_nd_update(
        self._variable, indices, updates, use_locking=True, name=name)

  def _strided_slice_assign(self, begin, end, strides, value, name, begin_mask,
                            end_mask, ellipsis_mask, new_axis_mask,
                            shrink_axis_mask):
    """Assigns `value` to the strided slice of this variable's ref.

    Thin pass-through to the `strided_slice_assign` kernel; the mask
    arguments have the same meaning as in `tf.strided_slice`.
    """
    return gen_array_ops.strided_slice_assign(
        ref=self._ref(),
        begin=begin,
        end=end,
        strides=strides,
        value=value,
        name=name,
        begin_mask=begin_mask,
        end_mask=end_mask,
        ellipsis_mask=ellipsis_mask,
        new_axis_mask=new_axis_mask,
        shrink_axis_mask=shrink_axis_mask)

  @deprecated(None, "Prefer Dataset.range instead.")
  def count_up_to(self, limit):
    """Increments this variable until it reaches `limit`.

    When that Op is run it tries to increment the variable by `1`. If
    incrementing the variable would bring it above `limit` then the Op raises
    the exception `OutOfRangeError`.

    If no error is raised, the Op outputs the value of the variable before
    the increment.

    This is essentially a shortcut for `count_up_to(self, limit)`.

    Args:
      limit: value at which incrementing the variable raises an error.

    Returns:
      A `Tensor` that will hold the variable value before the increment. If no
      other Op modifies this variable, the values produced will all be
      distinct.
    """
    return state_ops.count_up_to(self._variable, limit=limit)

  # Conversion to tensor.
  @staticmethod
  def _TensorConversionFunction(v, dtype=None, name=None, as_ref=False):  # pylint: disable=invalid-name
    """Utility function for converting a Variable to a Tensor."""
    _ = name
    if dtype and not dtype.is_compatible_with(v.dtype):
      raise ValueError(
          "Incompatible type conversion requested to type '%s' for variable "
          "of type '%s'" % (dtype.name, v.dtype.name))
    if as_ref:
      # Callers that mutate the variable (e.g. assign ops) ask for the ref.
      return v._ref()  # pylint: disable=protected-access
    else:
      return v.value()

  # NOTE(mrry): This enables the Variable's overloaded "right" binary
  # operators to run when the left operand is an ndarray, because it
  # accords the Variable class higher priority than an ndarray, or a
  # numpy matrix.
  # TODO(mrry): Convert this to using numpy's __numpy_ufunc__
  # mechanism, which allows more control over how Variables interact
  # with ndarrays.
  __array_priority__ = 100

  @property
  def name(self):
    """The name of this variable."""
    return self._name

  @property
  def initializer(self):
    """The initializer operation for this variable."""
    return self._initializer_op

  @property
  def device(self):
    """The device of this variable."""
    return self._variable.device

  @property
  def dtype(self):
    """The `DType` of this variable."""
    return self._variable.dtype

  @property
  def op(self):
    """The `Operation` of this variable."""
    return self._variable.op

  @property
  def graph(self):
    """The `Graph` of this variable."""
    return self._variable.graph

  @property
  def _distribute_strategy(self):
    """The `tf.distribute.Strategy` that this variable was created under."""
    return None  # Ref variables are never created inside a strategy.

  @property
  def shape(self):
    """The `TensorShape` of this variable.

    Returns:
      A `TensorShape`.
    """
    return self._variable.get_shape()

  def to_proto(self, export_scope=None):
    """Converts a `Variable` to a `VariableDef` protocol buffer.

    Args:
      export_scope: Optional `string`. Name scope to remove.

    Returns:
      A `VariableDef` protocol buffer, or `None` if the `Variable` is not
      in the specified name scope.
    """
    if (export_scope is None or self._variable.name.startswith(export_scope)):
      var_def = variable_pb2.VariableDef()
      var_def.variable_name = ops.strip_name_scope(self._variable.name,
                                                   export_scope)
      if self._initial_value is not None:
        # For backwards compatibility.
        var_def.initial_value_name = ops.strip_name_scope(
            self._initial_value.name, export_scope)
      var_def.trainable = self.trainable
      var_def.synchronization = self.synchronization.value
      var_def.aggregation = self.aggregation.value
      var_def.initializer_name = ops.strip_name_scope(self.initializer.name,
                                                      export_scope)
      var_def.snapshot_name = ops.strip_name_scope(self._snapshot.name,
                                                   export_scope)
      if self._save_slice_info:
        var_def.save_slice_info_def.MergeFrom(
            self._save_slice_info.to_proto(export_scope=export_scope))
      return var_def
    else:
      # The variable lives outside the requested scope; nothing to export.
      return None

  # The in-place operators below only warn (once) and fall back to the
  # out-of-place binary op: the variable itself is NOT mutated.
  def __iadd__(self, other):
    logging.log_first_n(
        logging.WARN, "Variable += will be deprecated. Use variable.assign_add"
        " if you want assignment to the variable value or 'x = x + y'"
        " if you want a new python Tensor object.", 1)
    return self + other

  def __isub__(self, other):
    logging.log_first_n(
        logging.WARN, "Variable -= will be deprecated. Use variable.assign_sub"
        " if you want assignment to the variable value or 'x = x - y'"
        " if you want a new python Tensor object.", 1)
    return self - other

  def __imul__(self, other):
    logging.log_first_n(
        logging.WARN,
        "Variable *= will be deprecated. Use `var.assign(var * other)`"
        " if you want assignment to the variable value or `x = x * y`"
        " if you want a new python Tensor object.", 1)
    return self * other

  def __idiv__(self, other):
    logging.log_first_n(
        logging.WARN,
        "Variable /= will be deprecated. Use `var.assign(var / other)`"
        " if you want assignment to the variable value or `x = x / y`"
        " if you want a new python Tensor object.", 1)
    return self / other

  def __itruediv__(self, other):
    logging.log_first_n(
        logging.WARN,
        "Variable /= will be deprecated. 
Use `var.assign(var / other)`"
        " if you want assignment to the variable value or `x = x / y`"
        " if you want a new python Tensor object.", 1)
    return self / other

  def __irealdiv__(self, other):
    logging.log_first_n(
        logging.WARN,
        "Variable /= will be deprecated. Use `var.assign(var / other)`"
        " if you want assignment to the variable value or `x = x / y`"
        " if you want a new python Tensor object.", 1)
    return self / other

  def __ipow__(self, other):
    logging.log_first_n(
        logging.WARN,
        "Variable **= will be deprecated. Use `var.assign(var ** other)`"
        " if you want assignment to the variable value or `x = x ** y`"
        " if you want a new python Tensor object.", 1)
    return self**other


def _try_guard_against_uninitialized_dependencies(name, initial_value):
  """Attempt to guard against dependencies on uninitialized variables.

  Replace references to variables in `initial_value` with references to the
  variable's initialized values. The initialized values are essentially
  conditional TensorFlow graphs that return a variable's value if it is
  initialized or its `initial_value` if it hasn't been initialized. This
  replacement is done on a best effort basis:

  - If the `initial_value` graph contains cycles, we don't do any
    replacements for that graph.
  - If the variables that `initial_value` depends on are not present in the
    `GLOBAL_VARIABLES` or `LOCAL_VARIABLES` we don't replace them.

  In these cases, it is up to the caller to ensure that the `initial_value`
  graph uses initialized variables or that they guard access to variables
  using their `initialized_value` method.

  Args:
    name: Variable name.
    initial_value: `Tensor`. The initial value.

  Returns:
    A `Tensor` suitable to initialize a variable.

  Raises:
    TypeError: If `initial_value` is not a `Tensor`.
  """
  if not isinstance(initial_value, ops.Tensor):
    raise TypeError("initial_value needs to be a Tensor: %s" % initial_value)

  # Don't modify initial_value if it contains any cyclic dependencies.
  if _has_cycle(initial_value.op, state={}):
    return initial_value
  return _safe_initial_value_from_tensor(name, initial_value, op_cache={})


# DFS colors for cycle detection: unvisited / on the current stack / done.
_UNKNOWN, _STARTED, _FINISHED = range(3)


def _has_cycle(op, state):
  """Detect cycles in the dependencies of `initial_value`."""
  op_state = state.get(op.name, _UNKNOWN)
  if op_state == _STARTED:
    # Reached an op that is still on the DFS stack: back edge => cycle.
    return True
  elif op_state == _FINISHED:
    return False

  state[op.name] = _STARTED
  # Both data inputs and control inputs count as dependencies.
  for i in itertools.chain((i.op for i in op.inputs), op.control_inputs):
    if _has_cycle(i, state):
      return True
  state[op.name] = _FINISHED
  return False


def _safe_initial_value_from_tensor(name, tensor, op_cache):
  """Replace dependencies on variables with their initialized values.

  Args:
    name: Variable name.
    tensor: A `Tensor`. The tensor to replace.
    op_cache: A dict mapping operation names to `Operation`s. Used to memoize
      the results so as to avoid creating redundant operations.

  Returns:
    A `Tensor` compatible with `tensor`. Any inputs that lead to variable
    values will be replaced with a corresponding graph that uses the
    variable's initialized values. This is done on a best-effort basis. If no
    modifications need to be made then `tensor` will be returned unchanged.
  """
  op = tensor.op
  new_op = op_cache.get(op.name)
  if new_op is None:
    new_op = _safe_initial_value_from_op(name, op, op_cache)
    op_cache[op.name] = new_op
  return new_op.outputs[tensor.value_index]


def _safe_initial_value_from_op(name, op, op_cache):
  """Replace dependencies on variables with their initialized values.

  Args:
    name: Variable name.
    op: An `Operation`. The operation to replace.
    op_cache: A dict mapping operation names to `Operation`s. Used to memoize
      the results so as to avoid creating redundant operations.

  Returns:
    An `Operation` compatible with `op`. Any inputs that lead to variable
    values will be replaced with a corresponding graph that uses the
    variable's initialized values. This is done on a best-effort basis. If no
    modifications need to be made then `op` will be returned unchanged.
  """
  op_type = op.node_def.op
  # These op types either already guard initialization or must not be
  # rewritten; leave them untouched.
  if op_type in ("IsVariableInitialized", "VarIsInitializedOp",
                 "ReadVariableOp", "If"):
    return op

  # Attempt to find the initialized_value of any variable reference / handles.
  # TODO(b/70206927): Fix handling of ResourceVariables.
  if op_type in ("Variable", "VariableV2", "VarHandleOp"):
    initialized_value = _find_initialized_value_for_variable(op)
    return op if initialized_value is None else initialized_value.op

  # Recursively build initializer expressions for inputs.
  modified = False
  new_op_inputs = []
  for op_input in op.inputs:
    new_op_input = _safe_initial_value_from_tensor(name, op_input, op_cache)
    new_op_inputs.append(new_op_input)
    modified = modified or (new_op_input != op_input)

  # If at least one input was modified, replace the op.
  if modified:
    new_op_type = op_type
    if new_op_type == "RefSwitch":
      new_op_type = "Switch"
    new_op_name = op.node_def.name + "_" + name
    new_op_name = new_op_name.replace(":", "_")
    return op.graph.create_op(
        new_op_type,
        new_op_inputs,
        op._output_types,  # pylint: disable=protected-access
        name=new_op_name,
        attrs=op.node_def.attr)

  return op


def _find_initialized_value_for_variable(variable_op):
  """Find the initialized value for a variable op.

  To do so, lookup the variable op in the variables collection.

  Args:
    variable_op: A variable `Operation`.

  Returns:
    A `Tensor` representing the initialized value for the variable or `None`
    if the initialized value could not be found.
  """
  try:
    # Collections may hold the variable under either its op name or the
    # name of its first output tensor.
    var_names = [variable_op.node_def.name, variable_op.node_def.name + ":0"]
    for collection_name in (ops.GraphKeys.GLOBAL_VARIABLES,
                            ops.GraphKeys.LOCAL_VARIABLES):
      for var in variable_op.graph.get_collection(collection_name):
        if var.name in var_names:
          return var.initialized_value()
  except AttributeError:
    # Return None when an incomplete user-defined variable type was put in
    # the collection.
    return None
  return None


class PartitionedVariable(object):
  """A container for partitioned `Variable` objects.
  @compatibility(eager) `tf.PartitionedVariable` is not compatible with
  eager execution.  Use `tf.Variable` instead which is compatible
  with both eager execution and graph construction.  See [the
  TensorFlow Eager Execution
  guide](https://www.tensorflow.org/guide/eager#variables_and_optimizers)
  for details on how variables work in eager execution.
  @end_compatibility
  """

  def __init__(self, name, shape, dtype, variable_list, partitions):
    """Creates a new partitioned variable wrapper.

    Variables passed via the variable_list must contain a save_slice_info
    field.  Concatenation and iteration is in lexicographic order according
    to the var_offset property of the save_slice_info.

    Args:
      name: String. Overall name of the variables.
      shape: List of integers.  Overall shape of the variables.
      dtype: Type of the variables.
      variable_list: List of `Variable` that comprise this partitioned
        variable.
      partitions: List of integers.  Number of partitions for each dimension.

    Raises:
      TypeError: If `variable_list` is not a list of `Variable` objects, or
        `partitions` is not a list.
      ValueError: If `variable_list` is empty, or the `Variable` shape
        information does not match `shape`, or `partitions` has invalid values.
    """
    if not isinstance(variable_list, (list, tuple)):
      raise TypeError("variable_list is not a list or tuple: %s" %
                      variable_list)
    if not isinstance(partitions, (list, tuple)):
      raise TypeError("partitions is not a list or tuple: %s" % partitions)
    if not all(p >= 1 for p in partitions):
      raise ValueError("partition values must be positive: %s" % partitions)
    if not variable_list:
      raise ValueError("variable_list may not be empty")
    # pylint: disable=protected-access
    for v in variable_list:
      # Sort the variable_list lexicographically according to var offset value.
      # NOTE(review): the all() below re-scans the whole list on every
      # iteration (O(n^2)) and its generator variable shadows the loop's `v`;
      # the len(shape) check is likewise loop-invariant. Behavior is correct
      # but these checks could be hoisted out of the loop.
      if not all(v._get_save_slice_info() is not None for v in variable_list):
        raise ValueError(
            "All variables must have a save_slice_info available: %s" %
            [v.name for v in variable_list])
      if len(shape) != len(partitions):
        raise ValueError("len(shape) != len(partitions): %s vs. %s" %
                         (shape, partitions))
      if v._get_save_slice_info().full_shape != shape:
        raise ValueError("All variables' full shapes must match shape: %s; "
                         "but full shapes were: %s" %
                         (shape, str([v._get_save_slice_info().full_shape])))
    self._variable_list = sorted(
        variable_list, key=lambda v: v._get_save_slice_info().var_offset)
    # pylint: enable=protected-access
    self._name = name
    self._shape = shape
    self._dtype = dtype
    self._partitions = partitions
    # Lazily-created concatenated view; see as_tensor().
    self._as_tensor = None

  def __iter__(self):
    """Return an iterable for accessing the underlying partition Variables."""
    return iter(self._variable_list)

  def __len__(self):
    num_partition_axes = len(self._partition_axes())
    if num_partition_axes > 1:
      raise ValueError("Cannot get a length for %d > 1 partition axes" %
                       num_partition_axes)
    return len(self._variable_list)

  def _partition_axes(self):
    # An unpartitioned variable (all partition counts == 1) is treated as
    # partitioned along axis 0.
    if all(p == 1 for p in self._partitions):
      return [0]
    else:
      return [i for i, p in enumerate(self._partitions) if p > 1]

  def _concat(self):
    """Returns the overall concatenated value as a `Tensor`.

    This is different from using the partitioned variable directly as a tensor
    (through tensor conversion and `as_tensor`) in that it creates a new set of
    operations that keeps the control dependencies from its scope.

    Returns:
      `Tensor` containing the concatenated value.
    """
    if len(self._variable_list) == 1:
      with ops.name_scope(None):
        return array_ops.identity(self._variable_list[0], name=self._name)

    partition_axes = self._partition_axes()
    if len(partition_axes) > 1:
      raise NotImplementedError(
          "Cannot concatenate along more than one dimension: %s. "
          "Multi-axis partition concat is not supported" % str(partition_axes))
    partition_ix = partition_axes[0]

    with ops.name_scope(self._name + "/ConcatPartitions/"):
      concatenated = array_ops.concat(self._variable_list, partition_ix)

    with ops.name_scope(None):
      return array_ops.identity(concatenated, name=self._name)

  def as_tensor(self):
    """Returns the overall concatenated value as a `Tensor`.

    The returned tensor will not inherit the control dependencies from the
    scope where the value is used, which is similar to getting the value of
    `Variable`.

    Returns:
      `Tensor` containing the concatenated value.
    """
    with ops.control_dependencies(None):
      return self._concat()

  @staticmethod
  def _TensorConversionFunction(v, dtype=None, name=None, as_ref=False):  # pylint: disable=invalid-name
    # Conversion reads the concatenated value; refs are not supported.
    _ = name
    if dtype is not None and not dtype.is_compatible_with(v.dtype):
      raise ValueError(
          "Incompatible type conversion requested to type '%s' for variable "
          "of type '%s'" % (dtype.name, v.dtype.name))
    if as_ref:
      raise NotImplementedError(
          "PartitionedVariable doesn't support being used as a reference.")
    else:
      return v.as_tensor()

  @property
  def name(self):
    return self._name

  @property
  def dtype(self):
    return self._dtype

  @property
  def shape(self):
    return self.get_shape()

  @property
  def _distribute_strategy(self):
    """The `tf.distribute.Strategy` that this variable was created under."""
    # NOTE(yuefengz): Today, no partitioned variables in a distribute strategy.
    return None

  def get_shape(self):
    return self._shape

  def _get_variable_list(self):
    return self._variable_list

  def _get_partitions(self):
    return self._partitions

  def _apply_assign_fn(self, assign_fn, value):
    """Applies `assign_fn(var, value_part)` to each partition of `value`."""
    partition_axes = self._partition_axes()
    if len(partition_axes) > 1:
      raise NotImplementedError(
          "Cannot do assign action along more than one dimension: %s. "
          "Multi-axis partition assign action is not supported " %
          str(partition_axes))
    if isinstance(value, list):
      # Caller supplied one value per partition.
      assert len(value) == len(self._variable_list)
      value_list = value
    elif isinstance(value, PartitionedVariable):
      value_list = [var_part for var_part in value]
    else:
      # A single tensor: split it along the partition axis to match the
      # per-partition sizes.
      partition_ix = partition_axes[0]
      size_splits_list = [
          tensor_shape.dimension_value(var.shape[partition_ix])
          for var in self._variable_list
      ]
      value_list = array_ops.split(value, size_splits_list, axis=partition_ix)

    op_list = [
        assign_fn(var, value_list[idx])
        for idx, var in enumerate(self._variable_list)
    ]
    return op_list

  def assign(self, value, use_locking=False, name=None, read_value=True):
    """Assigns `value` partition-wise; returns per-partition results."""
    assign_fn = lambda var, r_value: var.assign(
        r_value, use_locking=use_locking, name=name, read_value=read_value)
    assign_list = self._apply_assign_fn(assign_fn, value)
    if read_value:
      return assign_list
    return [assign.op for assign in assign_list]

  def assign_add(self, value, use_locking=False, name=None, read_value=True):
    """Adds `value` partition-wise; returns per-partition results."""
    assign_fn = lambda var, r_value: var.assign_add(
        r_value, use_locking=use_locking, name=name, read_value=read_value)
    assign_list = self._apply_assign_fn(assign_fn, value)
    if read_value:
      return assign_list
    return [assign.op for assign in assign_list]

  def assign_sub(self, value, use_locking=False, name=None, read_value=True):
    """Subtracts `value` partition-wise; returns per-partition results."""
    assign_fn = lambda var, r_value: var.assign_sub(
        r_value, use_locking=use_locking, name=name, read_value=read_value)
    assign_list = self._apply_assign_fn(assign_fn, value)
    if read_value:
      return assign_list
    return [assign.op for assign in assign_list]


# Register a conversion function which reads the value of the variable,
# allowing instances of the class to be used as tensors.
ops.register_tensor_conversion_function(RefVariable,
                                        RefVariable._TensorConversionFunction)  # pylint: disable=protected-access
ops.register_dense_tensor_like_type(RefVariable)


@tf_export(v1=["global_variables"])
def global_variables(scope=None):
  """Returns global variables.
  Global variables are variables that are shared across machines in a
  distributed environment. The `Variable()` constructor or `get_variable()`
  automatically adds new variables to the graph collection
  `GraphKeys.GLOBAL_VARIABLES`.
  This convenience function returns the contents of that collection.

  An alternative to global variables are local variables. See
  `tf.compat.v1.local_variables`

  Args:
    scope: (Optional.) A string. If supplied, the resulting list is filtered
      to include only items whose `name` attribute matches `scope` using
      `re.match`. Items without a `name` attribute are never returned if a
      scope is supplied. The choice of `re.match` means that a `scope` without
      special tokens filters by prefix.

  Returns:
    A list of `Variable` objects.
  """
  return ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, scope)


@tf_export(v1=["all_variables"])
@deprecated("2017-03-02", "Please use tf.global_variables instead.")
def all_variables():
  """Use `tf.compat.v1.global_variables` instead."""
  return global_variables()


def _all_saveable_objects(scope=None):
  """Returns all variables and `SaveableObject`s that must be checkpointed.

  Args:
    scope: (Optional.) A string. If supplied, the resulting list is filtered
      to include only items whose `name` attribute matches `scope` using
      `re.match`. Items without a `name` attribute are never returned if a
      scope is supplied. The choice of `re.match` means that a `scope` without
      special tokens filters by prefix.

  Returns:
    A list of `Variable` and `SaveableObject` to be checkpointed
  """
  # TODO(andreasst): make this function public once things are settled.
  return (ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, scope) +
          ops.get_collection(ops.GraphKeys.SAVEABLE_OBJECTS, scope))


@tf_export(v1=["local_variables"])
def local_variables(scope=None):
  """Returns local variables.

  Local variables - per process variables, usually not saved/restored to
  checkpoint and used for temporary or intermediate values.
  For example, they can be used as counters for metrics computation or
  number of epochs this machine has read data.
  The `tf.contrib.framework.local_variable()` function automatically adds the
  new variable to `GraphKeys.LOCAL_VARIABLES`.
  This convenience function returns the contents of that collection.

  An alternative to local variables are global variables. See
  `tf.compat.v1.global_variables`

  Args:
    scope: (Optional.) A string. If supplied, the resulting list is filtered
      to include only items whose `name` attribute matches `scope` using
      `re.match`. Items without a `name` attribute are never returned if a
      scope is supplied. The choice of `re.match` means that a `scope` without
      special tokens filters by prefix.

  Returns:
    A list of local `Variable` objects.
  """
  return ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES, scope)


@tf_export(v1=["model_variables"])
def model_variables(scope=None):
  """Returns all variables in the MODEL_VARIABLES collection.

  Args:
    scope: (Optional.) A string. If supplied, the resulting list is filtered
      to include only items whose `name` attribute matches `scope` using
      `re.match`. Items without a `name` attribute are never returned if a
      scope is supplied. The choice of `re.match` means that a `scope` without
      special tokens filters by prefix.

  Returns:
    A list of local Variable objects.
  """
  return ops.get_collection(ops.GraphKeys.MODEL_VARIABLES, scope)


@tf_export(v1=["trainable_variables"])
def trainable_variables(scope=None):
  """Returns all variables created with `trainable=True`.

  When passed `trainable=True`, the `Variable()` constructor automatically
  adds new variables to the graph collection
  `GraphKeys.TRAINABLE_VARIABLES`. This convenience function returns the
  contents of that collection.

  Args:
    scope: (Optional.) A string. If supplied, the resulting list is filtered
      to include only items whose `name` attribute matches `scope` using
      `re.match`. Items without a `name` attribute are never returned if a
      scope is supplied. The choice of `re.match` means that a `scope` without
      special tokens filters by prefix.

  Returns:
    A list of Variable objects.
  """
  return ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES, scope)


@tf_export(v1=["moving_average_variables"])
def moving_average_variables(scope=None):
  """Returns all variables that maintain their moving averages.

  If an `ExponentialMovingAverage` object is created and the `apply()`
  method is called on a list of variables, these variables will
  be added to the `GraphKeys.MOVING_AVERAGE_VARIABLES` collection.
  This convenience function returns the contents of that collection.

  Args:
    scope: (Optional.) A string. If supplied, the resulting list is filtered
      to include only items whose `name` attribute matches `scope` using
      `re.match`. Items without a `name` attribute are never returned if a
      scope is supplied. The choice of `re.match` means that a `scope` without
      special tokens filters by prefix.

  Returns:
    A list of Variable objects.
  """
  return ops.get_collection(ops.GraphKeys.MOVING_AVERAGE_VARIABLES, scope)


@tf_export(v1=["initializers.variables", "variables_initializer"])
def variables_initializer(var_list, name="init"):
  """Returns an Op that initializes a list of variables.

  After you launch the graph in a session, you can run the returned Op to
  initialize all the variables in `var_list`. This Op runs all the
  initializers of the variables in `var_list` in parallel.

  Calling `initialize_variables()` is equivalent to passing the list of
  initializers to `Group()`.

  If `var_list` is empty, however, the function still returns an Op that can
  be run. That Op just has no effect.

  Args:
    var_list: List of `Variable` objects to initialize.
    name: Optional name for the returned operation.

  Returns:
    An Op that run the initializers of all the specified variables.
  """
  if var_list and not context.executing_eagerly():
    return control_flow_ops.group(*[v.initializer for v in var_list],
                                  name=name)
  # Eager mode, or nothing to initialize: a no-op suffices.
  return control_flow_ops.no_op(name=name)


@tf_export(v1=["initialize_variables"])
@tf_should_use.should_use_result
@deprecated("2017-03-02", "Use `tf.variables_initializer` instead.")
def initialize_variables(var_list, name="init"):
  """See `tf.compat.v1.variables_initializer`."""
  return variables_initializer(var_list, name=name)


@tf_export(v1=["initializers.global_variables", "global_variables_initializer"])
def global_variables_initializer():
  """Returns an Op that initializes global variables.

  This is just a shortcut for `variables_initializer(global_variables())`

  Returns:
    An Op that initializes global variables in the graph.
  """
  if context.executing_eagerly():
    return control_flow_ops.no_op(name="global_variables_initializer")
  return variables_initializer(global_variables())


@tf_export(v1=["initialize_all_variables"])
@tf_should_use.should_use_result
@deprecated("2017-03-02", "Use `tf.global_variables_initializer` instead.")
def initialize_all_variables():
  """See `tf.compat.v1.global_variables_initializer`."""
  return global_variables_initializer()


@tf_export(v1=["initializers.local_variables", "local_variables_initializer"])
def local_variables_initializer():
  """Returns an Op that initializes all local variables.

  This is just a shortcut for `variables_initializer(local_variables())`

  Returns:
    An Op that initializes all local variables in the graph.
  """
  if context.executing_eagerly():
    return control_flow_ops.no_op(name="local_variables_initializer")
  return variables_initializer(local_variables())


@tf_export(v1=["initialize_local_variables"])
@tf_should_use.should_use_result
@deprecated("2017-03-02", "Use `tf.local_variables_initializer` instead.")
def initialize_local_variables():
  """See `tf.compat.v1.local_variables_initializer`."""
  return local_variables_initializer()


@tf_export(v1=["is_variable_initialized"])
@tf_should_use.should_use_result
def is_variable_initialized(variable):
  """Tests if a variable has been initialized.

  Args:
    variable: A `Variable`.

  Returns:
    Returns a scalar boolean Tensor, `True` if the variable has been
    initialized, `False` otherwise.
  """
  return state_ops.is_variable_initialized(variable)


@tf_export(v1=["assert_variables_initialized"])
@tf_should_use.should_use_result
def assert_variables_initialized(var_list=None):
  """Returns an Op to check if variables are initialized.

  NOTE: This function is obsolete and will be removed in 6 months.  Please
  change your implementation to use `report_uninitialized_variables()`.

  When run, the returned Op will raise the exception `FailedPreconditionError`
  if any of the variables has not yet been initialized.

  Note: This function is implemented by trying to fetch the values of the
  variables. If one of the variables is not initialized a message may be
  logged by the C++ runtime. This is expected.

  Args:
    var_list: List of `Variable` objects to check. Defaults to the value of
      `global_variables().`

  Returns:
    An Op, or None if there are no variables.
  """
  if var_list is None:
    var_list = global_variables() + local_variables()
  # Backwards compatibility for old-style variables. TODO(touts): remove.
  if not var_list:
    var_list = []
    for op in ops.get_default_graph().get_operations():
      if op.type in ["Variable", "VariableV2", "AutoReloadVariable"]:
        var_list.append(op.outputs[0])
  if not var_list:
    return None
  else:
    ranks = []
    for var in var_list:
      with ops.colocate_with(var.op):
        # Fetching the rank forces a read; it fails if var is uninitialized.
        ranks.append(array_ops.rank_internal(var, optimize=False))
    if len(ranks) == 1:
      return ranks[0]
    else:
      return array_ops.stack(ranks)


@tf_export(v1=["report_uninitialized_variables"])
@tf_should_use.should_use_result
def report_uninitialized_variables(var_list=None,
                                   name="report_uninitialized_variables"):
  """Adds ops to list the names of uninitialized variables.

  When run, it returns a 1-D tensor containing the names of uninitialized
  variables if there are any, or an empty array if there are none.

  Args:
    var_list: List of `Variable` objects to check. Defaults to the value of
      `global_variables() + local_variables()`
    name: Optional name of the `Operation`.

  Returns:
    A 1-D tensor containing names of the uninitialized variables, or an empty
    1-D tensor if there are no variables or no uninitialized variables.
  """
  if var_list is None:
    var_list = global_variables() + local_variables()
    # Backwards compatibility for old-style variables. TODO(touts): remove.
    if not var_list:
      var_list = []
      for op in ops.get_default_graph().get_operations():
        if op.type in ["Variable", "VariableV2", "AutoReloadVariable"]:
          var_list.append(op.outputs[0])
  with ops.name_scope(name):
    # Run all operations on CPU
    if var_list:
      init_vars = [state_ops.is_variable_initialized(v) for v in var_list]
    local_device = os.environ.get(
        "TF_DEVICE_FOR_UNINITIALIZED_VARIABLE_REPORTING", "/cpu:0")
    with ops.device(local_device):
      if not var_list:
        # Return an empty tensor so we only need to check for returned tensor
        # size being 0 as an indication of model ready.
        return array_ops.constant([], dtype=dtypes.string)
      else:
        # Get a 1-D boolean tensor listing whether each variable is
        # initialized.
        variables_mask = math_ops.logical_not(array_ops.stack(init_vars))
        # Get a 1-D string tensor containing all the variable names.
        variable_names_tensor = array_ops.constant(
            [s.op.name for s in var_list])
        # Return a 1-D tensor containing all the names of
        # uninitialized variables.
        return array_ops.boolean_mask(variable_names_tensor, variables_mask)


ops.register_tensor_conversion_function(
    PartitionedVariable, PartitionedVariable._TensorConversionFunction)  # pylint: disable=protected-access
# codeparrot/github-code-clean -- dataset boundary marker separating two unrelated concatenated source files (TensorFlow variables.py above, Nova test_compute.py below)
# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Piston Cloud Computing, Inc. # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for compute service.""" import base64 import contextlib import datetime import operator import sys import time import traceback import uuid from eventlet import greenthread import mock from mox3 import mox from neutronclient.common import exceptions as neutron_exceptions from oslo_config import cfg from oslo_log import log as logging import oslo_messaging as messaging from oslo_serialization import jsonutils from oslo_utils import importutils from oslo_utils import timeutils from oslo_utils import units from oslo_utils import uuidutils import six import testtools from testtools import matchers as testtools_matchers import nova from nova import availability_zones from nova import block_device from nova import compute from nova.compute import api as compute_api from nova.compute import arch from nova.compute import flavors from nova.compute import manager as compute_manager from nova.compute import power_state from nova.compute import rpcapi as compute_rpcapi from nova.compute import task_states from nova.compute import utils as compute_utils from nova.compute import vm_states from nova.conductor import manager as conductor_manager from nova.console import type as ctype from nova import context 
from nova import db
from nova import exception
from nova.image import api as image_api
from nova.image import glance
from nova.network import api as network_api
from nova.network import model as network_model
from nova.network.security_group import openstack_driver
from nova import objects
from nova.objects import block_device as block_device_obj
from nova.objects import instance as instance_obj
from nova import policy
from nova import quota
from nova.scheduler import client as scheduler_client
from nova import test
from nova.tests import fixtures
from nova.tests.unit.compute import eventlet_utils
from nova.tests.unit.compute import fake_resource_tracker
from nova.tests.unit.db import fakes as db_fakes
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_network
from nova.tests.unit import fake_network_cache_model
from nova.tests.unit import fake_notifier
from nova.tests.unit import fake_server_actions
from nova.tests.unit.image import fake as fake_image
from nova.tests.unit import matchers
from nova.tests.unit.objects import test_flavor
from nova.tests.unit.objects import test_migration
from nova.tests.unit import utils as test_utils
from nova import utils
from nova.virt import block_device as driver_block_device
from nova.virt import event
from nova.virt import fake
from nova.virt import hardware
from nova.volume import cinder


QUOTAS = quota.QUOTAS
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
# Pull in config options defined by other modules that these tests read.
CONF.import_opt('compute_manager', 'nova.service')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('live_migration_retry_count', 'nova.compute.manager')
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')

# Placeholder image reference used by the fake image service below.
FAKE_IMAGE_REF = 'fake-image-ref'

# Single fake hypervisor node name shared by all tests in this module.
NODENAME = 'fakenode1'


def fake_not_implemented(*args, **kwargs):
    # Stand-in for driver methods that should look unimplemented to the code
    # under test.
    raise NotImplementedError()


def get_primitive_instance_by_uuid(context, instance_uuid):
    """Helper method to get an instance and then convert it to
    a primitive form using jsonutils.
    """
    instance = db.instance_get_by_uuid(context, instance_uuid)
    return jsonutils.to_primitive(instance)


def unify_instance(instance):
    """Return a dict-like instance for both object-initiated and
    model-initiated sources that can reasonably be compared.

    Normalizes datetimes to naive values and drops keys that only one of
    the two representations carries ('fault', 'pci_devices').
    """
    newdict = dict()
    for k, v in six.iteritems(instance):
        if isinstance(v, datetime.datetime):
            # NOTE(danms): DB models and Instance objects have different
            # timezone expectations
            v = v.replace(tzinfo=None)
        elif k == 'fault':
            # NOTE(danms): DB models don't have 'fault'
            continue
        elif k == 'pci_devices':
            # NOTE(yonlig.he) pci devices need lazy loading
            # fake db does not support it yet.
            continue
        newdict[k] = v
    return newdict


class FakeComputeTaskAPI(object):
    """No-op stand-in for the conductor compute-task API."""

    def resize_instance(self, context, instance, extra_instance_updates,
                        scheduler_hint, flavor, reservations):
        pass


class BaseTestCase(test.TestCase):
    """Base fixture: wires a real ComputeManager to fakes/stubs."""

    def setUp(self):
        super(BaseTestCase, self).setUp()
        self.flags(network_manager='nova.network.manager.FlatManager')
        fake.set_nodes([NODENAME])
        self.flags(use_local=True, group='conductor')

        fake_notifier.stub_notifier(self.stubs)
        self.addCleanup(fake_notifier.reset)

        self.compute = importutils.import_object(CONF.compute_manager)
        # execute power syncing synchronously for testing:
        self.compute._sync_power_pool = eventlet_utils.SyncPool()

        # override tracker with a version that doesn't need the database:
        fake_rt = fake_resource_tracker.FakeResourceTracker(self.compute.host,
                                                            self.compute.driver,
                                                            NODENAME)
        self.compute._resource_tracker_dict[NODENAME] = fake_rt

        def fake_get_compute_nodes_in_db(context, use_slave=False):
            # Canned DB row for a single compute node; converted below into
            # ComputeNode objects so update_available_resource can run
            # without a real database.
            fake_compute_nodes = [{'local_gb': 259,
                                   'vcpus_used': 0,
                                   'deleted': 0,
                                   'hypervisor_type': 'powervm',
                                   'created_at': '2013-04-01T00:27:06.000000',
                                   'local_gb_used': 0,
                                   'updated_at': '2013-04-03T00:35:41.000000',
                                   'hypervisor_hostname': 'fake_phyp1',
                                   'memory_mb_used': 512,
                                   'memory_mb': 131072,
                                   'current_workload': 0,
                                   'vcpus': 16,
                                   'cpu_info': 'ppc64,powervm,3940',
                                   'running_vms': 0,
                                   'free_disk_gb': 259,
                                   'service_id': 7,
                                   'hypervisor_version': 7,
                                   'disk_available_least': 265856,
                                   'deleted_at': None,
                                   'free_ram_mb': 130560,
                                   'metrics': '',
                                   'stats': '',
                                   'numa_topology': '',
                                   'id': 2,
                                   'host': 'fake_phyp1',
                                   'host_ip': '127.0.0.1'}]
            return [objects.ComputeNode._from_db_object(
                        context, objects.ComputeNode(), cn)
                    for cn in fake_compute_nodes]

        def fake_compute_node_delete(context, compute_node_id):
            # The fake node above has id 2; deleting anything else is a bug.
            self.assertEqual(2, compute_node_id)

        self.stubs.Set(self.compute, '_get_compute_nodes_in_db',
                       fake_get_compute_nodes_in_db)
        self.stubs.Set(db, 'compute_node_delete',
                       fake_compute_node_delete)

        self.compute.update_available_resource(
                context.get_admin_context())

        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id,
                                              self.project_id)
        self.none_quotas = objects.Quotas.from_reservations(
                self.context, None)

        def fake_show(meh, context, id, **kwargs):
            # Fake image-service `show`: any truthy id resolves to an active
            # image with kernel/ramdisk properties.
            if id:
                return {'id': id,
                        'min_disk': None,
                        'min_ram': None,
                        'name': 'fake_name',
                        'status': 'active',
                        'properties': {'kernel_id': 'fake_kernel_id',
                                       'ramdisk_id': 'fake_ramdisk_id',
                                       'something_else': 'meow'}}
            else:
                raise exception.ImageNotFound(image_id=id)

        fake_image.stub_out_image_service(self.stubs)
        self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)

        fake_taskapi = FakeComputeTaskAPI()
        self.stubs.Set(self.compute, 'compute_task_api', fake_taskapi)

        fake_network.set_stub_network_methods(self.stubs)
        fake_server_actions.stub_out_action_events(self.stubs)

        def fake_get_nw_info(cls, ctxt, instance, *args, **kwargs):
            return network_model.NetworkInfo()

        self.stubs.Set(network_api.API, 'get_instance_nw_info',
                       fake_get_nw_info)

        def fake_allocate_for_instance(cls, ctxt, instance, *args, **kwargs):
            # Network allocation must happen with the user's (non-admin)
            # context.
            self.assertFalse(ctxt.is_admin)
            return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)

        self.stubs.Set(network_api.API, 'allocate_for_instance',
                       fake_allocate_for_instance)
        self.compute_api = compute.API()

        # Just to make long lines short
        self.rt = self.compute._get_resource_tracker(NODENAME)

    def tearDown(self):
        # Undo any frozen clock, fake images and leftover instances so tests
        # stay independent of each other.
        timeutils.clear_time_override()
        ctxt = context.get_admin_context()
        fake_image.FakeImageService_reset()
        instances = db.instance_get_all(ctxt)
        for instance in instances:
            db.instance_destroy(ctxt, instance['uuid'])
        fake.restore_nodes()
        super(BaseTestCase, self).tearDown()

    def _fake_instance(self, updates):
        # Build an Instance object without touching the database.
        return fake_instance.fake_instance_obj(None, **updates)

    def _create_fake_instance_obj(self, params=None, type_name='m1.tiny',
                                  services=False, context=None):
        # Create (and persist) a minimal ACTIVE instance of the given flavor;
        # `params` overrides any field, `services` also registers a service
        # entry for the instance's host.
        flavor = flavors.get_flavor_by_name(type_name)
        inst = objects.Instance(context=context or self.context)
        inst.vm_state = vm_states.ACTIVE
        inst.task_state = None
        inst.power_state = power_state.RUNNING
        inst.image_ref = FAKE_IMAGE_REF
        inst.reservation_id = 'r-fakeres'
        inst.user_id = self.user_id
        inst.project_id = self.project_id
        inst.host = 'fake_host'
        inst.node = NODENAME
        inst.instance_type_id = flavor.id
        inst.ami_launch_index = 0
        inst.memory_mb = 0
        inst.vcpus = 0
        inst.root_gb = 0
        inst.ephemeral_gb = 0
        inst.architecture = arch.X86_64
        inst.os_type = 'Linux'
        inst.system_metadata = (
            params and params.get('system_metadata', {}) or {})
        inst.locked = False
        inst.created_at = timeutils.utcnow()
        inst.updated_at = timeutils.utcnow()
        inst.launched_at = timeutils.utcnow()
        inst.security_groups = objects.SecurityGroupList(objects=[])
        flavors.save_flavor_info(inst.system_metadata, flavor)
        if params:
            inst.update(params)
        if services:
            _create_service_entries(self.context.elevated(),
                                    [['fake_zone', [inst.host]]])
        inst.create()

        return inst

    def _create_instance_type(self, params=None):
        """Create a test instance type."""
        if not params:
            params = {}

        context = self.context.elevated()
        inst = {}
        inst['name'] = 'm1.small'
        inst['memory_mb'] = 1024
        inst['vcpus'] = 1
        inst['root_gb'] = 20
        inst['ephemeral_gb'] = 10
        inst['flavorid'] = '1'
        inst['swap'] = 2048
        inst['rxtx_factor'] = 1
        inst.update(params)
        return db.flavor_create(context, inst)['id']

    def _create_group(self):
        # Minimal security group owned by the test's user/project.
        values = {'name': 'testgroup',
                  'description': 'testgroup',
                  'user_id': self.user_id,
                  'project_id': self.project_id}
        return db.security_group_create(self.context, values)

    def _stub_migrate_server(self):
        # Make conductor's migrate_server a no-op for tests that trigger it.
        def _fake_migrate_server(*args, **kwargs):
            pass

        self.stubs.Set(conductor_manager.ComputeTaskManager,
                       'migrate_server', _fake_migrate_server)

    def _init_aggregate_with_host(self, aggr, aggr_name, zone, host):
        # Create the aggregate on first use, then add the host to it.
        if not aggr:
            aggr = self.api.create_aggregate(self.context, aggr_name, zone)
        aggr = self.api.add_host_to_aggregate(self.context, aggr['id'], host)
        return aggr


class ComputeVolumeTestCase(BaseTestCase):
    """Tests for volume attach/detach and block-device handling."""

    def setUp(self):
        super(ComputeVolumeTestCase, self).setUp()
        self.volume_id = 'fake'
        # Counter used by the "slow volume creation" tests below.
        self.fetched_attempts = 0
        self.instance = {
            'id': 'fake',
            'uuid': 'fake',
            'name': 'fake',
            'root_device_name': '/dev/vda',
        }
        self.fake_volume = fake_block_device.FakeDbBlockDeviceDict(
                {'source_type': 'volume', 'destination_type': 'volume',
                 'volume_id': self.volume_id, 'device_name': '/dev/vdb'})
        self.instance_object = objects.Instance._from_db_object(
                self.context, objects.Instance(),
                fake_instance.fake_db_instance())
        # Neutralize every cinder/driver call the attach/detach paths make.
        self.stubs.Set(self.compute.volume_api, 'get', lambda *a, **kw:
                       {'id': self.volume_id, 'attach_status': 'detached'})
        self.stubs.Set(self.compute.driver, 'get_volume_connector',
                       lambda *a, **kw: None)
        self.stubs.Set(self.compute.volume_api, 'initialize_connection',
                       lambda *a, **kw: {})
        self.stubs.Set(self.compute.volume_api, 'terminate_connection',
                       lambda *a, **kw: None)
        self.stubs.Set(self.compute.volume_api, 'attach',
                       lambda *a, **kw: None)
        self.stubs.Set(self.compute.volume_api, 'detach',
                       lambda *a, **kw: None)
        self.stubs.Set(self.compute.volume_api, 'check_attach',
                       lambda *a, **kw: None)
        self.stubs.Set(greenthread, 'sleep',
                       lambda *a, **kw: None)

        def store_cinfo(context, *args, **kwargs):
            # Capture the connection_info written to the BDM so tests can
            # assert on it (e.g. the 'serial' field).
            self.cinfo = jsonutils.loads(args[-1].get('connection_info'))
            return self.fake_volume

        self.stubs.Set(db, 'block_device_mapping_create', store_cinfo)
        self.stubs.Set(db, 'block_device_mapping_update', store_cinfo)

    def test_attach_volume_serial(self):
        # attach_volume must record the volume id as the 'serial' in the
        # connection_info stored on the BDM.
        fake_bdm = objects.BlockDeviceMapping(context=self.context,
                                              **self.fake_volume)
        with (mock.patch.object(cinder.API, 'get_volume_encryption_metadata',
                                return_value={})):
            instance = self._create_fake_instance_obj()
            self.compute.attach_volume(self.context, instance, bdm=fake_bdm)
            self.assertEqual(self.cinfo.get('serial'), self.volume_id)

    def test_attach_volume_raises(self):
        # A failed driver attach must unreserve the volume and destroy the
        # BDM record.
        fake_bdm = objects.BlockDeviceMapping(**self.fake_volume)
        instance = self._create_fake_instance_obj()

        def fake_attach(*args, **kwargs):
            raise test.TestingException

        with contextlib.nested(
            mock.patch.object(driver_block_device.DriverVolumeBlockDevice,
                              'attach'),
            mock.patch.object(cinder.API, 'unreserve_volume'),
            mock.patch.object(objects.BlockDeviceMapping,
                              'destroy')
        ) as (mock_attach, mock_unreserve, mock_destroy):
            mock_attach.side_effect = fake_attach
            self.assertRaises(
                    test.TestingException, self.compute.attach_volume,
                    self.context, instance, fake_bdm)
            self.assertTrue(mock_unreserve.called)
            self.assertTrue(mock_destroy.called)

    def test_detach_volume_api_raises(self):
        # If cinder's detach fails the BDM must still be destroyed after the
        # internal detach ran.
        fake_bdm = objects.BlockDeviceMapping(**self.fake_volume)
        instance = self._create_fake_instance_obj()

        with contextlib.nested(
            mock.patch.object(self.compute, '_detach_volume'),
            mock.patch.object(self.compute.volume_api, 'detach'),
            mock.patch.object(objects.BlockDeviceMapping,
                              'get_by_volume_id'),
            mock.patch.object(fake_bdm, 'destroy')
        ) as (mock_internal_detach, mock_detach, mock_get, mock_destroy):
            mock_detach.side_effect = test.TestingException
            mock_get.return_value = fake_bdm
            self.assertRaises(
                    test.TestingException, self.compute.detach_volume,
                    self.context, 'fake', instance)
            mock_internal_detach.assert_called_once_with(self.context,
                                                         instance,
                                                         fake_bdm)
            self.assertTrue(mock_destroy.called)

    def test_await_block_device_created_too_slow(self):
        # Volume never leaves 'creating': the retry budget (2) is exhausted
        # and VolumeNotCreated is raised.
        self.flags(block_device_allocate_retries=2)
        self.flags(block_device_allocate_retries_interval=0.1)

        def never_get(context, vol_id):
            return {
                'status': 'creating',
                'id': 'blah',
            }

        self.stubs.Set(self.compute.volume_api, 'get', never_get)
        self.assertRaises(exception.VolumeNotCreated,
                          self.compute._await_block_device_map_created,
                          self.context, '1')

    def test_await_block_device_created_failed(self):
        # A volume in 'error' state fails immediately with VolumeNotCreated.
        c = self.compute

        fake_result = {'status': 'error', 'id': 'blah'}
        with mock.patch.object(c.volume_api, 'get',
                               return_value=fake_result) as fake_get:
            self.assertRaises(exception.VolumeNotCreated,
                c._await_block_device_map_created,
                self.context, '1')
            fake_get.assert_called_once_with(self.context, '1')

    def test_await_block_device_created_slow(self):
        # Volume becomes 'available' on the third poll; three attempts are
        # reported.
        c = self.compute
        self.flags(block_device_allocate_retries=4)
        self.flags(block_device_allocate_retries_interval=0.1)

        def slow_get(context, vol_id):
            if self.fetched_attempts < 2:
                self.fetched_attempts += 1
                return {
                    'status': 'creating',
                    'id': 'blah',
                }
            return {
                'status': 'available',
                'id': 'blah',
            }

        self.stubs.Set(c.volume_api, 'get', slow_get)
        attempts = c._await_block_device_map_created(self.context, '1')
        self.assertEqual(attempts, 3)

    def test_await_block_device_created_retries_negative(self):
        # A negative retry count still results in at least one attempt.
        c = self.compute
        self.flags(block_device_allocate_retries=-1)
        self.flags(block_device_allocate_retries_interval=0.1)

        def volume_get(context, vol_id):
            return {
                'status': 'available',
                'id': 'blah',
            }

        self.stubs.Set(c.volume_api, 'get', volume_get)
        attempts = c._await_block_device_map_created(self.context, '1')
        self.assertEqual(1, attempts)

    def test_await_block_device_created_retries_zero(self):
        # Zero retries also means exactly one attempt.
        c = self.compute
        self.flags(block_device_allocate_retries=0)
        self.flags(block_device_allocate_retries_interval=0.1)

        def volume_get(context, vol_id):
            return {
                'status': 'available',
                'id': 'blah',
            }

        self.stubs.Set(c.volume_api, 'get', volume_get)
        attempts = c._await_block_device_map_created(self.context, '1')
        self.assertEqual(1, attempts)

    def test_boot_volume_serial(self):
        # _prep_block_device must save the BDM twice and propagate the volume
        # id as the connection_info 'serial'.
        with (
            mock.patch.object(objects.BlockDeviceMapping, 'save')
        ) as mock_save:
            block_device_mapping = [
            block_device.BlockDeviceDict({
                'id': 1,
                'no_device': None,
                'source_type': 'volume',
                'destination_type': 'volume',
                'snapshot_id': None,
                'volume_id': self.volume_id,
                'device_name': '/dev/vdb',
                'delete_on_termination': False,
            })]
            prepped_bdm = self.compute._prep_block_device(
                    self.context, self.instance_object, block_device_mapping)
            self.assertEqual(2, mock_save.call_count)
            volume_driver_bdm = prepped_bdm['block_device_mapping'][0]
            self.assertEqual(volume_driver_bdm['connection_info']['serial'],
                             self.volume_id)

    def test_boot_volume_metadata(self, metadata=True):
        # _get_bdm_image_metadata must surface the volume's image metadata
        # (min_ram/min_disk/size) for both legacy and new-style BDMs; with no
        # metadata it falls back to zeroed defaults.
        def volume_api_get(*args, **kwargs):
            if metadata:
                return {
                    'size': 1,
                    'volume_image_metadata': {'vol_test_key': 'vol_test_value',
                                              'min_ram': u'128',
                                              'min_disk': u'256',
                                              'size': u'536870912'
                                              },
                }
            else:
                return {}

        self.stubs.Set(self.compute_api.volume_api, 'get', volume_api_get)

        expected_no_metadata = {'min_disk': 0, 'min_ram': 0, 'properties': {},
                                'size': 0, 'status': 'active'}

        block_device_mapping = [{
            'id': 1,
            'device_name': 'vda',
            'no_device': None,
            'virtual_name': None,
            'snapshot_id': None,
            'volume_id': self.volume_id,
            'delete_on_termination': False,
        }]

        image_meta = self.compute_api._get_bdm_image_metadata(
            self.context, block_device_mapping)
        if metadata:
            self.assertEqual(image_meta['properties']['vol_test_key'],
                             'vol_test_value')
            self.assertEqual(128, image_meta['min_ram'])
            self.assertEqual(256, image_meta['min_disk'])
            self.assertEqual(units.Gi, image_meta['size'])
        else:
            self.assertEqual(expected_no_metadata, image_meta)

        # Test it with new-style BDMs
        block_device_mapping = [{
            'boot_index': 0,
            'source_type': 'volume',
            'destination_type': 'volume',
            'volume_id': self.volume_id,
            'delete_on_termination': False,
        }]

        image_meta = self.compute_api._get_bdm_image_metadata(
            self.context, block_device_mapping, legacy_bdm=False)
        if metadata:
            self.assertEqual(image_meta['properties']['vol_test_key'],
                             'vol_test_value')
            self.assertEqual(128, image_meta['min_ram'])
            self.assertEqual(256, image_meta['min_disk'])
            self.assertEqual(units.Gi, image_meta['size'])
        else:
            self.assertEqual(expected_no_metadata, image_meta)

    def test_boot_volume_no_metadata(self):
        self.test_boot_volume_metadata(metadata=False)

    def test_boot_image_metadata(self, metadata=True):
        # Same as above but for image-backed BDMs via the image API.
        def image_api_get(*args, **kwargs):
            if metadata:
                return {
                    'properties': {'img_test_key': 'img_test_value'}
                }
            else:
                return {}

        self.stubs.Set(self.compute_api.image_api, 'get', image_api_get)

        block_device_mapping = [{
            'boot_index': 0,
            'source_type': 'image',
            'destination_type': 'local',
            'image_id': "fake-image",
            'delete_on_termination': True,
        }]

        image_meta = self.compute_api._get_bdm_image_metadata(
            self.context, block_device_mapping, legacy_bdm=False)

        if metadata:
            self.assertEqual('img_test_value',
                             image_meta['properties']['img_test_key'])
        else:
            self.assertEqual(image_meta, {})

    def test_boot_image_no_metadata(self):
        self.test_boot_image_metadata(metadata=False)

    def test_poll_bandwidth_usage_not_implemented(self):
        # If the driver raises NotImplementedError the bandwidth poll
        # disables itself; the second call must not hit the mox expectations
        # again.  (mox record/replay: the calls below are expectations.)
        ctxt = context.get_admin_context()

        self.mox.StubOutWithMock(self.compute.driver, 'get_all_bw_counters')
        self.mox.StubOutWithMock(utils, 'last_completed_audit_period')
        self.mox.StubOutWithMock(time, 'time')
        self.mox.StubOutWithMock(objects.InstanceList, 'get_by_host')
        # Following methods will be called
        utils.last_completed_audit_period().AndReturn((0, 0))
        time.time().AndReturn(10)
        # Note - time called two more times from Log
        time.time().AndReturn(20)
        time.time().AndReturn(21)
        objects.InstanceList.get_by_host(ctxt, 'fake-mini',
                                         use_slave=True).AndReturn([])
        self.compute.driver.get_all_bw_counters([]).AndRaise(
            NotImplementedError)
        self.mox.ReplayAll()

        self.flags(bandwidth_poll_interval=1)
        self.compute._poll_bandwidth_usage(ctxt)
        # A second call won't call the stubs again as the bandwidth
        # poll is now disabled
        self.compute._poll_bandwidth_usage(ctxt)
        self.mox.UnsetStubs()

    @mock.patch.object(objects.InstanceList, 'get_by_host')
    @mock.patch.object(objects.BlockDeviceMappingList,
                       'get_by_instance_uuid')
    def test_get_host_volume_bdms(self, mock_get_by_inst,
                                  mock_get_by_host):
        # Only volume-backed BDMs are reported per instance.
        fake_instance = mock.Mock(uuid='fake-instance-uuid')
        mock_get_by_host.return_value = [fake_instance]

        volume_bdm = mock.Mock(id=1, is_volume=True)
        not_volume_bdm = mock.Mock(id=2, is_volume=False)
        mock_get_by_inst.return_value = [volume_bdm, not_volume_bdm]

        expected_host_bdms = [{'instance': fake_instance,
                               'instance_bdms': [volume_bdm]}]

        got_host_bdms = self.compute._get_host_volume_bdms('fake-context')
        mock_get_by_host.assert_called_once_with('fake-context',
                                                 self.compute.host,
                                                 use_slave=False)
        mock_get_by_inst.assert_called_once_with('fake-context',
                                                 'fake-instance-uuid',
                                                 use_slave=False)
        self.assertEqual(expected_host_bdms, got_host_bdms)

    def test_poll_volume_usage_disabled(self):
        # With a zero poll interval nothing must be queried.
        ctxt = 'MockContext'
        self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms')
        self.mox.StubOutWithMock(utils, 'last_completed_audit_period')
        # None of the mocks should be called.
        self.mox.ReplayAll()

        self.flags(volume_usage_poll_interval=0)
        self.compute._poll_volume_usage(ctxt)
        self.mox.UnsetStubs()

    def test_poll_volume_usage_returns_no_vols(self):
        # With no volume BDMs on the host the driver's usage query is never
        # reached (it is stubbed but given no expectation).
        ctxt = 'MockContext'
        self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms')
        self.mox.StubOutWithMock(utils, 'last_completed_audit_period')
        self.mox.StubOutWithMock(self.compute.driver, 'get_all_volume_usage')
        # Following methods are called.
        utils.last_completed_audit_period().AndReturn((0, 0))
        self.compute._get_host_volume_bdms(ctxt,
                                           use_slave=True).AndReturn([])
        self.mox.ReplayAll()

        self.flags(volume_usage_poll_interval=10)
        self.compute._poll_volume_usage(ctxt)
        self.mox.UnsetStubs()

    def test_poll_volume_usage_with_data(self):
        # Usage returned by the driver is pushed into the usage cache.
        ctxt = 'MockContext'
        self.mox.StubOutWithMock(utils, 'last_completed_audit_period')
        self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms')
        self.mox.StubOutWithMock(self.compute, '_update_volume_usage_cache')
        self.stubs.Set(self.compute.driver, 'get_all_volume_usage',
                       lambda x, y: [3, 4])
        # All the mocks are called
        utils.last_completed_audit_period().AndReturn((10, 20))
        self.compute._get_host_volume_bdms(ctxt,
                                           use_slave=True).AndReturn([1, 2])
        self.compute._update_volume_usage_cache(ctxt, [3, 4])
        self.mox.ReplayAll()
        self.flags(volume_usage_poll_interval=10)
        self.compute._poll_volume_usage(ctxt)
        self.mox.UnsetStubs()

    def test_detach_volume_usage(self):
        # Test that detach volume update the volume usage cache table
        # correctly: attach, poll once (curr_* counters), then detach (totals
        # rolled up, curr_* reset) and verify the emitted notifications.
        instance = self._create_fake_instance_obj()
        bdm = objects.BlockDeviceMapping(context=self.context,
                                         id=1, device_name='/dev/vdb',
                                         connection_info='{}',
                                         instance_uuid=instance['uuid'],
                                         source_type='volume',
                                         destination_type='volume',
                                         no_device=False,
                                         disk_bus='foo',
                                         device_type='disk',
                                         volume_id=1)
        host_volume_bdms = {'id': 1, 'device_name': '/dev/vdb',
                            'connection_info': '{}',
                            'instance_uuid': instance['uuid'],
                            'volume_id': 1}

        self.mox.StubOutWithMock(objects.BlockDeviceMapping,
                                 'get_by_volume_id')
        self.mox.StubOutWithMock(self.compute.driver, 'block_stats')
        self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms')
        self.mox.StubOutWithMock(self.compute.driver, 'get_all_volume_usage')

        # The following methods will be called
        objects.BlockDeviceMapping.get_by_volume_id(self.context, 1).AndReturn(
            bdm.obj_clone())
        self.compute.driver.block_stats(instance, 'vdb').\
            AndReturn([1, 30, 1, 20, None])
        self.compute._get_host_volume_bdms(self.context,
                                           use_slave=True).AndReturn(
                                               host_volume_bdms)
        self.compute.driver.get_all_volume_usage(
            self.context, host_volume_bdms).AndReturn(
                [{'volume': 1,
                  'rd_req': 1,
                  'rd_bytes': 10,
                  'wr_req': 1,
                  'wr_bytes': 5,
                  'instance': instance}])

        self.mox.ReplayAll()

        def fake_get_volume_encryption_metadata(self, context, volume_id):
            return {}
        self.stubs.Set(cinder.API, 'get_volume_encryption_metadata',
                       fake_get_volume_encryption_metadata)

        self.compute.attach_volume(self.context, instance, bdm)

        # Poll volume usage & then detach the volume. This will update the
        # total fields in the volume usage cache.
        self.flags(volume_usage_poll_interval=10)
        self.compute._poll_volume_usage(self.context)
        # Check that a volume.usage and volume.attach notification was sent
        self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))

        self.compute.detach_volume(self.context, 1, instance)

        # Check that volume.attach, 2 volume.usage, and volume.detach
        # notifications were sent
        self.assertEqual(4, len(fake_notifier.NOTIFICATIONS))
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual('compute.instance.volume.attach', msg.event_type)
        msg = fake_notifier.NOTIFICATIONS[2]
        self.assertEqual('volume.usage', msg.event_type)
        payload = msg.payload
        self.assertEqual(instance['uuid'], payload['instance_id'])
        self.assertEqual('fake', payload['user_id'])
        self.assertEqual('fake', payload['tenant_id'])
        self.assertEqual(1, payload['reads'])
        self.assertEqual(30, payload['read_bytes'])
        self.assertEqual(1, payload['writes'])
        self.assertEqual(20, payload['write_bytes'])
        self.assertIsNone(payload['availability_zone'])
        msg = fake_notifier.NOTIFICATIONS[3]
        self.assertEqual('compute.instance.volume.detach', msg.event_type)

        # Check the database for the
        volume_usages = db.vol_get_usage_by_time(self.context, 0)
        self.assertEqual(1, len(volume_usages))
        volume_usage = volume_usages[0]
        self.assertEqual(0, volume_usage['curr_reads'])
        self.assertEqual(0, volume_usage['curr_read_bytes'])
        self.assertEqual(0, volume_usage['curr_writes'])
        self.assertEqual(0, volume_usage['curr_write_bytes'])
        self.assertEqual(1, volume_usage['tot_reads'])
        self.assertEqual(30, volume_usage['tot_read_bytes'])
        self.assertEqual(1, volume_usage['tot_writes'])
        self.assertEqual(20, volume_usage['tot_write_bytes'])

    def test_prepare_image_mapping(self):
        # Legacy 'virtual' mappings (swap/ephemeralN) must be converted to
        # new-style blank/local BDMs with the flavor's sizes; ami/root
        # entries are dropped.
        swap_size = 1
        ephemeral_size = 1
        instance_type = {'swap': swap_size,
                         'ephemeral_gb': ephemeral_size}
        mappings = [
                {'virtual': 'ami', 'device': 'sda1'},
                {'virtual': 'root', 'device': '/dev/sda1'},
                {'virtual': 'swap', 'device': 'sdb4'},
                {'virtual': 'ephemeral0', 'device': 'sdc1'},
                {'virtual': 'ephemeral1', 'device': 'sdc2'},
        ]

        preped_bdm = self.compute_api._prepare_image_mapping(
            instance_type, mappings)

        expected_result = [
            {
                'device_name': '/dev/sdb4',
                'source_type': 'blank',
                'destination_type': 'local',
                'device_type': 'disk',
                'guest_format': 'swap',
                'boot_index': -1,
                'volume_size': swap_size
            },
            {
                'device_name': '/dev/sdc1',
                'source_type': 'blank',
                'destination_type': 'local',
                'device_type': 'disk',
                'guest_format': CONF.default_ephemeral_format,
                'boot_index': -1,
                'volume_size': ephemeral_size
            },
            {
                'device_name': '/dev/sdc2',
                'source_type': 'blank',
                'destination_type': 'local',
                'device_type': 'disk',
                'guest_format': CONF.default_ephemeral_format,
                'boot_index': -1,
                'volume_size': ephemeral_size
            }
        ]

        for expected, got in zip(expected_result, preped_bdm):
            self.assertThat(expected, matchers.IsSubDictOf(got))

    def test_validate_bdm(self):
        # Exercise _validate_bdm: valid set first, then boot-sequence,
        # local-device-limit, ephemeral/swap size and format violations.
        def fake_get(self, context, res_id):
            return {'id': res_id}

        def fake_check_attach(*args, **kwargs):
            pass

        self.stubs.Set(cinder.API, 'get', fake_get)
        self.stubs.Set(cinder.API, 'get_snapshot', fake_get)
        self.stubs.Set(cinder.API, 'check_attach',
                       fake_check_attach)

        volume_id = '55555555-aaaa-bbbb-cccc-555555555555'
        snapshot_id = '66666666-aaaa-bbbb-cccc-555555555555'
        image_id = '77777777-aaaa-bbbb-cccc-555555555555'

        instance = self._create_fake_instance_obj()
        instance_type = {'swap': 1, 'ephemeral_gb': 2}
        mappings = [
fake_block_device.FakeDbBlockDeviceDict({ 'device_name': '/dev/sdb4', 'source_type': 'blank', 'destination_type': 'local', 'device_type': 'disk', 'guest_format': 'swap', 'boot_index': -1, 'volume_size': 1 }, anon=True), fake_block_device.FakeDbBlockDeviceDict({ 'device_name': '/dev/sda1', 'source_type': 'volume', 'destination_type': 'volume', 'device_type': 'disk', 'volume_id': volume_id, 'guest_format': None, 'boot_index': 1, 'volume_size': 6 }, anon=True), fake_block_device.FakeDbBlockDeviceDict({ 'device_name': '/dev/sda2', 'source_type': 'snapshot', 'destination_type': 'volume', 'snapshot_id': snapshot_id, 'device_type': 'disk', 'guest_format': None, 'boot_index': 0, 'volume_size': 4 }, anon=True), fake_block_device.FakeDbBlockDeviceDict({ 'device_name': '/dev/sda3', 'source_type': 'image', 'destination_type': 'local', 'device_type': 'disk', 'guest_format': None, 'boot_index': 2, 'volume_size': 1 }, anon=True) ] mappings = block_device_obj.block_device_make_list_from_dicts( self.context, mappings) # Make sure it passes at first self.compute_api._validate_bdm(self.context, instance, instance_type, mappings) # Boot sequence mappings[2].boot_index = 2 self.assertRaises(exception.InvalidBDMBootSequence, self.compute_api._validate_bdm, self.context, instance, instance_type, mappings) mappings[2].boot_index = 0 # number of local block_devices self.flags(max_local_block_devices=1) self.assertRaises(exception.InvalidBDMLocalsLimit, self.compute_api._validate_bdm, self.context, instance, instance_type, mappings) ephemerals = [ fake_block_device.FakeDbBlockDeviceDict({ 'device_name': '/dev/vdb', 'source_type': 'blank', 'destination_type': 'local', 'device_type': 'disk', 'volume_id': volume_id, 'guest_format': None, 'boot_index': -1, 'volume_size': 1 }, anon=True), fake_block_device.FakeDbBlockDeviceDict({ 'device_name': '/dev/vdc', 'source_type': 'blank', 'destination_type': 'local', 'device_type': 'disk', 'volume_id': volume_id, 'guest_format': None, 'boot_index': -1, 
                'volume_size': 1}, anon=True)]
        ephemerals = block_device_obj.block_device_make_list_from_dicts(
            self.context, ephemerals)

        self.flags(max_local_block_devices=4)
        # More ephemerals are OK as long as they are not over the size limit
        mappings_ = mappings[:]
        mappings_.objects.extend(ephemerals)

        self.compute_api._validate_bdm(self.context, instance,
                                       instance_type, mappings_)

        # Ephemerals over the size limit
        ephemerals[0].volume_size = 3
        mappings_ = mappings[:]
        mappings_.objects.extend(ephemerals)
        self.assertRaises(exception.InvalidBDMEphemeralSize,
                          self.compute_api._validate_bdm,
                          self.context, instance, instance_type,
                          mappings_)

        # Swap over the size limit
        mappings[0].volume_size = 3
        self.assertRaises(exception.InvalidBDMSwapSize,
                          self.compute_api._validate_bdm,
                          self.context, instance, instance_type,
                          mappings)
        mappings[0].volume_size = 1

        additional_swap = [
            fake_block_device.FakeDbBlockDeviceDict({
                'device_name': '/dev/vdb',
                'source_type': 'blank',
                'destination_type': 'local',
                'device_type': 'disk',
                'guest_format': 'swap',
                'boot_index': -1,
                'volume_size': 1}, anon=True)]
        additional_swap = block_device_obj.block_device_make_list_from_dicts(
            self.context, additional_swap)

        # More than one swap
        mappings_ = mappings[:]
        mappings_.objects.extend(additional_swap)
        self.assertRaises(exception.InvalidBDMFormat,
                          self.compute_api._validate_bdm,
                          self.context, instance, instance_type,
                          mappings_)

        # An image-backed volume mapping with no size at all is rejected.
        image_no_size = [
            fake_block_device.FakeDbBlockDeviceDict({
                'device_name': '/dev/sda4',
                'source_type': 'image',
                'image_id': image_id,
                'destination_type': 'volume',
                'boot_index': -1,
                'volume_size': None,
            }, anon=True)]
        image_no_size = block_device_obj.block_device_make_list_from_dicts(
            self.context, image_no_size)
        mappings_ = mappings[:]
        mappings_.objects.extend(image_no_size)
        self.assertRaises(exception.InvalidBDM,
                          self.compute_api._validate_bdm,
                          self.context, instance, instance_type,
                          mappings_)

    def test_validate_bdm_media_service_exceptions(self):
        """Exercise _validate_bdm against volumes whose Cinder state
        should make the request invalid (wrong status, wrong
        attach_status, or not found), and one valid case.
        """
        instance_type = {'swap': 1, 'ephemeral_gb': 1}
        all_mappings = [fake_block_device.FakeDbBlockDeviceDict({
                            'id': 1,
                            'no_device': None,
                            'source_type': 'volume',
                            'destination_type': 'volume',
                            'snapshot_id': None,
                            'volume_id': self.volume_id,
                            'device_name': 'vda',
                            'boot_index': 0,
                            'delete_on_termination': False}, anon=True)]
        all_mappings = block_device_obj.block_device_make_list_from_dicts(
            self.context, all_mappings)

        # First we test a list of invalid status values that should result
        # in an InvalidVolume exception being raised.
        status_values = (
            # First two check that the status is 'available'.
            ('creating', 'detached'),
            ('error', 'detached'),
            # Checks that the attach_status is 'detached'.
            ('available', 'attached')
        )

        for status, attach_status in status_values:
            # Stub cinder.API.get to report the (invalid) state under test.
            def fake_volume_get(self, ctxt, volume_id):
                return {'id': volume_id,
                        'status': status,
                        'attach_status': attach_status}
            self.stubs.Set(cinder.API, 'get', fake_volume_get)
            self.assertRaises(exception.InvalidVolume,
                              self.compute_api._validate_bdm,
                              self.context, self.instance,
                              instance_type, all_mappings)

        # Now we test a 404 case that results in InvalidBDMVolume.
        def fake_volume_get_not_found(self, context, volume_id):
            raise exception.VolumeNotFound(volume_id)
        self.stubs.Set(cinder.API, 'get', fake_volume_get_not_found)
        self.assertRaises(exception.InvalidBDMVolume,
                          self.compute_api._validate_bdm,
                          self.context, self.instance,
                          instance_type, all_mappings)

        # Check that the volume status is 'available' and attach_status is
        # 'detached' and accept the request if so
        def fake_volume_get_ok(self, context, volume_id):
            return {'id': volume_id,
                    'status': 'available',
                    'attach_status': 'detached'}
        self.stubs.Set(cinder.API, 'get', fake_volume_get_ok)

        self.compute_api._validate_bdm(self.context, self.instance,
                                       instance_type, all_mappings)

    def test_volume_snapshot_create(self):
        """volume_snapshot_create is unimplemented in the fake driver:
        the RPC-facing call wraps the error as ExpectedException, and
        the unwrapped (ExceptionHelper) call surfaces
        NotImplementedError directly.
        """
        self.assertRaises(messaging.ExpectedException,
                          self.compute.volume_snapshot_create,
                          self.context, self.instance_object,
                          'fake_id', {})

        self.compute = utils.ExceptionHelper(self.compute)

        self.assertRaises(NotImplementedError,
                          self.compute.volume_snapshot_create,
                          self.context, self.instance_object,
                          'fake_id', {})

    def test_volume_snapshot_delete(self):
        """Same pattern as test_volume_snapshot_create, for delete."""
        self.assertRaises(messaging.ExpectedException,
                          self.compute.volume_snapshot_delete,
                          self.context, self.instance_object,
                          'fake_id', 'fake_id2', {})

        self.compute = utils.ExceptionHelper(self.compute)

        self.assertRaises(NotImplementedError,
                          self.compute.volume_snapshot_delete,
                          self.context, self.instance_object,
                          'fake_id', 'fake_id2', {})

    @mock.patch.object(cinder.API, 'create',
                       side_effect=exception.OverQuota(overs='volumes'))
    def test_prep_block_device_over_quota_failure(self, mock_create):
        """A Cinder volume-quota failure during _prep_block_device must
        surface as InvalidBDM.
        """
        instance = self._create_fake_instance_obj()
        bdms = [
            block_device.BlockDeviceDict({
                'boot_index': 0,
                'guest_format': None,
                'connection_info': None,
                'device_type': u'disk',
                'source_type': 'image',
                'destination_type': 'volume',
                'volume_size': 1,
                'image_id': 1,
                'device_name': '/dev/vdb',
            })]
        self.assertRaises(exception.InvalidBDM,
                          compute_manager.ComputeManager()._prep_block_device,
                          self.context, instance, bdms)
        self.assertTrue(mock_create.called)

    @mock.patch.object(nova.virt.block_device, 'get_swap')
    @mock.patch.object(nova.virt.block_device, 'convert_blanks')
    @mock.patch.object(nova.virt.block_device, 'convert_images')
    @mock.patch.object(nova.virt.block_device, 'convert_snapshots')
    @mock.patch.object(nova.virt.block_device, 'convert_volumes')
    @mock.patch.object(nova.virt.block_device, 'convert_ephemerals')
    @mock.patch.object(nova.virt.block_device, 'convert_swap')
    @mock.patch.object(nova.virt.block_device, 'attach_block_devices')
    def test_prep_block_device_with_blanks(self, attach_block_devices,
                                           convert_swap, convert_ephemerals,
                                           convert_volumes, convert_snapshots,
                                           convert_images, convert_blanks,
                                           get_swap):
        """_prep_block_device must pass the full BDM list through every
        convert_* helper and assemble the expected block_device_info,
        including blank (to-be-created) volumes.
        """
        instance = self._create_fake_instance_obj()
        instance['root_device_name'] = '/dev/vda'
        root_volume = objects.BlockDeviceMapping(
            **fake_block_device.FakeDbBlockDeviceDict({
                'instance_uuid': 'fake-instance',
                'source_type': 'image',
                'destination_type': 'volume',
                'image_id': 'fake-image-id-1',
                'volume_size': 1,
                'boot_index': 0}))
        blank_volume1 = objects.BlockDeviceMapping(
            **fake_block_device.FakeDbBlockDeviceDict({
                'instance_uuid': 'fake-instance',
                'source_type': 'blank',
                'destination_type': 'volume',
                'volume_size': 1,
                'boot_index': 1}))
        blank_volume2 = objects.BlockDeviceMapping(
            **fake_block_device.FakeDbBlockDeviceDict({
                'instance_uuid': 'fake-instance',
                'source_type': 'blank',
                'destination_type': 'volume',
                'volume_size': 1,
                'boot_index': 2}))
        bdms = [blank_volume1, blank_volume2, root_volume]

        # attach_block_devices is stubbed to return its input unchanged.
        def fake_attach_block_devices(bdm, *args, **kwargs):
            return bdm

        convert_swap.return_value = []
        convert_ephemerals.return_value = []
        convert_volumes.return_value = [blank_volume1, blank_volume2]
        convert_snapshots.return_value = []
        convert_images.return_value = [root_volume]
        convert_blanks.return_value = []
        attach_block_devices.side_effect = fake_attach_block_devices
        get_swap.return_value = []

        expected_block_device_info = {
            'root_device_name': '/dev/vda',
            'swap': [],
            'ephemerals': [],
            'block_device_mapping': bdms
        }

        manager = compute_manager.ComputeManager()
        manager.use_legacy_block_device_info = False
        block_device_info = manager._prep_block_device(self.context, instance,
                                                       bdms)

        convert_swap.assert_called_once_with(bdms)
        convert_ephemerals.assert_called_once_with(bdms)
        # The remaining converters receive the BDMs as a tuple.
        bdm_args = tuple(bdms)
        convert_volumes.assert_called_once_with(bdm_args)
        convert_snapshots.assert_called_once_with(bdm_args)
        convert_images.assert_called_once_with(bdm_args)
        convert_blanks.assert_called_once_with(bdm_args)

        self.assertEqual(expected_block_device_info, block_device_info)
        self.assertEqual(1, attach_block_devices.call_count)
        get_swap.assert_called_once_with([])


class ComputeTestCase(BaseTestCase):
    """Tests exercising the compute manager (build/run, terminate,
    stop/start, rescue, fault/event decorators, resource tracking).
    """

    def setUp(self):
        super(ComputeTestCase, self).setUp()
        # Make spawned greenthreads run synchronously so tests are
        # deterministic.
        self.useFixture(fixtures.SpawnIsSynchronousFixture())

    def test_wrap_instance_fault(self):
        """wrap_instance_fault records a fault when the wrapped method
        raises and the instance is passed as a keyword argument.
        """
        inst = {"uuid": "fake_uuid"}

        called = {'fault_added': False}

        def did_it_add_fault(*args):
            called['fault_added'] = True

        self.stubs.Set(compute_utils, 'add_instance_fault_from_exc',
                       did_it_add_fault)

        @compute_manager.wrap_instance_fault
        def failer(self2, context, instance):
            raise NotImplementedError()

        self.assertRaises(NotImplementedError, failer,
                          self.compute, self.context, instance=inst)

        self.assertTrue(called['fault_added'])

    def test_wrap_instance_fault_instance_in_args(self):
        """Same as above, but the instance is passed positionally."""
        inst = {"uuid": "fake_uuid"}

        called = {'fault_added': False}

        def did_it_add_fault(*args):
            called['fault_added'] = True

        self.stubs.Set(compute_utils, 'add_instance_fault_from_exc',
                       did_it_add_fault)

        @compute_manager.wrap_instance_fault
        def failer(self2, context, instance):
            raise NotImplementedError()

        self.assertRaises(NotImplementedError, failer,
                          self.compute, self.context, inst)

        self.assertTrue(called['fault_added'])

    def test_wrap_instance_fault_no_instance(self):
        """No fault is recorded when the instance itself is not found."""
        inst = {"uuid": "fake_uuid"}

        called = {'fault_added': False}

        def did_it_add_fault(*args):
            called['fault_added'] = True

        self.stubs.Set(compute_utils, 'add_instance_fault_from_exc',
                       did_it_add_fault)
        @compute_manager.wrap_instance_fault
        def failer(self2, context, instance):
            raise exception.InstanceNotFound(instance_id=instance['uuid'])

        self.assertRaises(exception.InstanceNotFound, failer,
                          self.compute, self.context, inst)

        # InstanceNotFound must NOT produce a fault record.
        self.assertFalse(called['fault_added'])

    @mock.patch.object(objects.InstanceActionEvent, 'event_start')
    @mock.patch.object(objects.InstanceActionEvent,
                       'event_finish_with_failure')
    def test_wrap_instance_event(self, mock_finish, mock_start):
        """wrap_instance_event brackets the call with event_start /
        event_finish_with_failure.
        """
        inst = {"uuid": "fake_uuid"}

        @compute_manager.wrap_instance_event
        def fake_event(self, context, instance):
            pass

        fake_event(self.compute, self.context, instance=inst)

        self.assertTrue(mock_start.called)
        self.assertTrue(mock_finish.called)

    @mock.patch.object(objects.InstanceActionEvent, 'event_start')
    @mock.patch.object(objects.InstanceActionEvent,
                       'event_finish_with_failure')
    def test_wrap_instance_event_return(self, mock_finish, mock_start):
        """The wrapped function's return value is passed through."""
        inst = {"uuid": "fake_uuid"}

        @compute_manager.wrap_instance_event
        def fake_event(self, context, instance):
            return True

        retval = fake_event(self.compute, self.context, instance=inst)

        self.assertTrue(retval)
        self.assertTrue(mock_start.called)
        self.assertTrue(mock_finish.called)

    @mock.patch.object(objects.InstanceActionEvent, 'event_start')
    @mock.patch.object(objects.InstanceActionEvent,
                       'event_finish_with_failure')
    def test_wrap_instance_event_log_exception(self, mock_finish, mock_start):
        """An exception from the wrapped function is recorded in the
        finish event (exc_val) and re-raised.
        """
        inst = {"uuid": "fake_uuid"}

        @compute_manager.wrap_instance_event
        def fake_event(self2, context, instance):
            raise exception.NovaException()

        self.assertRaises(exception.NovaException, fake_event,
                          self.compute, self.context, instance=inst)

        self.assertTrue(mock_start.called)
        self.assertTrue(mock_finish.called)
        args, kwargs = mock_finish.call_args
        self.assertIsInstance(kwargs['exc_val'], exception.NovaException)

    def test_object_compat(self):
        """object_compat converts a db-dict instance kwarg into an
        objects.Instance before calling the wrapped function.
        """
        db_inst = fake_instance.fake_db_instance()

        @compute_manager.object_compat
        def test_fn(_self, context, instance):
            self.assertIsInstance(instance, objects.Instance)
            self.assertEqual(instance.uuid, db_inst['uuid'])
        test_fn(None, self.context, instance=db_inst)

    def test_object_compat_more_positional_args(self):
        """object_compat also works with extra positional arguments."""
        db_inst = fake_instance.fake_db_instance()

        @compute_manager.object_compat
        def test_fn(_self, context, instance, pos_arg_1, pos_arg_2):
            self.assertIsInstance(instance, objects.Instance)
            self.assertEqual(instance.uuid, db_inst['uuid'])
            self.assertEqual(pos_arg_1, 'fake_pos_arg1')
            self.assertEqual(pos_arg_2, 'fake_pos_arg2')

        test_fn(None, self.context, db_inst, 'fake_pos_arg1',
                'fake_pos_arg2')

    def test_create_instance_with_img_ref_associates_config_drive(self):
        # Make sure create associates a config drive.

        instance = self._create_fake_instance_obj(
                        params={'config_drive': '1234', })

        try:
            self.compute.build_and_run_instance(self.context, instance, {},
                    {}, {}, block_device_mapping=[])
            instances = db.instance_get_all(self.context)
            instance = instances[0]

            self.assertTrue(instance['config_drive'])
        finally:
            db.instance_destroy(self.context, instance['uuid'])

    def test_create_instance_associates_config_drive(self):
        # Make sure create associates a config drive.

        instance = self._create_fake_instance_obj(
                        params={'config_drive': '1234', })

        try:
            self.compute.build_and_run_instance(self.context, instance, {},
                    {}, {}, block_device_mapping=[])
            instances = db.instance_get_all(self.context)
            instance = instances[0]

            self.assertTrue(instance['config_drive'])
        finally:
            db.instance_destroy(self.context, instance['uuid'])

    def test_create_instance_unlimited_memory(self):
        # Default of memory limit=None is unlimited.
        self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
        self.rt.update_available_resource(self.context.elevated())
        params = {"memory_mb": 999999999999}
        filter_properties = {'limits': {'memory_mb': None}}
        instance = self._create_fake_instance_obj(params)
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                filter_properties, block_device_mapping=[])
        # With no memory limit the whole (huge) request is tracked as used.
        self.assertEqual(999999999999, self.rt.compute_node['memory_mb_used'])

    def test_create_instance_unlimited_disk(self):
        """A disk_gb limit of None means unlimited disk: the build
        succeeds despite an absurdly large request.
        """
        self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
        self.rt.update_available_resource(self.context.elevated())
        params = {"root_gb": 999999999999,
                  "ephemeral_gb": 99999999999}
        filter_properties = {'limits': {'disk_gb': None}}
        instance = self._create_fake_instance_obj(params)
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                filter_properties, block_device_mapping=[])

    def test_create_multiple_instances_then_starve(self):
        """Track resource usage across successive builds; the third
        build exceeds the limits and must not increase the usage.
        """
        self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
        self.rt.update_available_resource(self.context.elevated())
        limits = {'memory_mb': 4096, 'disk_gb': 1000}
        params = {"memory_mb": 1024, "root_gb": 128, "ephemeral_gb": 128}
        instance = self._create_fake_instance_obj(params)
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[], limits=limits)
        self.assertEqual(1024, self.rt.compute_node['memory_mb_used'])
        self.assertEqual(256, self.rt.compute_node['local_gb_used'])

        params = {"memory_mb": 2048, "root_gb": 256, "ephemeral_gb": 256}
        instance = self._create_fake_instance_obj(params)
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[], limits=limits)
        self.assertEqual(3072, self.rt.compute_node['memory_mb_used'])
        self.assertEqual(768, self.rt.compute_node['local_gb_used'])

        # This build exceeds the limits, so usage must stay unchanged.
        params = {"memory_mb": 8192, "root_gb": 8192, "ephemeral_gb": 8192}
        instance = self._create_fake_instance_obj(params)
        self.compute.build_and_run_instance(self.context, instance, {},
                {}, {}, block_device_mapping=[], limits=limits)
        self.assertEqual(3072, self.rt.compute_node['memory_mb_used'])
        self.assertEqual(768, self.rt.compute_node['local_gb_used'])

    def test_create_multiple_instance_with_neutron_port(self):
        """Requesting multiple instances with a single specific Neutron
        port must be rejected (a port can back only one instance).
        """
        instance_type = flavors.get_default_flavor()

        def fake_is_neutron():
            return True
        self.stubs.Set(utils, 'is_neutron', fake_is_neutron)
        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest(port_id='adadds')])
        self.assertRaises(exception.MultiplePortsNotApplicable,
                          self.compute_api.create,
                          self.context,
                          instance_type=instance_type,
                          image_href=None,
                          max_count=2,
                          requested_networks=requested_networks)

    def test_create_instance_with_oversubscribed_ram(self):
        # Test passing of oversubscribed ram policy from the scheduler.

        self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
        self.rt.update_available_resource(self.context.elevated())

        # get total memory as reported by virt driver:
        resources = self.compute.driver.get_available_resource(NODENAME)
        total_mem_mb = resources['memory_mb']

        oversub_limit_mb = total_mem_mb * 1.5
        instance_mb = int(total_mem_mb * 1.45)

        # build an instance, specifying an amount of memory that exceeds
        # total_mem_mb, but is less than the oversubscribed limit:
        params = {"memory_mb": instance_mb, "root_gb": 128,
                  "ephemeral_gb": 128}
        instance = self._create_fake_instance_obj(params)

        limits = {'memory_mb': oversub_limit_mb}
        filter_properties = {'limits': limits}
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                filter_properties, block_device_mapping=[])

        self.assertEqual(instance_mb, self.rt.compute_node['memory_mb_used'])

    def test_create_instance_with_oversubscribed_ram_fail(self):
        """Test passing of oversubscribed ram policy from the scheduler,
        but with insufficient memory.
        """
        self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
        self.rt.update_available_resource(self.context.elevated())

        # get total memory as reported by virt driver:
        resources = self.compute.driver.get_available_resource(NODENAME)
        total_mem_mb = resources['memory_mb']

        oversub_limit_mb = total_mem_mb * 1.5
        instance_mb = int(total_mem_mb * 1.55)

        # build an instance, specifying an amount of memory that exceeds
        # both total_mem_mb and the oversubscribed limit:
        params = {"memory_mb": instance_mb, "root_gb": 128,
                  "ephemeral_gb": 128}
        instance = self._create_fake_instance_obj(params)

        filter_properties = {'limits': {'memory_mb': oversub_limit_mb}}

        self.compute.build_and_run_instance(self.context, instance, {}, {},
                filter_properties, block_device_mapping=[])

    def test_create_instance_with_oversubscribed_cpu(self):
        # Test passing of oversubscribed cpu policy from the scheduler.

        self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
        self.rt.update_available_resource(self.context.elevated())
        limits = {'vcpu': 3}
        filter_properties = {'limits': limits}

        # get total memory as reported by virt driver:
        resources = self.compute.driver.get_available_resource(NODENAME)
        self.assertEqual(1, resources['vcpus'])

        # build an instance, specifying an amount of memory that exceeds
        # total_mem_mb, but is less than the oversubscribed limit:
        params = {"memory_mb": 10, "root_gb": 1,
                  "ephemeral_gb": 1, "vcpus": 2}
        instance = self._create_fake_instance_obj(params)
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                filter_properties, block_device_mapping=[])
        self.assertEqual(2, self.rt.compute_node['vcpus_used'])

        # create one more instance:
        params = {"memory_mb": 10, "root_gb": 1,
                  "ephemeral_gb": 1, "vcpus": 1}
        instance = self._create_fake_instance_obj(params)
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                filter_properties, block_device_mapping=[])
        self.assertEqual(3, self.rt.compute_node['vcpus_used'])

        # delete the instance:
        instance['vm_state'] = vm_states.DELETED
        self.rt.update_usage(self.context, instance=instance)

        self.assertEqual(2, self.rt.compute_node['vcpus_used'])

        # now oversubscribe vcpus and fail:
        params = {"memory_mb": 10, "root_gb": 1,
                  "ephemeral_gb": 1, "vcpus": 2}
        instance = self._create_fake_instance_obj(params)

        limits = {'vcpu': 3}
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[], limits=limits)
        # Over the vcpu limit, so the instance lands in ERROR.
        self.assertEqual(vm_states.ERROR, instance.vm_state)

    def test_create_instance_with_oversubscribed_disk(self):
        # Test passing of oversubscribed disk policy from the scheduler.

        self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
        self.rt.update_available_resource(self.context.elevated())

        # get total memory as reported by virt driver:
        resources = self.compute.driver.get_available_resource(NODENAME)
        total_disk_gb = resources['local_gb']

        oversub_limit_gb = total_disk_gb * 1.5
        instance_gb = int(total_disk_gb * 1.45)

        # build an instance, specifying an amount of disk that exceeds
        # total_disk_gb, but is less than the oversubscribed limit:
        params = {"root_gb": instance_gb, "memory_mb": 10}
        instance = self._create_fake_instance_obj(params)

        limits = {'disk_gb': oversub_limit_gb}
        filter_properties = {'limits': limits}
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                filter_properties, block_device_mapping=[])

        self.assertEqual(instance_gb, self.rt.compute_node['local_gb_used'])

    def test_create_instance_with_oversubscribed_disk_fail(self):
        """Test passing of oversubscribed disk policy from the scheduler,
        but with insufficient disk.
        """
        self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
        self.rt.update_available_resource(self.context.elevated())

        # get total memory as reported by virt driver:
        resources = self.compute.driver.get_available_resource(NODENAME)
        total_disk_gb = resources['local_gb']

        oversub_limit_gb = total_disk_gb * 1.5
        instance_gb = int(total_disk_gb * 1.55)

        # build an instance, specifying an amount of disk that exceeds
        # total_disk_gb, but is less than the oversubscribed limit:
        params = {"root_gb": instance_gb, "memory_mb": 10}
        instance = self._create_fake_instance_obj(params)

        limits = {'disk_gb': oversub_limit_gb}
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[], limits=limits)
        self.assertEqual(vm_states.ERROR, instance.vm_state)

    def test_create_instance_without_node_param(self):
        """When no node is supplied the manager fills in its own node."""
        instance = self._create_fake_instance_obj({'node': None})

        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[])
        instances = db.instance_get_all(self.context)
        instance = instances[0]

        self.assertEqual(NODENAME, instance['node'])

    def test_create_instance_no_image(self):
        # Create instance with no image provided.
        params = {'image_ref': ''}
        instance = self._create_fake_instance_obj(params)
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[])
        self._assert_state({'vm_state': vm_states.ACTIVE,
                            'task_state': None})

    @testtools.skipIf(test_utils.is_osx(),
                      'IPv6 pretty-printing broken on OSX, see bug 1409135')
    def test_default_access_ip(self):
        self.flags(default_access_ip_network_name='test1')
        fake_network.unset_stub_network_methods(self.stubs)
        instance = self._create_fake_instance_obj()

        orig_update = self.compute._instance_update

        # Make sure the access_ip_* updates happen in the same DB
        # update as the set to ACTIVE.
        def _instance_update(ctxt, instance_uuid, **kwargs):
            if kwargs.get('vm_state', None) == vm_states.ACTIVE:
                # NOTE(review): the compact form '2001:db8:0:1::1' is
                # asserted here while the expanded address below asserts
                # '2001:db8:0:1:dcad:beff:feef:1' — presumably different
                # addresses assigned by the fake network; confirm intent.
                self.assertEqual(kwargs['access_ip_v4'], '192.168.1.100')
                self.assertEqual(kwargs['access_ip_v6'], '2001:db8:0:1::1')
            return orig_update(ctxt, instance_uuid, **kwargs)

        self.stubs.Set(self.compute, '_instance_update', _instance_update)

        try:
            self.compute.build_and_run_instance(self.context, instance, {},
                    {}, {}, block_device_mapping=[])
            instances = db.instance_get_all(self.context)
            instance = instances[0]

            self.assertEqual(instance['access_ip_v4'], '192.168.1.100')
            self.assertEqual(instance['access_ip_v6'],
                             '2001:db8:0:1:dcad:beff:feef:1')
        finally:
            db.instance_destroy(self.context, instance['uuid'])

    def test_no_default_access_ip(self):
        """Without default_access_ip_network_name no access IPs are set."""
        instance = self._create_fake_instance_obj()

        try:
            self.compute.build_and_run_instance(self.context, instance, {},
                    {}, {}, block_device_mapping=[])
            instances = db.instance_get_all(self.context)
            instance = instances[0]

            self.assertFalse(instance['access_ip_v4'])
            self.assertFalse(instance['access_ip_v6'])
        finally:
            db.instance_destroy(self.context, instance['uuid'])

    def test_fail_to_schedule_persists(self):
        # check the persistence of the ERROR(scheduling) state.
        params = {'vm_state': vm_states.ERROR,
                  'task_state': task_states.SCHEDULING}
        self._create_fake_instance_obj(params=params)
        # check state is failed even after the periodic poll
        self.compute.periodic_tasks(context.get_admin_context())
        self._assert_state({'vm_state': vm_states.ERROR,
                            'task_state': task_states.SCHEDULING})

    def test_run_instance_setup_block_device_mapping_fail(self):
        """block device mapping failure test.

        Make sure that when there is a block device mapping problem,
        the instance goes to ERROR state, keeping the task state
        """
        def fake(*args, **kwargs):
            raise exception.InvalidBDM()
        self.stubs.Set(nova.compute.manager.ComputeManager,
                       '_prep_block_device', fake)
        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(
            self.context, instance=instance, image={}, request_spec={},
            block_device_mapping=[], filter_properties={},
            requested_networks=[], injected_files=None, admin_password=None,
            node=None)
        # check state is failed even after the periodic poll
        self._assert_state({'vm_state': vm_states.ERROR,
                            'task_state': task_states.BLOCK_DEVICE_MAPPING})
        self.compute.periodic_tasks(context.get_admin_context())
        self._assert_state({'vm_state': vm_states.ERROR,
                            'task_state': task_states.BLOCK_DEVICE_MAPPING})

    @mock.patch('nova.compute.manager.ComputeManager._prep_block_device',
                side_effect=exception.OverQuota(overs='volumes'))
    def test_setup_block_device_over_quota_fail(self, mock_prep_block_dev):
        """block device mapping over quota failure test.

        Make sure when we're over volume quota according to Cinder client,
        the appropriate exception is raised and the instances to ERROR
        state, keep the task state.
        """
        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(
            self.context, instance=instance, request_spec={},
            filter_properties={}, requested_networks=[],
            injected_files=None, admin_password=None,
            node=None, block_device_mapping=[], image={})
        # check state is failed even after the periodic poll
        self._assert_state({'vm_state': vm_states.ERROR,
                            'task_state': task_states.BLOCK_DEVICE_MAPPING})
        self.compute.periodic_tasks(context.get_admin_context())
        self._assert_state({'vm_state': vm_states.ERROR,
                            'task_state': task_states.BLOCK_DEVICE_MAPPING})
        self.assertTrue(mock_prep_block_dev.called)

    def test_run_instance_spawn_fail(self):
        """spawn failure test.

        Make sure that when there is a spawning problem,
        the instance goes to ERROR state, keeping the task state.
        """
        def fake(*args, **kwargs):
            raise test.TestingException()
        self.stubs.Set(self.compute.driver, 'spawn', fake)
        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(
            self.context, instance=instance, request_spec={},
            filter_properties={}, requested_networks=[],
            injected_files=None, admin_password=None,
            block_device_mapping=[], image={}, node=None)
        # check state is failed even after the periodic poll
        self._assert_state({'vm_state': vm_states.ERROR,
                            'task_state': task_states.SPAWNING})
        self.compute.periodic_tasks(context.get_admin_context())
        self._assert_state({'vm_state': vm_states.ERROR,
                            'task_state': task_states.SPAWNING})

    def test_run_instance_dealloc_network_instance_not_found(self):
        """spawn network deallocate test.

        Make sure that when an instance is not found during spawn
        that the network is deallocated
        """
        instance = self._create_fake_instance_obj()

        def fake(*args, **kwargs):
            raise exception.InstanceNotFound(instance_id="fake")

        self.stubs.Set(self.compute.driver, 'spawn', fake)
        self.mox.StubOutWithMock(self.compute, '_deallocate_network')
        self.compute._deallocate_network(mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()

        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[])

    def test_run_instance_bails_on_missing_instance(self):
        # Make sure that run_instance() will quickly ignore a deleted instance
        instance = self._create_fake_instance_obj()

        with mock.patch.object(instance, 'save') as mock_save:
            mock_save.side_effect = exception.InstanceNotFound(instance_id=1)
            self.compute.build_and_run_instance(self.context, instance, {},
                    {}, {}, block_device_mapping=[])
            self.assertTrue(mock_save.called)

    def test_run_instance_bails_on_deleting_instance(self):
        # Make sure that run_instance() will quickly ignore a deleting instance
        instance = self._create_fake_instance_obj()

        with mock.patch.object(instance, 'save') as mock_save:
            mock_save.side_effect = exception.UnexpectedDeletingTaskStateError(
                actual='foo', expected='bar')
            self.compute.build_and_run_instance(self.context, instance, {},
                    {}, {}, block_device_mapping=[])
            self.assertTrue(mock_save.called)

    def test_can_terminate_on_error_state(self):
        # Make sure that the instance can be terminated in ERROR state.
        # check failed to schedule --> terminate
        params = {'vm_state': vm_states.ERROR}
        instance = self._create_fake_instance_obj(params=params)
        self.compute.terminate_instance(self.context, instance, [], [])
        self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
                          self.context, instance['uuid'])
        # Double check it's not there for admins, either.
        self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
                          self.context.elevated(), instance['uuid'])

    def test_run_terminate(self):
        # Make sure it is possible to run and terminate instance.
        instance = self._create_fake_instance_obj()

        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[])

        instances = db.instance_get_all(self.context)
        LOG.info("Running instances: %s", instances)
        self.assertEqual(len(instances), 1)

        self.compute.terminate_instance(self.context, instance, [], [])

        instances = db.instance_get_all(self.context)
        LOG.info("After terminating instances: %s", instances)
        self.assertEqual(len(instances), 0)

        # A soft-deleted row must still be visible to a read_deleted admin
        # context, in DELETED state with no task state.
        admin_deleted_context = context.get_admin_context(
            read_deleted="only")
        instance = db.instance_get_by_uuid(admin_deleted_context,
                                           instance['uuid'])
        self.assertEqual(instance['vm_state'], vm_states.DELETED)
        self.assertIsNone(instance['task_state'])

    def test_run_terminate_with_vol_attached(self):
        """Make sure it is possible to run and terminate instance with volume
        attached
        """
        instance = self._create_fake_instance_obj()

        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[])

        instances = db.instance_get_all(self.context)
        LOG.info("Running instances: %s", instances)
        self.assertEqual(len(instances), 1)

        # Stub out all Cinder interactions so attach/detach are no-ops.
        def fake_check_attach(*args, **kwargs):
            pass

        def fake_reserve_volume(*args, **kwargs):
            pass

        def fake_volume_get(self, context, volume_id):
            return {'id': volume_id}

        def fake_terminate_connection(self, context, volume_id, connector):
            pass

        def fake_detach(self, context, volume_id):
            pass

        bdms = []

        def fake_rpc_reserve_block_device_name(self, context, instance,
                                               device, volume_id, **kwargs):
            bdm = objects.BlockDeviceMapping(
                        **{'context': context,
                           'source_type': 'volume',
                           'destination_type': 'volume',
                           'volume_id': 1,
                           'instance_uuid': instance['uuid'],
                           'device_name': '/dev/vdc'})
            bdm.create()
            bdms.append(bdm)
            return bdm

        self.stubs.Set(cinder.API, 'get', fake_volume_get)
        self.stubs.Set(cinder.API, 'check_attach', fake_check_attach)
        self.stubs.Set(cinder.API, 'reserve_volume',
                       fake_reserve_volume)
        self.stubs.Set(cinder.API, 'terminate_connection',
                       fake_terminate_connection)
        self.stubs.Set(cinder.API, 'detach', fake_detach)
        self.stubs.Set(compute_rpcapi.ComputeAPI,
                       'reserve_block_device_name',
                       fake_rpc_reserve_block_device_name)

        self.compute_api.attach_volume(self.context, instance, 1,
                                       '/dev/vdc')

        self.compute.terminate_instance(self.context, instance, bdms, [])

        instances = db.instance_get_all(self.context)
        LOG.info("After terminating instances: %s", instances)
        self.assertEqual(len(instances), 0)
        # Terminating the instance must also clean up its BDMs.
        bdms = db.block_device_mapping_get_all_by_instance(self.context,
                                                           instance['uuid'])
        self.assertEqual(len(bdms), 0)

    def test_run_terminate_no_image(self):
        """Make sure instance started without image (from volume)
        can be termintad without issues
        """
        params = {'image_ref': ''}
        instance = self._create_fake_instance_obj(params)
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[])
        self._assert_state({'vm_state': vm_states.ACTIVE,
                            'task_state': None})

        self.compute.terminate_instance(self.context, instance, [], [])
        instances = db.instance_get_all(self.context)
        self.assertEqual(len(instances), 0)

    def test_terminate_no_network(self):
        # This is as reported in LP bug 1008875
        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[])

        instances = db.instance_get_all(self.context)
        LOG.info("Running instances: %s", instances)
        self.assertEqual(len(instances), 1)

        self.mox.ReplayAll()

        self.compute.terminate_instance(self.context, instance, [], [])

        instances = db.instance_get_all(self.context)
        LOG.info("After terminating instances: %s", instances)
        self.assertEqual(len(instances), 0)

    def test_run_terminate_timestamps(self):
        # Make sure timestamps are set for launched and destroyed.
        instance = self._create_fake_instance_obj()
        instance['launched_at'] = None
        self.assertIsNone(instance['launched_at'])
        self.assertIsNone(instance['deleted_at'])
        launch = timeutils.utcnow()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[])
        instance.refresh()
        # launched_at must fall after the recorded launch time and
        # deleted_at must still be unset.
        self.assertTrue(instance['launched_at'].replace(tzinfo=None) > launch)
        self.assertIsNone(instance['deleted_at'])
        terminate = timeutils.utcnow()
        self.compute.terminate_instance(self.context, instance, [], [])

        with utils.temporary_mutation(self.context, read_deleted='only'):
            instance = db.instance_get_by_uuid(self.context,
                                               instance['uuid'])
        self.assertTrue(instance['launched_at'].replace(
            tzinfo=None) < terminate)
        self.assertTrue(instance['deleted_at'].replace(
            tzinfo=None) > terminate)

    def test_run_terminate_deallocate_net_failure_sets_error_state(self):
        """A network-deallocation failure during terminate must leave
        the instance in ERROR state.
        """
        instance = self._create_fake_instance_obj()

        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[])

        instances = db.instance_get_all(self.context)
        LOG.info("Running instances: %s", instances)
        self.assertEqual(len(instances), 1)

        def _fake_deallocate_network(*args, **kwargs):
            raise test.TestingException()

        self.stubs.Set(self.compute, '_deallocate_network',
                       _fake_deallocate_network)

        try:
            self.compute.terminate_instance(self.context, instance, [], [])
        except test.TestingException:
            pass

        instance = db.instance_get_by_uuid(self.context, instance['uuid'])
        self.assertEqual(instance['vm_state'], vm_states.ERROR)

    def test_stop(self):
        # Ensure instance can be stopped.
        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[])
        db.instance_update(self.context, instance['uuid'],
                           {"task_state": task_states.POWERING_OFF})
        inst_uuid = instance['uuid']
        extra = ['system_metadata', 'metadata']
        inst_obj = objects.Instance.get_by_uuid(self.context,
                                                inst_uuid,
                                                expected_attrs=extra)
        self.compute.stop_instance(self.context, instance=inst_obj,
                                   clean_shutdown=True)
        self.compute.terminate_instance(self.context, instance, [], [])

    def test_start(self):
        # Ensure instance can be started.
        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                {}, block_device_mapping=[])
        db.instance_update(self.context, instance['uuid'],
                           {"task_state": task_states.POWERING_OFF})
        extra = ['system_metadata', 'metadata']
        inst_uuid = instance['uuid']
        inst_obj = objects.Instance.get_by_uuid(self.context,
                                                inst_uuid,
                                                expected_attrs=extra)
        self.compute.stop_instance(self.context, instance=inst_obj,
                                   clean_shutdown=True)
        inst_obj.task_state = task_states.POWERING_ON
        inst_obj.save()
        self.compute.start_instance(self.context, instance=inst_obj)
        self.compute.terminate_instance(self.context, instance, [], [])

    def test_start_shelved_instance(self):
        # Ensure shelved instance can be started.
        self.deleted_image_id = None

        def fake_delete(self_, ctxt, image_id):
            self.deleted_image_id = image_id

        fake_image.stub_out_image_service(self.stubs)
        self.stubs.Set(fake_image._FakeImageService, 'delete', fake_delete)

        instance = self._create_fake_instance_obj()
        image = {'id': 'fake_id'}
        # Adding shelved information to instance system metadata.
shelved_time = timeutils.strtime(at=timeutils.utcnow()) instance.system_metadata['shelved_at'] = shelved_time instance.system_metadata['shelved_image_id'] = image['id'] instance.system_metadata['shelved_host'] = 'fake-mini' instance.save() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) db.instance_update(self.context, instance['uuid'], {"task_state": task_states.POWERING_OFF, "vm_state": vm_states.SHELVED}) extra = ['system_metadata', 'metadata'] inst_uuid = instance['uuid'] inst_obj = objects.Instance.get_by_uuid(self.context, inst_uuid, expected_attrs=extra) self.compute.stop_instance(self.context, instance=inst_obj, clean_shutdown=True) inst_obj.task_state = task_states.POWERING_ON inst_obj.save() self.compute.start_instance(self.context, instance=inst_obj) self.assertEqual(image['id'], self.deleted_image_id) self.assertNotIn('shelved_at', inst_obj.system_metadata) self.assertNotIn('shelved_image_id', inst_obj.system_metadata) self.assertNotIn('shelved_host', inst_obj.system_metadata) self.compute.terminate_instance(self.context, instance, [], []) def test_stop_start_no_image(self): params = {'image_ref': ''} instance = self._create_fake_instance_obj(params) self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) db.instance_update(self.context, instance['uuid'], {"task_state": task_states.POWERING_OFF}) extra = ['system_metadata', 'metadata'] inst_uuid = instance['uuid'] inst_obj = objects.Instance.get_by_uuid(self.context, inst_uuid, expected_attrs=extra) self.compute.stop_instance(self.context, instance=inst_obj, clean_shutdown=True) inst_obj.task_state = task_states.POWERING_ON inst_obj.save() self.compute.start_instance(self.context, instance=inst_obj) self.compute.terminate_instance(self.context, instance, [], []) def test_rescue(self): # Ensure instance can be rescued and unrescued. 
        called = {'rescued': False, 'unrescued': False}

        def fake_rescue(self, context, instance_ref, network_info,
                        image_meta, rescue_password):
            called['rescued'] = True

        self.stubs.Set(nova.virt.fake.FakeDriver, 'rescue', fake_rescue)

        def fake_unrescue(self, instance_ref, network_info):
            called['unrescued'] = True

        self.stubs.Set(nova.virt.fake.FakeDriver, 'unrescue',
                       fake_unrescue)

        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])

        instance.task_state = task_states.RESCUING
        instance.save()
        self.compute.rescue_instance(self.context, instance, None, None, True)
        self.assertTrue(called['rescued'])
        instance.task_state = task_states.UNRESCUING
        instance.save()
        self.compute.unrescue_instance(self.context, instance)
        self.assertTrue(called['unrescued'])

        self.compute.terminate_instance(self.context, instance, [], [])

    def test_rescue_notifications(self):
        # Ensure notifications on instance rescue.
        def fake_rescue(self, context, instance_ref, network_info,
                        image_meta, rescue_password):
            pass
        self.stubs.Set(nova.virt.fake.FakeDriver, 'rescue', fake_rescue)

        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])

        fake_notifier.NOTIFICATIONS = []
        instance.task_state = task_states.RESCUING
        instance.save()
        # NOTE(review): test_rescue above passes (None, None, True) for the
        # trailing positional args; here the second one is True.  If those
        # positions are (rescue_password, rescue_image_ref, clean_shutdown),
        # passing True as an image ref looks odd -- confirm against the
        # rescue_instance signature before relying on it.
        self.compute.rescue_instance(self.context, instance, None, True, True)

        # Rescue must emit start/exists/end notifications, in this order.
        expected_notifications = ['compute.instance.rescue.start',
                                  'compute.instance.exists',
                                  'compute.instance.rescue.end']
        self.assertEqual([m.event_type for m in fake_notifier.NOTIFICATIONS],
                         expected_notifications)
        for n, msg in enumerate(fake_notifier.NOTIFICATIONS):
            self.assertEqual(msg.event_type, expected_notifications[n])
            self.assertEqual(msg.priority, 'INFO')
            payload = msg.payload
            self.assertEqual(payload['tenant_id'], self.project_id)
            self.assertEqual(payload['user_id'], self.user_id)
            self.assertEqual(payload['instance_id'], instance.uuid)
            self.assertEqual(payload['instance_type'], 'm1.tiny')
            type_id = flavors.get_flavor_by_name('m1.tiny')['id']
            self.assertEqual(str(payload['instance_type_id']), str(type_id))
            self.assertIn('display_name', payload)
            self.assertIn('created_at', payload)
            self.assertIn('launched_at', payload)
            image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
            self.assertEqual(payload['image_ref_url'], image_ref_url)
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertIn('rescue_image_name', msg.payload)

        self.compute.terminate_instance(self.context, instance, [], [])

    def test_unrescue_notifications(self):
        # Ensure notifications on instance unrescue.
        def fake_unrescue(self, instance_ref, network_info):
            pass
        self.stubs.Set(nova.virt.fake.FakeDriver, 'unrescue',
                       fake_unrescue)

        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])

        fake_notifier.NOTIFICATIONS = []
        instance.task_state = task_states.UNRESCUING
        instance.save()
        self.compute.unrescue_instance(self.context, instance)

        expected_notifications = ['compute.instance.unrescue.start',
                                  'compute.instance.unrescue.end']
        self.assertEqual([m.event_type for m in fake_notifier.NOTIFICATIONS],
                         expected_notifications)
        for n, msg in enumerate(fake_notifier.NOTIFICATIONS):
            self.assertEqual(msg.event_type, expected_notifications[n])
            self.assertEqual(msg.priority, 'INFO')
            payload = msg.payload
            self.assertEqual(payload['tenant_id'], self.project_id)
            self.assertEqual(payload['user_id'], self.user_id)
            self.assertEqual(payload['instance_id'], instance.uuid)
            self.assertEqual(payload['instance_type'], 'm1.tiny')
            type_id = flavors.get_flavor_by_name('m1.tiny')['id']
            self.assertEqual(str(payload['instance_type_id']), str(type_id))
            self.assertIn('display_name', payload)
            self.assertIn('created_at', payload)
            self.assertIn('launched_at', payload)
            image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
            self.assertEqual(payload['image_ref_url'], image_ref_url)

        self.compute.terminate_instance(self.context, instance, [], [])

    def test_rescue_handle_err(self):
        # If the driver fails to rescue, instance state should remain the same
        # and the exception should be converted to InstanceNotRescuable
        inst_obj = self._create_fake_instance_obj()
        self.mox.StubOutWithMock(self.compute, '_get_rescue_image')
        self.mox.StubOutWithMock(nova.virt.fake.FakeDriver, 'rescue')

        self.compute._get_rescue_image(
            mox.IgnoreArg(), inst_obj, mox.IgnoreArg()).AndReturn({})
        nova.virt.fake.FakeDriver.rescue(
            mox.IgnoreArg(), inst_obj, [], mox.IgnoreArg(), 'password'
            ).AndRaise(RuntimeError("Try again later"))

        self.mox.ReplayAll()

        expected_message = ('Instance %s cannot be rescued: '
                            'Driver Error: Try again later' % inst_obj.uuid)
        inst_obj.vm_state = 'some_random_state'

        with testtools.ExpectedException(
                exception.InstanceNotRescuable, expected_message):
            self.compute.rescue_instance(
                self.context, instance=inst_obj,
                rescue_password='password', rescue_image_ref=None,
                clean_shutdown=True)

        # vm_state must be untouched by the failed rescue.
        self.assertEqual('some_random_state', inst_obj.vm_state)

    @mock.patch.object(image_api.API, "get")
    @mock.patch.object(nova.virt.fake.FakeDriver, "rescue")
    def test_rescue_with_image_specified(self, mock_rescue,
                                         mock_image_get):
        # An explicitly supplied rescue image ref must be looked up and
        # passed through to the driver.
        image_ref = "image-ref"
        rescue_image_meta = {}
        params = {"task_state": task_states.RESCUING}
        instance = self._create_fake_instance_obj(params=params)

        ctxt = context.get_admin_context()
        mock_context = mock.Mock()
        mock_context.elevated.return_value = ctxt

        mock_image_get.return_value = rescue_image_meta

        self.compute.rescue_instance(mock_context, instance=instance,
                                     rescue_password="password",
                                     rescue_image_ref=image_ref,
                                     clean_shutdown=True)

        mock_image_get.assert_called_with(ctxt, image_ref)
        mock_rescue.assert_called_with(ctxt, instance, [],
                                       rescue_image_meta, 'password')
        self.compute.terminate_instance(ctxt, instance, [], [])

    @mock.patch.object(image_api.API, "get")
    @mock.patch.object(nova.virt.fake.FakeDriver, "rescue")
    def test_rescue_with_base_image_when_image_not_specified(self,
            mock_rescue, mock_image_get):
        # With no rescue image ref supplied, the instance's base image
        # (from system_metadata) must be used for the rescue.
        image_ref = "image-ref"
        system_meta = {"image_base_image_ref": image_ref}
        rescue_image_meta = {}
        params = {"task_state": task_states.RESCUING,
                  "system_metadata": system_meta}
        instance = self._create_fake_instance_obj(params=params)

        ctxt = context.get_admin_context()
        mock_context = mock.Mock()
        mock_context.elevated.return_value = ctxt

        mock_image_get.return_value = rescue_image_meta

        self.compute.rescue_instance(mock_context, instance=instance,
                                     rescue_password="password",
                                     rescue_image_ref=None,
                                     clean_shutdown=True)

        mock_image_get.assert_called_with(ctxt, image_ref)
        mock_rescue.assert_called_with(ctxt, instance, [],
                                       rescue_image_meta, 'password')
        self.compute.terminate_instance(self.context, instance, [], [])

    def test_power_on(self):
        # Ensure instance can be powered on.
        called = {'power_on': False}

        def fake_driver_power_on(self, context, instance, network_info,
                                 block_device_info):
            called['power_on'] = True

        self.stubs.Set(nova.virt.fake.FakeDriver, 'power_on',
                       fake_driver_power_on)

        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])
        extra = ['system_metadata', 'metadata']
        inst_obj = objects.Instance.get_by_uuid(self.context,
                                                instance['uuid'],
                                                expected_attrs=extra)
        inst_obj.task_state = task_states.POWERING_ON
        inst_obj.save()
        self.compute.start_instance(self.context, instance=inst_obj)
        self.assertTrue(called['power_on'])
        self.compute.terminate_instance(self.context, inst_obj, [], [])

    def test_power_off(self):
        # Ensure instance can be powered off.
        called = {'power_off': False}

        def fake_driver_power_off(self, instance,
                                  shutdown_timeout, shutdown_attempts):
            called['power_off'] = True

        self.stubs.Set(nova.virt.fake.FakeDriver, 'power_off',
                       fake_driver_power_off)

        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])
        extra = ['system_metadata', 'metadata']
        inst_obj = objects.Instance.get_by_uuid(self.context,
                                                instance['uuid'],
                                                expected_attrs=extra)
        inst_obj.task_state = task_states.POWERING_OFF
        inst_obj.save()
        self.compute.stop_instance(self.context, instance=inst_obj,
                                   clean_shutdown=True)
        self.assertTrue(called['power_off'])
        self.compute.terminate_instance(self.context, inst_obj, [], [])

    def test_pause(self):
        # Ensure instance can be paused and unpaused.
        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])
        instance.task_state = task_states.PAUSING
        instance.save()
        fake_notifier.NOTIFICATIONS = []
        self.compute.pause_instance(self.context, instance=instance)
        # Pause emits exactly start + end notifications.
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual(msg.event_type, 'compute.instance.pause.start')
        msg = fake_notifier.NOTIFICATIONS[1]
        self.assertEqual(msg.event_type, 'compute.instance.pause.end')
        instance.task_state = task_states.UNPAUSING
        instance.save()
        fake_notifier.NOTIFICATIONS = []
        self.compute.unpause_instance(self.context, instance=instance)
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual(msg.event_type, 'compute.instance.unpause.start')
        msg = fake_notifier.NOTIFICATIONS[1]
        self.assertEqual(msg.event_type, 'compute.instance.unpause.end')
        self.compute.terminate_instance(self.context, instance, [], [])

    def test_suspend(self):
        # ensure instance can be suspended and resumed.
        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])
        instance.task_state = task_states.SUSPENDING
        instance.save()
        self.compute.suspend_instance(self.context, instance)
        instance.task_state = task_states.RESUMING
        instance.save()
        self.compute.resume_instance(self.context, instance)

        # 6 notifications total; indices 2 and 3 are the suspend pair
        # (the notifier was not reset, so 0/1 belong to the build).
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 6)

        msg = fake_notifier.NOTIFICATIONS[2]
        self.assertEqual(msg.event_type,
                         'compute.instance.suspend.start')
        msg = fake_notifier.NOTIFICATIONS[3]
        self.assertEqual(msg.event_type,
                         'compute.instance.suspend.end')

        self.compute.terminate_instance(self.context, instance, [], [])

    def test_suspend_error(self):
        # Ensure vm_state is ERROR when suspend error occurs.
        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])

        with mock.patch.object(self.compute.driver, 'suspend',
                               side_effect=test.TestingException):
            self.assertRaises(test.TestingException,
                              self.compute.suspend_instance,
                              self.context,
                              instance=instance)

        instance = db.instance_get_by_uuid(self.context, instance.uuid)
        self.assertEqual(vm_states.ERROR, instance.vm_state)

    def test_suspend_not_implemented(self):
        # Ensure expected exception is raised and the vm_state of instance
        # restore to original value if suspend is not implemented by driver
        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])

        with mock.patch.object(self.compute.driver, 'suspend',
                               side_effect=NotImplementedError(
                                   'suspend test')):
            self.assertRaises(NotImplementedError,
                              self.compute.suspend_instance,
                              self.context,
                              instance=instance)

        instance = db.instance_get_by_uuid(self.context, instance.uuid)
        self.assertEqual(vm_states.ACTIVE, instance.vm_state)

    def test_suspend_rescued(self):
        # ensure rescued instance can be suspended and resumed.
        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])

        instance.vm_state = vm_states.RESCUED
        instance.task_state = task_states.SUSPENDING
        instance.save()

        self.compute.suspend_instance(self.context, instance)
        self.assertEqual(instance.vm_state, vm_states.SUSPENDED)

        instance.task_state = task_states.RESUMING
        instance.save()
        self.compute.resume_instance(self.context, instance)
        # Resume must restore the pre-suspend RESCUED state, not ACTIVE.
        self.assertEqual(instance.vm_state, vm_states.RESCUED)

        self.compute.terminate_instance(self.context, instance, [], [])

    def test_resume_notifications(self):
        # ensure instance can be suspended and resumed.
        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])
        instance.task_state = task_states.SUSPENDING
        instance.save()
        self.compute.suspend_instance(self.context, instance)
        instance.task_state = task_states.RESUMING
        instance.save()
        self.compute.resume_instance(self.context, instance)
        # Indices 4 and 5 are the resume pair (0/1 build, 2/3 suspend).
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 6)
        msg = fake_notifier.NOTIFICATIONS[4]
        self.assertEqual(msg.event_type,
                         'compute.instance.resume.start')
        msg = fake_notifier.NOTIFICATIONS[5]
        self.assertEqual(msg.event_type,
                         'compute.instance.resume.end')
        self.compute.terminate_instance(self.context, instance, [], [])

    def test_resume_no_old_state(self):
        # ensure a suspended instance with no old_vm_state is resumed to the
        # ACTIVE state
        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])

        instance.vm_state = vm_states.SUSPENDED
        instance.task_state = task_states.RESUMING
        instance.save()

        self.compute.resume_instance(self.context, instance)
        self.assertEqual(instance.vm_state, vm_states.ACTIVE)

        self.compute.terminate_instance(self.context, instance, [], [])

    def test_resume_error(self):
        # Ensure vm_state is ERROR when resume error occurs.
        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])
        instance.task_state = task_states.SUSPENDING
        instance.save()
        self.compute.suspend_instance(self.context, instance)
        instance.task_state = task_states.RESUMING
        instance.save()
        with mock.patch.object(self.compute.driver, 'resume',
                               side_effect=test.TestingException):
            self.assertRaises(test.TestingException,
                              self.compute.resume_instance,
                              self.context,
                              instance)

        instance = db.instance_get_by_uuid(self.context, instance.uuid)
        self.assertEqual(vm_states.ERROR, instance.vm_state)

    def test_rebuild(self):
        # Ensure instance can be rebuilt.
        instance = self._create_fake_instance_obj()
        image_ref = instance['image_ref']
        sys_metadata = db.instance_system_metadata_get(self.context,
                                                       instance['uuid'])
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])
        db.instance_update(self.context, instance['uuid'],
                           {"task_state": task_states.REBUILDING})
        self.compute.rebuild_instance(self.context, instance,
                                      image_ref, image_ref,
                                      injected_files=[],
                                      new_pass="new_password",
                                      orig_sys_metadata=sys_metadata,
                                      bdms=[], recreate=False,
                                      on_shared_storage=False)
        self.compute.terminate_instance(self.context, instance, [], [])

    def test_rebuild_driver(self):
        # Make sure virt drivers can override default rebuild
        called = {'rebuild': False}

        def fake(**kwargs):
            # Walk the instance through the expected rebuild task states
            # so the manager's expected_task_state checks pass.
            instance = kwargs['instance']
            instance.task_state = task_states.REBUILD_BLOCK_DEVICE_MAPPING
            instance.save(expected_task_state=[task_states.REBUILDING])
            instance.task_state = task_states.REBUILD_SPAWNING
            instance.save(
                expected_task_state=[
                    task_states.REBUILD_BLOCK_DEVICE_MAPPING])
            called['rebuild'] = True

        self.stubs.Set(self.compute.driver, 'rebuild', fake)
        instance = self._create_fake_instance_obj()
        image_ref = instance['image_ref']
        sys_metadata = db.instance_system_metadata_get(self.context,
                                                       instance['uuid'])
        self.compute.build_and_run_instance(self.context, instance,
                                            {}, {}, {},
                                            block_device_mapping=[])
        db.instance_update(self.context, instance['uuid'],
                           {"task_state": task_states.REBUILDING})
        self.compute.rebuild_instance(self.context, instance,
                                      image_ref, image_ref,
                                      injected_files=[],
                                      new_pass="new_password",
                                      orig_sys_metadata=sys_metadata,
                                      bdms=[], recreate=False,
                                      on_shared_storage=False)
        self.assertTrue(called['rebuild'])
        self.compute.terminate_instance(self.context, instance, [], [])

    def test_rebuild_no_image(self):
        # Ensure instance can be rebuilt when started with no image.
        params = {'image_ref': ''}
        instance = self._create_fake_instance_obj(params)
        sys_metadata = db.instance_system_metadata_get(self.context,
                                                       instance['uuid'])
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])
        db.instance_update(self.context, instance['uuid'],
                           {"task_state": task_states.REBUILDING})
        self.compute.rebuild_instance(self.context, instance,
                                      '', '', injected_files=[],
                                      new_pass="new_password",
                                      orig_sys_metadata=sys_metadata,
                                      bdms=[], recreate=False,
                                      on_shared_storage=False)
        self.compute.terminate_instance(self.context, instance, [], [])

    def test_rebuild_launched_at_time(self):
        # Ensure instance can be rebuilt.
        old_time = datetime.datetime(2012, 4, 1)
        cur_time = datetime.datetime(2012, 12, 21, 12, 21)
        timeutils.set_time_override(old_time)
        instance = self._create_fake_instance_obj()
        image_ref = instance['image_ref']

        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])
        timeutils.set_time_override(cur_time)
        db.instance_update(self.context, instance['uuid'],
                           {"task_state": task_states.REBUILDING})
        self.compute.rebuild_instance(self.context, instance,
                                      image_ref, image_ref,
                                      injected_files=[],
                                      new_pass="new_password",
                                      orig_sys_metadata={},
                                      bdms=[], recreate=False,
                                      on_shared_storage=False)
        instance.refresh()
        # launched_at must be reset to the (overridden) rebuild time.
        self.assertEqual(cur_time,
                         instance['launched_at'].replace(tzinfo=None))
        self.compute.terminate_instance(self.context, instance, [], [])

    def test_rebuild_with_injected_files(self):
        # Ensure instance can be rebuilt with injected files.
        injected_files = [
            ('/a/b/c', base64.b64encode('foobarbaz')),
        ]

        self.decoded_files = [
            ('/a/b/c', 'foobarbaz'),
        ]

        def _spawn(context, instance, image_meta, injected_files,
                   admin_password, network_info, block_device_info):
            # The manager must base64-decode the files before spawn.
            self.assertEqual(self.decoded_files, injected_files)

        self.stubs.Set(self.compute.driver, 'spawn', _spawn)
        instance = self._create_fake_instance_obj()
        image_ref = instance['image_ref']
        sys_metadata = db.instance_system_metadata_get(self.context,
                                                       instance['uuid'])
        db.instance_update(self.context, instance['uuid'],
                           {"task_state": task_states.REBUILDING})
        self.compute.rebuild_instance(self.context, instance,
                                      image_ref, image_ref,
                                      injected_files=injected_files,
                                      new_pass="new_password",
                                      orig_sys_metadata=sys_metadata,
                                      bdms=[], recreate=False,
                                      on_shared_storage=False)
        self.compute.terminate_instance(self.context, instance, [], [])

    def _test_reboot(self, soft, test_delete=False, test_unrescue=False,
                     fail_reboot=False, fail_running=False):
        """Exercise reboot_instance with strictly ordered mox expectations.

        :param soft: soft (guest-cooperative) vs hard reboot; selects the
            SOFT/HARD task-state chains below.
        :param test_delete: DB update raises InstanceNotFound, simulating
            the instance being deleted mid-reboot.
        :param test_unrescue: start from the RESCUED vm_state.
        :param fail_reboot: the driver reboot call raises.
        :param fail_running: with fail_reboot, the instance is still
            reported running afterwards, so the failure is non-fatal.
        """
        reboot_type = soft and 'SOFT' or 'HARD'
        task_pending = (soft and task_states.REBOOT_PENDING or
                        task_states.REBOOT_PENDING_HARD)
        task_started = (soft and task_states.REBOOT_STARTED or
                        task_states.REBOOT_STARTED_HARD)
        expected_task = (soft and task_states.REBOOTING or
                         task_states.REBOOTING_HARD)
        expected_tasks = (soft and (task_states.REBOOTING,
                                    task_states.REBOOT_PENDING,
                                    task_states.REBOOT_STARTED) or
                          (task_states.REBOOTING_HARD,
                           task_states.REBOOT_PENDING_HARD,
                           task_states.REBOOT_STARTED_HARD))

        # This is a true unit test, so we don't need the network stubs.
        fake_network.unset_stub_network_methods(self.stubs)

        self.mox.StubOutWithMock(self.compute,
                                 '_get_instance_block_device_info')
        self.mox.StubOutWithMock(self.compute.network_api,
                                 'get_instance_nw_info')
        self.mox.StubOutWithMock(self.compute,
                                 '_notify_about_instance_usage')
        self.mox.StubOutWithMock(self.compute, '_instance_update')
        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
        self.mox.StubOutWithMock(self.compute, '_get_power_state')
        self.mox.StubOutWithMock(self.compute.driver, 'reboot')

        # FIXME(comstud): I don't feel like the context needs to
        # be elevated at all.  Hopefully remove elevated from
        # reboot_instance and remove the stub here in a future patch.
        # econtext would just become self.context below then.
        econtext = self.context.elevated()

        db_instance = fake_instance.fake_db_instance(
            **dict(uuid='fake-instance',
                   power_state=power_state.NOSTATE,
                   vm_state=vm_states.ACTIVE,
                   task_state=expected_task,
                   launched_at=timeutils.utcnow()))
        instance = objects.Instance._from_db_object(econtext,
                                                    objects.Instance(),
                                                    db_instance)

        updated_dbinstance1 = fake_instance.fake_db_instance(
            **dict(uuid='updated-instance1',
                   power_state=10003,
                   vm_state=vm_states.ACTIVE,
                   task_state=expected_task,
                   instance_type=flavors.get_default_flavor(),
                   launched_at=timeutils.utcnow()))
        updated_dbinstance2 = fake_instance.fake_db_instance(
            **dict(uuid='updated-instance2',
                   power_state=10003,
                   vm_state=vm_states.ACTIVE,
                   instance_type=flavors.get_default_flavor(),
                   task_state=expected_task,
                   launched_at=timeutils.utcnow()))

        if test_unrescue:
            instance.vm_state = vm_states.RESCUED
        instance.obj_reset_changes()

        fake_nw_model = network_model.NetworkInfo()

        fake_block_dev_info = 'fake_block_dev_info'
        fake_power_state1 = 10001
        fake_power_state2 = power_state.RUNNING
        fake_power_state3 = 10002

        # Beginning of calls we expect.
        self.mox.StubOutWithMock(self.context, 'elevated')
        self.context.elevated().AndReturn(econtext)

        self.compute._get_instance_block_device_info(
            econtext, instance).AndReturn(fake_block_dev_info)
        self.compute.network_api.get_instance_nw_info(
            econtext, instance).AndReturn(fake_nw_model)
        self.compute._notify_about_instance_usage(econtext,
                                                  instance,
                                                  'reboot.start')
        self.compute._get_power_state(econtext,
                instance).AndReturn(fake_power_state1)
        db.instance_update_and_get_original(econtext, instance['uuid'],
                                            {'task_state': task_pending,
                                             'expected_task_state':
                                                 expected_tasks,
                                             'power_state':
                                                 fake_power_state1},
                                            update_cells=False,
                                            columns_to_join=[
                                                'system_metadata',
                                                'extra',
                                                'extra.flavor']
                                            ).AndReturn((None,
                                                         updated_dbinstance1))
        expected_nw_info = fake_nw_model
        db.instance_update_and_get_original(econtext,
                                            updated_dbinstance1['uuid'],
                                            {'task_state': task_started,
                                             'expected_task_state':
                                                 task_pending},
                                            update_cells=False,
                                            columns_to_join=[
                                                'system_metadata',
                                                'extra',
                                                'extra.flavor']
                                            ).AndReturn((None,
                                                         updated_dbinstance1))

        # Annoying.  driver.reboot is wrapped in a try/except, and
        # doesn't re-raise.  It eats exception generated by mox if
        # this is called with the wrong args, so we have to hack
        # around it.
        reboot_call_info = {}
        expected_call_info = {
            'args': (econtext, instance, expected_nw_info,
                     reboot_type),
            'kwargs': {'block_device_info': fake_block_dev_info}}
        fault = exception.InstanceNotFound(instance_id='instance-0000')

        def fake_reboot(*args, **kwargs):
            reboot_call_info['args'] = args
            reboot_call_info['kwargs'] = kwargs

            # NOTE(sirp): Since `bad_volumes_callback` is a function defined
            # within `reboot_instance`, we don't have access to its value and
            # can't stub it out, thus we skip that comparison.
            kwargs.pop('bad_volumes_callback')
            if fail_reboot:
                raise fault

        self.stubs.Set(self.compute.driver, 'reboot', fake_reboot)

        # Power state should be updated again
        if not fail_reboot or fail_running:
            new_power_state = fake_power_state2
            self.compute._get_power_state(econtext,
                    instance).AndReturn(fake_power_state2)
        else:
            new_power_state = fake_power_state3
            self.compute._get_power_state(econtext,
                    instance).AndReturn(fake_power_state3)

        if test_delete:
            # Simulate the instance disappearing during the final update;
            # reboot.end must still be emitted.
            fault = exception.InstanceNotFound(
                instance_id=instance['uuid'])
            db.instance_update_and_get_original(
                econtext, updated_dbinstance1['uuid'],
                {'power_state': new_power_state,
                 'task_state': None,
                 'vm_state': vm_states.ACTIVE},
                update_cells=False,
                columns_to_join=['system_metadata', 'extra', 'extra.flavor'],
                ).AndRaise(fault)
            self.compute._notify_about_instance_usage(
                econtext,
                instance,
                'reboot.end')
        elif fail_reboot and not fail_running:
            # Fatal reboot failure: instance goes to ERROR.
            db.instance_update_and_get_original(
                econtext, updated_dbinstance1['uuid'],
                {'vm_state': vm_states.ERROR},
                update_cells=False,
                columns_to_join=['system_metadata', 'extra', 'extra.flavor'],
                ).AndRaise(fault)
        else:
            db.instance_update_and_get_original(
                econtext, updated_dbinstance1['uuid'],
                {'power_state': new_power_state,
                 'task_state': None,
                 'vm_state': vm_states.ACTIVE},
                update_cells=False,
                columns_to_join=['system_metadata', 'extra', 'extra.flavor'],
                ).AndReturn((None, updated_dbinstance2))
            if fail_running:
                self.compute._notify_about_instance_usage(econtext, instance,
                        'reboot.error', fault=fault)
            self.compute._notify_about_instance_usage(
                econtext,
                instance,
                'reboot.end')

        self.mox.ReplayAll()

        if not fail_reboot or fail_running:
            self.compute.reboot_instance(self.context, instance=instance,
                                         block_device_info=None,
                                         reboot_type=reboot_type)
        else:
            self.assertRaises(exception.InstanceNotFound,
                              self.compute.reboot_instance,
                              self.context, instance=instance,
                              block_device_info=None,
                              reboot_type=reboot_type)

        # Compare the driver.reboot call captured by fake_reboot (see the
        # mox-eating-exceptions note above).
        self.assertEqual(expected_call_info, reboot_call_info)

    def test_reboot_soft(self):
        self._test_reboot(True)

    def test_reboot_soft_and_delete(self):
        self._test_reboot(True, True)

    def test_reboot_soft_and_rescued(self):
        self._test_reboot(True, False, True)

    def test_reboot_soft_and_delete_and_rescued(self):
        self._test_reboot(True, True, True)

    def test_reboot_hard(self):
        self._test_reboot(False)

    def test_reboot_hard_and_delete(self):
        self._test_reboot(False, True)

    def test_reboot_hard_and_rescued(self):
        self._test_reboot(False, False, True)

    def test_reboot_hard_and_delete_and_rescued(self):
        self._test_reboot(False, True, True)

    @mock.patch.object(jsonutils, 'to_primitive')
    def test_reboot_fail(self, mock_to_primitive):
        self._test_reboot(False, fail_reboot=True)

    def test_reboot_fail_running(self):
        self._test_reboot(False, fail_reboot=True,
                          fail_running=True)

    def test_get_instance_block_device_info_source_image(self):
        # An image-backed volume BDM must appear in the returned
        # block_device_mapping with its parsed connection_info.
        bdms = block_device_obj.block_device_make_list(self.context,
                [fake_block_device.FakeDbBlockDeviceDict({
                    'id': 3,
                    'volume_id': u'4cbc9e62-6ba0-45dd-b647-934942ead7d6',
                    'instance_uuid': 'fake-instance',
                    'device_name': '/dev/vda',
                    'connection_info': '{"driver_volume_type": "rbd"}',
                    'source_type': 'image',
                    'destination_type': 'volume',
                    'image_id': 'fake-image-id-1',
                    'boot_index': 0
                })])

        with (mock.patch.object(
                objects.BlockDeviceMappingList,
                'get_by_instance_uuid',
                return_value=bdms)
        ) as mock_get_by_instance:
            block_device_info = (
                self.compute._get_instance_block_device_info(
                    self.context, self._create_fake_instance_obj())
            )
            expected = {
                'swap': None,
                'ephemerals': [],
                'root_device_name': None,
                'block_device_mapping': [{
                    'connection_info': {
                        'driver_volume_type': 'rbd'
                    },
                    'mount_device': '/dev/vda',
                    'delete_on_termination': False
                }]
            }
            # Without an explicit bdms argument the BDMs are fetched.
            self.assertTrue(mock_get_by_instance.called)
            self.assertEqual(block_device_info, expected)

    def test_get_instance_block_device_info_passed_bdms(self):
        # When bdms are passed in explicitly, no DB lookup happens.
        bdms = block_device_obj.block_device_make_list(self.context,
                [fake_block_device.FakeDbBlockDeviceDict({
                    'id': 3,
                    'volume_id': u'4cbc9e62-6ba0-45dd-b647-934942ead7d6',
                    'device_name': '/dev/vdd',
                    'connection_info': '{"driver_volume_type": "rbd"}',
                    'source_type': 'volume',
                    'destination_type': 'volume'})
                ])
        with (mock.patch.object(
                objects.BlockDeviceMappingList,
                'get_by_instance_uuid')) as mock_get_by_instance:
            block_device_info = (
                self.compute._get_instance_block_device_info(
                    self.context, self._create_fake_instance_obj(),
                    bdms=bdms)
            )
            expected = {
                'swap': None,
                'ephemerals': [],
                'root_device_name': None,
                'block_device_mapping': [{
                    'connection_info': {
                        'driver_volume_type': 'rbd'
                    },
                    'mount_device': '/dev/vdd',
                    'delete_on_termination': False
                }]
            }
            self.assertFalse(mock_get_by_instance.called)
            self.assertEqual(block_device_info, expected)

    def test_get_instance_block_device_info_swap_and_ephemerals(self):
        # Blank/local BDMs must be classified into the 'swap' and
        # 'ephemerals' buckets of the returned block device info.
        instance = self._create_fake_instance_obj()

        ephemeral0 = fake_block_device.FakeDbBlockDeviceDict({
            'id': 1,
            'instance_uuid': 'fake-instance',
            'device_name': '/dev/vdb',
            'source_type': 'blank',
            'destination_type': 'local',
            'device_type': 'disk',
            'disk_bus': 'virtio',
            'delete_on_termination': True,
            'guest_format': None,
            'volume_size': 1,
            'boot_index': -1
        })
        ephemeral1 = fake_block_device.FakeDbBlockDeviceDict({
            'id': 2,
            'instance_uuid': 'fake-instance',
            'device_name': '/dev/vdc',
            'source_type': 'blank',
            'destination_type': 'local',
            'device_type': 'disk',
            'disk_bus': 'virtio',
            'delete_on_termination': True,
            'guest_format': None,
            'volume_size': 2,
            'boot_index': -1
        })
        # guest_format='swap' is what marks this BDM as the swap disk.
        swap = fake_block_device.FakeDbBlockDeviceDict({
            'id': 3,
            'instance_uuid': 'fake-instance',
            'device_name': '/dev/vdd',
            'source_type': 'blank',
            'destination_type': 'local',
            'device_type': 'disk',
            'disk_bus': 'virtio',
            'delete_on_termination': True,
            'guest_format': 'swap',
            'volume_size': 1,
            'boot_index': -1
        })

        bdms = block_device_obj.block_device_make_list(self.context,
            [swap, ephemeral0, ephemeral1])

        with (
            mock.patch.object(objects.BlockDeviceMappingList,
                              'get_by_instance_uuid', return_value=bdms)
        ) as mock_get_by_instance_uuid:
            expected_block_device_info = {
                'swap': {'device_name': '/dev/vdd', 'swap_size': 1},
                'ephemerals': [{'device_name': '/dev/vdb', 'num': 0,
                                'size': 1, 'virtual_name': 'ephemeral0'},
                               {'device_name': '/dev/vdc', 'num': 1,
                                'size': 2, 'virtual_name': 'ephemeral1'}],
                'block_device_mapping': [],
                'root_device_name': None
            }

            block_device_info = (
                self.compute._get_instance_block_device_info(
                    self.context, instance)
            )

            mock_get_by_instance_uuid.assert_called_once_with(
                self.context, instance['uuid'])
            self.assertEqual(expected_block_device_info, block_device_info)

    def test_inject_network_info(self):
        # Ensure we can inject network info.
        called = {'inject': False}

        def fake_driver_inject_network(self, instance, network_info):
            called['inject'] = True

        self.stubs.Set(nova.virt.fake.FakeDriver, 'inject_network_info',
                       fake_driver_inject_network)

        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])
        self.compute.inject_network_info(self.context, instance=instance)
        self.assertTrue(called['inject'])
        self.compute.terminate_instance(self.context, instance, [], [])

    def test_reset_network(self):
        # Ensure we can reset networking on an instance.
        called = {'count': 0}

        def fake_driver_reset_network(self, instance):
            called['count'] += 1

        self.stubs.Set(nova.virt.fake.FakeDriver, 'reset_network',
                       fake_driver_reset_network)

        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])

        self.compute.reset_network(self.context, instance)

        self.assertEqual(called['count'], 1)

        self.compute.terminate_instance(self.context, instance, [], [])

    def _get_snapshotting_instance(self):
        # Helper: build a running instance ready to be snapshotted
        # (task_state set to IMAGE_SNAPSHOT_PENDING).
        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])
        instance.task_state = task_states.IMAGE_SNAPSHOT_PENDING
        instance.save()
        return instance

    def test_snapshot(self):
        # Ensure instance can be snapshotted.
        inst_obj = self._get_snapshotting_instance()
        self.compute.snapshot_instance(self.context, image_id='fakesnap',
                                       instance=inst_obj)

    def test_snapshot_no_image(self):
        # Snapshot must also work for an instance with no image_ref.
        inst_obj = self._get_snapshotting_instance()
        inst_obj.image_ref = ''
        inst_obj.save()
        self.compute.snapshot_instance(self.context, image_id='fakesnap',
                                       instance=inst_obj)

    def _test_snapshot_fails(self, raise_during_cleanup, method,
                             expected_state=True):
        # Helper: make the driver snapshot fail and verify whether the
        # partially-created image was cleaned up (expected_state) and the
        # task_state was reset, for both 'snapshot' and 'backup'.
        def fake_snapshot(*args, **kwargs):
            raise test.TestingException()

        self.fake_image_delete_called = False

        def fake_delete(self_, context, image_id):
            self.fake_image_delete_called = True
            if raise_during_cleanup:
                raise Exception()

        self.stubs.Set(self.compute.driver, 'snapshot', fake_snapshot)
        fake_image.stub_out_image_service(self.stubs)
        self.stubs.Set(fake_image._FakeImageService, 'delete', fake_delete)

        inst_obj = self._get_snapshotting_instance()
        if method == 'snapshot':
            self.assertRaises(test.TestingException,
                              self.compute.snapshot_instance,
                              self.context, image_id='fakesnap',
                              instance=inst_obj)
        else:
            self.assertRaises(test.TestingException,
                              self.compute.backup_instance,
                              self.context, image_id='fakesnap',
                              instance=inst_obj, backup_type='fake',
                              rotation=1)

        self.assertEqual(expected_state, self.fake_image_delete_called)
        self._assert_state({'task_state': None})

    @mock.patch.object(nova.compute.manager.ComputeManager,
                       '_rotate_backups')
    def test_backup_fails(self, mock_rotate):
        self._test_snapshot_fails(False, 'backup')

    @mock.patch.object(nova.compute.manager.ComputeManager,
                       '_rotate_backups')
    def test_backup_fails_cleanup_ignores_exception(self, mock_rotate):
        self._test_snapshot_fails(True, 'backup')

    @mock.patch.object(nova.compute.manager.ComputeManager,
                       '_rotate_backups')
    @mock.patch.object(nova.compute.manager.ComputeManager,
                       '_do_snapshot_instance')
    def test_backup_fails_rotate_backup(self, mock_snap, mock_rotate):
        mock_rotate.side_effect = test.TestingException()
        self._test_snapshot_fails(True, 'backup', False)

    def test_snapshot_fails(self):
        self._test_snapshot_fails(False, 'snapshot')

    def test_snapshot_fails_cleanup_ignores_exception(self):
        self._test_snapshot_fails(True, 'snapshot')

    def _test_snapshot_deletes_image_on_failure(self, status, exc):
        # Helper: driver snapshot raises `exc` while the image service
        # reports the snapshot image in `status`; records whether the
        # image was deleted in self.fake_image_delete_called.
        self.fake_image_delete_called = False

        def fake_show(self_, context, image_id, **kwargs):
            self.assertEqual('fakesnap', image_id)
            image = {'id': image_id,
                     'status': status}
            return image

        self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)

        def fake_delete(self_, context, image_id):
            self.fake_image_delete_called = True
            self.assertEqual('fakesnap', image_id)

        self.stubs.Set(fake_image._FakeImageService, 'delete', fake_delete)

        def fake_snapshot(*args, **kwargs):
            raise exc

        self.stubs.Set(self.compute.driver, 'snapshot', fake_snapshot)

        fake_image.stub_out_image_service(self.stubs)

        inst_obj = self._get_snapshotting_instance()

        self.compute.snapshot_instance(self.context, image_id='fakesnap',
                                       instance=inst_obj)

    def test_snapshot_fails_with_glance_error(self):
        # ImageNotFound from the driver: nothing to clean up.
        image_not_found = exception.ImageNotFound(image_id='fakesnap')
        self._test_snapshot_deletes_image_on_failure('error',
                                                     image_not_found)
        self.assertFalse(self.fake_image_delete_called)
        self._assert_state({'task_state': None})

    def test_snapshot_fails_with_task_state_error(self):
        # Snapshot aborted by a concurrent delete: the image is removed
        # only when it is still in 'error' state, not when 'active'.
        deleting_state_error = exception.UnexpectedDeletingTaskStateError(
            expected=task_states.IMAGE_SNAPSHOT,
            actual=task_states.DELETING)
        self._test_snapshot_deletes_image_on_failure(
            'error', deleting_state_error)
        self.assertTrue(self.fake_image_delete_called)
        self._test_snapshot_deletes_image_on_failure(
            'active', deleting_state_error)
        self.assertFalse(self.fake_image_delete_called)

    def test_snapshot_fails_with_instance_not_found(self):
        # Instance vanished mid-snapshot: same cleanup rule as above.
        instance_not_found = exception.InstanceNotFound(instance_id='uuid')
        self._test_snapshot_deletes_image_on_failure(
            'error', instance_not_found)
        self.assertTrue(self.fake_image_delete_called)
        self._test_snapshot_deletes_image_on_failure(
            'active', instance_not_found)
        self.assertFalse(self.fake_image_delete_called)

    def test_snapshot_handles_cases_when_instance_is_deleted(self):
        # Snapshotting an instance already being deleted must not raise.
        inst_obj = self._get_snapshotting_instance()
        inst_obj.task_state = task_states.DELETING
        inst_obj.save()
        self.compute.snapshot_instance(self.context, image_id='fakesnap',
                                       instance=inst_obj)

    def test_snapshot_handles_cases_when_instance_is_not_found(self):
        # Snapshotting an instance whose DB record is gone must not raise.
        inst_obj = self._get_snapshotting_instance()
        inst_obj2 = objects.Instance.get_by_uuid(self.context,
                                                 inst_obj.uuid)
        inst_obj2.destroy()
        self.compute.snapshot_instance(self.context, image_id='fakesnap',
                                       instance=inst_obj)

    def _assert_state(self, state_dict):
        """Assert state of VM is equal to state passed as parameter."""
        instances = db.instance_get_all(self.context)
        self.assertEqual(len(instances), 1)

        if 'vm_state' in state_dict:
            self.assertEqual(state_dict['vm_state'],
                             instances[0]['vm_state'])
        if 'task_state' in state_dict:
            self.assertEqual(state_dict['task_state'],
                             instances[0]['task_state'])
        if 'power_state' in state_dict:
            self.assertEqual(state_dict['power_state'],
                             instances[0]['power_state'])

    def test_console_output(self):
        # Make sure we can get console output from instance.
        instance = self._create_fake_instance_obj()
        self.compute.build_and_run_instance(self.context, instance, {}, {},
                                            {}, block_device_mapping=[])

        output = self.compute.get_console_output(self.context,
                instance=instance, tail_length=None)
        self.assertEqual(output, 'FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE')
        self.compute.terminate_instance(self.context, instance, [], [])

    def test_console_output_tail(self):
        # Make sure we can get console output from instance.
instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) output = self.compute.get_console_output(self.context, instance=instance, tail_length=2) self.assertEqual(output, 'ANOTHER\nLAST LINE') self.compute.terminate_instance(self.context, instance, [], []) def test_console_output_not_implemented(self): def fake_not_implemented(*args, **kwargs): raise NotImplementedError() self.stubs.Set(self.compute.driver, 'get_console_output', fake_not_implemented) instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) self.assertRaises(messaging.ExpectedException, self.compute.get_console_output, self.context, instance, 0) self.compute = utils.ExceptionHelper(self.compute) self.assertRaises(NotImplementedError, self.compute.get_console_output, self.context, instance, 0) self.compute.terminate_instance(self.context, instance, [], []) def test_console_output_instance_not_found(self): def fake_not_found(*args, **kwargs): raise exception.InstanceNotFound(instance_id='fake-instance') self.stubs.Set(self.compute.driver, 'get_console_output', fake_not_found) instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) self.assertRaises(messaging.ExpectedException, self.compute.get_console_output, self.context, instance, 0) self.compute = utils.ExceptionHelper(self.compute) self.assertRaises(exception.InstanceNotFound, self.compute.get_console_output, self.context, instance, 0) self.compute.terminate_instance(self.context, instance, [], []) def test_novnc_vnc_console(self): # Make sure we can a vnc console for an instance. 
self.flags(vnc_enabled=True) self.flags(enabled=False, group='spice') instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) # Try with the full instance console = self.compute.get_vnc_console(self.context, 'novnc', instance=instance) self.assertTrue(console) self.compute.terminate_instance(self.context, instance, [], []) def test_validate_console_port_vnc(self): self.flags(vnc_enabled=True) self.flags(enabled=True, group='spice') instance = self._create_fake_instance_obj() def fake_driver_get_console(*args, **kwargs): return ctype.ConsoleVNC(host="fake_host", port=5900) self.stubs.Set(self.compute.driver, "get_vnc_console", fake_driver_get_console) self.assertTrue(self.compute.validate_console_port( context=self.context, instance=instance, port=5900, console_type="novnc")) def test_validate_console_port_spice(self): self.flags(vnc_enabled=True) self.flags(enabled=True, group='spice') instance = self._create_fake_instance_obj() def fake_driver_get_console(*args, **kwargs): return ctype.ConsoleSpice(host="fake_host", port=5900, tlsPort=88) self.stubs.Set(self.compute.driver, "get_spice_console", fake_driver_get_console) self.assertTrue(self.compute.validate_console_port( context=self.context, instance=instance, port=5900, console_type="spice-html5")) def test_validate_console_port_rdp(self): self.flags(enabled=True, group='rdp') instance = self._create_fake_instance_obj() def fake_driver_get_console(*args, **kwargs): return ctype.ConsoleRDP(host="fake_host", port=5900) self.stubs.Set(self.compute.driver, "get_rdp_console", fake_driver_get_console) self.assertTrue(self.compute.validate_console_port( context=self.context, instance=instance, port=5900, console_type="rdp-html5")) def test_validate_console_port_wrong_port(self): self.flags(vnc_enabled=True) self.flags(enabled=True, group='spice') instance = self._create_fake_instance_obj() def fake_driver_get_console(*args, **kwargs): 
return ctype.ConsoleSpice(host="fake_host", port=5900, tlsPort=88) self.stubs.Set(self.compute.driver, "get_vnc_console", fake_driver_get_console) self.assertFalse(self.compute.validate_console_port( context=self.context, instance=instance, port="wrongport", console_type="spice-html5")) def test_xvpvnc_vnc_console(self): # Make sure we can a vnc console for an instance. self.flags(vnc_enabled=True) self.flags(enabled=False, group='spice') instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) console = self.compute.get_vnc_console(self.context, 'xvpvnc', instance=instance) self.assertTrue(console) self.compute.terminate_instance(self.context, instance, [], []) def test_invalid_vnc_console_type(self): # Raise useful error if console type is an unrecognised string. self.flags(vnc_enabled=True) self.flags(enabled=False, group='spice') instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) self.assertRaises(messaging.ExpectedException, self.compute.get_vnc_console, self.context, 'invalid', instance=instance) self.compute = utils.ExceptionHelper(self.compute) self.assertRaises(exception.ConsoleTypeInvalid, self.compute.get_vnc_console, self.context, 'invalid', instance=instance) self.compute.terminate_instance(self.context, instance, [], []) def test_missing_vnc_console_type(self): # Raise useful error is console type is None. 
self.flags(vnc_enabled=True) self.flags(enabled=False, group='spice') instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) self.assertRaises(messaging.ExpectedException, self.compute.get_vnc_console, self.context, None, instance=instance) self.compute = utils.ExceptionHelper(self.compute) self.assertRaises(exception.ConsoleTypeInvalid, self.compute.get_vnc_console, self.context, None, instance=instance) self.compute.terminate_instance(self.context, instance, [], []) def test_get_vnc_console_not_implemented(self): self.stubs.Set(self.compute.driver, 'get_vnc_console', fake_not_implemented) instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) self.assertRaises(messaging.ExpectedException, self.compute.get_vnc_console, self.context, 'novnc', instance=instance) self.compute = utils.ExceptionHelper(self.compute) self.assertRaises(NotImplementedError, self.compute.get_vnc_console, self.context, 'novnc', instance=instance) self.compute.terminate_instance(self.context, instance, [], []) def test_spicehtml5_spice_console(self): # Make sure we can a spice console for an instance. 
self.flags(vnc_enabled=False) self.flags(enabled=True, group='spice') instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) # Try with the full instance console = self.compute.get_spice_console(self.context, 'spice-html5', instance=instance) self.assertTrue(console) self.compute.terminate_instance(self.context, instance, [], []) def test_invalid_spice_console_type(self): # Raise useful error if console type is an unrecognised string self.flags(vnc_enabled=False) self.flags(enabled=True, group='spice') instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) self.assertRaises(messaging.ExpectedException, self.compute.get_spice_console, self.context, 'invalid', instance=instance) self.compute = utils.ExceptionHelper(self.compute) self.assertRaises(exception.ConsoleTypeInvalid, self.compute.get_spice_console, self.context, 'invalid', instance=instance) self.compute.terminate_instance(self.context, instance, [], []) def test_get_spice_console_not_implemented(self): self.stubs.Set(self.compute.driver, 'get_spice_console', fake_not_implemented) self.flags(vnc_enabled=False) self.flags(enabled=True, group='spice') instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) self.assertRaises(messaging.ExpectedException, self.compute.get_spice_console, self.context, 'spice-html5', instance=instance) self.compute = utils.ExceptionHelper(self.compute) self.assertRaises(NotImplementedError, self.compute.get_spice_console, self.context, 'spice-html5', instance=instance) self.compute.terminate_instance(self.context, instance, [], []) def test_missing_spice_console_type(self): # Raise useful error is console type is None self.flags(vnc_enabled=False) self.flags(enabled=True, group='spice') instance = self._create_fake_instance_obj() 
self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) self.assertRaises(messaging.ExpectedException, self.compute.get_spice_console, self.context, None, instance=instance) self.compute = utils.ExceptionHelper(self.compute) self.assertRaises(exception.ConsoleTypeInvalid, self.compute.get_spice_console, self.context, None, instance=instance) self.compute.terminate_instance(self.context, instance, [], []) def test_rdphtml5_rdp_console(self): # Make sure we can a rdp console for an instance. self.flags(vnc_enabled=False) self.flags(enabled=True, group='rdp') instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) # Try with the full instance console = self.compute.get_rdp_console(self.context, 'rdp-html5', instance=instance) self.assertTrue(console) self.compute.terminate_instance(self.context, instance, [], []) def test_invalid_rdp_console_type(self): # Raise useful error if console type is an unrecognised string self.flags(vnc_enabled=False) self.flags(enabled=True, group='rdp') instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) self.assertRaises(messaging.ExpectedException, self.compute.get_rdp_console, self.context, 'invalid', instance=instance) self.compute = utils.ExceptionHelper(self.compute) self.assertRaises(exception.ConsoleTypeInvalid, self.compute.get_rdp_console, self.context, 'invalid', instance=instance) self.compute.terminate_instance(self.context, instance, [], []) def test_missing_rdp_console_type(self): # Raise useful error is console type is None self.flags(vnc_enabled=False) self.flags(enabled=True, group='rdp') instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) self.assertRaises(messaging.ExpectedException, self.compute.get_rdp_console, 
self.context, None, instance=instance) self.compute = utils.ExceptionHelper(self.compute) self.assertRaises(exception.ConsoleTypeInvalid, self.compute.get_rdp_console, self.context, None, instance=instance) self.compute.terminate_instance(self.context, instance, [], []) def test_vnc_console_instance_not_ready(self): self.flags(vnc_enabled=True) self.flags(enabled=False, group='spice') instance = self._create_fake_instance_obj( params={'vm_state': vm_states.BUILDING}) def fake_driver_get_console(*args, **kwargs): raise exception.InstanceNotFound(instance_id=instance['uuid']) self.stubs.Set(self.compute.driver, "get_vnc_console", fake_driver_get_console) self.compute = utils.ExceptionHelper(self.compute) self.assertRaises(exception.InstanceNotReady, self.compute.get_vnc_console, self.context, 'novnc', instance=instance) def test_spice_console_instance_not_ready(self): self.flags(vnc_enabled=False) self.flags(enabled=True, group='spice') instance = self._create_fake_instance_obj( params={'vm_state': vm_states.BUILDING}) def fake_driver_get_console(*args, **kwargs): raise exception.InstanceNotFound(instance_id=instance['uuid']) self.stubs.Set(self.compute.driver, "get_spice_console", fake_driver_get_console) self.compute = utils.ExceptionHelper(self.compute) self.assertRaises(exception.InstanceNotReady, self.compute.get_spice_console, self.context, 'spice-html5', instance=instance) def test_rdp_console_instance_not_ready(self): self.flags(vnc_enabled=False) self.flags(enabled=True, group='rdp') instance = self._create_fake_instance_obj( params={'vm_state': vm_states.BUILDING}) def fake_driver_get_console(*args, **kwargs): raise exception.InstanceNotFound(instance_id=instance['uuid']) self.stubs.Set(self.compute.driver, "get_rdp_console", fake_driver_get_console) self.compute = utils.ExceptionHelper(self.compute) self.assertRaises(exception.InstanceNotReady, self.compute.get_rdp_console, self.context, 'rdp-html5', instance=instance) def test_vnc_console_disabled(self): 
self.flags(vnc_enabled=False) instance = self._create_fake_instance_obj( params={'vm_state': vm_states.BUILDING}) self.compute = utils.ExceptionHelper(self.compute) self.assertRaises(exception.ConsoleTypeUnavailable, self.compute.get_vnc_console, self.context, 'novnc', instance=instance) def test_spice_console_disabled(self): self.flags(enabled=False, group='spice') instance = self._create_fake_instance_obj( params={'vm_state': vm_states.BUILDING}) self.compute = utils.ExceptionHelper(self.compute) self.assertRaises(exception.ConsoleTypeUnavailable, self.compute.get_spice_console, self.context, 'spice-html5', instance=instance) def test_rdp_console_disabled(self): self.flags(enabled=False, group='rdp') instance = self._create_fake_instance_obj( params={'vm_state': vm_states.BUILDING}) self.compute = utils.ExceptionHelper(self.compute) self.assertRaises(exception.ConsoleTypeUnavailable, self.compute.get_rdp_console, self.context, 'rdp-html5', instance=instance) def test_diagnostics(self): # Make sure we can get diagnostics for an instance. expected_diagnostic = {'cpu0_time': 17300000000, 'memory': 524288, 'vda_errors': -1, 'vda_read': 262144, 'vda_read_req': 112, 'vda_write': 5778432, 'vda_write_req': 488, 'vnet1_rx': 2070139, 'vnet1_rx_drop': 0, 'vnet1_rx_errors': 0, 'vnet1_rx_packets': 26701, 'vnet1_tx': 140208, 'vnet1_tx_drop': 0, 'vnet1_tx_errors': 0, 'vnet1_tx_packets': 662, } instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) diagnostics = self.compute.get_diagnostics(self.context, instance=instance) self.assertEqual(diagnostics, expected_diagnostic) self.compute.terminate_instance(self.context, instance, [], []) def test_instance_diagnostics(self): # Make sure we can get diagnostics for an instance. 
instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) diagnostics = self.compute.get_instance_diagnostics(self.context, instance=instance) expected = {'config_drive': True, 'cpu_details': [{'time': 17300000000}], 'disk_details': [{'errors_count': 0, 'id': 'fake-disk-id', 'read_bytes': 262144, 'read_requests': 112, 'write_bytes': 5778432, 'write_requests': 488}], 'driver': 'fake', 'hypervisor_os': 'fake-os', 'memory_details': {'maximum': 524288, 'used': 0}, 'nic_details': [{'mac_address': '01:23:45:67:89:ab', 'rx_drop': 0, 'rx_errors': 0, 'rx_octets': 2070139, 'rx_packets': 26701, 'tx_drop': 0, 'tx_errors': 0, 'tx_octets': 140208, 'tx_packets': 662}], 'state': 'running', 'uptime': 46664, 'version': '1.0'} self.assertEqual(expected, diagnostics) self.compute.terminate_instance(self.context, instance, [], []) def test_add_fixed_ip_usage_notification(self): def dummy(*args, **kwargs): pass self.stubs.Set(network_api.API, 'add_fixed_ip_to_instance', dummy) self.stubs.Set(nova.compute.manager.ComputeManager, 'inject_network_info', dummy) self.stubs.Set(nova.compute.manager.ComputeManager, 'reset_network', dummy) instance = self._create_fake_instance_obj() self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0) self.compute.add_fixed_ip_to_instance(self.context, network_id=1, instance=instance) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2) self.compute.terminate_instance(self.context, instance, [], []) def test_remove_fixed_ip_usage_notification(self): def dummy(*args, **kwargs): pass self.stubs.Set(network_api.API, 'remove_fixed_ip_from_instance', dummy) self.stubs.Set(nova.compute.manager.ComputeManager, 'inject_network_info', dummy) self.stubs.Set(nova.compute.manager.ComputeManager, 'reset_network', dummy) instance = self._create_fake_instance_obj() self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0) self.compute.remove_fixed_ip_from_instance(self.context, 1, 
instance=instance) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2) self.compute.terminate_instance(self.context, instance, [], []) def test_run_instance_usage_notification(self, request_spec=None): # Ensure run instance generates appropriate usage notification. request_spec = request_spec or {} instance = self._create_fake_instance_obj() expected_image_name = request_spec.get('image', {}).get('name', '') self.compute.build_and_run_instance(self.context, instance, request_spec=request_spec, filter_properties={}, image={'name': expected_image_name}, block_device_mapping=[]) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2) instance.refresh() msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.event_type, 'compute.instance.create.start') # The last event is the one with the sugar in it. msg = fake_notifier.NOTIFICATIONS[1] self.assertEqual(msg.priority, 'INFO') self.assertEqual(msg.event_type, 'compute.instance.create.end') payload = msg.payload self.assertEqual(payload['tenant_id'], self.project_id) self.assertEqual(payload['user_id'], self.user_id) self.assertEqual(payload['instance_id'], instance['uuid']) self.assertEqual(payload['instance_type'], 'm1.tiny') type_id = flavors.get_flavor_by_name('m1.tiny')['id'] self.assertEqual(str(payload['instance_type_id']), str(type_id)) flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid'] self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id)) self.assertEqual(payload['state'], 'active') self.assertIn('display_name', payload) self.assertIn('created_at', payload) self.assertIn('launched_at', payload) self.assertIn('fixed_ips', payload) self.assertTrue(payload['launched_at']) image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF) self.assertEqual(payload['image_ref_url'], image_ref_url) self.assertEqual('Success', payload['message']) self.compute.terminate_instance(self.context, instance, [], []) def test_run_instance_image_usage_notification(self): request_spec = {'image': {'name': 
'fake_name', 'key': 'value'}} self.test_run_instance_usage_notification(request_spec=request_spec) def test_run_instance_usage_notification_volume_meta(self): # Volume's image metadata won't contain the image name request_spec = {'image': {'key': 'value'}} self.test_run_instance_usage_notification(request_spec=request_spec) def test_run_instance_end_notification_on_abort(self): # Test that an error notif is sent if the build is aborted instance = self._create_fake_instance_obj() instance_uuid = instance['uuid'] def build_inst_abort(*args, **kwargs): raise exception.BuildAbortException(reason="already deleted", instance_uuid=instance_uuid) self.stubs.Set(self.compute.driver, 'spawn', build_inst_abort) self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) self.assertTrue(len(fake_notifier.NOTIFICATIONS) >= 2) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.event_type, 'compute.instance.create.start') msg = fake_notifier.NOTIFICATIONS[-1] self.assertEqual(msg.event_type, 'compute.instance.create.error') self.assertEqual('ERROR', msg.priority) payload = msg.payload message = payload['message'] self.assertNotEqual(-1, message.find("already deleted")) def test_run_instance_error_notification_on_reschedule(self): # Test that error notif is sent if the build got rescheduled instance = self._create_fake_instance_obj() instance_uuid = instance['uuid'] def build_inst_fail(*args, **kwargs): raise exception.RescheduledException(instance_uuid=instance_uuid, reason="something bad happened") self.stubs.Set(self.compute.driver, 'spawn', build_inst_fail) self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) self.assertTrue(len(fake_notifier.NOTIFICATIONS) >= 2) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.event_type, 'compute.instance.create.start') msg = fake_notifier.NOTIFICATIONS[-1] self.assertEqual(msg.event_type, 'compute.instance.create.error') 
self.assertEqual('ERROR', msg.priority) payload = msg.payload message = payload['message'] self.assertNotEqual(-1, message.find("something bad happened")) def test_run_instance_error_notification_on_failure(self): # Test that error notif is sent if build fails hard instance = self._create_fake_instance_obj() def build_inst_fail(*args, **kwargs): raise test.TestingException("i'm dying") self.stubs.Set(self.compute.driver, 'spawn', build_inst_fail) self.compute.build_and_run_instance( self.context, instance, {}, {}, {}, block_device_mapping=[]) self.assertTrue(len(fake_notifier.NOTIFICATIONS) >= 2) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.event_type, 'compute.instance.create.start') msg = fake_notifier.NOTIFICATIONS[-1] self.assertEqual(msg.event_type, 'compute.instance.create.error') self.assertEqual('ERROR', msg.priority) payload = msg.payload message = payload['message'] self.assertNotEqual(-1, message.find("i'm dying")) def test_terminate_usage_notification(self): # Ensure terminate_instance generates correct usage notification. old_time = datetime.datetime(2012, 4, 1) cur_time = datetime.datetime(2012, 12, 21, 12, 21) timeutils.set_time_override(old_time) instance = self._create_fake_instance_obj() self.compute.build_and_run_instance(self.context, instance, {}, {}, {}, block_device_mapping=[]) fake_notifier.NOTIFICATIONS = [] timeutils.set_time_override(cur_time) self.compute.terminate_instance(self.context, instance, [], []) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 4) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.priority, 'INFO') self.assertEqual(msg.event_type, 'compute.instance.delete.start') msg1 = fake_notifier.NOTIFICATIONS[1] self.assertEqual(msg1.event_type, 'compute.instance.shutdown.start') msg1 = fake_notifier.NOTIFICATIONS[2] self.assertEqual(msg1.event_type
codeparrot/github-code-clean
# Copyright (c) 2014 Dell Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import uuid from cinder import context from cinder import exception from cinder.objects import fields from cinder import test from cinder.volume.drivers.dell import dell_storagecenter_api from cinder.volume.drivers.dell import dell_storagecenter_iscsi from cinder.volume import volume_types # We patch these here as they are used by every test to keep # from trying to contact a Dell Storage Center. @mock.patch.object(dell_storagecenter_api.StorageCenterApi, '__init__', return_value=None) @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'open_connection') @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'close_connection') class DellSCSanISCSIDriverTestCase(test.TestCase): VOLUME = {u'instanceId': u'64702.3494', u'scSerialNumber': 64702, u'replicationSource': False, u'liveVolume': False, u'vpdId': 3496, u'objectType': u'ScVolume', u'index': 3494, u'volumeFolderPath': u'devstackvol/fcvm/', u'hostCacheEnabled': False, u'usedByLegacyFluidFsNasVolume': False, u'inRecycleBin': False, u'volumeFolderIndex': 17, u'instanceName': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea', u'statusMessage': u'', u'status': u'Up', u'storageType': {u'instanceId': u'64702.1', u'instanceName': u'Assigned - Redundant - 2 MB', u'objectType': u'ScStorageType'}, u'cmmDestination': False, u'replicationDestination': False, u'volumeFolder': {u'instanceId': u'64702.17', u'instanceName': u'fcvm', 
u'objectType': u'ScVolumeFolder'}, u'deviceId': u'6000d31000fcbe000000000000000da8', u'active': True, u'portableVolumeDestination': False, u'deleteAllowed': True, u'name': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea', u'scName': u'Storage Center 64702', u'secureDataUsed': False, u'serialNumber': u'0000fcbe-00000da8', u'replayAllowed': True, u'flashOptimized': False, u'configuredSize': u'1.073741824E9 Bytes', u'mapped': False, u'cmmSource': False} SCSERVER = {u'scName': u'Storage Center 64702', u'volumeCount': 0, u'removeHbasAllowed': True, u'legacyFluidFs': False, u'serverFolderIndex': 4, u'alertOnConnectivity': True, u'objectType': u'ScPhysicalServer', u'instanceName': u'Server_21000024ff30441d', u'instanceId': u'64702.47', u'serverFolderPath': u'devstacksrv/', u'portType': [u'FibreChannel'], u'type': u'Physical', u'statusMessage': u'Only 5 of 6 expected paths are up', u'status': u'Degraded', u'scSerialNumber': 64702, u'serverFolder': {u'instanceId': u'64702.4', u'instanceName': u'devstacksrv', u'objectType': u'ScServerFolder'}, u'parentIndex': 0, u'connectivity': u'Partial', u'hostCacheIndex': 0, u'deleteAllowed': True, u'pathCount': 5, u'name': u'Server_21000024ff30441d', u'hbaPresent': True, u'hbaCount': 2, u'notes': u'Created by Dell Cinder Driver', u'mapped': False, u'operatingSystem': {u'instanceId': u'64702.38', u'instanceName': u'Red Hat Linux 6.x', u'objectType': u'ScServerOperatingSystem'} } MAPPINGS = [{u'profile': {u'instanceId': u'64702.104', u'instanceName': u'92-30', u'objectType': u'ScMappingProfile'}, u'status': u'Down', u'statusMessage': u'', u'instanceId': u'64702.969.64702', u'scName': u'Storage Center 64702', u'scSerialNumber': 64702, u'controller': {u'instanceId': u'64702.64702', u'instanceName': u'SN 64702', u'objectType': u'ScController'}, u'server': {u'instanceId': u'64702.30', u'instanceName': u'Server_iqn.1993-08.org.debian:01:3776df826e4f', u'objectType': u'ScPhysicalServer'}, u'volume': {u'instanceId': u'64702.92', u'instanceName': 
u'volume-74a21934-60ad-4cf2-b89b-1f0dda309ddf', u'objectType': u'ScVolume'}, u'readOnly': False, u'lun': 1, u'lunUsed': [1], u'serverHba': {u'instanceId': u'64702.3454975614', u'instanceName': u'iqn.1993-08.org.debian:01:3776df826e4f', u'objectType': u'ScServerHba'}, u'path': {u'instanceId': u'64702.64702.64702.31.8', u'instanceName': u'iqn.1993-08.org.debian:' '01:3776df826e4f-5000D31000FCBE43', u'objectType': u'ScServerHbaPath'}, u'controllerPort': {u'instanceId': u'64702.5764839588723736131.91', u'instanceName': u'5000D31000FCBE43', u'objectType': u'ScControllerPort'}, u'instanceName': u'64702-969', u'transport': u'Iscsi', u'objectType': u'ScMapping'}] RPLAY = {u'scSerialNumber': 64702, u'globalIndex': u'64702-46-250', u'description': u'Cinder Clone Replay', u'parent': {u'instanceId': u'64702.46.249', u'instanceName': u'64702-46-249', u'objectType': u'ScReplay'}, u'instanceId': u'64702.46.250', u'scName': u'Storage Center 64702', u'consistent': False, u'expires': True, u'freezeTime': u'12/09/2014 03:52:08 PM', u'createVolume': {u'instanceId': u'64702.46', u'instanceName': u'volume-ff9589d3-2d41-48d5-9ef5-2713a875e85b', u'objectType': u'ScVolume'}, u'expireTime': u'12/09/2014 04:52:08 PM', u'source': u'Manual', u'spaceRecovery': False, u'writesHeldDuration': 7910, u'active': False, u'markedForExpiration': False, u'objectType': u'ScReplay', u'instanceName': u'12/09/2014 03:52:08 PM', u'size': u'0.0 Bytes' } SCRPLAYPROFILE = {u'ruleCount': 0, u'name': u'fc8f2fec-fab2-4e34-9148-c094c913b9a3', u'volumeCount': 0, u'scName': u'Storage Center 64702', u'notes': u'Created by Dell Cinder Driver', u'scSerialNumber': 64702, u'userCreated': True, u'instanceName': u'fc8f2fec-fab2-4e34-9148-c094c913b9a3', u'instanceId': u'64702.11', u'enforceReplayCreationTimeout': False, u'replayCreationTimeout': 20, u'objectType': u'ScReplayProfile', u'type': u'Consistent', u'expireIncompleteReplaySets': True} IQN = 'iqn.2002-03.com.compellent:5000D31000000001' ISCSI_PROPERTIES = 
{'access_mode': 'rw',
                        'target_discovered': False,
                        'target_iqn':
                            u'iqn.2002-03.com.compellent:5000d31000fcbe43',
                        'target_iqns':
                            [u'iqn.2002-03.com.compellent:5000d31000fcbe43',
                             u'iqn.2002-03.com.compellent:5000d31000fcbe44'],
                        'target_lun': 1,
                        'target_luns': [1, 1],
                        'target_portal': u'192.168.0.21:3260',
                        'target_portals': [u'192.168.0.21:3260',
                                           u'192.168.0.22:3260']}

    def setUp(self):
        """Build a driver instance wired to a mocked configuration.

        Creates the DellStorageCenterISCSIDriver under test with a
        mock.Mock() configuration, seeds driver stats, and prepares the
        connectors / expected-properties fixtures the tests below use.
        """
        super(DellSCSanISCSIDriverTestCase, self).setUp()

        # configuration is a mock. A mock is pretty much a blank
        # slate. I believe mock's done in setup are not happy time
        # mocks. So we just do a few things like driver config here.
        self.configuration = mock.Mock()

        self.configuration.san_is_local = False
        self.configuration.san_ip = "192.168.0.1"
        self.configuration.san_login = "admin"
        self.configuration.san_password = "mmm"
        self.configuration.dell_sc_ssn = 12345
        self.configuration.dell_sc_server_folder = 'opnstktst'
        self.configuration.dell_sc_volume_folder = 'opnstktst'
        self.configuration.dell_sc_api_port = 3033
        self.configuration.iscsi_ip_address = '192.168.1.1'
        self.configuration.iscsi_port = 3260
        self._context = context.get_admin_context()

        # Driver under test; do_setup(None) runs against the mocked
        # configuration, not a live backend.
        self.driver = dell_storagecenter_iscsi.DellStorageCenterISCSIDriver(
            configuration=self.configuration)

        self.driver.do_setup(None)

        self.driver._stats = {'QoS_support': False,
                              'volume_backend_name': 'dell-1',
                              'free_capacity_gb': 12123,
                              'driver_version': '1.0.1',
                              'total_capacity_gb': 12388,
                              'reserved_percentage': 0,
                              'vendor_name': 'Dell',
                              'storage_protocol': 'iSCSI'}

        # Start with none. Add in the specific tests later.
        # Mock tests bozo this.
        self.driver.backends = None
        self.driver.replication_enabled = False

        self.volid = str(uuid.uuid4())
        self.volume_name = "volume" + self.volid
        self.connector = {
            'ip': '10.0.0.2',
            'initiator': 'iqn.1993-08.org.debian:01:2227dab76162',
            'host': 'fakehost'}
        self.connector_multipath = {
            'ip': '10.0.0.2',
            'initiator': 'iqn.1993-08.org.debian:01:2227dab76162',
            'host': 'fakehost',
            'multipath': True}
        self.access_record_output = [
            "ID Initiator Ipaddress AuthMethod UserName Apply-To",
            "--- --------------- ------------- ---------- ---------- --------",
            "1 iqn.1993-08.org.debian:01:222 *.*.*.* none both",
            " 7dab76162"]

        self.fake_iqn = 'iqn.2002-03.com.compellent:5000D31000000001'
        self.properties = {
            'target_discovered': True,
            'target_portal': '%s:3260'
                             % self.driver.configuration.dell_sc_iscsi_ip,
            'target_iqn': self.fake_iqn,
            'volume_id': 1}
        self._model_update = {
            'provider_location': "%s:3260,1 %s 0"
                                 % (self.driver.configuration.dell_sc_iscsi_ip,
                                    self.fake_iqn)
            # ,
            # 'provider_auth': 'CHAP %s %s' % (
            #     self.configuration.eqlx_chap_login,
            #     self.configuration.eqlx_chap_password)
        }

    # NOTE(review): the trailing mock_close_connection / mock_open_connection /
    # mock_init parameters on every test are presumably injected by class-level
    # @mock.patch decorators defined above this chunk -- confirm against the
    # class definition.
    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_get_volume_extra_specs')
    def test__create_replications(self,
                                  mock_get_volume_extra_specs,
                                  mock_close_connection,
                                  mock_open_connection,
                                  mock_init):
        """_create_replications: qos/sync/diskfolder variants and failure."""
        backends = self.driver.backends
        mock_get_volume_extra_specs.return_value = {
            'replication_enabled': '<is> True'}
        model_update = {'replication_status': 'enabled',
                        'replication_driver_data': '12345,67890'}

        vol = {'id': 'guid', 'replication_driver_data': ''}
        scvol = {'name': 'guid'}
        self.driver.backends = [{'target_device_id': '12345',
                                 'managed_backend_name': 'host@dell1',
                                 'qosnode': 'cinderqos'},
                                {'target_device_id': '67890',
                                 'managed_backend_name': 'host@dell2',
                                 'qosnode': 'otherqos'}]
        mock_api = mock.MagicMock()
        mock_api.create_replication = mock.MagicMock(
            return_value={'instanceId': '1'})
        # Create regular replication test.
        res = self.driver._create_replications(mock_api, vol, scvol)
        mock_api.create_replication.assert_any_call(
            scvol, '12345', 'cinderqos', False, None, False)
        mock_api.create_replication.assert_any_call(
            scvol, '67890', 'otherqos', False, None, False)
        self.assertEqual(model_update, res)
        # Create replication with activereplay set.
        mock_get_volume_extra_specs.return_value = {
            'replication:activereplay': '<is> True',
            'replication_enabled': '<is> True'}
        res = self.driver._create_replications(mock_api, vol, scvol)
        mock_api.create_replication.assert_any_call(
            scvol, '12345', 'cinderqos', False, None, True)
        mock_api.create_replication.assert_any_call(
            scvol, '67890', 'otherqos', False, None, True)
        self.assertEqual(model_update, res)
        # Create replication with sync set.
        mock_get_volume_extra_specs.return_value = {
            'replication:activereplay': '<is> True',
            'replication_enabled': '<is> True',
            'replication_type': '<in> sync'}
        res = self.driver._create_replications(mock_api, vol, scvol)
        mock_api.create_replication.assert_any_call(
            scvol, '12345', 'cinderqos', True, None, True)
        mock_api.create_replication.assert_any_call(
            scvol, '67890', 'otherqos', True, None, True)
        self.assertEqual(model_update, res)
        # Create replication with disk folder set.
        self.driver.backends = [{'target_device_id': '12345',
                                 'managed_backend_name': 'host@dell1',
                                 'qosnode': 'cinderqos',
                                 'diskfolder': 'ssd'},
                                {'target_device_id': '67890',
                                 'managed_backend_name': 'host@dell2',
                                 'qosnode': 'otherqos',
                                 'diskfolder': 'ssd'}]
        mock_get_volume_extra_specs.return_value = {
            'replication:activereplay': '<is> True',
            'replication_enabled': '<is> True',
            'replication_type': '<in> sync'}
        res = self.driver._create_replications(mock_api, vol, scvol)
        mock_api.create_replication.assert_any_call(
            scvol, '12345', 'cinderqos', True, 'ssd', True)
        mock_api.create_replication.assert_any_call(
            scvol, '67890', 'otherqos', True, 'ssd', True)
        self.assertEqual(model_update, res)
        # Failed to create replication test.
        mock_api.create_replication.return_value = None
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver._create_replications,
                          mock_api,
                          vol,
                          scvol)
        # Replication not enabled test
        mock_get_volume_extra_specs.return_value = {}
        res = self.driver._create_replications(mock_api, vol, scvol)
        self.assertEqual({}, res)
        self.driver.backends = backends

    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_get_volume_extra_specs')
    def test__delete_replications(self,
                                  mock_get_volume_extra_specs,
                                  mock_close_connection,
                                  mock_open_connection,
                                  mock_init):
        """_delete_replications: disabled, empty, and populated cases."""
        backends = self.driver.backends
        vol = {'id': 'guid'}
        scvol = {'instanceId': '1'}
        mock_api = mock.MagicMock()
        mock_api.delete_replication = mock.MagicMock()
        mock_api.find_volume = mock.MagicMock(return_value=scvol)
        # Start replication disabled. Should fail immediately.
        mock_get_volume_extra_specs.return_value = {}
        self.driver._delete_replications(mock_api, vol)
        self.assertFalse(mock_api.delete_replication.called)
        # Replication enabled. No replications listed.
        mock_get_volume_extra_specs.return_value = {
            'replication_enabled': '<is> True'}
        vol = {'id': 'guid', 'replication_driver_data': ''}
        self.driver._delete_replications(mock_api, vol)
        self.assertFalse(mock_api.delete_replication.called)
        # Something to call.
        vol = {'id': 'guid', 'replication_driver_data': '12345,67890'}
        self.driver._delete_replications(mock_api, vol)
        # NOTE: destinations are parsed out of the comma-separated
        # replication_driver_data string and passed as ints.
        mock_api.delete_replication.assert_any_call(scvol, 12345)
        mock_api.delete_replication.assert_any_call(scvol, 67890)
        self.driver.backends = backends

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    def test_create_volume(self,
                           mock_find_sc,
                           mock_create_volume,
                           mock_close_connection,
                           mock_open_connection,
                           mock_init):
        """Plain create_volume forwards name/size with no profiles."""
        volume = {'id': self.volume_name, 'size': 1}
        self.driver.create_volume(volume)
        mock_create_volume.assert_called_once_with(self.volume_name,
                                                   1,
                                                   None,
                                                   None)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay_profile',
                       return_value='fake')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'update_cg_volumes')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    def test_create_volume_consistency_group(self,
                                             mock_find_sc,
                                             mock_create_volume,
                                             mock_update_cg_volumes,
                                             mock_find_replay_profile,
                                             mock_close_connection,
                                             mock_open_connection,
                                             mock_init):
        """create_volume with a CG id also updates the CG membership."""
        volume = {'id': self.volume_name, 'size': 1,
                  'consistencygroup_id': 'guid'}
        self.driver.create_volume(volume)
        mock_create_volume.assert_called_once_with(self.volume_name,
                                                   1,
                                                   None,
                                                   None)
        self.assertTrue(mock_find_replay_profile.called)
        self.assertTrue(mock_update_cg_volumes.called)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'storagetype:storageprofile': 'HighPriority'})
    def test_create_volume_storage_profile(self,
                                           mock_extra,
                                           mock_find_sc,
                                           mock_create_volume,
                                           mock_close_connection,
                                           mock_open_connection,
                                           mock_init):
        """Storage-profile extra spec is forwarded to create_volume."""
        volume = {'id': self.volume_name, 'size': 1, 'volume_type_id': 'abc'}
        self.driver.create_volume(volume)
        mock_create_volume.assert_called_once_with(self.volume_name,
                                                   1,
                                                   "HighPriority",
                                                   None)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(
        volume_types,
        'get_volume_type_extra_specs',
        return_value={'storagetype:replayprofiles': 'Daily'})
    def test_create_volume_replay_profiles(self,
                                           mock_extra,
                                           mock_find_sc,
                                           mock_create_volume,
                                           mock_close_connection,
                                           mock_open_connection,
                                           mock_init):
        """Replay-profiles extra spec is forwarded to create_volume."""
        volume = {'id': self.volume_name, 'size': 1, 'volume_type_id': 'abc'}
        self.driver.create_volume(volume)
        mock_create_volume.assert_called_once_with(self.volume_name,
                                                   1,
                                                   None,
                                                   'Daily')

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_create_replications',
                       return_value={'replication_status': 'enabled',
                                     'replication_driver_data': 'ssn'})
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    def test_create_volume_replication(self,
                                       mock_find_sc,
                                       mock_create_replications,
                                       mock_create_volume,
                                       mock_close_connection,
                                       mock_open_connection,
                                       mock_init):
        """create_volume returns the model update from _create_replications."""
        volume = {'id': self.volume_name, 'size': 1}
        ret = self.driver.create_volume(volume)
        self.assertEqual({'replication_status': 'enabled',
                          'replication_driver_data': 'ssn'}, ret)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'delete_volume')
    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_create_replications')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    def test_create_volume_replication_raises(self,
                                              mock_find_sc,
                                              mock_create_replications,
                                              mock_delete_volume,
                                              mock_create_volume,
                                              mock_close_connection,
                                              mock_open_connection,
                                              mock_init):
        """A replication failure cleans up the freshly created volume."""
        volume = {'id': self.volume_name, 'size': 1}
        mock_create_replications.side_effect = (
            exception.VolumeBackendAPIException(data='abc'))
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_volume,
                          volume)
        self.assertTrue(mock_delete_volume.called)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_volume',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'delete_volume')
    def test_create_volume_failure(self,
                                   mock_delete_volume,
                                   mock_find_sc,
                                   mock_create_volume,
                                   mock_close_connection,
                                   mock_open_connection,
                                   mock_init):
        """create_volume raises (and cleans up) when the API returns None."""
        volume = {'id': self.volume_name, 'size': 1}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_volume, volume)
        self.assertTrue(mock_delete_volume.called)

    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_delete_replications')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'delete_volume',
                       return_value=True)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    def test_delete_volume(self,
                           mock_find_sc,
                           mock_delete_volume,
                           mock_delete_replications,
                           mock_close_connection,
                           mock_open_connection,
                           mock_init):
        """delete_volume deletes the SC volume and its replications."""
        volume = {'id': self.volume_name, 'size': 1}
        self.driver.delete_volume(volume)
        mock_delete_volume.assert_called_once_with(self.volume_name)
        self.assertTrue(mock_delete_replications.called)

    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_delete_replications')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'delete_volume',
                       return_value=False)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    def test_delete_volume_failure(self,
                                   mock_find_sc,
                                   mock_delete_volume,
                                   mock_delete_replications,
                                   mock_close_connection,
                                   mock_open_connection,
                                   mock_init):
        """A False delete_volume result maps to VolumeIsBusy."""
        volume = {'id': self.volume_name, 'size': 1}
        self.assertRaises(exception.VolumeIsBusy,
                          self.driver.delete_volume,
                          volume)
        self.assertTrue(mock_delete_replications.called)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_server',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_server',
                       return_value=SCSERVER)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'map_volume',
                       return_value=MAPPINGS[0])
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_iscsi_properties',
                       return_value=ISCSI_PROPERTIES)
    def test_initialize_connection(self,
                                   mock_find_iscsi_props,
                                   mock_map_volume,
                                   mock_find_volume,
                                   mock_create_server,
                                   mock_find_server,
                                   mock_find_sc,
                                   mock_close_connection,
                                   mock_open_connection,
                                   mock_init):
        """Happy-path attach: server is created, volume mapped, props built."""
        volume = {'id': self.volume_name}
        connector = self.connector
        data = self.driver.initialize_connection(volume, connector)
        self.assertEqual('iscsi', data['driver_volume_type'])
        # verify find_volume has been called and that it has been called twice
        mock_find_volume.assert_any_call(self.volume_name)
        self.assertEqual(2, mock_find_volume.call_count)
        expected = {'data': self.ISCSI_PROPERTIES,
                    'driver_volume_type': 'iscsi'}
        self.assertEqual(expected, data, 'Unexpected return value')

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_server',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_server',
                       return_value=SCSERVER)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'map_volume',
                       return_value=MAPPINGS[0])
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_iscsi_properties',
                       return_value=ISCSI_PROPERTIES)
    def test_initialize_connection_multi_path(self,
                                              mock_find_iscsi_props,
                                              mock_map_volume,
                                              mock_find_volume,
                                              mock_create_server,
                                              mock_find_server,
                                              mock_find_sc,
                                              mock_close_connection,
                                              mock_open_connection,
                                              mock_init):
        """Attach with a multipath connector returns the same properties."""
        # Test case where connection is multipath
        volume = {'id': self.volume_name}
        connector = self.connector_multipath

        data = self.driver.initialize_connection(volume, connector)
        self.assertEqual('iscsi', data['driver_volume_type'])
        # verify find_volume has been called and that it has been called twice
        mock_find_volume.assert_any_call(self.volume_name)
        self.assertEqual(2, mock_find_volume.call_count)
        props = self.ISCSI_PROPERTIES
        expected = {'data': props,
                    'driver_volume_type': 'iscsi'}
        self.assertEqual(expected, data, 'Unexpected return value')

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_server',
                       return_value=SCSERVER)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'map_volume',
                       return_value=MAPPINGS)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_iscsi_properties',
                       return_value=None)
    def test_initialize_connection_no_iqn(self,
                                          mock_find_iscsi_properties,
                                          mock_map_volume,
                                          mock_find_volume,
                                          mock_find_server,
                                          mock_find_sc,
                                          mock_close_connection,
                                          mock_open_connection,
                                          mock_init):
        """find_iscsi_properties blowing up surfaces as a backend error."""
        volume = {'id': self.volume_name}
        connector = {}
        mock_find_iscsi_properties.side_effect = Exception('abc')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection,
                          volume,
                          connector)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_server',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_server',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'map_volume',
                       return_value=MAPPINGS)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_iscsi_properties',
                       return_value=None)
    def test_initialize_connection_no_server(self,
                                             mock_find_iscsi_properties,
                                             mock_map_volume,
                                             mock_find_volume,
                                             mock_create_server,
                                             mock_find_server,
                                             mock_find_sc,
                                             mock_close_connection,
                                             mock_open_connection,
                                             mock_init):
        """Failure to find or create the server raises a backend error."""
        volume = {'id': self.volume_name}
        connector = {}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection,
                          volume,
                          connector)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_server',
                       return_value=SCSERVER)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'map_volume',
                       return_value=MAPPINGS)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_iscsi_properties',
                       return_value=None)
    def test_initialize_connection_vol_not_found(self,
                                                 mock_find_iscsi_properties,
                                                 mock_map_volume,
                                                 mock_find_volume,
                                                 mock_find_server,
                                                 mock_find_sc,
                                                 mock_close_connection,
                                                 mock_open_connection,
                                                 mock_init):
        """A missing volume raises a backend error on attach."""
        volume = {'name': self.volume_name}
        connector = {}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection,
                          volume,
                          connector)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_server',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_server',
                       return_value=SCSERVER)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'map_volume',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_iscsi_properties',
                       return_value=ISCSI_PROPERTIES)
    def test_initialize_connection_map_vol_fail(self,
                                                mock_find_iscsi_props,
                                                mock_map_volume,
                                                mock_find_volume,
                                                mock_create_server,
                                                mock_find_server,
                                                mock_find_sc,
                                                mock_close_connection,
                                                mock_open_connection,
                                                mock_init):
        """map_volume returning None (no mappings) raises a backend error."""
        # Test case where map_volume returns None (no mappings)
        volume = {'id': self.volume_name}
        connector = self.connector
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection,
                          volume,
                          connector)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_server',
                       return_value=SCSERVER)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'unmap_volume',
                       return_value=True)
    def test_terminate_connection(self,
                                  mock_unmap_volume,
                                  mock_find_volume,
                                  mock_find_server,
                                  mock_find_sc,
                                  mock_close_connection,
                                  mock_open_connection,
                                  mock_init):
        """Detach unmaps the volume from the server and returns None."""
        volume = {'id': self.volume_name}
        connector = self.connector
        res = self.driver.terminate_connection(volume, connector)
        mock_unmap_volume.assert_called_once_with(self.VOLUME, self.SCSERVER)
        self.assertIsNone(res, 'None expected')

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_server',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'unmap_volume',
                       return_value=True)
    def test_terminate_connection_no_server(self,
                                            mock_unmap_volume,
                                            mock_find_volume,
                                            mock_find_server,
                                            mock_find_sc,
                                            mock_close_connection,
                                            mock_open_connection,
                                            mock_init):
        """Detach with no matching server raises a backend error."""
        volume = {'name': self.volume_name}
        connector = {'initiator': ''}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.terminate_connection,
                          volume,
                          connector)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_server',
                       return_value=SCSERVER)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'unmap_volume',
                       return_value=True)
    def test_terminate_connection_no_volume(self,
                                            mock_unmap_volume,
                                            mock_find_volume,
                                            mock_find_server,
                                            mock_find_sc,
                                            mock_close_connection,
                                            mock_open_connection,
                                            mock_init):
        """Detach with no matching volume raises a backend error."""
        volume = {'name': self.volume_name}
        connector = {'initiator': ''}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.terminate_connection,
                          volume,
                          connector)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_server',
                       return_value=SCSERVER)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'unmap_volume',
                       return_value=False)
    def test_terminate_connection_failure(self,
                                          mock_unmap_volume,
                                          mock_find_volume,
                                          mock_find_server,
                                          mock_find_sc,
                                          mock_close_connection,
                                          mock_open_connection,
                                          mock_init):
        """A False unmap_volume result raises a backend error."""
        volume = {'name': self.volume_name}
        connector = {'initiator': ''}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.terminate_connection,
                          volume,
                          connector)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_replay',
                       return_value='fake')
    def test_create_snapshot(self,
                             mock_create_replay,
                             mock_find_volume,
                             mock_find_sc,
                             mock_close_connection,
                             mock_open_connection,
                             mock_init):
        """A successful replay marks the snapshot available."""
        snapshot = {'volume_id': self.volume_name,
                    'id': self.volume_name}
        self.driver.create_snapshot(snapshot)
        self.assertEqual('available', snapshot['status'])

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_replay',
                       return_value=None)
    def test_create_snapshot_no_volume(self,
                                       mock_create_replay,
                                       mock_find_volume,
                                       mock_find_sc,
                                       mock_close_connection,
                                       mock_open_connection,
                                       mock_init):
        """Snapshotting a missing volume raises a backend error."""
        snapshot = {'volume_id': self.volume_name,
                    'id': self.volume_name}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_snapshot,
                          snapshot)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_replay',
                       return_value=None)
    def test_create_snapshot_failure(self,
                                     mock_create_replay,
                                     mock_find_volume,
                                     mock_find_sc,
                                     mock_close_connection,
                                     mock_open_connection,
                                     mock_init):
        """A failed create_replay raises a backend error."""
        snapshot = {'volume_id': self.volume_name,
                    'id': self.volume_name}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_snapshot,
                          snapshot)

    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_create_replications')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay_profile')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay',
                       return_value='fake')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_view_volume',
                       return_value=VOLUME)
    def test_create_volume_from_snapshot(self,
                                         mock_create_view_volume,
                                         mock_find_replay,
                                         mock_find_volume,
                                         mock_find_sc,
                                         mock_find_replay_profile,
                                         mock_create_replications,
                                         mock_close_connection,
                                         mock_open_connection,
                                         mock_init):
        """View volume is created from the replay; no CG profile lookup."""
        model_update = {'something': 'something'}
        mock_create_replications.return_value = model_update
        volume = {'id': 'fake'}
        snapshot = {'id': 'fake', 'volume_id': 'fake'}
        res = self.driver.create_volume_from_snapshot(volume, snapshot)
        mock_create_view_volume.assert_called_once_with('fake',
                                                        'fake',
                                                        None)
        self.assertTrue(mock_find_replay.called)
        self.assertTrue(mock_find_volume.called)
        self.assertFalse(mock_find_replay_profile.called)
        # This just makes sure that we created
        self.assertTrue(mock_create_replications.called)
        self.assertEqual(model_update, res)

    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_create_replications')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay_profile',
                       return_value='fake')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'update_cg_volumes')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay',
                       return_value='fake')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_view_volume',
                       return_value=VOLUME)
    def test_create_volume_from_snapshot_cg(self,
                                            mock_create_view_volume,
                                            mock_find_replay,
                                            mock_find_volume,
                                            mock_find_sc,
                                            mock_update_cg_volumes,
                                            mock_find_replay_profile,
                                            mock_create_replications,
                                            mock_close_connection,
                                            mock_open_connection,
                                            mock_init):
        """With a CG id the new volume is also added to the CG profile."""
        model_update = {'something': 'something'}
        mock_create_replications.return_value = model_update
        volume = {'id': 'fake', 'consistencygroup_id': 'guid'}
        snapshot = {'id': 'fake', 'volume_id': 'fake'}
        res = self.driver.create_volume_from_snapshot(volume, snapshot)
        mock_create_view_volume.assert_called_once_with('fake',
                                                        'fake',
                                                        None)
        self.assertTrue(mock_find_replay.called)
        self.assertTrue(mock_find_volume.called)
        self.assertTrue(mock_find_replay_profile.called)
        self.assertTrue(mock_update_cg_volumes.called)
        # This just makes sure that we created
        self.assertTrue(mock_create_replications.called)
        self.assertEqual(model_update, res)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay',
                       return_value='fake')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay_profile')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_view_volume',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'delete_volume')
    def test_create_volume_from_snapshot_failed(self,
                                                mock_delete_volume,
                                                mock_create_view_volume,
                                                mock_find_replay_profile,
                                                mock_find_replay,
                                                mock_find_volume,
                                                mock_find_sc,
                                                mock_close_connection,
                                                mock_open_connection,
                                                mock_init):
        """A failed view-volume create raises and deletes the new volume."""
        volume = {'id': 'fake'}
        snapshot = {'id': 'fake', 'volume_id': 'fake'}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_volume_from_snapshot,
                          volume, snapshot)
        self.assertTrue(mock_find_replay.called)
        self.assertTrue(mock_find_volume.called)
        self.assertFalse(mock_find_replay_profile.called)
        self.assertTrue(mock_delete_volume.called)

    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_create_replications')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay',
                       return_value='fake')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_view_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'delete_volume')
    def test_create_volume_from_snapshot_failed_replication(
            self,
            mock_delete_volume,
            mock_create_view_volume,
            mock_find_replay,
            mock_find_volume,
            mock_find_sc,
            mock_create_replications,
            mock_close_connection,
            mock_open_connection,
            mock_init):
        """A replication failure after view-create deletes the new volume."""
        mock_create_replications.side_effect = (
            exception.VolumeBackendAPIException(data='abc'))
        volume = {'id': 'fake'}
        snapshot = {'id': 'fake', 'volume_id': 'fake'}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_volume_from_snapshot,
                          volume, snapshot)
        self.assertTrue(mock_delete_volume.called)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_view_volume',
                       return_value=VOLUME)
    def test_create_volume_from_snapshot_no_replay(self,
                                                   mock_create_view_volume,
                                                   mock_find_replay,
                                                   mock_find_volume,
                                                   mock_find_sc,
                                                   mock_close_connection,
                                                   mock_open_connection,
                                                   mock_init):
        """A missing replay raises before any view volume is created."""
        volume = {'id': 'fake'}
        snapshot = {'id': 'fake', 'volume_id': 'fake'}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_volume_from_snapshot,
                          volume, snapshot)
        self.assertTrue(mock_find_volume.called)
        self.assertTrue(mock_find_replay.called)
        self.assertFalse(mock_create_view_volume.called)

    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_create_replications',
                       return_value={})
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_cloned_volume',
                       return_value=VOLUME)
    def test_create_cloned_volume(self,
                                  mock_create_cloned_volume,
                                  mock_find_volume,
                                  mock_find_sc,
                                  mock_create_replications,
                                  mock_close_connection,
                                  mock_open_connection,
                                  mock_init):
        """Happy-path clone: API call made, replication model update returned."""
        volume = {'id': self.volume_name + '_clone'}
        src_vref = {'id': self.volume_name}
        ret = self.driver.create_cloned_volume(volume, src_vref)
        mock_create_cloned_volume.assert_called_once_with(
            self.volume_name + '_clone', self.VOLUME, None)
        self.assertTrue(mock_find_volume.called)
        self.assertEqual({}, ret)

    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_create_replications',
                       return_value={})
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_cloned_volume',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'delete_volume')
    def test_create_cloned_volume_failed(self,
                                         mock_delete_volume,
                                         mock_create_cloned_volume,
                                         mock_find_volume,
                                         mock_find_sc,
                                         mock_create_replications,
                                         mock_close_connection,
                                         mock_open_connection,
                                         mock_init):
        """API returns no clone: driver raises and deletes the partial volume."""
        volume = {'id': self.volume_name + '_clone'}
        src_vref = {'id': self.volume_name}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_cloned_volume,
                          volume, src_vref)
        self.assertTrue(mock_delete_volume.called)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'delete_volume')
    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_create_replications')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_cloned_volume',
                       return_value=VOLUME)
    def test_create_cloned_volume_replication_fail(self,
                                                   mock_create_cloned_volume,
                                                   mock_find_volume,
                                                   mock_find_sc,
                                                   mock_create_replications,
                                                   mock_delete_volume,
                                                   mock_close_connection,
                                                   mock_open_connection,
                                                   mock_init):
        """Replication setup fails after clone: raise and delete the clone."""
        mock_create_replications.side_effect = (
            exception.VolumeBackendAPIException(data='abc'))
        volume = {'id': self.volume_name + '_clone'}
        src_vref = {'id': self.volume_name}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_cloned_volume,
                          volume, src_vref)
        self.assertTrue(mock_delete_volume.called)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay_profile',
                       return_value='fake')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'update_cg_volumes')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_cloned_volume',
                       return_value=VOLUME)
    def test_create_cloned_volume_consistency_group(self,
                                                    mock_create_cloned_volume,
                                                    mock_find_volume,
                                                    mock_find_sc,
                                                    mock_update_cg_volumes,
                                                    mock_find_replay_profile,
                                                    mock_close_connection,
                                                    mock_open_connection,
                                                    mock_init):
        """Clone into a consistency group also updates the CG's volume list."""
        volume = {'id': self.volume_name + '_clone',
                  'consistencygroup_id': 'guid'}
        src_vref = {'id': self.volume_name}
        self.driver.create_cloned_volume(volume, src_vref)
        mock_create_cloned_volume.assert_called_once_with(
            self.volume_name + '_clone', self.VOLUME, None)
        self.assertTrue(mock_find_volume.called)
        self.assertTrue(mock_find_replay_profile.called)
        self.assertTrue(mock_update_cg_volumes.called)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_cloned_volume',
                       return_value=VOLUME)
    def test_create_cloned_volume_no_volume(self,
                                            mock_create_cloned_volume,
                                            mock_find_volume,
                                            mock_find_sc,
                                            mock_close_connection,
                                            mock_open_connection,
                                            mock_init):
        """Source volume not found: raise before attempting the clone."""
        volume = {'id': self.volume_name + '_clone'}
        src_vref = {'id': self.volume_name}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_cloned_volume,
                          volume, src_vref)
        self.assertTrue(mock_find_volume.called)
        self.assertFalse(mock_create_cloned_volume.called)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'delete_replay',
                       return_value=True)
    def test_delete_snapshot(self,
                             mock_delete_replay,
                             mock_find_volume,
                             mock_find_sc,
                             mock_close_connection,
                             mock_open_connection,
                             mock_init):
        """Snapshot delete maps to delete_replay on the backing volume."""
        snapshot = {'volume_id': self.volume_name,
                    'id': self.volume_name}
        self.driver.delete_snapshot(snapshot)
        mock_delete_replay.assert_called_once_with(
            self.VOLUME, self.volume_name)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'delete_replay',
                       return_value=True)
    def test_delete_snapshot_no_volume(self,
                                       mock_delete_replay,
                                       mock_find_volume,
                                       mock_find_sc,
                                       mock_close_connection,
                                       mock_open_connection,
                                       mock_init):
        """No backing volume for the snapshot: delete must raise."""
        snapshot = {'volume_id': self.volume_name,
                    'id': self.volume_name}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.delete_snapshot,
                          snapshot)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    def test_ensure_export(self,
                           mock_find_volume,
                           mock_find_sc,
                           mock_close_connection,
                           mock_open_connection,
                           mock_init):
        """ensure_export just verifies the volume exists on the backend."""
        context = {}
        volume = {'id': self.VOLUME.get(u'name')}
        self.driver.ensure_export(context, volume)
        mock_find_volume.assert_called_once_with(
            self.VOLUME.get(u'name'))

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=None)
    def test_ensure_export_failed(self,
                                  mock_find_volume,
                                  mock_find_sc,
                                  mock_close_connection,
                                  mock_open_connection,
                                  mock_init):
        """Backend lookup fails: ensure_export raises."""
        context = {}
        volume = {'id': self.VOLUME.get(u'name')}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.ensure_export,
                          context, volume)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=None)
    def test_ensure_export_no_volume(self,
                                     mock_find_volume,
                                     mock_find_sc,
                                     mock_close_connection,
                                     mock_open_connection,
                                     mock_init):
        """Volume missing on the backend: ensure_export raises."""
        context = {}
        volume = {'id': self.VOLUME.get(u'name')}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.ensure_export,
                          context, volume)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'expand_volume',
                       return_value=VOLUME)
    def test_extend_volume(self,
                           mock_expand_volume,
                           mock_find_volume,
                           mock_find_sc,
                           mock_close_connection,
                           mock_open_connection,
                           mock_init):
        """Extend maps straight to expand_volume with the new size."""
        volume = {'name': self.volume_name, 'size': 1}
        new_size = 2
        self.driver.extend_volume(volume, new_size)
        mock_expand_volume.assert_called_once_with(self.VOLUME, new_size)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'expand_volume',
                       return_value=None)
    def test_extend_volume_no_volume(self,
                                     mock_expand_volume,
                                     mock_find_volume,
                                     mock_find_sc,
                                     mock_close_connection,
                                     mock_open_connection,
                                     mock_init):
        """Volume not found: extend raises instead of expanding."""
        volume = {'name': self.volume_name, 'size': 1}
        new_size = 2
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.extend_volume,
                          volume, new_size)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=64702)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'get_storage_usage',
                       return_value={'availableSpace': 100, 'freeSpace': 50})
    def test_update_volume_stats_with_refresh(self,
                                              mock_get_storage_usage,
                                              mock_find_sc,
                                              mock_close_connection,
                                              mock_open_connection,
                                              mock_init):
        """refresh=True must query the backend for usage numbers."""
        stats = self.driver.get_volume_stats(True)
        self.assertEqual('iSCSI', stats['storage_protocol'])
        self.assertTrue(mock_get_storage_usage.called)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=64702)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'get_storage_usage',
                       return_value={'availableSpace': 100, 'freeSpace': 50})
    def test_update_volume_stats_with_refresh_and_repl(
            self,
            mock_get_storage_usage,
            mock_find_sc,
            mock_close_connection,
            mock_open_connection,
            mock_init):
        """With replication targets configured, stats report repl info."""
        # Save driver state so it can be restored after the test.
        backends = self.driver.backends
        # NOTE(review): 'repliation_enabled' is a typo for
        # 'replication_enabled' -- local-only, harmless, rename candidate.
        repliation_enabled = self.driver.replication_enabled
        self.driver.backends = [{'a': 'a'}, {'b': 'b'}, {'c': 'c'}]
        self.driver.replication_enabled = True
        stats = self.driver.get_volume_stats(True)
        self.assertEqual(3, stats['replication_count'])
        self.assertEqual(['async', 'sync'], stats['replication_type'])
        self.assertTrue(stats['replication_enabled'])
        self.assertTrue(mock_get_storage_usage.called)
        # Restore the driver state we clobbered above.
        self.driver.backends = backends
        self.driver.replication_enabled = repliation_enabled

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=64702)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'get_storage_usage',
                       return_value={'availableSpace': 100, 'freeSpace': 50})
    def test_get_volume_stats_no_refresh(self,
                                         mock_get_storage_usage,
                                         mock_find_sc,
                                         mock_close_connection,
                                         mock_open_connection,
                                         mock_init):
        """refresh=False serves cached stats without hitting the backend."""
        stats = self.driver.get_volume_stats(False)
        self.assertEqual('iSCSI', stats['storage_protocol'])
        self.assertFalse(mock_get_storage_usage.called)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'rename_volume',
                       return_value=True)
    def test_update_migrated_volume(self,
                                    mock_rename_volume,
                                    mock_find_volume,
                                    mock_find_sc,
                                    mock_close_connection,
                                    mock_open_connection,
                                    mock_init):
        """Successful rename clears _name_id in the model update."""
        volume = {'id': 111}
        backend_volume = {'id': 112}
        model_update = {'_name_id': None}
        rt = self.driver.update_migrated_volume(None, volume, backend_volume,
                                                'available')
        mock_rename_volume.assert_called_once_with(self.VOLUME, volume['id'])
        self.assertEqual(model_update, rt)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'rename_volume',
                       return_value=False)
    def test_update_migrated_volume_rename_fail(self,
                                                mock_rename_volume,
                                                mock_find_volume,
                                                mock_find_sc,
                                                mock_close_connection,
                                                mock_open_connection,
                                                mock_init):
        """Rename fails: the backend volume's _name_id is kept."""
        volume = {'id': 111}
        backend_volume = {'id': 112, '_name_id': 113}
        rt = self.driver.update_migrated_volume(None, volume, backend_volume,
                                                'available')
        mock_rename_volume.assert_called_once_with(self.VOLUME, volume['id'])
        self.assertEqual({'_name_id': 113}, rt)

    def test_update_migrated_volume_no_volume_id(self,
                                                 mock_close_connection,
                                                 mock_open_connection,
                                                 mock_init):
        """No destination volume id: nothing to rename, keep _name_id."""
        volume = {'id': None}
        backend_volume = {'id': 112, '_name_id': 113}
        rt = self.driver.update_migrated_volume(None, volume, backend_volume,
                                                'available')
        self.assertEqual({'_name_id': 113}, rt)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=None)
    def test_update_migrated_volume_no_backend_id(self,
                                                  mock_find_volume,
                                                  mock_find_sc,
                                                  mock_close_connection,
                                                  mock_open_connection,
                                                  mock_init):
        """Backend volume has no id: lookup with None, keep model as-is."""
        volume = {'id': 111}
        backend_volume = {'id': None, '_name_id': None}
        rt = self.driver.update_migrated_volume(None, volume, backend_volume,
                                                'available')
        mock_find_sc.assert_called_once_with()
        mock_find_volume.assert_called_once_with(None)
        self.assertEqual({'_name_id': None}, rt)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_replay_profile',
                       return_value=SCRPLAYPROFILE)
    def test_create_consistencygroup(self,
                                     mock_create_replay_profile,
                                     mock_close_connection,
                                     mock_open_connection,
                                     mock_init):
        """CG create maps to create_replay_profile keyed by the group id."""
        context = {}
        group = {'id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3'}
        self.driver.create_consistencygroup(context, group)
        mock_create_replay_profile.assert_called_once_with(group['id'])

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_replay_profile',
                       return_value=None)
    def test_create_consistencygroup_fail(self,
                                          mock_create_replay_profile,
                                          mock_close_connection,
                                          mock_open_connection,
                                          mock_init):
        """Profile creation returns nothing: CG create raises."""
        context = {}
        group = {'id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3'}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_consistencygroup,
                          context, group)
        mock_create_replay_profile.assert_called_once_with(group['id'])

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'delete_replay_profile')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay_profile',
                       return_value=SCRPLAYPROFILE)
    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       'delete_volume')
    def test_delete_consistencygroup(self,
                                     mock_delete_volume,
                                     mock_find_replay_profile,
                                     mock_delete_replay_profile,
                                     mock_close_connection,
                                     mock_open_connection,
                                     mock_init):
        """CG delete removes the replay profile and each member volume."""
        self.driver.db = mock.Mock()
        mock_volume = mock.MagicMock()
        expected_volumes = [mock_volume]
        self.driver.db.volume_get_all_by_group.return_value = expected_volumes
        context = {}
        group = {'id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3',
                 'status': fields.ConsistencyGroupStatus.DELETED}
        model_update, volumes = self.driver.delete_consistencygroup(context,
                                                                    group,
                                                                    [])
        mock_find_replay_profile.assert_called_once_with(group['id'])
                                                      remove_volumes)
        mock_find_replay_profile.assert_called_once_with(group['id'])
        self.assertIsNone(rt1)
        self.assertIsNone(rt2)
        self.assertIsNone(rt3)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay_profile',
                       return_value=None)
    def test_update_consistencygroup_not_found(self,
                                               mock_find_replay_profile,
                                               mock_close_connection,
                                               mock_open_connection,
                                               mock_init):
        """Replay profile missing: CG update raises."""
        context = {}
        group = {'id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3'}
        add_volumes = [{'id': '101'}]
        remove_volumes = [{'id': '102'}]
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.update_consistencygroup,
                          context,
                          group,
                          add_volumes,
                          remove_volumes)
        mock_find_replay_profile.assert_called_once_with(group['id'])

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'update_cg_volumes',
                       return_value=False)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay_profile',
                       return_value=SCRPLAYPROFILE)
    def test_update_consistencygroup_error(self,
                                           mock_find_replay_profile,
                                           mock_update_cg_volumes,
                                           mock_close_connection,
                                           mock_open_connection,
                                           mock_init):
        """update_cg_volumes reports failure: CG update raises."""
        context = {}
        group = {'id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3'}
        add_volumes = [{'id': '101'}]
        remove_volumes = [{'id': '102'}]
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.update_consistencygroup,
                          context,
                          group,
                          add_volumes,
                          remove_volumes)
        mock_find_replay_profile.assert_called_once_with(group['id'])
        mock_update_cg_volumes.assert_called_once_with(self.SCRPLAYPROFILE,
                                                       add_volumes,
                                                       remove_volumes)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'snap_cg_replay',
                       return_value={'instanceId': '100'})
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay_profile',
                       return_value=SCRPLAYPROFILE)
    @mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot')
    def test_create_cgsnapshot(self,
                               mock_get_all_for_cgsnapshot,
                               mock_find_replay_profile,
                               mock_snap_cg_replay,
                               mock_close_connection,
                               mock_open_connection,
                               mock_init):
        """CG snapshot maps to snap_cg_replay; snapshots become available."""
        mock_snapshot = mock.MagicMock()
        expected_snapshots = [mock_snapshot]
        mock_get_all_for_cgsnapshot.return_value = (expected_snapshots)
        context = {}
        cggrp = {'consistencygroup_id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3',
                 'id': '100'}
        model_update, snapshots = self.driver.create_cgsnapshot(context,
                                                                cggrp, [])
        mock_find_replay_profile.assert_called_once_with(
            cggrp['consistencygroup_id'])
        mock_snap_cg_replay.assert_called_once_with(self.SCRPLAYPROFILE,
                                                    cggrp['id'], 0)
        self.assertEqual('available', model_update['status'])
        self.assertEqual(expected_snapshots, snapshots)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay_profile',
                       return_value=None)
    def test_create_cgsnapshot_profile_not_found(self,
                                                 mock_find_replay_profile,
                                                 mock_close_connection,
                                                 mock_open_connection,
                                                 mock_init):
        """Replay profile missing: cgsnapshot create raises."""
        context = {}
        cggrp = {'consistencygroup_id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3',
                 'id': '100'}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_cgsnapshot,
                          context,
                          cggrp,
                          [])
        mock_find_replay_profile.assert_called_once_with(
            cggrp['consistencygroup_id'])

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'snap_cg_replay',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay_profile',
                       return_value=SCRPLAYPROFILE)
    def test_create_cgsnapshot_fail(self,
                                    mock_find_replay_profile,
                                    mock_snap_cg_replay,
                                    mock_close_connection,
                                    mock_open_connection,
                                    mock_init):
        """snap_cg_replay returns nothing: cgsnapshot create raises."""
        context = {}
        cggrp = {'consistencygroup_id': 'fc8f2fec-fab2-4e34-9148-c094c913b9a3',
                 'id': '100'}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_cgsnapshot,
                          context, cggrp, [])
        mock_find_replay_profile.assert_called_once_with(
            cggrp['consistencygroup_id'])
        mock_snap_cg_replay.assert_called_once_with(self.SCRPLAYPROFILE,
                                                    cggrp['id'], 0)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'delete_cg_replay',
                       return_value=True)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay_profile',
                       return_value=SCRPLAYPROFILE)
    @mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot')
    def test_delete_cgsnapshot(self,
                               mock_get_all_for_cgsnapshot,
                               mock_find_replay_profile,
                               mock_delete_cg_replay,
                               mock_close_connection,
                               mock_open_connection,
                               mock_init):
        """CG snapshot delete maps to delete_cg_replay; status propagated."""
        mock_snapshot = mock.MagicMock()
        expected_snapshots = [mock_snapshot]
        mock_get_all_for_cgsnapshot.return_value = (expected_snapshots)
        context = {}
        cgsnap = {'consistencygroup_id':
                  'fc8f2fec-fab2-4e34-9148-c094c913b9a3',
                  'id': '100',
                  'status': 'deleted'}
        model_update, snapshots = self.driver.delete_cgsnapshot(context,
                                                                cgsnap, [])
        mock_find_replay_profile.assert_called_once_with(
            cgsnap['consistencygroup_id'])
        mock_delete_cg_replay.assert_called_once_with(self.SCRPLAYPROFILE,
                                                      cgsnap['id'])
        self.assertEqual({'status': cgsnap['status']}, model_update)
        self.assertEqual(expected_snapshots, snapshots)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'delete_cg_replay')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay_profile',
                       return_value=None)
    @mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot')
    def test_delete_cgsnapshot_profile_not_found(self,
                                                 mock_get_all_for_cgsnapshot,
                                                 mock_find_replay_profile,
                                                 mock_delete_cg_replay,
                                                 mock_close_connection,
                                                 mock_open_connection,
                                                 mock_init):
        """Replay profile already gone: no replay delete, status propagated."""
        mock_snapshot = mock.MagicMock()
        expected_snapshots = [mock_snapshot]
        mock_get_all_for_cgsnapshot.return_value = (expected_snapshots)
        context = {}
        cgsnap = {'consistencygroup_id':
                  'fc8f2fec-fab2-4e34-9148-c094c913b9a3',
                  'id': '100',
                  'status': 'deleted'}
        model_update, snapshots = self.driver.delete_cgsnapshot(context,
                                                                cgsnap, [])
        mock_find_replay_profile.assert_called_once_with(
            cgsnap['consistencygroup_id'])
        self.assertFalse(mock_delete_cg_replay.called)
        self.assertEqual({'status': cgsnap['status']}, model_update)
        self.assertEqual(expected_snapshots, snapshots)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'delete_cg_replay',
                       return_value=False)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_replay_profile',
                       return_value=SCRPLAYPROFILE)
    def test_delete_cgsnapshot_profile_failed_delete(self,
                                                     mock_find_replay_profile,
                                                     mock_delete_cg_replay,
                                                     mock_close_connection,
                                                     mock_open_connection,
                                                     mock_init):
        """delete_cg_replay fails: cgsnapshot delete raises."""
        context = {}
        cgsnap = {'consistencygroup_id':
                  'fc8f2fec-fab2-4e34-9148-c094c913b9a3',
                  'id': '100',
                  'status': 'available'}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.delete_cgsnapshot,
                          context,
                          cgsnap,
                          [])
        mock_find_replay_profile.assert_called_once_with(
            cgsnap['consistencygroup_id'])
        mock_delete_cg_replay.assert_called_once_with(self.SCRPLAYPROFILE,
                                                      cgsnap['id'])

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value={'id': 'guid'})
    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_create_replications',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'manage_existing')
    def test_manage_existing(self,
                             mock_manage_existing,
                             mock_create_replications,
                             mock_find_volume,
                             mock_close_connection,
                             mock_open_connection,
                             mock_init):
        """Manage by source-name is passed straight to the API layer."""
        # Very little to do in this one.  The call is sent
        # straight down.
        volume = {'id': 'guid'}
        existing_ref = {'source-name': 'imavolumename'}
        self.driver.manage_existing(volume, existing_ref)
        mock_manage_existing.assert_called_once_with(volume['id'],
                                                     existing_ref)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value={'id': 'guid'})
    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_create_replications',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'manage_existing')
    def test_manage_existing_id(self,
                                mock_manage_existing,
                                mock_create_replications,
                                mock_find_volume,
                                mock_close_connection,
                                mock_open_connection,
                                mock_init):
        """Manage by source-id is passed straight to the API layer."""
        # Very little to do in this one.  The call is sent
        # straight down.
        # NOTE(review): body of test_manage_existing_id; its decorators and
        # ``def`` line immediately precede this point.
        volume = {'id': 'guid'}
        existing_ref = {'source-id': 'imadeviceid'}
        self.driver.manage_existing(volume, existing_ref)
        mock_manage_existing.assert_called_once_with(volume['id'],
                                                     existing_ref)

    def test_manage_existing_bad_ref(self,
                                     mock_close_connection,
                                     mock_open_connection,
                                     mock_init):
        """A ref with neither source-name nor source-id is rejected."""
        volume = {'id': 'guid'}
        existing_ref = {'banana-name': 'imavolumename'}
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing,
                          volume, existing_ref)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'get_unmanaged_volume_size',
                       return_value=4)
    def test_manage_existing_get_size(self,
                                      mock_get_unmanaged_volume_size,
                                      mock_close_connection,
                                      mock_open_connection,
                                      mock_init):
        """Size lookup by source-name delegates to the API layer."""
        # Almost nothing to test here.  Just that we call our function.
        volume = {'id': 'guid'}
        existing_ref = {'source-name': 'imavolumename'}
        res = self.driver.manage_existing_get_size(volume, existing_ref)
        mock_get_unmanaged_volume_size.assert_called_once_with(existing_ref)
        # The above is 4GB and change.
        self.assertEqual(4, res)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'get_unmanaged_volume_size',
                       return_value=4)
    def test_manage_existing_get_size_id(self,
                                         mock_get_unmanaged_volume_size,
                                         mock_close_connection,
                                         mock_open_connection,
                                         mock_init):
        """Size lookup by source-id delegates to the API layer."""
        # Almost nothing to test here.  Just that we call our function.
        volume = {'id': 'guid'}
        existing_ref = {'source-id': 'imadeviceid'}
        res = self.driver.manage_existing_get_size(volume, existing_ref)
        mock_get_unmanaged_volume_size.assert_called_once_with(existing_ref)
        # The above is 4GB and change.
        self.assertEqual(4, res)

    def test_manage_existing_get_size_bad_ref(self,
                                              mock_close_connection,
                                              mock_open_connection,
                                              mock_init):
        """A malformed ref is rejected before any size lookup."""
        volume = {'id': 'guid'}
        existing_ref = {'banana-name': 'imavolumename'}
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size,
                          volume, existing_ref)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'update_storage_profile')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'update_replay_profiles')
    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_create_replications')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'update_replicate_active_replay')
    def test_retype_not_our_extra_specs(self,
                                        mock_update_replicate_active_replay,
                                        mock_create_replications,
                                        mock_update_replay_profile,
                                        mock_update_storage_profile,
                                        mock_find_volume,
                                        mock_close_connection,
                                        mock_open_connection,
                                        mock_init):
        """No recognized extra specs: retype succeeds and touches nothing."""
        res = self.driver.retype(
            None, {'id': 'guid'}, None, {'extra_specs': None}, None)
        self.assertTrue(res)
        self.assertFalse(mock_update_replicate_active_replay.called)
        self.assertFalse(mock_create_replications.called)
        self.assertFalse(mock_update_replay_profile.called)
        self.assertFalse(mock_update_storage_profile.called)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'update_replay_profiles')
    def test_retype_replay_profiles(self,
                                    mock_update_replay_profiles,
                                    mock_find_volume,
                                    mock_close_connection,
                                    mock_open_connection,
                                    mock_init):
        """Replay-profile retype: success then failure paths."""
        mock_update_replay_profiles.side_effect = [True, False]
        # Normal successful run.
        res = self.driver.retype(
            None, {'id': 'guid'}, None,
            {'extra_specs': {'storagetype:replayprofiles': ['A', 'B']}},
            None)
        mock_update_replay_profiles.assert_called_once_with(self.VOLUME, 'B')
        self.assertTrue(res)
        # Run fails.  Make sure this returns False.
        res = self.driver.retype(
            None, {'id': 'guid'}, None,
            {'extra_specs': {'storagetype:replayprofiles': ['B', 'A']}},
            None)
        self.assertFalse(res)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_create_replications',
                       return_value={'replication_status': 'enabled',
                                     'replication_driver_data': '54321'})
    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_delete_replications')
    def test_retype_create_replications(self,
                                        mock_delete_replications,
                                        mock_create_replications,
                                        mock_find_volume,
                                        mock_close_connection,
                                        mock_open_connection,
                                        mock_init):
        """Toggling replication_enabled creates or deletes replications."""
        res = self.driver.retype(
            None, {'id': 'guid'}, None,
            {'extra_specs': {'replication_enabled': [False, True]}},
            None)
        self.assertTrue(mock_create_replications.called)
        self.assertFalse(mock_delete_replications.called)
        self.assertEqual({'replication_status': 'enabled',
                          'replication_driver_data': '54321'}, res)
        res = self.driver.retype(
            None, {'id': 'guid'}, None,
            {'extra_specs': {'replication_enabled': [True, False]}},
            None)
        self.assertTrue(mock_delete_replications.called)
        self.assertEqual({'replication_status': 'disabled',
                          'replication_driver_data': ''}, res)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'update_replicate_active_replay')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    def test_retype_active_replay(self,
                                  mock_find_volume,
                                  mock_update_replicate_active_replay,
                                  mock_close_connection,
                                  mock_open_connection,
                                  mock_init):
        """Active-replay retype: on, off, no-change, then a failure."""
        # Success, Success, Not called and fail.
        # NOTE(review): body of test_retype_active_replay; its decorators
        # and ``def`` line immediately precede this point.
        mock_update_replicate_active_replay.side_effect = [True, True, False]
        # Turning active replay on succeeds.
        res = self.driver.retype(
            None, {'id': 'guid'}, None,
            {'extra_specs': {'replication:activereplay': ['', '<is> True']}},
            None)
        self.assertTrue(res)
        # Turning it off succeeds.
        res = self.driver.retype(
            None, {'id': 'guid'}, None,
            {'extra_specs': {'replication:activereplay': ['<is> True', '']}},
            None)
        self.assertTrue(res)
        # No change: the API should not even be consulted.
        res = self.driver.retype(
            None, {'id': 'guid'}, None,
            {'extra_specs': {'replication:activereplay': ['', '']}},
            None)
        self.assertTrue(res)
        # Backend reports failure.
        res = self.driver.retype(
            None, {'id': 'guid'}, None,
            {'extra_specs': {'replication:activereplay': ['', '<is> True']}},
            None)
        self.assertFalse(res)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    def test_retype_same(self,
                         mock_find_volume,
                         mock_close_connection,
                         mock_open_connection,
                         mock_init):
        """Identical old/new storage profile: retype is a no-op success."""
        res = self.driver.retype(
            None, {'id': 'guid'}, None,
            {'extra_specs': {'storagetype:storageprofile': ['A', 'A']}},
            None)
        self.assertTrue(res)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'unmanage')
    def test_unmanage(self,
                      mock_unmanage,
                      mock_find_volume,
                      mock_close_connection,
                      mock_open_connection,
                      mock_init):
        """Unmanage looks up the volume and hands it to the API layer."""
        volume = {'id': 'guid'}
        self.driver.unmanage(volume)
        mock_find_volume.assert_called_once_with(volume['id'])
        mock_unmanage.assert_called_once_with(self.VOLUME)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=None)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'unmanage')
    def test_unmanage_volume_not_found(self,
                                       mock_unmanage,
                                       mock_find_volume,
                                       mock_close_connection,
                                       mock_open_connection,
                                       mock_init):
        """Volume already gone: unmanage silently does nothing."""
        volume = {'id': 'guid'}
        self.driver.unmanage(volume)
        mock_find_volume.assert_called_once_with(volume['id'])
        self.assertFalse(mock_unmanage.called)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'update_storage_profile')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_sc',
                       return_value=12345)
    def test_retype(self,
                    mock_find_sc,
                    mock_find_volume,
                    mock_update_storage_profile,
                    mock_close_connection,
                    mock_open_connection,
                    mock_init):
        """Storage-profile retype updates the profile to the new value."""
        res = self.driver.retype(
            None, {'id': 'volid'}, None,
            {'extra_specs': {'storagetype:storageprofile': ['A', 'B']}},
            None)
        mock_update_storage_profile.assert_called_once_with(
            self.VOLUME, 'B')
        self.assertTrue(res)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'resume_replication')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_do_repl')
    def test_replication_enable(self,
                                mock_do_repl,
                                mock_find_volume,
                                mock_resume_replication,
                                mock_close_connection,
                                mock_open_connection,
                                mock_init):
        """Enable replication across no/one/many targets, good and bad."""
        # Note that since we do nothing with sync or async here
        # at all we do not bother testing it.
        mock_do_repl.side_effect = [(False, False),  # No run.
                                    (True, False),   # Good run.
                                    (True, False),   # Bad run.
                                    (True, False),   # Multiple replications.
                                    (True, False)]   # Multiple fail.
        mock_resume_replication.side_effect = [True,   # Good run.
                                               False,  # Bad run.
                                               True,   # Multiple replications.
                                               True,
                                               False]  # Multiple fail.
        vref = {'replication_driver_data': '', 'id': 'guid'}
        model_update = {}
        # No run
        ret = self.driver.replication_enable({}, vref)
        self.assertEqual(model_update, ret)
        # we didn't try to resume, right?
        self.assertEqual(0, mock_resume_replication.call_count)
        # Good run
        vref = {'replication_driver_data': '12345', 'id': 'guid'}
        ret = self.driver.replication_enable({}, vref)
        self.assertEqual(model_update, ret)
        # Hard to distinguish good from bad.  Make sure we tried.
        self.assertEqual(1, mock_resume_replication.call_count)
        # Bad run
        model_update = {'replication_status': 'error'}
        ret = self.driver.replication_enable({}, vref)
        self.assertEqual(model_update, ret)
        # Make sure we actually sent this down.
        self.assertEqual(2, mock_resume_replication.call_count)
        mock_resume_replication.assert_called_with(self.VOLUME, 12345)
        # Multiple replications.
        vref = {'replication_driver_data': '12345,67890', 'id': 'guid'}
        model_update = {}
        ret = self.driver.replication_enable({}, vref)
        self.assertEqual(model_update, ret)
        # Should be called two more times.
        self.assertEqual(4, mock_resume_replication.call_count)
        # This checks the last call
        mock_resume_replication.assert_called_with(self.VOLUME, 67890)
        # Multiple fail.
        model_update = {'replication_status': 'error'}
        ret = self.driver.replication_enable({}, vref)
        self.assertEqual(model_update, ret)
        # We are set to fail on the first call so one more.
        self.assertEqual(5, mock_resume_replication.call_count)
        # This checks the last call.
        mock_resume_replication.assert_called_with(self.VOLUME, 12345)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'pause_replication')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume',
                       return_value=VOLUME)
    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_do_repl')
    def test_replication_disable(self,
                                 mock_do_repl,
                                 mock_find_volume,
                                 mock_pause_replication,
                                 mock_close_connection,
                                 mock_open_connection,
                                 mock_init):
        """Disable replication; body continues past this chunk."""
        # Note that since we do nothing with sync or async here
        # at all we do not bother testing it.
        mock_do_repl.side_effect = [(False, False),  # No run.
                                    (True, False),   # Good run.
                                    (True, False),   # Bad run.
                                    (True, False),   # Multiple replications.
                                    (True, False)]   # Multiple fail.
        mock_pause_replication.side_effect = [True,   # Good run.
                                              False,  # Bad run.
                                              True,   # Multiple replications.
                                              True,
                                              False]  # Multiple fail.
vref = {'replication_driver_data': '', 'id': 'guid'} model_update = {} # No run ret = self.driver.replication_disable({}, vref) self.assertEqual(model_update, ret) # we didn't try to resume, right? self.assertEqual(0, mock_pause_replication.call_count) # Good run vref = {'replication_driver_data': '12345', 'id': 'guid'} ret = self.driver.replication_disable({}, vref) self.assertEqual(model_update, ret) # Hard to distinguish good from bad. Make sure we tried. self.assertEqual(1, mock_pause_replication.call_count) # Bad run model_update = {'replication_status': 'error'} ret = self.driver.replication_disable({}, vref) self.assertEqual(model_update, ret) # Make sure we actually sent this down. self.assertEqual(2, mock_pause_replication.call_count) mock_pause_replication.assert_called_with(self.VOLUME, 12345) # Multiple replications. vref = {'replication_driver_data': '12345,67890', 'id': 'guid'} model_update = {} ret = self.driver.replication_disable({}, vref) self.assertEqual(model_update, ret) # Should be called two more times. self.assertEqual(4, mock_pause_replication.call_count) # This checks the last call mock_pause_replication.assert_called_with(self.VOLUME, 67890) # Multiple fail. model_update = {'replication_status': 'error'} ret = self.driver.replication_disable({}, vref) self.assertEqual(model_update, ret) # We are set to fail on the first call so one more. self.assertEqual(5, mock_pause_replication.call_count) # This checks the last call. mock_pause_replication.assert_called_with(self.VOLUME, 12345) def test__find_host(self, mock_close_connection, mock_open_connection, mock_init): backends = self.driver.backends self.driver.backends = [{'target_device_id': '12345', 'managed_backend_name': 'host@dell1', 'qosnode': 'cinderqos'}, {'target_device_id': '67890', 'managed_backend_name': 'host@dell2', 'qosnode': 'cinderqos'}] # Just make sure we are turning the correct bit.. 
# Good run expected = 'host@dell2' ret = self.driver._find_host('67890') self.assertEqual(expected, ret) # Bad run ret = self.driver._find_host('54321') self.assertIsNone(ret) self.driver.backends = backends def test__parse_secondary(self, mock_close_connection, mock_open_connection, mock_init): backends = self.driver.backends vref = {'id': 'guid', 'replication_driver_data': '67890'} self.driver.backends = [{'target_device_id': '12345', 'managed_backend_name': 'host@dell1', 'qosnode': 'cinderqos'}, {'target_device_id': '67890', 'managed_backend_name': 'host@dell2', 'qosnode': 'cinderqos'}] mock_api = mock.MagicMock() # Good run. Secondary in replication_driver_data and backend. sc up. destssn, host = self.driver._parse_secondary(mock_api, vref, '67890') self.assertEqual(67890, destssn) self.assertEqual('host@dell2', host) # Bad run. Secondary not in replication_driver_data destssn, host = self.driver._parse_secondary(mock_api, vref, '12345') self.assertIsNone(destssn) self.assertIsNone(host) # Bad run. Secondary not in backend. vref['replication_driver_data'] = '67891' destssn, host = self.driver._parse_secondary(mock_api, vref, '67890') self.assertIsNone(destssn) self.assertIsNone(host) # Bad run. no driver data vref['replication_driver_data'] = '' destssn, host = self.driver._parse_secondary(mock_api, vref, '67890') self.assertIsNone(destssn) self.assertIsNone(host) # Good run. No secondary selected. 
vref['replication_driver_data'] = '12345' destssn, host = self.driver._parse_secondary(mock_api, vref, '12345') self.assertEqual(12345, destssn) self.assertEqual('host@dell1', host) self.driver.backends = backends @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'find_sc') def test__parse_secondary_sc_down(self, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): backends = self.driver.backends vref = {'id': 'guid', 'replication_driver_data': '12345'} self.driver.backends = [{'target_device_id': '12345', 'managed_backend_name': 'host@dell1', 'qosnode': 'cinderqos'}, {'target_device_id': '67890', 'managed_backend_name': 'host@dell2', 'qosnode': 'cinderqos'}] mock_api = mock.MagicMock() # Bad run. Good selection. SC down. vref['replication_driver_data'] = '12345' mock_api.find_sc = mock.MagicMock( side_effect=exception.VolumeBackendAPIException(data='1234')) destssn, host = self.driver._parse_secondary(mock_api, vref, '12345') self.assertIsNone(destssn) self.assertIsNone(host) self.driver.backends = backends @mock.patch.object(dell_storagecenter_api.StorageCenterApi, 'break_replication') @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, '_parse_secondary') @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, '_do_repl') def test_replication_failover(self, mock_do_repl, mock_parse_secondary, mock_break_replication, mock_close_connection, mock_open_connection, mock_init): mock_parse_secondary.side_effect = [(12345, 'host@host#be'), # Good. (12345, 'host@host#be'), # Bad. (None, None)] # Not found. mock_break_replication.side_effect = [True, # Good run. False] # Bad run. mock_do_repl.side_effect = [(False, False), # No run. (True, False), # Good run. (True, False), # Bad run. (True, False)] # Secondary not found. vref = {'id': 'guid'} # No run. Not doing repl. Should raise. 
self.assertRaises(exception.ReplicationError, self.driver.replication_failover, {}, vref, '12345') # Good run expected = {'host': 'host@host#be', 'replication_driver_data': None} ret = self.driver.replication_failover({}, vref, '12345') self.assertEqual(expected, ret) # Bad run. (break_replication fails) self.assertRaises(exception.ReplicationError, self.driver.replication_failover, {}, vref, '12345') # Secondary not found. self.assertRaises(exception.ReplicationError, self.driver.replication_failover, {}, vref, '54321') @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver, '_do_repl') def test_list_replication_targets(self, mock_do_repl, mock_close_connection, mock_open_connection, mock_init): mock_do_repl.side_effect = [(False, False), # No repl. (True, False), # Good run. (True, False)] # Target not found. backends = self.driver.backends self.driver.backends = [{'target_device_id': '12345', 'managed_backend_name': 'host@dell1', 'qosnode': 'cinderqos'}, {'target_device_id': '67890', 'managed_backend_name': 'host@dell2', 'qosnode': 'cinderqos'}] # No repl. expected = {'volume_id': 'guid', 'targets': []} vref = {'replication_driver_data': '', 'id': 'guid'} ret = self.driver.list_replication_targets({}, vref) self.assertEqual(expected, ret) # Good run. expected = {'volume_id': 'guid', 'targets': [{'type': 'managed', 'target_device_id': '12345', 'backend_name': 'host@dell1'}, {'type': 'managed', 'target_device_id': '67890', 'backend_name': 'host@dell2'}]} vref = {'replication_driver_data': '12345,67890', 'id': 'guid'} ret = self.driver.list_replication_targets({}, vref) self.assertEqual(expected, ret) # Target not found. # We find one target but not another. This could happen for a variety # of reasons most of them administrator negligence. But the main one # is that someone reconfigured their backends without taking into # account how this would affect the children. 
expected = {'volume_id': 'guid', 'targets': [{'type': 'managed', 'target_device_id': '12345', 'backend_name': 'host@dell1'}]} vref = {'replication_driver_data': '12345,99999', 'id': 'guid'} ret = self.driver.list_replication_targets({}, vref) self.assertEqual(expected, ret) self.driver.backends = backends
codeparrot/github-code-clean
#! /usr/bin/env python ##################################################################################################################### # CUICUILCO: A general purpose framework that allows the construction and evaluation of # # hierarchical networks for supervised learning # # # # By Alberto Escalante. Alberto.Escalante@ini.rub.de # # Ruhr-University-Bochum, Institute of Neural Computation, Group of Prof. Dr. Wiskott # ##################################################################################################################### ##################################################################################################################### # USAGE EXAMPLES: # python -u cuicuilco_run.py --Experiment=ParamsMNISTFunc --HierarchicalNetwork=SFANetwork1L # or a much more involved example: ;) # python -u cuicuilco_run.py --EnableDisplay=0 --CacheAvailable=1 --NetworkCacheReadDir=/local/SavedNetworks # --NetworkCacheWriteDir=/local/SavedNetworks --NodeCacheReadDir=/local/SavedNodes # --NodeCacheWriteDir=/local/SavedNodes --ClassifierCacheWriteDir=None --SaveSubimagesTraining=0 # --SaveAverageSubimageTraining=0 --NumFeaturesSup=8 --SaveSorted_AE_GaussNewid=0 # --SaveSortedIncorrectClassGaussNewid=0 --ComputeSlowFeaturesNewidAcrossNet=0 --UseFilter=0 --EnableGC=1 --EnableKNN=0 # --kNN_k=3 --EnableNCC=0 --EnableSVM=0 --SVM_C=0.125 --SVM_gamma=1.0 --EnableLR=0 --AskNetworkLoading=0 # --LoadNetworkNumber=-1 --NParallel=2 --EnableScheduler=0 --EstimateExplainedVarWithInverse=0 # --EstimateExplainedVarWithKNN_k=0 --EstimateExplainedVarWithKNNLinApp=0 --EstimateExplainedVarLinGlobal_N=0 # --AddNormalizationNode=0 --MakeLastPCANodeWhithening=0 --FeatureCutOffLevel=0.0 --ExportDataToLibsvm=0 # --IntegerLabelEstimation=1 --MapDaysToYears=1 --CumulativeScores=0 --DatasetForDisplayNewid=0 # --GraphExactLabelLearning=0 --OutputInsteadOfSVM2=0 --NumberTargetLabels=0 --EnableSVR=1 --SVR_gamma=auto --SVR_C=4.0 # --SVR_epsilon=0.1 --SVRInsteadOfSVM2=1 
--ExperimentalDataset=ParamsRAgeFunc_48 # --HierarchicalNetwork=HiGSFANetworkU11L_Overlap6x6L0_Sigmoids_GUO_3Labels_48x48 --SleepM=-1 ##################################################################################################################### from __future__ import absolute_import from __future__ import print_function from __future__ import division import numpy import scipy import scipy.misc import sklearn.svm as slsvm # mpl.style.use('classic') import PIL import os import sys import time import string import getopt from lockfile import LockFile from inspect import getmembers import subprocess # import mkl import matplotlib as mpl # mpl.use('Qt4Agg') import matplotlib.pyplot as plt import socket import mdp from . import more_nodes from . import patch_mdp from . import object_cache as cache from . import sfa_libs from .sfa_libs import (scale_to, distance_squared_Euclidean, str3, wider_1Darray, ndarray_to_string) from .exact_label_learning import (ConstructGammaFromLabels, RemoveNegativeEdgeWeights, MapGammaToEdgeWeights) from . import system_parameters from .system_parameters import (scale_sSeq, take_first_02D, take_0_k_th_from_2D_list, sSeq_force_image_size, sSeq_getinfo_format, convert_sSeq_to_funcs_params_sets) from .image_loader import * from . import classifiers_regressions as classifiers from . import network_builder __version__ = "0.8.0" # For Python 2 and 3 compatibility import functools from builtins import input cmp = lambda x, y: (x > y) - (x < y) # benchmark is a list that contains benchmark information (running times) with entries: ("description", time as float # in seconds). This will be upgraded to a Benchmark object. benchmark = None # mkl.set_num_threads(18) # Number of threads used by mlk to parallelize matrix operations of numpy. # Adjust this value according to the number of cores in your system. MKL might decide to ignore this. 
print("Running on hostname= " + socket.gethostname())

# ---------------------------------------------------------------------------
# Global configuration defaults. Each of these may be overridden by a
# command-line option parsed in the __main__ section below.
# ---------------------------------------------------------------------------
random_seed = 123456  # Default seed used by hierarchical_networks
numpy.random.seed(random_seed)

enable_display = False
save_plots = False
input_filename = None  # This field is only used by the ParamsNatural experimental dataset
output_filename = None  # The delta values of the extracted features are saved to this file
cache_available = True  # This should be enabled for any cache (network, node, signal, classifier caches) to work
load_and_append_output_features_dir = None
num_features_to_append_to_input = 0
save_output_features_dir = None
# Cache directories (None disables the corresponding cache).
network_cache_read_dir = None  # "/local/tmp/escalafl/Alberto/SavedNetworks"
network_cache_write_dir = None  # "/local/tmp/escalafl/Alberto/SavedNetworks"
node_cache_read_dir = None  # "/local/tmp/escalafl/Alberto/SavedNodes"
node_cache_write_dir = None  # "/local/tmp/escalafl/Alberto/SavedNodes"
signal_cache_read_dir = None  # "/local/tmp/escalafl/Alberto/SavedSignals"
signal_cache_write_dir = None  # "/local/tmp/escalafl/Alberto/SavedSignals"
classifier_cache_read_dir = None  # "/local/tmp/escalafl/Alberto/SavedClassifiers"
classifier_cache_write_dir = None  # "/local/tmp/escalafl/Alberto/SavedClassifiers"
enable_command_line = True
# Supervised-step configuration (number of features, classifiers to run).
reg_num_signals = 4
skip_num_signals = 0
use_full_sl_output = False
enable_kNN = False
enable_NCC = False
enable_GC = False
kNN_k = 1
# SVM / SVR / linear-regression settings.
enable_svm = False
svm_gamma = 0
svm_C = 1.0
svm_min = -1.0
svm_max = 1.0
enable_svr = False
svr_gamma = 'auto'  # 0.1
svr_C = 1.0
svr_epsilon = 0.1
svr_instead_of_SVM2 = False
enable_lr = False
load_network_number = None
ask_network_loading = True
n_parallel = None  # 5
enable_scheduler = False
# Diagnostic/output toggles.
save_subimages_training = False  # or True
save_images_training_supplementary_info = None
save_average_subimage_training = False  # or True
save_sorted_AE_Gauss_newid = False  # or True
save_sorted_incorrect_class_Gauss_newid = False  # or True
compute_slow_features_newid_across_net = 0  # or 1,2,3
estimate_explained_var_with_inverse = False
estimate_explained_var_with_kNN_k = 0
estimate_explained_var_with_kNN_lin_app_k = 0
estimate_explained_var_linear_global_N = 0
add_normalization_node = False
make_last_PCA_node_whithening = False
feature_cut_off_level = 0.0
use_filter = None
export_data_to_libsvm = False
integer_label_estimation = False
cumulative_scores = False
confusion_matrix = False
features_residual_information = 5000  # 0
compute_input_information = True
convert_labels_days_to_years = False
sfa_gc_reduced_dim = 0
transfer_learning_feature_normalization = False
clip_seenid_newid_to_training = False
add_noise_to_seenid = False
dataset_for_display_train = 0
dataset_for_display_newid = 0
objective_label = 0
# ELL options
graph_exact_label_learning = False
output_instead_of_SVM2 = False
# The total number of labels is num_orig_labels *number_of_target_labels_per_orig_label
number_of_target_labels_per_orig_label = 0
coherent_seeds = False or True  # NOTE: "False or True" is a manual toggle idiom; currently True
# Simple file-based job queue used by --SleepM=-1 (see __main__ below).
cuicuilco_queue = "queue_cuicuilco.txt"
cuicuilco_lock_file = "queue_cuicuilco"
minutes_sleep = 0

t0 = time.time()
print("LOADING INPUT/SETUP INFORMATION")
# These imports are deliberately placed after the configuration above
# because importing them is expensive and prints progress information.
from . import hierarchical_networks
from . import experimental_datasets

print("Using mdp version:", mdp.__version__, "file:", mdp.__file__)
print(hierarchical_networks.__file__)
print(experimental_datasets.__file__)

print("Attempting to retrieve hash of current git commit")
try:
    print("output of \"$git describe --tags\":",
          subprocess.check_output(["git", "describe", "--tags"]).strip())
    print("output of \"$git rev-parse HEAD\":",
          subprocess.check_output(["git", "rev-parse", "HEAD"]).strip())
except subprocess.CalledProcessError as e:
    # Not running inside a git checkout (or git missing); not fatal.
    print("\nFailed to determine current git commit:", str(e), "\n")

# Report the version of every already-imported module that exposes one.
print("List of modules and their versions:")
obj_names = sys.modules.keys()
for obj_name in obj_names:
    obj_value = sys.modules[obj_name]
    obj_members = dir(obj_value)  # getmembers(obj_value)
    if "__version__" in obj_members:
        print(" using ", obj_name, " version: ", obj_value.__version__)

# Build the registry of selectable experiments: every ParamsSystem instance
# defined in experimental_datasets, keyed by its attribute name.
available_experiments = {}
print("Creating list of available experiments:")
for (obj_name, obj_value) in getmembers(experimental_datasets):
    if isinstance(obj_value, system_parameters.ParamsSystem):
        print(" ", obj_name)
        available_experiments[obj_name] = obj_value
        # print "object", obj.__name__

# Likewise for networks (ParamsNetwork instances, excluding the generic
# "network" placeholder).
available_networks = {}
print("Creating list of available networks:")
for (obj_name, obj_value) in getmembers(hierarchical_networks):
    if isinstance(obj_value, system_parameters.ParamsNetwork) and obj_name != "network":
        print(" ", obj_name)
        available_networks[obj_name] = obj_value
        # print "object", obj.__name__

# Defaults, overridable via --ExperimentalDataset / --HierarchicalNetwork.
name_default_experiment = "ParamsMNISTFunc"
name_default_network = "voidNetwork1L"
DefaultExperimentalDataset = available_experiments[name_default_experiment]
DefaultNetwork = available_networks[name_default_network]

from .experimental_datasets import experiment_seed
from .experimental_datasets import DAYS_IN_A_YEAR


def my_sigmoid(x):
    """Steepened squashing nonlinearity: tanh(5 * x)."""
    return numpy.tanh(5 * x)


def svm_compute_range(data):
    """Return per-feature (mins, maxs) of *data* along axis 0."""
    mins = data.min(axis=0)
    maxs = data.max(axis=0)
    return mins, maxs


def svm_scale(data, mins, maxs, svm_min, svm_max):
    """Linearly map *data* per feature from [mins, maxs] to [svm_min, svm_max]."""
    return (data - mins) * (svm_max -
svm_min) / (maxs - mins) + svm_min if coherent_seeds: print("experimental_datasets.experiment_seed=", experiment_seed) numpy.random.seed(experiment_seed + 111111) print("(cuicuilco_run) __package__=", __package__) if __name__ == "__main__": # ############## Parse command line arguments #################### if enable_command_line: argv = None if argv is None: argv = sys.argv print("Apparent command line arguments: \n", " ".join(argv)) if len(argv) >= 2: try: opts, args = getopt.getopt(argv[1:], "", ["InputFilename=", "OutputFilename=", "EnableDisplay=", "SavePlots=", "CacheAvailable=", "NumFeaturesSup=", "SkipFeaturesSup=", 'SVM_gamma=', 'SVM_C=', 'EnableSVM=', "LoadNetworkNumber=", 'SVR_gamma=', 'SVR_C=', 'SVR_epsilon=', 'EnableSVR=', "SVRInsteadOfSVM2=", "AskNetworkLoading=", 'EnableLR=', "NParallel=", "EnableScheduler=", "SaveOutputFeaturesDir=", "LoadAndAppendOutputFeaturesDir=", "NumFeaturesToAppendToInput=", "NetworkCacheReadDir=", "NetworkCacheWriteDir=", "NodeCacheReadDir=", "NodeCacheWriteDir=", "SignalCacheReadDir=", "SignalCacheWriteDir=", "ClassifierCacheReadDir=", "ClassifierCacheWriteDir=", "SaveSubimagesTraining=", "SaveAverageSubimageTraining=", "SaveSorted_AE_GaussNewid=", "SaveSortedIncorrectClassGaussNewid=", "ComputeSlowFeaturesNewidAcrossNet=", "UseFilter=", "kNN_k=", 'EnableKNN=', "EnableNCC=", "EnableGC=", "SaveSubimagesTrainingSupplementaryInfo=", "EstimateExplainedVarWithInverse=", "EstimateExplainedVarWithKNN_k=", "EstimateExplainedVarWithKNNLinApp_k=", "EstimateExplainedVarLinGlobal_N=", "AddNormalizationNode=", "MakeLastPCANodeWhithening=", "FeatureCutOffLevel=", "ExportDataToLibsvm=", "IntegerLabelEstimation=", "CumulativeScores=", "FeaturesResidualInformation=", "ComputeInputInformation=", "SleepM=", "DatasetForDisplayTrain=", "DatasetForDisplayNewid=", "GraphExactLabelLearning=", "OutputInsteadOfSVM2=", "NumberTargetLabels=", "ConfusionMatrix=", "MapDaysToYears=", "AddNoiseToSeenid=", "ClipSeenidNewid=", "HierarchicalNetwork=", 
"ExperimentalDataset=", "SFAGCReducedDim=", "ObjectiveLabel=", "ImportNetworksFromFile=", "ImportDatasetsFromFile=", "TransferLearningFeatureNormalization=", "help"]) print("opts=", opts) print("args=", args) if len(args) > 0: print("Arguments not understood:", args) sys.exit(2) for opt, arg in opts: if opt in ('--InputFilename',): input_filename = arg print("Using the following input file:", input_filename) elif opt in ('--OutputFilename',): output_filename = arg print("Using the following output file:", output_filename) elif opt in ('--EnableDisplay',): if arg == '1': enable_display = True else: enable_display = False print("Setting enable_display to", enable_display) elif opt in ('--SavePlots',): if arg == '1': save_plots = True else: save_plots = False print("Setting save_plots to", save_plots) elif opt in ('--CacheAvailable',): if arg == '1': cache_available = True else: cache_available = False print("Setting cache_available to", cache_available) elif opt in ('--NumFeaturesSup',): reg_num_signals = int(arg) print("Setting reg_num_signals to", reg_num_signals) elif opt in ('--SkipFeaturesSup',): skip_num_signals = int(arg) print("Setting skip_num_signals to", skip_num_signals) elif opt in ('--SVM_gamma',): svm_gamma = float(arg) print("Setting svm_gamma to", svm_gamma) elif opt in ('--SVM_C',): svm_C = float(arg) print("Setting svm_C to", svm_C) elif opt in ('--EnableSVM',): enable_svm = int(arg) print("Setting enable_svm to", enable_svm) elif opt in ('--SVR_gamma',): if arg == 'auto': svr_gamma = 'auto' else: svr_gamma = float(arg) print("Setting svr_gamma to", svr_gamma) elif opt in ('--SVR_epsilon',): svr_epsilon = float(arg) print("Setting svr_epsilon to", svr_epsilon) elif opt in ('--SVR_C',): svr_C = float(arg) print("Setting svr_C to", svr_C) elif opt in ('--EnableSVR',): enable_svr = int(arg) print("Setting enable_svr to", enable_svr) elif opt in ('--SVRInsteadOfSVM2',): svr_instead_of_SVM2 = bool(int(arg)) print("Setting svr_instead_of_SVM2 to", 
svr_instead_of_SVM2) elif opt in ('--LoadNetworkNumber',): load_network_number = int(arg) print("Setting load_network_number to", load_network_number) elif opt in ('--AskNetworkLoading',): ask_network_loading = int(arg) print("Setting ask_network_loading to", ask_network_loading) elif opt in ('--EnableLR',): enable_lr = int(arg) print("Setting enable_lr to", enable_lr) elif opt in ('--NParallel',): n_parallel = int(arg) print("Setting n_parallel to", n_parallel) elif opt in ('--EnableScheduler',): enable_scheduler = int(arg) print("Setting enable_scheduler to", enable_scheduler) elif opt in ('--SaveOutputFeaturesDir',): if arg == "None": save_output_features_dir = None else: save_output_features_dir = arg print("Setting save_output_features_dir to", save_output_features_dir) elif opt in ('--LoadAndAppendOutputFeaturesDir',): if arg == "None": load_and_append_output_features_dir = None else: load_and_append_output_features_dir = arg print("Setting load_and_append_output_features_dir to", load_and_append_output_features_dir) elif opt in ('--NumFeaturesToAppendToInput',): if arg == "None": num_features_to_append_to_input = None else: num_features_to_append_to_input = int(arg) print("Setting num_features_to_append_to_input to", num_features_to_append_to_input) elif opt in ('--NetworkCacheReadDir',): if arg == "None": network_cache_read_dir = None else: network_cache_read_dir = arg print("Setting network_cache_read_dir to", network_cache_read_dir) elif opt in ('--NetworkCacheWriteDir',): if arg == "None": network_cache_write_dir = None else: network_cache_write_dir = arg print("Setting network_cache_write_dir to", network_cache_write_dir) elif opt in ('--NodeCacheReadDir',): if arg == "None": node_cache_read_dir = None else: node_cache_read_dir = arg print("Setting node_cache_read_dir to", node_cache_read_dir) elif opt in ('--NodeCacheWriteDir',): if arg == "None": node_cache_write_dir = None else: node_cache_write_dir = arg print("Setting node_cache_write_dir to", 
node_cache_write_dir) elif opt in ('--SignalCacheReadDir',): if arg == "None": signal_cache_read_dir = None else: signal_cache_read_dir = arg print("Setting signal_cache_read_dir to", signal_cache_read_dir) elif opt in ('--SignalCacheWriteDir',): if arg == "None": signal_cache_write_dir = None else: signal_cache_write_dir = arg print("Setting signal_cache_write_dir to", signal_cache_write_dir) elif opt in ('--ClassifierCacheReadDir',): if arg == "None": classifier_cache_read_dir = None else: classifier_cache_read_dir = arg print("Setting classifier_cache_read_dir to", classifier_cache_read_dir) er = "ClassifierCacheReadDir: Option not supported yet" raise Exception(er) elif opt in ('--ClassifierCacheWriteDir',): if arg == "None": classifier_cache_write_dir = None else: classifier_cache_write_dir = arg print("Setting classifier_cache_write_dir to", classifier_cache_write_dir) elif opt in ('--SaveSubimagesTraining',): save_subimages_training = bool(int(arg)) print("Setting save_subimages_training to", save_subimages_training) elif opt in ('--SaveAverageSubimageTraining',): save_average_subimage_training = bool(int(arg)) print("Setting save_average_subimage_training to", save_average_subimage_training) elif opt in ('--SaveSorted_AE_GaussNewid',): save_sorted_AE_Gauss_newid = bool(int(arg)) print("Setting save_sorted_AE_Gauss_newid to", save_sorted_AE_Gauss_newid) elif opt in ('--SaveSortedIncorrectClassGaussNewid',): save_sorted_incorrect_class_Gauss_newid = bool(int(arg)) print("Setting save_sorted_incorrect_class_Gauss_newid to %d" % save_sorted_incorrect_class_Gauss_newid) elif opt in ('--ComputeSlowFeaturesNewidAcrossNet',): compute_slow_features_newid_across_net = int(arg) print("Setting compute_slow_features_newid_across_net to %d" % compute_slow_features_newid_across_net) elif opt in ('--UseFilter',): use_filter = arg print("Setting use_filter to", use_filter) elif opt in ('--kNN_k',): kNN_k = int(arg) print("Setting kNN_k to", kNN_k) elif opt in 
('--EnableKNN',): enable_kNN = bool(int(arg)) print("Setting enable_kNN to", enable_kNN) elif opt in ('--EnableNCC',): enable_NCC = bool(int(arg)) print("Setting enable_NCC to", enable_NCC) elif opt in ('--EnableGC',): enable_GC = bool(int(arg)) print("Setting enable_GC to", enable_GC) elif opt in ('--SaveSubimagesTrainingSupplementaryInfo',): save_images_training_supplementary_info = arg print("Setting save_images_training_supplementary_info to", save_images_training_supplementary_info) elif opt in ('--EstimateExplainedVarWithInverse',): estimate_explained_var_with_inverse = bool(int(arg)) print("Setting estimate_explained_var_with_inverse to", estimate_explained_var_with_inverse) elif opt in ('--EstimateExplainedVarWithKNN_k',): estimate_explained_var_with_kNN_k = int(arg) print("Setting estimate_explained_var_with_kNN_k to", estimate_explained_var_with_kNN_k) elif opt in ('--EstimateExplainedVarWithKNNLinApp_k',): estimate_explained_var_with_kNN_lin_app_k = int(arg) print("Setting estimate_explained_var_with_kNN_lin_app_k to %d" % estimate_explained_var_with_kNN_lin_app_k) elif opt in ('--EstimateExplainedVarLinGlobal_N',): estimate_explained_var_linear_global_N = int(arg) print("Setting estimate_explained_var_linear_global_N to %d" % estimate_explained_var_linear_global_N) elif opt in ('--AddNormalizationNode',): add_normalization_node = bool(int(arg)) print("Setting add_normalization_node to", add_normalization_node) elif opt in ('--MakeLastPCANodeWhithening',): make_last_PCA_node_whithening = bool(int(arg)) print("Setting make_last_PCA_node_whithening to", make_last_PCA_node_whithening) elif opt in ('--FeatureCutOffLevel',): feature_cut_off_level = float(arg) print("Setting feature_cut_off_level to", feature_cut_off_level) elif opt in ('--ExportDataToLibsvm',): export_data_to_libsvm = bool(int(arg)) print("Setting export_data_to_libsvm to", export_data_to_libsvm) elif opt in ('--IntegerLabelEstimation',): integer_label_estimation = bool(int(arg)) 
print("Setting integer_label_estimation to", integer_label_estimation) elif opt in ('--CumulativeScores',): cumulative_scores = bool(int(arg)) print("Setting cumulative_scores to", cumulative_scores) elif opt in ('--FeaturesResidualInformation',): features_residual_information = int(arg) print("Setting features_residual_information to", features_residual_information) elif opt in ('--ComputeInputInformation',): compute_input_information = bool(int(arg)) print("Setting compute_input_information to", compute_input_information) elif opt in ('--SleepM',): minutes_sleep = float(arg) if minutes_sleep >= 0: print("Sleeping for %f minutes..." % minutes_sleep) time.sleep(minutes_sleep * 60) print("... and awoke") else: print("Sleeping until execution in cuicuilco queue") t_wa = time.time() lock = LockFile(cuicuilco_lock_file) pid = os.getpid() print("process pid is:", pid) # Add the current process to the queue print("adding process to queue...") lock.acquire() q = open(cuicuilco_queue, "a") q.write("%d\n" % pid) q.close() lock.release() served = False while not served: lock.acquire() q = open(cuicuilco_queue, "r") next_pid = int(q.readline()) print("top of queue:", next_pid) q.close() lock.release() if next_pid == pid: print("our turn in queue") served = True else: print("sleeping 60 seconds") time.sleep(60) # sleep for 10 seconds before trying again t_wb = time.time() print("process is executing now. 
Total waiting time: %f min" % ((t_wb - t_wa) / 60.0)) elif opt in ('--DatasetForDisplayTrain',): dataset_for_display_train = int(arg) print("Setting dataset_for_display_train to", dataset_for_display_train) elif opt in ('--DatasetForDisplayNewid',): dataset_for_display_newid = int(arg) print("Setting dataset_for_display_newid to", dataset_for_display_newid) elif opt in ('--GraphExactLabelLearning',): graph_exact_label_learning = bool(int(arg)) print("Setting graph_exact_label_learning to", graph_exact_label_learning) elif opt in ('--OutputInsteadOfSVM2',): output_instead_of_SVM2 = bool(int(arg)) print("Setting output_instead_of_SVM2 to", output_instead_of_SVM2) elif opt in ('--NumberTargetLabels',): number_of_target_labels_per_orig_label = int(arg) print("Setting number_of_target_labels_per_orig_label to %d" % number_of_target_labels_per_orig_label) elif opt in ('--ConfusionMatrix',): confusion_matrix = bool(int(arg)) print("Setting confusion_matrix to", confusion_matrix) elif opt in ('--MapDaysToYears',): convert_labels_days_to_years = bool(int(arg)) print("Setting convert_labels_days_to_years to", convert_labels_days_to_years) elif opt in ('--AddNoiseToSeenid',): add_noise_to_seenid = bool(int(arg)) print("Setting add_noise_to_seenid to", add_noise_to_seenid) elif opt in ('--ClipSeenidNewid',): clip_seenid_newid_to_training = bool(int(arg)) print("Setting clip_seenid_newid_to_training to", clip_seenid_newid_to_training) elif opt in ('--HierarchicalNetwork',): name_default_network = arg print("Setting default_network to", name_default_network) DefaultNetwork = available_networks[name_default_network] elif opt in ('--ExperimentalDataset',): name_default_experiment = arg print("Setting name_default_experiment to", name_default_experiment) DefaultExperimentalDataset = available_experiments[name_default_experiment] elif opt in ('--SFAGCReducedDim',): sfa_gc_reduced_dim = int(arg) print("Setting sfa_gc_reduced_dim to", sfa_gc_reduced_dim) elif opt in 
('--ObjectiveLabel',): objective_label = int(arg) print("Setting objective_label to", objective_label) elif opt in ('--ImportNetworksFromFile',): try: module = __import__(arg) reload(module) print("Extending list of available networks:") for (obj_name, obj_value) in getmembers(module): if isinstance(obj_value, system_parameters.ParamsNetwork) and obj_name != "network": print("Adding network: ", obj_name) available_networks[obj_name] = obj_value except ImportError: raise ImportError("It was not possible to import the provided filename:" + arg) elif opt in ('--ImportDatasetsFromFile',): try: module = __import__(arg) reload(module) print("Extending list of available experiments:") for (obj_name, obj_value) in getmembers(module): if isinstance(obj_value, system_parameters.ParamsSystem): print("Adding dataset: ", obj_name) available_experiments[obj_name] = obj_value except ImportError: raise ImportError("It was not possible to import the provided filename:" + arg) elif opt in ('--TransferLearningFeatureNormalization',): transfer_learning_feature_normalization = bool(int(arg)) print("Setting transfer_learning_feature_normalization to", transfer_learning_feature_normalization) elif opt in ('--help',): txt = \ """Cuicuilco: displaying help information Usage: python cuicuilco_run.py [OPTION]... Executes a single run of the Cuicuilco framework. The following global variables must be specified on beforehand (integer values): CUICUILCO_TUNING_PARAMETER (value of the tuning parameter used by the datasets) CUICUILCO_EXPERIMENT_SEED (seed used for the dataset radomizations) CUICUILCO_IMAGE_LOADING_NUM_PROC (max number of processes used by MKL) CUICUILCO_EXPERIMENT_BASEDIR (base directory in which the datasets are stored) The options below may be used: **General options --EnableDisplay={1/0}. Enables the graphical interface --ExperimentalDataset={ParamsRAgeFunc/ParamsMNISTFunc/ParamsRTransXYScaleFunc/...}. 
Selects a particular dataset --HierarchicalNetwork={voidNetwork1L/PCANetwork1L/u08expoNetworkU11L/...}. Selects a particular network --NumFeaturesSup=N. Specifies the number of output features N used in the supervised step --SkipFeaturesSup=S. Specifies number of output features S that are skipped (ignored) --SleepM=M. Specifies a delay before Cuicuilco starts loading the dataset. (useful to prevent memory or processor clogging). if M>0 the current Cuicuilco process is paused for M minutes if M=0 there is no delay if M<0 the program joins a waiting list (specified by the lock file named queue_cuicuilco.txt), sleeps until its turn is reached, and deletes itself from the list after the labels/classes have been estimated **Network options --AddNormalizationNode={1/0} Adds a normalization node at the end of the network --MakeLastPCANodeWhithening={1/0} Changes the last PCANode into a WhitheningNode --FeatureCutOffLevel=f Trims the feature values between -f and f **Cache options --CacheAvailable={1/0} Specifies whether any type of cache might be available --NetworkCacheReadDir=directory Specifies a directory used to load previously trained networks --NetworkCacheWriteDir=directory Specifies a directory used to save trained networks --LoadNetworkNumber=M Loads the Mth network in cache instead of training a new network --AskNetworkLoading={1/0} If the option is enabled, Cuicuilco requests in the command line the number of the network to be loaded --NodeCacheReadDir=directory Specifies a directory used to search for nodes trained previously on the same data and parameters (can significantly speed up network training) --NodeCacheWriteDir=directory Specifies a directory where trained nodes are saved **Feature options --AddNoiseToSeenid={1/0} Adds noise to the data used to train the supervised step --ClipSeenidNewid={1/0} Trims the range of the data used to train the supervised step and the test data according to the range of the training data of the network **Supervised step 
options --EnableLR={1/0} Enables linear regression (OLS) as supervised step --EnableKNN={1/0} Enables k-nearest neighbors (kNN) as supervised step --kNN_k=k Sets the value of k if kNN is enabled --EnableNCC={1/0} Enables a nearest centroid classifier as supervised step --EnableGC={1/0} Enables a Gaussian classifier as supervised step --EnableSVM={1/0} Enables a support vector machine as supervised step (requires libsvm) --SVM_gamma=gamma Sets the value of gamma if SVM is enabled (RBF, multiclass, one against one) --SVM_C=C Sets the value of C if SVM is enabled **Result options --SaveSubimagesTraining={1/0} Saves (a fraction of) the training images to disk (after data distortion and other operations) --SaveSubimagesTrainingSupplementaryInfo={Class/Label} If the option above is enabled, this option adds the correct label or class information to the image filenames --SaveAverageSubimageTraining={1/0} Saves the average training image to disk (after data distortion and other operations) --SaveSorted_AE_GaussNewid={1/0} Saves (a fraction of) the training images to disk ordered by the absolute error for label estimation --SaveSortedIncorrectClassGaussNewid={1/0} Saves (a fraction of) the training images to disk that were classified incorrectly --ExportDataToLibsvm={1/0} Saves the output features and labels in the format of libsvm **Options to control computation of explained variance (1-reconstruction error). --EstimateExplainedVarWithInverse={1/0} Reconstructions are computed using flow.inverse --EstimateExplainedVarWithKNN_k=k If k>0 reconstructions are computed as the average of the k nearest neighbors --EstimateExplainedVarWithKNNLinApp_k=k If k>0 reconstructions are a linear average of the k nearest neighbors --EstimateExplainedVarLinGlobal_N=N Reconstructions are given by a linear model trained with N samples chosen randomly from the training data. If N=-1, all training samples are used. 
**Label estimation options --MapDaysToYears={1/0} Divides the ground-truth labels and label estimations by 365.242 --IntegerLabelEstimation={1/0} Truncates all label estimations to integer values --CumulativeScores={1/0} Computes cumulative scores for test data --ConfusionMatrix={1/0} Computes the confusion matrix for test data **Exact label learning graph options --GraphExactLabelLearning={1/0} Computes an ELL graph based on the available labels --NumberTargetLabels=N Defines the number of target labels (if N>1 there N-1 auxiliary labels are created) --OutputInsteadOfSVM2={1/0} If the option is enabled, the network output replaces the SVM2 label estimation **Undocumented or in development options (consult the source code) --InputFilename=filename, --OutputFilename=filename, --SignalCacheReadDir=directory, --SignalCacheWriteDir=directory, --ClassifierCacheReadDir=directory, --ClassifierCacheWriteDir=directory, --EnableScheduler={1/0}, --NParallel=N, --UseFilter={1/0}, --FeaturesResidualInformation=N, --ComputeInputInformation={1/0}, --ComputeSlowFeaturesNewidAcrossNet={1/0}, --DatasetForDisplayTrain=N, --DatasetForDisplayNewid=N **Other options --TransferLearningFeatureNormalization){1/0} Removes the mean and normalizes the variance (to 1.0) of the features extracted from the training, supervised, and test data independently (three separate models). 
--help Displays this help information """ print(txt) quit() else: print("Argument not handled: ", opt) quit() except getopt.GetoptError as err: print("Error while parsing the following arguments: ", argv[1:]) print("Error: ", err, "use the --help option for a short guide on the available arguments") # print "option:", getopt.GetoptError.opt, "message:", getopt.GetoptError.msg sys.exit(2) def main(): global benchmark, num_features_to_append_to_input, reg_num_signals, use_full_sl_output, svm_gamma, \ compute_input_information if enable_svm: import svm as libsvm if load_and_append_output_features_dir is None: num_features_to_append_to_input = 0 if coherent_seeds: print("experimental_datasets.experiment_seed=", experiment_seed) numpy.random.seed(experiment_seed + 12121212) if enable_scheduler and n_parallel > 1: scheduler = mdp.parallel.ThreadScheduler(n_threads=n_parallel) else: scheduler = None if features_residual_information <= 0 and compute_input_information: print("ignoring flag compute_input_information=%d because features_residual_information=%d <= 0" % (compute_input_information, features_residual_information)) compute_input_information = False Parameters = DefaultExperimentalDataset Parameters.create() Network = DefaultNetwork # Specific code for setting up the ParamsNatural experiment (requires run-time computations) if Parameters == experimental_datasets.ParamsNatural and input_filename is not None: (magic_num, iteration, numSamples, rbm_sfa_numHid, sampleSpan) = read_binary_header("", input_filename) print("Iteration Number=%d," % iteration, "numSamples=%d" % numSamples, "rbm_sfa_numHid=%d," % rbm_sfa_numHid) Parameters.sTrain.subimage_width = rbm_sfa_numHid // 8 Parameters.sTrain.subimage_height = rbm_sfa_numHid // Parameters.sTrain.subimage_width Parameters.sTrain.name = "RBM Natural. 
8x8 (exp 64=%d), iter %d, num_images %d" % (rbm_sfa_numHid, iteration, Parameters.sTrain.num_images) Parameters.sSeenid.subimage_width = rbm_sfa_numHid // 8 Parameters.sSeenid.subimage_height = rbm_sfa_numHid // Parameters.sSeenid.subimage_width Parameters.sSeenid.name = "RBM Natural. 8x8 (exp 64=%d), iter %d, num_images %d" % \ (rbm_sfa_numHid, iteration, Parameters.sSeenid.num_images) Parameters.sNewid.subimage_width = rbm_sfa_numHid // 8 Parameters.sNewid.subimage_height = rbm_sfa_numHid // Parameters.sNewid.subimage_width Parameters.sNewid.name = "RBM Natural. 8x8 (exp 64=%d), iter %d, num_images %d" % \ (rbm_sfa_numHid, iteration, Parameters.sNewid.num_images) Parameters.sTrain.data_base_dir = Parameters.sSeenid.data_base_dir = Parameters.sNewid.data_base_dir = "" Parameters.sTrain.base_filename = Parameters.sSeenid.base_filename = Parameters.sNewid.base_filename = \ input_filename if numSamples != 5000: er = "wrong number of Samples %d, 5000 were assumed" % numSamples raise Exception(er) enable_reduced_image_sizes = Parameters.enable_reduced_image_sizes reduction_factor = Parameters.reduction_factor print("reduction_factor=", reduction_factor) hack_image_size = Parameters.hack_image_size enable_hack_image_size = Parameters.enable_hack_image_size if enable_reduced_image_sizes: Parameters.name += "_Resized images" for sSeq in (Parameters.sTrain, Parameters.sSeenid, Parameters.sNewid): print("sSeq", sSeq) if isinstance(sSeq, list): for i, sSeq_vect in enumerate(sSeq): print("sSeq_vect", sSeq_vect) if sSeq_vect is not None: # is not None: for j, sSeq_entry in enumerate(sSeq_vect): if isinstance(sSeq_entry, system_parameters.ParamsDataLoading): # TODO: Avoid code repetition, even though readability compromised scale_sSeq(sSeq_entry, reduction_factor) else: er = "Unexpected data structure" raise Exception(er) else: scale_sSeq(sSeq, reduction_factor) if coherent_seeds: numpy.random.seed(experiment_seed + 34343434) iTrain_set = Parameters.iTrain sTrain_set = 
Parameters.sTrain iTrain = take_0_k_th_from_2D_list(iTrain_set, k=dataset_for_display_train) sTrain = take_0_k_th_from_2D_list(sTrain_set, k=dataset_for_display_train) if graph_exact_label_learning: if isinstance(iTrain_set, list): iTrain0 = iTrain_set[len(iTrain_set) - 1][0] else: iTrain0 = take_0_k_th_from_2D_list(iTrain_set, k=0) Q = iTrain0.num_images if len(iTrain0.correct_labels.shape) == 2: num_orig_labels = iTrain0.correct_labels.shape[1] else: num_orig_labels = 1 iTrain0.correct_labels.reshape((-1, num_orig_labels)) # number_of_target_labels_per_orig_label = 2 #1 or more for auxiliary labels if number_of_target_labels_per_orig_label >= 1: min_label = iTrain0.correct_labels.min(axis=0) max_label = iTrain0.correct_labels.max(axis=0) plain_labels = iTrain0.correct_labels.reshape((-1, num_orig_labels)) num_samples = len(plain_labels) auxiliary_labels = numpy.zeros((num_samples, num_orig_labels * number_of_target_labels_per_orig_label)) auxiliary_labels[:, 0:num_orig_labels] = plain_labels for i in range(1, number_of_target_labels_per_orig_label): auxiliary_labels[:, i * num_orig_labels:(i + 1) * num_orig_labels] = numpy.cos( (plain_labels - min_label) * (1.0 + i) * numpy.pi / (max_label - min_label)) print(auxiliary_labels) else: auxiliary_labels = iTrain0.correct_labels.reshape((-1, num_orig_labels)) print("iTrain0.correct_labels.shape", iTrain0.correct_labels.shape) orig_train_label_min = auxiliary_labels[:, objective_label].min() orig_train_label_max = auxiliary_labels[:, objective_label].max() orig_train_labels_mean = numpy.array(auxiliary_labels).mean(axis=0) orig_train_labels_std = numpy.array(auxiliary_labels).std(axis=0) orig_train_label_mean = orig_train_labels_mean[objective_label] orig_train_label_std = orig_train_labels_std[objective_label] orig_train_labels = auxiliary_labels orig_train_labels_mean = numpy.array(orig_train_labels).mean(axis=0) orig_train_labels_std = numpy.array(orig_train_labels).std(axis=0) train_feasible_labels = 
(orig_train_labels - orig_train_labels_mean) / orig_train_labels_std print("original feasible (perhaps correlated) label.T: ", train_feasible_labels.T) if len(iTrain0.correct_labels.shape) == 2: iTrain0.correct_labels = iTrain0.correct_labels[:, objective_label].flatten() Parameters.iSeenid.correct_labels = Parameters.iSeenid.correct_labels[:, objective_label].flatten() Parameters.iNewid[0][0].correct_labels = \ Parameters.iNewid[0][0].correct_labels[:, objective_label].flatten() iTrain0.correct_classes = iTrain0.correct_classes[:, objective_label].flatten() Parameters.iSeenid.correct_classes = Parameters.iSeenid.correct_classes[:, objective_label].flatten() Parameters.iNewid[0][0].correct_classes = \ Parameters.iNewid[0][0].correct_classes[:, objective_label].flatten() node_weights = numpy.ones(Q) Gamma = ConstructGammaFromLabels(train_feasible_labels, node_weights, constant_deltas=False) print("Resulting Gamma is", Gamma) Gamma = RemoveNegativeEdgeWeights(node_weights, Gamma) print("Removed negative weighs. 
Gamma=", Gamma) edge_weights = Gamma if isinstance(sTrain_set, list): sTrain0 = sTrain_set[len(sTrain_set) - 1][0] else: sTrain0 = take_0_k_th_from_2D_list(sTrain_set, k=0) sTrain0.train_mode = "graph" sTrain0.node_weights = node_weights sTrain0.edge_weights = edge_weights print("sTrain=", sTrain) iSeenid = Parameters.iSeenid sSeenid = Parameters.sSeenid if coherent_seeds: print("Setting coherent seed") numpy.random.seed(experiment_seed + 56565656) iNewid_set = Parameters.iNewid sNewid_set = Parameters.sNewid print("dataset_for_display_newid=", dataset_for_display_newid) iNewid = take_0_k_th_from_2D_list(iNewid_set, k=dataset_for_display_newid) sNewid = take_0_k_th_from_2D_list(sNewid_set, k=dataset_for_display_newid) image_files_training = iTrain.input_files # print image_files_training num_images_training = num_images = iTrain.num_images seq_sets = sTrain_set seq = sTrain if enable_hack_image_size: print("changing the native image size (width and height) to: ", hack_image_size) sSeq_force_image_size(sTrain_set, hack_image_size, hack_image_size) sSeq_force_image_size(sSeenid, hack_image_size, hack_image_size) sSeq_force_image_size(sNewid_set, hack_image_size, hack_image_size) subimage_shape, max_clip, signals_per_image, in_channel_dim = sSeq_getinfo_format(sTrain) # Filter used for loading images with transparent background # filter = generate_color_filter2((seq.subimage_height, seq.subimage_width)) if use_filter == "ColoredNoise" or use_filter == "1": alpha = 4.0 # mask 1 / f^(alpha/2) => power 1/f^alpha my_filter = filter_colored_noise2D_imp((seq.subimage_height, seq.subimage_width), alpha) # back_type = None # filter = None elif use_filter == "None" or (use_filter is None) or (use_filter == "0"): my_filter = None else: print("Unknown filter: ", use_filter) quit() sTrain.filter = my_filter sSeenid.filter = my_filter sNewid.filter = my_filter network_read_enabled = True # and False if network_read_enabled and cache_available: network_read = 
cache.Cache(network_cache_read_dir, "") else: network_read = None network_saving_enabled = True # and False if network_saving_enabled and cache_available and (network_cache_write_dir is not None): network_write = cache.Cache(network_cache_write_dir, "") else: network_write = None node_cache_read_enabled = True # and False if node_cache_read_enabled and cache_available and (node_cache_read_dir is not None): node_cache_read = cache.Cache(node_cache_read_dir, "") else: node_cache_read = None signal_cache_read_enabled = True # and False if signal_cache_read_enabled and cache_available and (signal_cache_read_dir is not None): signal_cache_read = cache.Cache(signal_cache_read_dir, "") else: signal_cache_read = None node_cache_write_enabled = True if node_cache_write_enabled and cache_available and (node_cache_write_dir is not None): node_cache_write = cache.Cache(node_cache_write_dir, "") else: node_cache_write = None signal_cache_write_enabled = True # and False #or network_saving_enabled if signal_cache_write_enabled and cache_available and (signal_cache_write_dir is not None): signal_cache_write = cache.Cache(signal_cache_write_dir, "") else: signal_cache_write = None classifier_read_enabled = False if classifier_read_enabled and cache_available and (classifier_cache_read_dir is not None): classifier_read = cache.Cache(classifier_cache_read_dir, "") else: classifier_read = None classifier_saving_enabled = True # and False if classifier_saving_enabled and cache_available and (classifier_cache_write_dir is not None): classifier_write = cache.Cache(classifier_cache_write_dir, "") else: classifier_write = None network_hashes_base_filenames = [] if network_cache_read_dir and network_read: network_filenames = cache.find_filenames_beginning_with(network_cache_read_dir, "Network", recursion=False, extension=".pckl") for i, network_filename in enumerate(network_filenames): network_base_filename = string.split(network_filename, sep=".")[0] network_hash = 
string.split(network_base_filename, sep="_")[-1] network_hashes_base_filenames.append((network_base_filename, network_hash)) else: network_hashes_base_filenames = [] #network_hashes_base_filenames.sort(lambda x, y: cmp(x[1], y[1])) network_hashes_base_filenames.sort(key=functools.cmp_to_key(cmp)) print("%d networks found:" % len(network_hashes_base_filenames)) for i, (network_filename, network_hash) in enumerate(network_hashes_base_filenames): print("[%d]" % i, network_filename) network_filename = None if len(network_hashes_base_filenames) > 0 and (ask_network_loading or load_network_number is not None): if ask_network_loading or load_network_number is None: selected_network = int(input("Please select a network (-1=Train new network):")) else: print("Network selected from program parameters: ", load_network_number) selected_network = load_network_number if selected_network == -1: print("Selected: Train new network") else: print("Selected: Load Network", selected_network) network_filename = network_hashes_base_filenames[selected_network][0] if network_filename is not None: network_base_filename = string.split(network_filename, sep=".")[0] network_hash = string.split(network_base_filename, sep="_")[-1] print("******************************************") print("Loading Trained Network and Display Data from Disk ") print("******************************************") print("network_cach_read_dir", network_cache_read_dir) print("network_cach_write_dir", network_cache_write_dir) print("network_filename:", network_filename) print("network_basefilename:", network_base_filename) print("network_hash:", network_hash) # network_write.update_cache([flow, layers, benchmark, Network], None, network_base_dir, "Network"+ # Network.name+"_ParName"+Parameters.name+"_"+network_hash, overwrite=True, use_hash=network_hash, verbose=True) # network_write.update_cache([iSeq, sSeq], None, network_base_dir, "iSeqsSeqData", overwrite=True, # use_hash=network_hash, verbose=True) # 
network_write.update_cache(subimages_train, None, network_base_dir, "TrainData", overwrite=True, # use_hash=network_hash, verbose=True) # network_write.update_cache(sl_seq_training, None, network_base_dir, "SLSeqData", overwrite=True, # use_hash=network_hash, verbose=True) # flow, layers, benchmark, Network = cache.unpickle_from_disk(network_filename) flow, layers, benchmark, Network = network_read.load_obj_from_cache(None, "", network_base_filename, verbose=True) print("Done loading network: " + Network.name) print(flow) iTrain, sTrain = network_read.load_obj_from_cache(network_hash, network_cache_read_dir, "iTrainsTrainData", verbose=True) print("Done loading iTrain sTrain data: " + sTrain.name) block_size = sTrain.block_size train_mode = iTrain.train_mode print("Train mode is:", train_mode) subimages_train = network_read.load_array_from_cache(network_hash, network_cache_read_dir, "TrainData", verbose=True) print("Done loading subimages_train: ", subimages_train.shape) sl_seq_training = network_read.load_array_from_cache(network_hash, network_cache_read_dir, "SLSeqData", verbose=True) print("Done loading sl_seq_training: ", sl_seq_training.shape) else: print("Generating Network...") # Usually the option below is true for voidNetwork1L, but might be also activated for other networks use_full_sl_output = False # Network.patch_network_for_RGB = True and False # Expand network output dimensions in case the original data is RGB or HOG02 features if (sTrain.convert_format == "RGB" or sTrain.convert_format == "HOG02") and Parameters.patch_network_for_RGB: if sTrain.convert_format == "RGB": # factors = [3, 2, 1.5] factors = [2, 1.7, 1.5] print("Unusual condition detected!", Network.patch_network_for_RGB) quit() elif sTrain.convert_format == "HOG02": factors = [8, 4, 2] else: er = "unknown conversion factor in network correction for in_channel_dim" raise Exception(er) for i, layer in enumerate(Network.layers[0:3]): # L0, Network.L1, Network.L2)): factor = factors[i] if 
layer is not None: if layer.pca_out_dim is not None and layer.pca_out_dim >= 1: layer.pca_out_dim = int(factor * layer.pca_out_dim) if layer.red_out_dim is not None and layer.red_out_dim >= 1: layer.red_out_dim = int(factor * layer.red_out_dim) # What about ord? It usually keeps the same dimensionality, thus it is not specified here if layer.sfa_out_dim is not None and layer.sfa_out_dim >= 1: layer.sfa_out_dim = int(factor * layer.sfa_out_dim) print("testing...") # NOTE: trim_network_layers is obsolete, one must manually chose the proper network and data version # WARNING: The code below is deprecated and will be removed soon # Possibly skip some of the last layers of the network if the data resolution has been artificially reduced skip_layers = 0 trim_network_layers = False if trim_network_layers: if (hack_image_size == 8) and enable_hack_image_size: Network.L3 = None Network.L4 = None Network.L5 = None Network.L6 = None Network.L7 = None Network.L8 = None Network.L9 = None Network.L10 = None skip_layers = 8 if (hack_image_size == 16) and enable_hack_image_size: Network.L5 = None Network.L6 = None Network.L7 = None Network.L8 = None Network.L9 = None Network.L10 = None skip_layers = 6 if (hack_image_size == 32) and enable_hack_image_size: Network.L7 = None Network.L8 = None Network.L9 = None Network.L10 = None skip_layers = 4 if (hack_image_size == 64 or hack_image_size == 72 or hack_image_size == 80 or hack_image_size == 95 or hack_image_size == 96) and enable_hack_image_size: Network.L9 = None Network.L10 = None skip_layers = 2 for l in Network.layers: print(l) print("SL=", skip_layers) if skip_layers > 0: for i, layer in enumerate(Network.layers): if i + skip_layers < len(Network.layers) and (layer is not None) and (Network.layers[i + skip_layers] is not None): if layer.pca_node_class == mdp.nodes.SFANode: print("FIX PCA%d" % i) if Network.layers[i + skip_layers].pca_node_class == mdp.nodes.SFANode: if "sfa_expo" in Network.layers[i + skip_layers].pca_args: 
layer.pca_args["sfa_expo"] = Network.layers[i + skip_layers].pca_args["sfa_expo"] if "pca_expo" in Network.layers[i + skip_layers].pca_args: layer.pca_args["pca_expo"] = Network.layers[i + skip_layers].pca_args["pca_expo"] else: if "sfa_expo" in Network.layers[i + skip_layers].sfa_args: layer.pca_args["sfa_expo"] = Network.layers[i + skip_layers].sfa_args["sfa_expo"] if "pca_expo" in Network.layers[i + skip_layers].sfa_args: layer.pca_args["pca_expo"] = Network.layers[i + skip_layers].sfa_args["pca_expo"] if layer.ord_node_class == mdp.nodes.SFANode: if "sfa_expo" in Network.layers[i + skip_layers].ord_args: layer.ord_args["sfa_expo"] = Network.layers[i + skip_layers].ord_args["sfa_expo"] if "pca_expo" in Network.layers[i + skip_layers].ord_args: layer.ord_args["pca_expo"] = Network.layers[i + skip_layers].ord_args["pca_expo"] if layer.red_node_class == mdp.nodes.SFANode: layer.red_args["sfa_expo"] = Network.layers[i + skip_layers].red_args["sfa_expo"] layer.red_args["pca_expo"] = Network.layers[i + skip_layers].red_args["pca_expo"] if layer.sfa_node_class == mdp.nodes.SFANode: print("FixSFA %d" % i) if "sfa_expo" in Network.layers[i + skip_layers].sfa_args: layer.sfa_args["sfa_expo"] = Network.layers[i + skip_layers].sfa_args["sfa_expo"] if "pca_expo" in Network.layers[i + skip_layers].sfa_args: layer.sfa_args["pca_expo"] = Network.layers[i + skip_layers].sfa_args["pca_expo"] if skip_layers > 0: Network.layers = Network.layers[:-skip_layers] print("sfa_expo and pca_expo across the network:") for i, layer in enumerate(Network.layers): if "sfa_expo" in Network.layers[i].pca_args: print("pca_args[%d].sfa_expo=" % i, Network.layers[i].pca_args["sfa_expo"]) if "pca_expo" in Network.layers[i].pca_args: print("pca_args[%d].pca_expo=" % i, Network.layers[i].pca_args["pca_expo"]) if "sfa_expo" in Network.layers[i].sfa_args: print("sfa_args[%d].sfa_expo=" % i, Network.layers[i].sfa_args["sfa_expo"]) if "pca_expo" in Network.layers[i].sfa_args: print("sfa_args[%d].pca_expo=" 
% i, Network.layers[i].sfa_args["pca_expo"]) if make_last_PCA_node_whithening and (hack_image_size == 16) and enable_hack_image_size and (Network.L4 is not None): if Network.L4.sfa_node_class == mdp.nodes.PCANode: Network.L4.sfa_node_class = mdp.nodes.WhiteningNode Network.L4.sfa_out_dim = 50 if make_last_PCA_node_whithening and (hack_image_size == 32) and enable_hack_image_size and (Network.L6 is not None): if Network.L6.sfa_node_class == mdp.nodes.PCANode: Network.L6.sfa_node_class = mdp.nodes.WhiteningNode Network.L6.sfa_out_dim = 100 if make_last_PCA_node_whithening and (hack_image_size == 64 or hack_image_size == 80) and \ enable_hack_image_size and Network.L8 is not None: if Network.L8.sfa_node_class == mdp.nodes.PCANode: Network.L8.sfa_node_class = mdp.nodes.WhiteningNode load_subimages_train_signal_from_cache = True enable_select_train_signal = True subimages_train_signal_in_cache = False if signal_cache_read and load_subimages_train_signal_from_cache and False: print("Looking for subimages_train in cache...") info_beginning_filename = "subimages_info" subimages_info_filenames = cache.find_filenames_beginning_with(network_cache_read_dir, info_beginning_filename, recursion=False, extension=".pckl") print("The following possible training sequences were found:") if len(subimages_info_filenames) > 0: for i, info_filename in enumerate(subimages_info_filenames): info_base_filename = string.split(info_filename, sep=".")[0] # Remove extension (iTrainInfo, sTrainInfo) = subimages_info = signal_cache_read.load_obj_from_cache(base_dir="/", base_filename=info_base_filename, verbose=True) print("%d: %s, with %d images of width=%d, height=%d" % (i, iTrainInfo.name, iTrainInfo.num_images, sTrainInfo.subimage_width, sTrainInfo.subimage_height)) if enable_select_train_signal: selected_train_sequence = int(input("Please select a training sequence (-1=Reload new data):")) else: selected_train_sequence = 0 print("Training sequence %d was selected" % selected_train_sequence) if 
selected_train_sequence >= 0: info_filename = subimages_info_filenames[selected_train_sequence] info_base_filename = string.split(info_filename, sep=".")[0] # Remove extension (iTrain_set, sTrain_set) = signal_cache_read.load_obj_from_cache(base_dir="/", base_filename=info_base_filename, verbose=True) iTrain = take_0_k_th_from_2D_list(iTrain_set, dataset_for_display_train) sTrain = take_0_k_th_from_2D_list(sTrain_set, dataset_for_display_train) signal_base_filename = string.replace(info_base_filename, "subimages_info", "subimages_train") if signal_cache_read.is_splitted_file_in_filesystem(base_dir="/", base_filename=signal_base_filename): print("Subimages train signal found in cache...") subimages_train = signal_cache_read.load_array_from_cache(base_dir="/", base_filename=signal_base_filename, verbose=True) subimages_train_signal_in_cache = True print("Subimages train signal loaded from cache with shape: ", subimages_train.shape) if signal_cache_write: subimages_train_hash = cache.hash_object(subimages_train).hexdigest() else: print("Subimages training signal UNEXPECTEDLY NOT FOUND in cache:", signal_base_filename) quit() # Conversion from sSeq to data_sets (array or function), param_sets # Actually train_func_sets train_data_sets, train_params_sets = convert_sSeq_to_funcs_params_sets(seq_sets, verbose=False) if load_and_append_output_features_dir is not None: training_data_hash = cache.hash_object((iTrain, sTrain)).hexdigest() training_data_hash = "0" print("loading output features (training data) from dir: ", load_and_append_output_features_dir, "and hash:", training_data_hash) additional_features_training = cache.unpickle_array(base_dir=load_and_append_output_features_dir, base_filename="output_features_training_TrainingD" + training_data_hash) additional_features_training = 100000 * \ additional_features_training[:, 0:num_features_to_append_to_input] + \ 10000.0 * numpy.random.normal(size=(iTrain.num_images, num_features_to_append_to_input)) train_data_sets = 
# --- Training-data preparation: append precomputed features, expand the dataset/parameter
# --- specification into per-layer sets, and materialize the training images for display.
system_parameters.expand_dataset_with_additional_features(train_data_sets, additional_features_training)
print("now building network")
train_data_sets, train_params_sets = network_builder.expand_iSeq_sSeq_Layer_to_Network(train_data_sets,
                                                                                       train_params_sets, Network)
print("train_params_sets=", train_params_sets)
print("dataset_for_display_train=", dataset_for_display_train)
print("calling take_first_02D")
params_node = take_0_k_th_from_2D_list(train_params_sets, k=dataset_for_display_train)
block_size = params_node["block_size"]
train_mode = params_node["train_mode"]
print("calling take_first_02D again")
train_func = take_0_k_th_from_2D_list(train_data_sets, k=dataset_for_display_train)
print("train_func=", train_func)
if coherent_seeds:
    # Fixed seed so the data-loading function generates reproducible samples/noise
    numpy.random.seed(experiment_seed + 222222)
subimages_train = train_func()  # TODO: Here add pre computed features!!!??? Or do this during experiment definition???
print("subimages_train[0,0]=%0.40f" % subimages_train[0, 0])

# Avoid double extraction of data from files: replace every reference to the loading
# function that was just called with the already-loaded array.
if isinstance(train_data_sets, list) and len(train_data_sets) >= 1:
    if isinstance(train_data_sets[0], list) and len(train_data_sets[0]) >= 1 and len(
            train_data_sets[0]) > dataset_for_display_train:
        print("Correcting double loading")
        func = train_data_sets[0][dataset_for_display_train]
        print("substituting func=", func, "for loaded data")
        for i in range(len(train_data_sets)):
            for j in range(len(train_data_sets[i])):
                print("train_data_sets[%d][%d]=" % (i, j), train_data_sets[i][j])
                if train_data_sets[i][j] is func:
                    print("Correction done")
                    # fdssf
                    train_data_sets[i][j] = subimages_train

# TODO: Support train signal cache for generalized training
# NOTE(review): the trailing "and False" deliberately disables this branch — dead code kept as-is.
if signal_cache_write and (subimages_train_signal_in_cache is False) and False:
    print("Caching Train Signal...")
    subimages_ndim = subimages_train.shape[1]
    subimages_time = str(int(time.time()))
    iTrain_hash = cache.hash_object(iTrain_sets).hexdigest()
    sTrain_hash = cache.hash_object(sTrain_sets).hexdigest()
    subimages_base_filename = "subimages_train_%s_%s_%s_%s" % (
        (subimages_ndim, subimages_time, iTrain_hash, sTrain_hash))
    subimages_train_hash = signal_cache_write.update_cache(subimages_train, base_filename=subimages_base_filename,
                                                           overwrite=True, verbose=True)
    subimages_info = (iTrain, sTrain)
    subimages_info_filename = "subimages_info_%s_%s_%s_%s" % \
                              (subimages_ndim, subimages_time, iTrain_hash, sTrain_hash)
    subimages_info_hash = signal_cache_write.update_cache(subimages_info, base_filename=subimages_info_filename,
                                                          overwrite=True, verbose=True)

t1 = time.time()
print(seq.num_images, "Training Images loaded in %0.3f s" % ((t1 - t0)))
# benchmark.append(("Load Info and Training Images", t1-t0))

# --- Optionally dump (a decimated subset of) the training images to disk for inspection.
save_images_training_base_dir = "/local/tmp/escalafl/Alberto/saved_images_training"
if save_subimages_training:
    print("saving images to directory:", save_images_training_base_dir)
    decimate = 1  # 10
    for i, x in enumerate(subimages_train):
        if i % decimate == 0:
            # Reshape the flat sample back into an image; mode depends on the sequence format
            if seq.convert_format == "L":
                im_raw = numpy.reshape(x, (seq.subimage_width, seq.subimage_height))
                im = scipy.misc.toimage(im_raw, mode=seq.convert_format)
            elif seq.convert_format == "RGB":
                im_raw = numpy.reshape(x, (seq.subimage_width, seq.subimage_height, 3))
                im = scipy.misc.toimage(im_raw, mode=seq.convert_format)
            else:
                im_raw = numpy.reshape(x, (seq.subimage_width, seq.subimage_height))
                im = scipy.misc.toimage(im_raw, mode="L")
            # Encode ground truth (class or label) into the filename when requested
            if save_images_training_supplementary_info is None:
                filename = "image%05d.png" % (i // decimate)
                # quit()
            elif save_images_training_supplementary_info == "Class":
                filename = "image%05d_gt%05d.png" % (i // decimate, iTrain.correct_classes[i])
            elif save_images_training_supplementary_info == "Label":
                filename = "image%05d_gt%05.5f.png" % (i // decimate, iTrain.correct_labels[i])
                # quit()
            else:
                er = "Incorrect value of save_images_training_supplementary_info:" + str(
                    save_images_training_supplementary_info)
                raise Exception(er)
            fullname = os.path.join(save_images_training_base_dir, filename)
            im.save(fullname)
    # print "done, finishing"
    # quit()

# --- Optionally save the pixel-wise average of the training images.
if save_average_subimage_training:
    average_subimage_training = subimages_train.mean(axis=0)
    if seq.convert_format == "L":
        average_subimage_training = average_subimage_training.reshape(sTrain.subimage_height, sTrain.subimage_width)
    elif seq.convert_format == "RGB":
        average_subimage_training = average_subimage_training.reshape(sTrain.subimage_height, sTrain.subimage_width, 3)
    else:
        average_subimage_training = average_subimage_training.reshape(sTrain.subimage_height, sTrain.subimage_width)
    print("average_subimage_training.shape=", average_subimage_training.shape, "seq.convert_format=",
          seq.convert_format)
    average_subimage_training_I = scipy.misc.toimage(average_subimage_training, mode=seq.convert_format)
    average_subimage_training_I.save("average_image_trainingRGB.jpg", mode=seq.convert_format)
    # print "done, finishing"
    # quit()

print("******************************************")
print("Creating hierarchy through network_builder")
print("******************************************")

# TODO: more primitive but potentially powerful flow specification here should be possible
flow, layers, benchmark = network_builder.create_network(Network, sTrain.subimage_width, sTrain.subimage_height,
                                                         benchmark=benchmark, in_channel_dim=in_channel_dim,
                                                         num_features_appended_to_input = \
                                                             num_features_to_append_to_input, verbose=True)

print("Making sure the first switchboard does not add any noise (noise added during image loading)")
if isinstance(flow[0], mdp.nodes.PInvSwitchboard):
    flow[0].additive_noise_std = 0.0

# For display purposes we alter here the image shape artificially...
# TODO: Improve this logic overall...
# shape should be really the shape, and in_channel_dim should be used
print(subimage_shape)
if in_channel_dim in [1, 3]:
    subimage_shape = subimage_shape
else:
    # Fold the extra channels into the width so display code can treat it as a 2D image
    print("Patching subimage_shape for display purposes")
    subimage_shape = (subimage_shape[0], subimage_shape[1] * in_channel_dim)

# add_normalization_node = True
if add_normalization_node:
    normalization_node = mdp.nodes.NormalizeNode()
    flow += normalization_node
print("flow=", flow)
print(len(flow))
for node in flow:
    print("Node: ", node, "out_dim=", node.output_dim, "input_dim", node.input_dim)
# quit()

print("*****************************")
print("Training hierarchy ...")
print("*****************************")

subimages_p = subimages = subimages_train

# DEFINE TRAINING TYPE. SET ONE OF THE FOLLOWING VARIABLES TO TRUE
# Either use special (most debugged and efficient) or storage_iterator (saves memory)
special_training = True
iterator_training = False
storage_iterator_training = False

if special_training is True:
    ttrain0 = time.time()
    # Think: maybe training_signal cache can become unnecessary if flow.train is intelligent enough to look
    # for the signal in the cache without loading it?
    # Use same seed as before for data loading... hope the results are the same. Nothing should be done before
    # data generation to ensure this!!!
    if coherent_seeds:
        numpy.random.seed(experiment_seed + 222222)
    # TODO: f train_data_sets is func() or [[func()]], use instead loaded images!!!!
    # Specialized training path: trains layer by layer with caching and (optional) parallel scheduling
    sl_seq = sl_seq_training = flow.special_train_cache_scheduler_sets(train_data_sets, params_sets=train_params_sets,
                                                                      verbose=True, very_verbose=True,
                                                                      benchmark=benchmark,
                                                                      node_cache_read=node_cache_read,
                                                                      signal_cache_read=signal_cache_read,
                                                                      node_cache_write=node_cache_write,
                                                                      signal_cache_write=signal_cache_write,
                                                                      scheduler=scheduler, n_parallel=n_parallel,
                                                                      immediate_stop_training=True)
    print("sl_seq is", sl_seq)
    print("sl_seq.shape is", sl_seq.shape)
    ttrain1 = time.time()
    print("Network trained (specialized way) in time %0.3f s" % (ttrain1 - ttrain0))
    if benchmark is not None:
        benchmark.append(("Network training (specialized way)", ttrain1 - ttrain0))
else:
    # Plain MDP training path
    ttrain0 = time.time()
    flow.train(subimages_p)
    y = flow.execute(subimages_p[0:1])  # stop training
    sl_seq = sl_seq_training = flow.execute(subimages_p)
    ttrain1 = time.time()
    print("Network trained (MDP way) in time %0.3f s" % (ttrain1 - ttrain0))
    if benchmark is not None:
        benchmark.append(("Network training (MDP way)", ttrain1 - ttrain0))

# --- Fix the sign convention of the slow features of the last SFA node so plots look consistent.
nodes_in_flow = len(flow)
last_sfa_node = flow[nodes_in_flow - 1]
if isinstance(last_sfa_node, mdp.hinet.CloneLayer) or \
        isinstance(last_sfa_node, mdp.hinet.Layer):
    last_sfa_node = last_sfa_node.nodes[0]

if isinstance(last_sfa_node, mdp.nodes.SFANode):
    # Choose the sign so that the first slow feature roughly follows the label ordering
    if iTrain.correct_labels[0:10].mean() <= iTrain.correct_labels[-10:].mean():
        start_negative = True
    else:
        start_negative = False
    sl_seq = sl_seq_training = more_nodes.sfa_pretty_coefficients(last_sfa_node, sl_seq_training,
                                                                  start_negative=start_negative)
else:
    print("SFA coefficients not made pretty, last node was not SFA!!!")

print("Since training is finished, making sure the switchboards do not add any noise from now on")
for node in flow:
    if isinstance(node, mdp.nodes.PInvSwitchboard):
        node.additive_noise_std = 0.0

print("Executing for display purposes (subimages_train)...")
# This signal is computed for display purposes (it ignores the output of training, which might have noise)
sl_seq = sl_seq_training = flow.execute(subimages_train)

network_hash = str(int(time.time()))
# network_filename = "Network_" + network_hash + ".pckl"

# --- Persist the trained network, its components and the training data/signals.
if network_write:
    print("Saving flow, layers, benchmark, Network ...")
    # update cache is not adding the hash to the filename,so we add it manually
    network_write.update_cache(flow, None, network_cache_write_dir, "JustFlow" + sTrain.name + "_" + network_hash,
                               overwrite=True, use_hash=network_hash, verbose=True)
    network_write.update_cache(layers, None, network_cache_write_dir, "JustLayers" + sTrain.name + "_" + network_hash,
                               overwrite=True, use_hash=network_hash, verbose=True)
    network_write.update_cache(benchmark, None, network_cache_write_dir,
                               "JustBenchmark" + sTrain.name + "_" + network_hash,
                               overwrite=True, use_hash=network_hash, verbose=True)
    network_write.update_cache(Network, None, network_cache_write_dir, "JustNetwork" + sTrain.name + "_" + network_hash,
                               overwrite=True, use_hash=network_hash, verbose=True)
    network_write.update_cache([flow, layers, benchmark, Network], None, network_cache_write_dir,
                               "Network" + Network.name + "_ParName" + Parameters.name + "_" + network_hash,
                               overwrite=True, use_hash=network_hash, verbose=True)
    network_write.update_cache([iTrain, sTrain], None, network_cache_write_dir,
                               "iTrainsTrainData" + "_" + network_hash,
                               overwrite=True, use_hash=network_hash, verbose=True)
    network_write.update_cache(subimages_train, None, network_cache_write_dir, "TrainData" + "_" + network_hash,
                               overwrite=True, use_hash=network_hash, verbose=True)
    network_write.update_cache(sl_seq_training, None, network_cache_write_dir, "SLSeqData" + "_" + network_hash,
                               overwrite=True, use_hash=network_hash, verbose=True)

if signal_cache_write:
    print("Caching sl_seq_training Signal... (however, it's never read!)")
    signal_ndim = sl_seq_training.shape[1]
    signal_time = str(int(time.time()))
    flow_hash = cache.hash_object(flow).hexdigest()
    # TODO: Add subimages_train_hash to signal filename.
    # Compute according to first data/parameter sets
    signal_base_filename = "sfa_signal_ndim%s_time%s_flow%s" % ((signal_ndim, signal_time, flow_hash))
    signal_cache_write.update_cache(sl_seq_training, base_filename=signal_base_filename, overwrite=True, verbose=True)

if save_output_features_dir is not None:
    print("saving output features (training data)")
    training_data_hash = cache.hash_object((iTrain, sTrain)).hexdigest()
    cache.pickle_array(sl_seq_training, base_dir=save_output_features_dir,
                       base_filename="output_features_training_TrainingD" + training_data_hash, overwrite=True,
                       verbose=True)

print("min val: ", sl_seq_training.min(axis=0), "\nmax val: ", sl_seq_training.max(axis=0))

# Cutoff for final network output:
#   > 0.0  -> symmetric fixed cutoff
#   == 0.0 -> effectively no cutoff (huge sentinel bounds)
#   < 0.0  -> adaptive per-feature cutoff taken from the training signal range
if feature_cut_off_level > 0.0:
    min_cutoff = -feature_cut_off_level
    max_cutoff = feature_cut_off_level
elif feature_cut_off_level == 0.0:
    min_cutoff = -1.0e300  # usually large enough for all datasets
    max_cutoff = 1.0e300  # usually small enough for all datasets
else:  # Adaptive cutoff computed from training data
    print("using adaptive cutoffs from the training data")
    min_cutoff = sl_seq_training.min(axis=0)
    max_cutoff = sl_seq_training.max(axis=0)
print("cutoff levels:", min_cutoff, max_cutoff)

if feature_cut_off_level != 0.0:
    sl_seq = sl_seq_training = numpy.clip(sl_seq_training, min_cutoff, max_cutoff)
    print("After cutoff: min val: ", sl_seq_training.min(axis=0), "\nmax val: ", sl_seq_training.max(axis=0))

if transfer_learning_feature_normalization:
    sl_seq_training_mean = sl_seq_training.mean(axis=0)
    sl_seq_training_std = sl_seq_training.std(axis=0)
    sl_seq_training = sfa_libs.normalize_mean_std(sl_seq_training, sl_seq_training_mean, sl_seq_training_std)
    print("Removed mean and data of training data (transfer learning)")

num_pixels_per_image = numpy.ones(subimage_shape, dtype=int).sum()

# --- Select the objective column when ground truth has several label/class columns.
print("taking into account objective_label=%d" % objective_label)
if len(iTrain.correct_labels.shape) == 2:
    print("correction...")
    correct_objective_labels_training = iTrain.correct_labels[:, objective_label].flatten()
    correct_objective_labels_seenid = Parameters.iSeenid.correct_labels[:, objective_label].flatten()
    correct_objective_labels_newid = Parameters.iNewid[0][0].correct_labels[:, objective_label].flatten()
    correct_objective_classes_training = iTrain.correct_classes[:, objective_label].flatten()
    correct_objective_classes_seenid = Parameters.iSeenid.correct_classes[:, objective_label].flatten()
    correct_objective_classes_newid = Parameters.iNewid[0][0].correct_classes[:, objective_label].flatten()
else:
    correct_objective_labels_training = iTrain.correct_labels
    correct_objective_labels_seenid = Parameters.iSeenid.correct_labels
    correct_objective_labels_newid = Parameters.iNewid[0][0].correct_labels
    correct_objective_classes_training = iTrain.correct_classes
    correct_objective_classes_seenid = Parameters.iSeenid.correct_classes
    correct_objective_classes_newid = Parameters.iNewid[0][0].correct_classes
print("correct_objective_classes_training=", correct_objective_classes_training)
print("correct_objective_labels_training=", correct_objective_labels_training)
print("correct_objective_classes_newid", correct_objective_classes_newid)
print("correct_objective_labels_newid", correct_objective_labels_newid)
print("correct_objective_classes_seenid=", correct_objective_classes_seenid)
print("correct_objective_labels_seenid=", correct_objective_labels_seenid)

if coherent_seeds:
    numpy.random.seed(experiment_seed + 333333)

subimages_p = subimages = subimages_train
# Drop the first skip_num_signals features from the network output
sl_seq_training = sl_seq_training[:, skip_num_signals:]
sl_seq = sl_seq_training
print("subimages_train[0,0]=%0.40f" % subimages_train[0, 0])

print("Done creating / training / loading network")
y = flow.execute(subimages_p[0:1])
print(y.shape)
more_nodes.describe_flow(flow)
more_nodes.display_eigenvalues(flow, mode="Average")  # mode="FirstNodeInLayer", "Average", "All"

hierarchy_out_dim = y.shape[1] - skip_num_signals
print("hierarchy_out_dim (real output data) =", hierarchy_out_dim)
print("last_node_out_dim=", flow[-1].output_dim)
if isinstance(flow[-1], (mdp.hinet.Layer, mdp.hinet.CloneLayer)):
    print("last_Layer_node_out_dim=", flow[-1][0].output_dim)
    print("last node of network is a layer! is this a mistake?")

# Sanity check: the displayed output dimensionality must match the flow's declared output
if hierarchy_out_dim != flow[-1].output_dim:
    print("error!!! hierarchy_out_dim != flow[-1].output_dim")
    print("Perhaps caused by enable_reduced_image_sizes=True or enable_hack_image_size=True?")
    quit()

# --- Collect experiment metadata and quality measures into the results object.
results = system_parameters.ExperimentResult()
results.name = Parameters.name
results.network_name = Network.name
results.layers_name = []
for lay in layers:
    results.layers_name.append(lay.name)
results.iTrain = iTrain
results.sTrain = sTrain
results.iSeenid = iSeenid
results.sSeenid = sSeenid
results.iNewid = iNewid
results.sNewid = sNewid

print("Computing typical delta, eta values for Train SFA Signal")
t_delta_eta0 = time.time()
(results.typical_delta_train, results.typical_eta_train) = sfa_libs.comp_typical_delta_eta(sl_seq_training, block_size,
                                                                                           num_reps=200,
                                                                                           training_mode=iTrain.train_mode)
results.brute_delta_train = sfa_libs.comp_delta_normalized(sl_seq_training)
results.brute_eta_train = sfa_libs.comp_eta(sl_seq_training)
t_delta_eta1 = time.time()
print("typical_delta_train=", results.typical_delta_train)
print("typical_delta_train[0:31].sum()=", results.typical_delta_train[0:31].sum())
print("computed delta/eta in %0.3f ms" % ((t_delta_eta1 - t_delta_eta0) * 1000.0))
if benchmark is not None:
    benchmark.append(("Computation of delta, eta values for Train SFA Signal", t_delta_eta1 - t_delta_eta0))

print("Setting correct classes and labels for the Classifier/Regression, Train SFA Signal")
correct_classes_training = correct_objective_classes_training
print("correct_classes_training=", correct_classes_training)
correct_labels_training = correct_objective_labels_training
if convert_labels_days_to_years:
    correct_labels_training = correct_labels_training / DAYS_IN_A_YEAR

if integer_label_estimation:
    correct_labels_training = (correct_labels_training + 0.0006).astype(
        int) * 1.0  # This must still have float type, otherwise the MSE computation is erroneous!

# --- Load the "seen id" test set (same identities as training, different samples).
print("Loading test images, seen ids...")
t_load_images0 = time.time()
print("LOADING KNOWNID TEST INFORMATION")
image_files_seenid = iSeenid.input_files
num_images_seenid = iSeenid.num_images
block_size_seenid = iSeenid.block_size
seq = sSeenid

if coherent_seeds:
    numpy.random.seed(experiment_seed + 444444)

if seq.input_files == "LoadBinaryData00":
    subimages_seenid = load_natural_data(seq.data_base_dir, seq.base_filename, seq.samples, verbose=False)
elif seq.input_files == "LoadRawData":
    subimages_seenid = load_raw_data(seq.data_base_dir, seq.base_filename, input_dim=seq.input_dim, dtype=seq.dtype,
                                     select_samples=seq.samples, verbose=False)
else:
    subimages_seenid = seq.load_data(seq)

if load_and_append_output_features_dir is not None:
    seenid_data_hash = cache.hash_object((iSeenid, sSeenid)).hexdigest()
    # NOTE(review): the computed hash is immediately overwritten with "0" — looks like a debugging
    # shortcut left in; confirm before relying on the cached filename.
    seenid_data_hash = "0"
    print("loading output features (seenid data) from dir: ", load_and_append_output_features_dir, "and hash:",
          seenid_data_hash)
    additional_features_seenid = cache.unpickle_array(base_dir=load_and_append_output_features_dir,
                                                      base_filename="output_features_training_SeenidD" + seenid_data_hash)
    additional_features_seenid = 100000 * additional_features_seenid[:, 0:num_features_to_append_to_input] + \
                                 0.0 * numpy.random.normal(size=(iSeenid.num_images, num_features_to_append_to_input))
    print(additional_features_seenid.shape)
    print(subimages_seenid.shape)
    subimages_seenid = numpy.concatenate((subimages_seenid, additional_features_seenid), axis=1)

t_load_images1 = time.time()
print(num_images_seenid, " Images loaded in %0.3f s" % (t_load_images1 - t_load_images0))

# --- Execute the trained network over the seen-id test set and post-process the output.
t_exec0 = time.time()
print("Execution over known id testing set...")
print("Input Signal: Known Id test images")
sl_seq_seenid = flow.execute(subimages_seenid)
sl_seq_seenid = sl_seq_seenid[:, skip_num_signals:]
print("Knownid min value: ", sl_seq_seenid.min(axis=0))
print("Knownid max value: ", sl_seq_seenid.max(axis=0))
sl_seq_seenid = numpy.nan_to_num(sl_seq_seenid)
if feature_cut_off_level != 0.0:
    print("before cutoff sl_seq_seenid= ", sl_seq_seenid)
    sl_seq_seenid = numpy.clip(sl_seq_seenid, min_cutoff, max_cutoff)
    print("After cutoff, Knownid min value: ", sl_seq_seenid.min(axis=0))
    print("After cutoff, Knowind max value: ", sl_seq_seenid.max(axis=0))

sl_seq_training_min = sl_seq_training.min(axis=0)
sl_seq_training_max = sl_seq_training.max(axis=0)
if clip_seenid_newid_to_training:
    print("clipping sl_seq_seenid")
    sl_seq_seenid_min = sl_seq_seenid.min(axis=0)
    sl_seq_seenid_max = sl_seq_seenid.max(axis=0)
    print("sl_seq_training_min=", sl_seq_training_min)
    print("sl_seq_training_max=", sl_seq_training_max)
    print("sl_seq_seenid_min=", sl_seq_seenid_min)
    print("sl_seq_seenid_max=", sl_seq_seenid_max)
    sl_seq_seenid = numpy.clip(sl_seq_seenid, sl_seq_training_min, sl_seq_training_max)

if add_noise_to_seenid:  # Using uniform noise due to its speed over normal noise
    noise_amplitude = (3 ** 0.5) * 0.5  # standard deviation 0.00005
    print("adding noise to sl_seq_seenid, with noise_amplitude:", noise_amplitude)
    sl_seq_seenid += noise_amplitude * numpy.random.uniform(-1.0, 1.0, size=sl_seq_seenid.shape)

if transfer_learning_feature_normalization:
    sl_seq_seenid_mean = sl_seq_seenid.mean(axis=0)
    sl_seq_seenid_std = sl_seq_seenid.std(axis=0)
    sl_seq_seenid = sfa_libs.normalize_mean_std(sl_seq_seenid, sl_seq_seenid_mean, sl_seq_seenid_std)
    print("Removed mean and data of seenid data (transfer learning)")

t_exec1 = time.time()
print("Execution over Known Id in %0.3f s" % (t_exec1 - t_exec0))

if network_write:
    network_write.update_cache([iSeenid, sSeenid], None, network_cache_write_dir,
                               "iSeenidsSeenidData" + "_" + network_hash,
                               overwrite=True, use_hash=network_hash, verbose=True)
    network_write.update_cache(subimages_seenid, None, network_cache_write_dir, "SeenidData" + "_" + network_hash,
                               overwrite=True,
                               use_hash=network_hash, verbose=True)
    network_write.update_cache(sl_seq_seenid, None, network_cache_write_dir, "SLSeenid" + "_" + network_hash,
                               overwrite=True, use_hash=network_hash, verbose=True)

if save_output_features_dir is not None:
    print("saving output features (seenid data)")
    seenid_data_hash = cache.hash_object((iSeenid, sSeenid)).hexdigest()
    cache.pickle_array(sl_seq_seenid, base_dir=save_output_features_dir,
                       base_filename="output_features_training_SeenidD" + seenid_data_hash, overwrite=True,
                       verbose=True)

print("Computing typical delta, eta values for Seen Id SFA Signal")
t_delta_eta0 = time.time()
(results.typical_delta_seenid, results.typical_eta_seenid) = sfa_libs.comp_typical_delta_eta(sl_seq_seenid,
                                                                                             iSeenid.block_size,
                                                                                             num_reps=200,
                                                                                             training_mode=iSeenid.train_mode)
print("sl_seq_seenid=", sl_seq_seenid)
results.brute_delta_seenid = sfa_libs.comp_delta_normalized(sl_seq_seenid)
results.brute_eta_seenid = sfa_libs.comp_eta(sl_seq_seenid)
t_delta_eta1 = time.time()
print("typical_delta_seenid=", results.typical_delta_seenid)
print("typical_delta_seenid[0:31].sum()=", results.typical_delta_seenid[0:31].sum())
print("typical_eta_seenid=", results.typical_eta_seenid)
print("brute_delta_seenid=", results.brute_delta_seenid)
print("computed delta/eta in %0.3f ms" % ((t_delta_eta1 - t_delta_eta0) * 1000.0))

print("Setting correct labels/classes data for seenid")
correct_classes_seenid = correct_objective_classes_seenid
correct_labels_seenid = correct_objective_labels_seenid
correct_labels_seenid_real = correct_labels_seenid.copy()
if convert_labels_days_to_years:
    correct_labels_seenid_real = correct_labels_seenid_real / DAYS_IN_A_YEAR
    # NOTE(review): in-place /= also mutates correct_objective_labels_seenid (same array object),
    # unlike the non-in-place division used for the training labels above — confirm this aliasing
    # is intended before changing it.
    correct_labels_seenid /= DAYS_IN_A_YEAR
if integer_label_estimation:
    correct_labels_seenid = (correct_labels_seenid + 0.0006).astype(int)*1.0

# t8 = time.time()
# --- Classifier/regressor setup: the seen-id signal is used as the supervised training set.
t_classifier_train0 = time.time()
print("*** Training Classifier/Regression")
# W
if use_full_sl_output or reg_num_signals == 0:
    results.reg_num_signals = reg_num_signals = sl_seq_training.shape[1]
# else:
#     results.reg_num_signals = reg_num_signals = 3  #42

# cf_sl = sl_seq_training
# cf_num_samples = cf_sl.shape[0]
# cf_correct_labels = correct_labels_training
# cf_correct_classes = correct_objective_classes_training
# cf_spacing = cf_block_size = iTrain.block_size

cf_sl = sl_seq_seenid
cf_num_samples = cf_sl.shape[0]
cf_correct_labels = correct_labels_seenid_real
cf_correct_classes = correct_objective_classes_seenid
cf_spacing = cf_block_size = iSeenid.block_size

all_classes = numpy.unique(cf_correct_classes)
avg_labels = more_nodes.compute_average_labels_for_each_class(cf_correct_classes, cf_correct_labels)

# Enable each classifier/regressor only when the feature count is small enough for it to be tractable
if reg_num_signals <= 128 and (Parameters.analysis is not False) and enable_NCC:
    enable_ncc_cfr = True
else:
    enable_ncc_cfr = False

if reg_num_signals <= 128 and (Parameters.analysis is not False) and enable_GC:
    enable_ccc_Gauss_cfr = True
    enable_gc_cfr = True
else:
    enable_ccc_Gauss_cfr = False
    enable_gc_cfr = False

if reg_num_signals <= 64 and (Parameters.analysis is not False) and enable_kNN:
    enable_kNN_cfr = True
else:
    enable_kNN_cfr = False

if reg_num_signals <= 120 and (Parameters.analysis is not False) and enable_svm:  # and False:
    enable_svm_cfr = True
else:
    enable_svm_cfr = False

# TODO: do I need this construct also for SVR?
# --- Train the enabled classifiers/regressors (NCC, Gaussian, kNN, SVM, SVR, LR) on cf_sl.
if reg_num_signals <= 8192 and (Parameters.analysis is not False) and enable_lr:
    enable_lr_cfr = True
else:
    enable_lr_cfr = False

if enable_ncc_cfr:
    # Nearest-centroid classifier on the first reg_num_signals features
    print("Training Classifier/Regression NCC")
    ncc_node = mdp.nodes.NearestMeanClassifier()
    ncc_node.train(x=cf_sl[:, 0:reg_num_signals], labels=cf_correct_classes)
    ncc_node.stop_training()

if enable_ccc_Gauss_cfr:
    print("Training Classifier/Regression GC...")
    print("unique labels =", numpy.unique(cf_correct_classes))
    print("len(unique_labels)=", len(numpy.unique(cf_correct_classes)))
    print("cf_sl[0,:]=", cf_sl[0, :])
    print("cf_sl[1,:]=", cf_sl[1, :])
    print("cf_sl[2,:]=", cf_sl[2, :])
    print("cf_sl[3,:]=", cf_sl[3, :])
    print("cf_sl[4,:]=", cf_sl[4, :])
    print("cf_sl[5,:]=", cf_sl[5, :])
    GC_node = mdp.nodes.SFA_GaussianClassifier(reduced_dim=sfa_gc_reduced_dim, verbose=True)
    # Functions for regression use class values!!!
    GC_node.train(x=cf_sl[:, 0:reg_num_signals], labels=cf_correct_classes)
    GC_node.stop_training()
    GC_node.avg_labels = avg_labels
    # # Experimental post-processing of the estimations computed using soft Gaussian classifier
    # regression_Gauss_cf = GC_node.regression(cf_sl[:, 0:reg_num_signals], avg_labels)
    # print("regression_Gauss_cf[0:5]=", regression_Gauss_cf[0:5])
    # expanded_regression = numpy.zeros((regression_Gauss_cf.shape[0], 3))
    # for i in range(3):
    #     expanded_regression[:, i] = (regression_Gauss_cf) ** (i + 1)
    # print("expanded_regression[0:5]=", expanded_regression[0:5])
    #
    # GC_post_processing_node = mdp.nodes.LinearRegressionNode(with_bias=True, use_pinv=True)
    # GC_post_processing_node.train(expanded_regression, cf_correct_labels.reshape((cf_sl.shape[0], 1)))
    # GC_post_processing_node.stop_training()
    # print("Expansion coefficients for post processing", GC_post_processing_node.beta)
    t_classifier_train1 = time.time()
    if benchmark is not None:
        benchmark.append(("Training Classifier/Regression GC", t_classifier_train1 - t_classifier_train0))

t_classifier_train1 = time.time()
if enable_kNN_cfr:
    print("Training Classifier/Regression kNN, for k=%d..." % kNN_k)
    kNN_node = mdp.nodes.KNNClassifier(k=kNN_k)
    kNN_node.train(x=cf_sl[:, 0:reg_num_signals], labels=cf_correct_classes)
    kNN_node.stop_training()
    t_classifier_train1b = time.time()
    if benchmark is not None:
        benchmark.append(("Training Classifier/Regression kNN", t_classifier_train1b - t_classifier_train1))

t_classifier_train1 = time.time()

# Number of label blocks: a scalar block size means uniform blocks, a sequence gives one
# entry per block, and None means every sample is its own block.
# FIX: numpy.float / numpy.int were deprecated in NumPy 1.20 and removed in 1.24; use the
# builtin types plus the numpy abstract scalar bases, which also cover float64.
if cf_block_size is not None:
    if isinstance(cf_block_size, (float, numpy.floating, int, numpy.integer)):
        num_blocks = cf_sl.shape[0] // cf_block_size
    else:
        num_blocks = len(cf_block_size)
else:
    num_blocks = cf_sl.shape[0]

if enable_svm_cfr:
    print("Training SVM...")
    params = {"C": svm_C, "gamma": svm_gamma, "nu": 0.6, "eps": 0.0001}
    svm_node = mdp.nodes.LibSVMClassifier(kernel="RBF", classifier="C_SVC", params=params, probability=True)
    data_mins, data_maxs = svm_compute_range(cf_sl[:, 0:reg_num_signals])
    svm_node.train(svm_scale(cf_sl[:, 0:reg_num_signals], data_mins, data_maxs, svm_min, svm_max), cf_correct_classes)
    # NOTE(review): this gamma default is assigned AFTER train() already consumed params —
    # confirm whether LibSVM's own gamma=0 default was intended during training.
    if svm_gamma == 0:
        svm_gamma = 1.0 / num_blocks
    svm_node.stop_training()

if enable_svr:
    print("Training SVR...")
    data_mins, data_maxs = svm_compute_range(cf_sl[:, 0:reg_num_signals])
    svr = slsvm.SVR(C=svr_C, epsilon=svr_epsilon, gamma=svr_gamma)
    svr.fit(svm_scale(cf_sl[:, 0:reg_num_signals], data_mins, data_maxs, svm_min, svm_max), cf_correct_labels)

if enable_lr_cfr:
    print("Training LR...")
    lr_node = mdp.nodes.LinearRegressionNode(with_bias=True, use_pinv=False)
    lr_node.train(cf_sl[:, 0:reg_num_signals], cf_correct_labels.reshape((cf_sl.shape[0], 1)))
    lr_node.stop_training()

if classifier_write and enable_ccc_Gauss_cfr:
    print("Saving Gaussian Classifier")
    cf_sl_hash = cache.hash_array(cf_sl).hexdigest()
    # update cache is not adding the hash to the filename,so we add it manually
    classifier_filename = "GaussianClassifier_NetName" + Network.name + "iTrainName" + iTrain.name + "_NetH" + \
                          network_hash + "_CFSlowH" + cf_sl_hash + "_NumSig%03d" % reg_num_signals + "_L" + \
                          str(objective_label)
    classifier_write.update_cache(GC_node, None, None, classifier_filename, overwrite=True, verbose=True)

print("Executing/Executed over training set...")
print("Input Signal: Training Data")
subimages_training = subimages
num_images_training = num_images

# --- Apply the trained classifiers/regressors to the training signal.
print("Classification/Regression over training set...")
t_class0 = time.time()
if enable_ncc_cfr:
    print("ncc classify...")
    classes_ncc_training = numpy.array(ncc_node.label(sl_seq_training[:, 0:reg_num_signals]))
    labels_ncc_training = more_nodes.map_class_numbers_to_avg_label(all_classes, avg_labels, classes_ncc_training)
    print(classes_ncc_training)
else:
    classes_ncc_training = labels_ncc_training = numpy.zeros(num_images_training)

# correct_post_processing_bias = True  # False  # WARNING!

if enable_ccc_Gauss_cfr:
    print("GC classify...")
    classes_Gauss_training = numpy.array(GC_node.label(sl_seq_training[:, 0:reg_num_signals]))
    labels_Gauss_training = more_nodes.map_class_numbers_to_avg_label(all_classes, avg_labels, classes_Gauss_training)
    regression_Gauss_training = GC_node.regression(sl_seq_training[:, 0:reg_num_signals], avg_labels)
    regressionMAE_Gauss_training = GC_node.regressionMAE(sl_seq_training[:, 0:reg_num_signals], avg_labels)
    probs_training = GC_node.class_probabilities(sl_seq_training[:, 0:reg_num_signals])
    softCR_Gauss_training = GC_node.softCR(sl_seq_training[:, 0:reg_num_signals], correct_classes_training)
    # if correct_post_processing_bias:
    #     # Experimental post-processing. WARNING!
# expanded_regression = numpy.zeros((regression_Gauss_training.shape[0], 3)) # for i in range(3): # expanded_regression[:, i] = (regression_Gauss_training) ** (i + 1) # print("expanded_regression[0:5]=", expanded_regression[0:5]) # regressionMAE_Gauss_training = GC_post_processing_node.execute(expanded_regression).flatten() # print("regressionMAE_Gauss_training[0:5]=", regressionMAE_Gauss_training[0:5]) else: classes_Gauss_training = labels_Gauss_training = regression_Gauss_training = \ regressionMAE_Gauss_training = numpy.zeros(num_images_training) probs_training = numpy.zeros((num_images_training, 2)) softCR_Gauss_training = 0.0 if enable_kNN_cfr: print("kNN classify... (k=%d)" % kNN_k) classes_kNN_training = numpy.array(kNN_node.label(sl_seq_training[:, 0:reg_num_signals])) labels_kNN_training = more_nodes.map_class_numbers_to_avg_label(all_classes, avg_labels, classes_kNN_training) else: classes_kNN_training = labels_kNN_training = numpy.zeros(num_images_training) skip_svm_training = False if enable_svm_cfr and skip_svm_training is False: print("SVM classify...") classes_svm_training = svm_node.label( svm_scale(sl_seq_training[:, 0:reg_num_signals], data_mins, data_maxs, svm_min, svm_max)) regression_svm_training = more_nodes.map_class_numbers_to_avg_label(all_classes, avg_labels, classes_svm_training) regression2_svm_training = more_nodes.map_class_numbers_to_avg_label(all_classes, avg_labels, classes_svm_training) regression3_svm_training = more_nodes.map_class_numbers_to_avg_label(all_classes, avg_labels, classes_svm_training) else: classes_svm_training = regression_svm_training = regression2_svm_training = regression3_svm_training = \ numpy.zeros(num_images_training) if enable_svr: print("SVR regression...") regression_svr_training = svr.predict(svm_scale(sl_seq_training[:, 0:reg_num_signals], data_mins, data_maxs, svm_min, svm_max)) else: regression_svr_training = numpy.zeros(num_images_training) if enable_lr_cfr: print("LR execute...") 
regression_lr_training = lr_node.execute(sl_seq_training[:, 0:reg_num_signals]).flatten() else: regression_lr_training = numpy.zeros(num_images_training) if output_instead_of_SVM2: regression2_svm_training = (sl_seq_training[:, 0] * orig_train_label_std) + orig_train_label_mean print("Applying cutoff to the label estimations for LR and Linear Scaling (SVM2)") regression2_svm_training = numpy.clip(regression2_svm_training, orig_train_label_min, orig_train_label_max) regression_lr_training = numpy.clip(regression_lr_training, orig_train_label_min, orig_train_label_max) if svr_instead_of_SVM2: print("replacing regression estimation for svm2 by svr estimation") regression2_svm_training = regression_svr_training print("Classification of training data: ", labels_kNN_training) t_classifier_train2 = time.time() print("Classifier trained in time %0.3f s" % (t_classifier_train1 - t_classifier_train0)) print("Training Images Classified in time %0.3f s" % (t_classifier_train2 - t_classifier_train1)) if benchmark is not None: benchmark.append("Classification of Training Images", (t_classifier_train2 - t_classifier_train1) ) t_class1 = time.time() print("Classification/Regression over Training Set in %0.3f s" % (t_class1 - t_class0)) if integer_label_estimation: print("Making all label estimations for training data integer numbers") if convert_labels_days_to_years: labels_ncc_training = labels_ncc_training.astype(int) regression_Gauss_training = regression_Gauss_training.astype(int) regressionMAE_Gauss_training = regressionMAE_Gauss_training.astype(int) labels_kNN_training = labels_kNN_training.astype(int) regression_svm_training = regression_svm_training.astype(int) regression2_svm_training = regression2_svm_training.astype(int) regression3_svm_training = regression3_svm_training.astype(int) regression_lr_training = regression_lr_training.astype(int) else: labels_ncc_training = numpy.rint(labels_ncc_training) regression_Gauss_training = numpy.rint(regression_Gauss_training) 
regressionMAE_Gauss_training = numpy.rint(regressionMAE_Gauss_training) labels_kNN_training = numpy.rint(labels_kNN_training) regression_svm_training = numpy.rint(regression_svm_training) regression2_svm_training = numpy.rint(regression2_svm_training) regression3_svm_training = numpy.rint(regression3_svm_training) regression_lr_training = numpy.rint(regression_lr_training) print("regressionMAE_Gauss_training[0:5]=", regressionMAE_Gauss_training[0:5]) t_class0 = time.time() if enable_ncc_cfr: print("NCC classify...") classes_ncc_seenid = numpy.array(ncc_node.label(sl_seq_seenid[:, 0:reg_num_signals])) labels_ncc_seenid = more_nodes.map_class_numbers_to_avg_label(all_classes, avg_labels, classes_ncc_seenid) print(classes_ncc_seenid) else: classes_ncc_seenid = labels_ncc_seenid = numpy.zeros(num_images_seenid) if enable_ccc_Gauss_cfr: classes_Gauss_seenid = numpy.array(GC_node.label(sl_seq_seenid[:, 0:reg_num_signals])) labels_Gauss_seenid = more_nodes.map_class_numbers_to_avg_label(all_classes, avg_labels, classes_Gauss_seenid) regression_Gauss_seenid = GC_node.regression(sl_seq_seenid[:, 0:reg_num_signals], avg_labels) regressionMAE_Gauss_seenid = GC_node.regressionMAE(sl_seq_seenid[:, 0:reg_num_signals], avg_labels) probs_seenid = GC_node.class_probabilities(sl_seq_seenid[:, 0:reg_num_signals]) softCR_Gauss_seenid = GC_node.softCR(sl_seq_seenid[:, 0:reg_num_signals], correct_classes_seenid) # if correct_post_processing_bias: # # Experimental post-processing. WARNING! 
    #     expanded_regression = numpy.zeros((regression_Gauss_seenid.shape[0], 3))
    #     for i in range(3):
    #         expanded_regression[:, i] = (regression_Gauss_seenid) ** (i + 1)
    #     print("expanded_regression[0:5]=", expanded_regression[0:5])
    #     regressionMAE_Gauss_seenid = GC_post_processing_node.execute(expanded_regression).flatten()
    #     print("regressionMAE_Gauss_seenid[0:5]=", regressionMAE_Gauss_seenid[0:5])
else:
    # Gaussian classifier disabled: zero-filled placeholders with matching shapes
    classes_Gauss_seenid = labels_Gauss_seenid = regression_Gauss_seenid = regressionMAE_Gauss_seenid = numpy.zeros(
        num_images_seenid)
    probs_seenid = numpy.zeros((num_images_seenid, 2))
    softCR_Gauss_seenid = 0.0

if enable_kNN_cfr:
    print("kNN classify... (k=%d)" % kNN_k)
    classes_kNN_seenid = numpy.array(kNN_node.label(sl_seq_seenid[:, 0:reg_num_signals]))
    labels_kNN_seenid = more_nodes.map_class_numbers_to_avg_label(all_classes, avg_labels, classes_kNN_seenid)
else:
    classes_kNN_seenid = labels_kNN_seenid = numpy.zeros(num_images_seenid)

if enable_svm_cfr:
    classes_svm_seenid = svm_node.label(
        svm_scale(sl_seq_seenid[:, 0:reg_num_signals], data_mins, data_maxs, svm_min, svm_max))
    regression_svm_seenid = more_nodes.map_class_numbers_to_avg_label(all_classes, avg_labels, classes_svm_seenid)
    regression2_svm_seenid = more_nodes.map_class_numbers_to_avg_label(all_classes, avg_labels, classes_svm_seenid)
    regression3_svm_seenid = more_nodes.map_class_numbers_to_avg_label(all_classes, avg_labels, classes_svm_seenid)
else:
    classes_svm_seenid = regression_svm_seenid = regression2_svm_seenid = regression3_svm_seenid = numpy.zeros(
        num_images_seenid)

if enable_svr:
    regression_svr_seenid = svr.predict(svm_scale(sl_seq_seenid[:, 0:reg_num_signals], data_mins, data_maxs,
                                                  svm_min, svm_max))
else:
    regression_svr_seenid = numpy.zeros(num_images_seenid)

if enable_lr_cfr:
    regression_lr_seenid = lr_node.execute(sl_seq_seenid[:, 0:reg_num_signals]).flatten()
else:
    regression_lr_seenid = numpy.zeros(num_images_seenid)

if output_instead_of_SVM2:
    # Replace the SVM2 estimate by a linear rescaling of the first slow feature
    regression2_svm_seenid = (sl_seq_seenid[:, 0] * orig_train_label_std) + orig_train_label_mean
    print("Applying cutoff to the label estimations for LR and Linear Scaling (SVM2)")
    regression2_svm_seenid = numpy.clip(regression2_svm_seenid, orig_train_label_min, orig_train_label_max)
    regression_lr_seenid = numpy.clip(regression_lr_seenid, orig_train_label_min, orig_train_label_max)

if svr_instead_of_SVM2:
    print("replacing regression estimation for svm2 by svr estimation")
    regression2_svm_seenid = regression_svr_seenid

print("labels_kNN_seenid.shape=", labels_kNN_seenid.shape)
# correct_labels_seenid = wider_1Darray(numpy.arange(iSeenid.MIN_GENDER, iSeenid.MAX_GENDER, iSeenid.GENDER_STEP),
#                                      iSeenid.block_size)
print("correct_labels_seenid.shape=", correct_labels_seenid.shape)
t_class1 = time.time()
print("Classification/Regression over Seen Id in %0.3f s" % (t_class1 - t_class0))

if integer_label_estimation:
    # Same rounding policy as for the training estimations above
    print("Making all label estimations for seenid data integer numbers")
    if convert_labels_days_to_years:
        labels_ncc_seenid = labels_ncc_seenid.astype(int)
        regression_Gauss_seenid = regression_Gauss_seenid.astype(int)
        regressionMAE_Gauss_seenid = regressionMAE_Gauss_seenid.astype(int)
        labels_kNN_seenid = labels_kNN_seenid.astype(int)
        regression_svm_seenid = regression_svm_seenid.astype(int)
        regression2_svm_seenid = regression2_svm_seenid.astype(int)
        regression3_svm_seenid = regression3_svm_seenid.astype(int)
        regression_lr_seenid = regression_lr_seenid.astype(int)
    else:
        labels_ncc_seenid = numpy.rint(labels_ncc_seenid)
        regression_Gauss_seenid = numpy.rint(regression_Gauss_seenid)
        regressionMAE_Gauss_seenid = numpy.rint(regressionMAE_Gauss_seenid)
        labels_kNN_seenid = numpy.rint(labels_kNN_seenid)
        regression_svm_seenid = numpy.rint(regression_svm_seenid)
        regression2_svm_seenid = numpy.rint(regression2_svm_seenid)
        regression3_svm_seenid = numpy.rint(regression3_svm_seenid)
        regression_lr_seenid = numpy.rint(regression_lr_seenid)
    print("regressionMAE_Gauss_seenid[0:5]=", regressionMAE_Gauss_seenid[0:5])

# t10 = time.time()
# --- Load the "new id" test set (unseen identities) and execute the network over it.
t_load_images0 = time.time()
print("Loading test images, new ids...")
if coherent_seeds:
    numpy.random.seed(experiment_seed + 555555)

image_files_newid = iNewid.input_files
num_images_newid = iNewid.num_images
block_size_newid = iNewid.block_size
seq = sNewid

if seq.input_files == "LoadBinaryData00":
    subimages_newid = load_natural_data(seq.data_base_dir, seq.base_filename, seq.samples, verbose=False)
elif seq.input_files == "LoadRawData":
    subimages_newid = load_raw_data(seq.data_base_dir, seq.base_filename, input_dim=seq.input_dim, dtype=seq.dtype,
                                    select_samples=seq.samples, verbose=False)
else:
    subimages_newid = seq.load_data(seq)

if load_and_append_output_features_dir is not None:
    newid_data_hash = cache.hash_object((iNewid, sNewid)).hexdigest()
    # NOTE(review): computed hash immediately overwritten with "0", as for seenid — confirm.
    newid_data_hash = "0"
    print("loading output features (newid data) from dir: ", load_and_append_output_features_dir, "and hash:",
          newid_data_hash)
    additional_features_newid = cache.unpickle_array(base_dir=load_and_append_output_features_dir,
                                                     base_filename="output_features_training_TestD" + newid_data_hash)
    additional_features_newid = 100000 * additional_features_newid[:, 0:num_features_to_append_to_input]
    # subimages_newid = numpy.concatenate((subimages_newid, additional_features_newid), axis=1)

t_load_images1 = time.time()
print(num_images_newid, " Images loaded in %0.3f s" % (t_load_images1 - t_load_images0))

t_exec0 = time.time()
print("Execution over New Id testing set...")
print("Input Signal: New Id test images")
sl_seq_newid = flow.execute(subimages_newid)
sl_seq_newid = sl_seq_newid[:, skip_num_signals:]
sl_seq_newid = numpy.nan_to_num(sl_seq_newid)
if feature_cut_off_level != 0.0:
    sl_seq_newid = numpy.clip(sl_seq_newid, min_cutoff, max_cutoff)

if clip_seenid_newid_to_training:  # This is obsolete and has been repaced by negative feature_cut_off_level
    print("clipping sl_seq_newid")
    sl_seq_newid_min = sl_seq_newid.min(axis=0)
    sl_seq_newid_max = sl_seq_newid.max(axis=0)
    print("sl_seq_training_min=",
sl_seq_training_min) print("sl_seq_training_max=", sl_seq_training_max) print("sl_seq_newid_min=", sl_seq_newid_min) print("sl_seq_newid_max=", sl_seq_newid_max) sl_seq_newid = numpy.clip(sl_seq_newid, sl_seq_training_min, sl_seq_training_max) if transfer_learning_feature_normalization: sl_seq_newid_mean = sl_seq_newid.mean(axis=0) sl_seq_newid_std = sl_seq_newid.std(axis=0) sl_seq_newid = sfa_libs.normalize_mean_std(sl_seq_newid, sl_seq_newid_mean, sl_seq_newid_std) print("Removed mean and data of newid data (transfer learning)") # Corrections corrections_newid, corrections_gauss_newid = more_nodes.combine_correction_factors(flow) print("Final correction factors (newid):", corrections_newid) if corrections_newid is None: print("Corrections are not available") else: num_interesting_samples = 40 worst_correction_factors_indices_newid = numpy.argsort(corrections_newid)[0:num_interesting_samples] best_correction_factors_indices_newid = numpy.argsort(corrections_newid)[:-num_interesting_samples-1:-1] print(num_interesting_samples, "worst final corrections at indices:", worst_correction_factors_indices_newid) print(num_interesting_samples, "worst final corrections:", corrections_newid[worst_correction_factors_indices_newid]) print(num_interesting_samples, "respective gauss corrections:", corrections_gauss_newid[worst_correction_factors_indices_newid]) print(num_interesting_samples, "images with worst corrections:", end=' ') for i in range(num_interesting_samples): print(seq.input_files[worst_correction_factors_indices_newid[i]], end=' ') worst_gauss_correction_factors_indices_newid = numpy.argsort(corrections_gauss_newid)[0:num_interesting_samples] best_gauss_correction_factors_indices_newid = numpy.argsort(corrections_gauss_newid)[:-num_interesting_samples-1:-1] print(num_interesting_samples, "best final gauss_corrections at indices:", best_gauss_correction_factors_indices_newid) print(num_interesting_samples, "best final gauss_corrections:", 
corrections_gauss_newid[best_gauss_correction_factors_indices_newid]) print(num_interesting_samples, "respective corrections:", corrections_newid[worst_gauss_correction_factors_indices_newid]) print(num_interesting_samples, "images with best gauss_corrections:", end=' ') for i in range(num_interesting_samples): print(seq.input_files[best_gauss_correction_factors_indices_newid[i]], end=' ') corr_factor = 1.0 print("using corr_factor (feature scale)", corr_factor) sl_seq_newid[:, 0:reg_num_signals] = sl_seq_newid[:, 0:reg_num_signals] * corr_factor t_exec1 = time.time() print("Execution over New Id in %0.3f s" % (t_exec1 - t_exec0)) if network_write: network_write.update_cache([iNewid, sNewid], None, network_cache_write_dir, "iNewidsNewidData" + "_" + network_hash, overwrite=True, use_hash=network_hash, verbose=True) network_write.update_cache(subimages_newid, None, network_cache_write_dir, "NewidData" + "_" + network_hash, overwrite=True, use_hash=network_hash, verbose=True) network_write.update_cache(sl_seq_newid, None, network_cache_write_dir, "SLNewid" + "_" + network_hash, overwrite=True, use_hash=network_hash, verbose=True) if save_output_features_dir is not None: print("saving output features (test data)") testing_data_hash = cache.hash_object((iNewid, sNewid)).hexdigest() cache.pickle_array(sl_seq_newid, base_dir=save_output_features_dir, base_filename="output_features_training_TestD" + testing_data_hash, overwrite=True, verbose=True) t_class0 = time.time() correct_classes_newid = correct_objective_classes_newid correct_labels_newid = correct_objective_labels_newid if convert_labels_days_to_years: if correct_labels_newid.mean() < 200: print("correct_labels_newid appears to be already year values (mean %f)" % correct_labels_newid.mean()) else: print("converting correct_labels_newid from days to years") correct_labels_newid = correct_labels_newid / DAYS_IN_A_YEAR if integer_label_estimation: if (correct_labels_newid - correct_labels_newid.astype(int)).mean() < 
0.01: print("correct_labels_newid appears to be already integer, preserving its value") else: print("correct_labels_newid seem to be real values, converting them to years") correct_labels_newid = (correct_labels_newid + 0.0006).astype(int) * 1.0 print("correct_labels_newid=", correct_labels_newid) if enable_ncc_cfr: print("NCC classify...") classes_ncc_newid = numpy.array(ncc_node.label(sl_seq_newid[:, 0:reg_num_signals])) labels_ncc_newid = more_nodes.map_class_numbers_to_avg_label(all_classes, avg_labels, classes_ncc_newid) print(classes_ncc_newid) else: classes_ncc_newid = labels_ncc_newid = numpy.zeros(num_images_newid) if enable_ccc_Gauss_cfr: classes_Gauss_newid = numpy.array(GC_node.label(sl_seq_newid[:, 0:reg_num_signals])) labels_Gauss_newid = more_nodes.map_class_numbers_to_avg_label(all_classes, avg_labels, classes_Gauss_newid) regression_Gauss_newid = GC_node.regression(sl_seq_newid[:, 0:reg_num_signals], avg_labels) regressionMAE_Gauss_newid = GC_node.regressionMAE(sl_seq_newid[:, 0:reg_num_signals], avg_labels) probs_newid = GC_node.class_probabilities(sl_seq_newid[:, 0:reg_num_signals]) softCR_Gauss_newid = GC_node.softCR(sl_seq_newid[:, 0:reg_num_signals], correct_classes_newid) # if correct_post_processing_bias: # # Experimental post-processing. WARNING! # expanded_regression = numpy.zeros((regression_Gauss_newid.shape[0], 3)) # for i in range(3): # expanded_regression[:, i] = (regression_Gauss_newid) ** (i + 1) # print("expanded_regression[0:5]=", expanded_regression[0:5]) # regressionMAE_Gauss_newid = GC_post_processing_node.execute(expanded_regression).flatten() # print("regressionMAE_Gauss_newid[0:5]=", regressionMAE_Gauss_newid[0:5]) else: classes_Gauss_newid = labels_Gauss_newid = regression_Gauss_newid = regressionMAE_Gauss_newid = \ numpy.zeros(num_images_newid) probs_newid = numpy.zeros((num_images_newid, 2)) softCR_Gauss_newid = 0.0 if enable_kNN_cfr: print("kNN classify... 
(k=%d)" % kNN_k) classes_kNN_newid = numpy.array(kNN_node.label(sl_seq_newid[:, 0:reg_num_signals])) labels_kNN_newid = more_nodes.map_class_numbers_to_avg_label(all_classes, avg_labels, classes_kNN_newid) else: classes_kNN_newid = labels_kNN_newid = numpy.zeros(num_images_newid) if enable_svm_cfr: classes_svm_newid = svm_node.label( svm_scale(sl_seq_newid[:, 0:reg_num_signals], data_mins, data_maxs, svm_min, svm_max)) regression_svm_newid = more_nodes.map_class_numbers_to_avg_label(all_classes, avg_labels, classes_svm_newid) regression2_svm_newid = more_nodes.map_class_numbers_to_avg_label(all_classes, avg_labels, classes_svm_newid) regression3_svm_newid = more_nodes.map_class_numbers_to_avg_label(all_classes, avg_labels, classes_svm_newid) # probs_training[0, 10] = 1.0 # probs_newid[0, 10] = 1.0 # probs_seenid[0, 10] = 1.0 else: classes_svm_newid = regression_svm_newid = regression2_svm_newid = \ regression3_svm_newid = numpy.zeros(num_images_newid) if enable_svr: regression_svr_newid = svr.predict(svm_scale(sl_seq_newid[:, 0:reg_num_signals], data_mins, data_maxs, svm_min, svm_max)) else: regression_svr_newid = numpy.zeros(num_images_newid) if enable_lr_cfr: regression_lr_newid = lr_node.execute(sl_seq_newid[:, 0:reg_num_signals]).flatten() else: regression_lr_newid = numpy.zeros(num_images_newid) if output_instead_of_SVM2: regression2_svm_newid = (sl_seq_newid[:, 0] * orig_train_label_std) + orig_train_label_mean print("Applying cutoff to the label estimations for LR and Linear Scaling (SVM2)") regression2_svm_newid = numpy.clip(regression2_svm_newid, orig_train_label_min, orig_train_label_max) regression_lr_newid = numpy.clip(regression_lr_newid, orig_train_label_min, orig_train_label_max) if svr_instead_of_SVM2: print("replacing regression estimation for svm2 by svr estimation") regression2_svm_newid = regression_svr_newid t_class1 = time.time() print("Classification/Regression over New Id in %0.3f s" % ((t_class1 - t_class0))) if integer_label_estimation: # 
print"WARNING, ADDING A BIAS OF -0.5 TO ESTIMATION OF NEWID ONLY!!!" # regression_Gauss_newid += -0.5 # regressionMAE_Gauss_newid += -0.5 print("Making all label estimations for newid data integer numbers") if convert_labels_days_to_years: # 5.7 should be mapped to 5 years because age estimation is exact (days) labels_ncc_newid = labels_ncc_newid.astype(int) regression_Gauss_newid = regression_Gauss_newid.astype(int) regressionMAE_Gauss_newid = regressionMAE_Gauss_newid.astype(int) labels_kNN_newid = labels_kNN_newid.astype(int) regression_svm_newid = regression_svm_newid.astype(int) regression2_svm_newid = regression2_svm_newid.astype(int) regression3_svm_newid = regression3_svm_newid.astype(int) regression_lr_newid = regression_lr_newid.astype(int) else: # 5.7 should be mapped to 6 years because age estimation is already based on years labels_ncc_newid = numpy.rint(labels_ncc_newid) regression_Gauss_newid = numpy.rint(regression_Gauss_newid) regressionMAE_Gauss_newid = numpy.rint(regressionMAE_Gauss_newid) labels_kNN_newid = numpy.rint(labels_kNN_newid) regression_svm_newid = numpy.rint(regression_svm_newid) regression2_svm_newid = numpy.rint(regression2_svm_newid) regression3_svm_newid = numpy.rint(regression3_svm_newid) regression_lr_newid = numpy.rint(regression_lr_newid) print("regressionMAE_Gauss_newid[0:5]=", regressionMAE_Gauss_newid[0:5]) # print "Saving train/test_data for external analysis" # ndarray_to_string(sl_seq_training, "/local/tmp/escalafl/training_samples.txt") # ndarray_to_string(correct_labels_training, "/local/tmp/escalafl/training_labels.txt") # ndarray_to_string(sl_seq_seenid, "/local/tmp/escalafl/seenid_samples.txt") # ndarray_to_string(correct_labels_seenid, "/local/tmp/escalafl/seenid_labels.txt") # ndarray_to_string(sl_seq_newid, "/local/tmp/escalafl/newid_samples.txt") # ndarray_to_string(correct_labels_newid, "/local/tmp/escalafl/newid_labels.txt") print("Computing typical delta, eta values for Training SFA Signal") 
(results.typical_delta_train, results.typical_eta_newid) = sfa_libs.comp_typical_delta_eta(sl_seq_training, iTrain.block_size, num_reps=200, training_mode=iTrain.train_mode) results.brute_delta_train = sfa_libs.comp_delta_normalized(sl_seq_training) results.brute_eta_train = sfa_libs.comp_eta(sl_seq_training) t_delta_eta1 = time.time() print("delta_train=", results.typical_delta_train) print("eta_train=", results.typical_eta_train) print("brute_delta_train=", results.brute_delta_train) print("Computing typical delta, eta values for New Id SFA Signal") t_delta_eta0 = time.time() (results.typical_delta_newid, results.typical_eta_newid) = sfa_libs.comp_typical_delta_eta(sl_seq_newid, iNewid.block_size, num_reps=200, training_mode=iNewid.train_mode) results.brute_delta_newid = sfa_libs.comp_delta_normalized(sl_seq_newid) results.brute_eta_newid = sfa_libs.comp_eta(sl_seq_newid) t_delta_eta1 = time.time() print("typical_delta_newid=", results.typical_delta_newid) print("typical_delta_newid[0:31].sum()=", results.typical_delta_newid[0:31].sum()) print("typical_eta_newid=", results.typical_eta_newid) print("brute_delta_newid=", results.brute_delta_newid) # print "brute_eta_newid=", results.brute_eta_newid print("computed delta/eta in %0.3f ms" % ((t_delta_eta1 - t_delta_eta0) * 1000.0)) if isinstance(block_size, int): print("virtual sequence length complete = ", num_images_training * (block_size - 1) // 2) print("virtual sequence length sequence = ", (num_images_training - block_size) * block_size) print("virtual sequence length mixed = ", num_images_training * (block_size - 1) // 2 + (num_images_training - block_size) * block_size) else: print("length of virtual sequence not computed = ") save_train_data = True and False # This fails for large datasets :( TODO: Make this an option if save_train_data: uniqueness = numpy.random.randint(32000) save_dir_subimages_features_training = "/local/tmp/escalafl/Alberto/saved_images_features_training" print("Using uniqueness %d for 
saving subimage and feature data" % uniqueness) cache.pickle_array(subimages_train, base_dir=save_dir_subimages_features_training, base_filename="subimages_train%5d" % uniqueness, chunk_size=5000, block_size=1, continuous=False, overwrite=True, verbose=True) cache.pickle_array(sl_seq_training, base_dir=save_dir_subimages_features_training, base_filename="sl_seq_training%5d" % uniqueness, chunk_size=5000, block_size=1, continuous=False, overwrite=True, verbose=True) # Then unpicke with unpickle_array(base_dir="", base_filename="subimages_train%5d"%uniqueness): print("Estimating explained variance for Train SFA Signal") number_samples_explained_variance = 9000 # 1000 #4000 #2000 # fast_inverse_available = True and False if estimate_explained_var_with_inverse: print("Estimated explained variance with inverse (train) is: ", more_nodes.estimate_explained_variance( subimages_train, flow, sl_seq_training, number_samples_explained_variance)) print("Estimated explained variance with inverse (newid) is: ", more_nodes.estimate_explained_variance( subimages_newid, flow, sl_seq_newid, number_samples_explained_variance)) else: print("Fast inverse not available, not estimating explained variance") if estimate_explained_var_with_kNN_k: k = estimate_explained_var_with_kNN_k # k=64 print("Estimated explained variance with kNN (train, %d features) is: " % reg_num_signals, more_nodes.estimate_explained_var_with_kNN(subimages_train, sl_seq_training[:, 0:reg_num_signals], max_num_samples_for_ev=10000, max_test_samples_for_ev=10000, k=k, ignore_closest_match=True, operation="average")) else: print("Not estimating explained variance with kNN") if estimate_explained_var_with_kNN_lin_app_k: k = estimate_explained_var_with_kNN_lin_app_k # k=64 print("Estimated explained variance with kNN_lin_app (train, %d features) is: " % reg_num_signals, more_nodes.estimate_explained_var_with_kNN(subimages_train, sl_seq_training[:, 0:reg_num_signals], max_num_samples_for_ev=10000, 
max_test_samples_for_ev=10000, k=k, ignore_closest_match=True, operation="lin_app")) else: print("Not estimating explained variance with kNN_lin_app") if estimate_explained_var_linear_global_N: if estimate_explained_var_linear_global_N > 0: number_samples_EV_linear_global = estimate_explained_var_linear_global_N else: number_samples_EV_linear_global = sl_seq_training.shape[0] number_samples_EV_linear_global = sl_seq_seenid.shape[0] num_features_linear_model = 1209 # 75 EVLinGlobal_train1, EVLinGlobal_train2, EVLinGlobal_newid = more_nodes.estimate_explained_var_linear_global( subimages_seenid, sl_seq_seenid[:, 0:num_features_linear_model], subimages_newid, sl_seq_newid[:, 0:num_features_linear_model], num_features_linear_model, number_samples_EV_linear_global) print("Explained Variance Linear Global for training data (%d features, subset of size %d) is: " % ( num_features_linear_model, number_samples_EV_linear_global), EVLinGlobal_train1) print("for training data (new random subset) is: ", EVLinGlobal_train2) print("for newid (all_samples FORCED %d) is: " % num_features_linear_model, EVLinGlobal_newid) else: print("Not estimating explained variance with global linear reconstruction") num_features_linear_model = 75 print("Computing chance levels for newid data") chance_level_RMSE_newid = correct_labels_newid.std() correct_labels_newid_sorted = correct_labels_newid + 0.0 correct_labels_newid_sorted.sort() median_estimation = numpy.ones(len(correct_labels_newid)) * correct_labels_newid_sorted[ len(correct_labels_newid) // 2] chance_level_MAE_newid = classifiers.mean_average_error(correct_labels_newid, median_estimation) print("chance_level_RMSE_newid=", chance_level_RMSE_newid, "chance_level_MAE_newid=", chance_level_MAE_newid) print("correct_labels_newid.mean()=", correct_labels_newid.mean(), "correct_labels_newid median() ", median_estimation) print("correct_labels_newid.min()=", correct_labels_newid.min(), "correct_labels_newid.max() ", correct_labels_newid.max()) 
print("Computations Finished!") print("** Displaying Benchmark data: **") if benchmark is not None: for task_name, task_time in benchmark: print(" ", task_name, " done in %0.3f s" % task_time) print("Classification/Regression Performance: ") if Parameters.analysis or True: print(correct_classes_training) print(classes_kNN_training) # MSE results.class_ncc_rate_train = classifiers.classification_rate(correct_classes_training, classes_ncc_training) results.class_kNN_rate_train = classifiers.classification_rate(correct_classes_training, classes_kNN_training) results.class_Gauss_rate_train = classifiers.classification_rate(correct_classes_training, classes_Gauss_training) results.class_svm_rate_train = classifiers.classification_rate(correct_classes_training, classes_svm_training) results.mse_ncc_train = distance_squared_Euclidean(correct_labels_training, labels_ncc_training) / len( labels_kNN_training) results.mse_kNN_train = distance_squared_Euclidean(correct_labels_training, labels_kNN_training) / len( labels_kNN_training) results.mse_gauss_train = distance_squared_Euclidean(correct_labels_training, regression_Gauss_training) / len( labels_kNN_training) results.mse_svm_train = distance_squared_Euclidean(correct_labels_training, regression_svm_training) / len( labels_kNN_training) results.mse2_svm_train = distance_squared_Euclidean(correct_labels_training, regression2_svm_training) / len( labels_kNN_training) results.mse3_svm_train = distance_squared_Euclidean(correct_labels_training, regression3_svm_training) / len( labels_kNN_training) results.mse_lr_train = distance_squared_Euclidean(correct_labels_training, regression_lr_training) / len( labels_kNN_training) # MAE results.maeOpt_gauss_train = classifiers.mean_average_error(correct_labels_training, regressionMAE_Gauss_training) results.mae_gauss_train = classifiers.mean_average_error(regression_Gauss_training, correct_labels_training) results.mae_svr_train = classifiers.mean_average_error(regression_svr_training, 
correct_labels_training) # RMSE results.rmse_ncc_train = results.mse_ncc_train ** 0.5 results.rmse_kNN_train = results.mse_kNN_train ** 0.5 results.rmse_gauss_train = results.mse_gauss_train ** 0.5 results.rmse_svm_train = results.mse_svm_train ** 0.5 results.rmse2_svm_train = results.mse2_svm_train ** 0.5 results.rmse3_svm_train = results.mse3_svm_train ** 0.5 results.rmse_lr_train = results.mse_lr_train ** 0.5 results.class_ncc_rate_seenid = classifiers.classification_rate(correct_classes_seenid, classes_ncc_seenid) results.class_kNN_rate_seenid = classifiers.classification_rate(correct_classes_seenid, classes_kNN_seenid) results.class_Gauss_rate_seenid = classifiers.classification_rate(correct_classes_seenid, classes_Gauss_seenid) results.class_svm_rate_seenid = classifiers.classification_rate(correct_classes_seenid, classes_svm_seenid) results.mse_ncc_seenid = distance_squared_Euclidean(correct_labels_seenid, labels_ncc_seenid) / len( labels_kNN_seenid) results.mse_kNN_seenid = distance_squared_Euclidean(correct_labels_seenid, labels_kNN_seenid) / len( labels_kNN_seenid) results.mse_gauss_seenid = distance_squared_Euclidean(correct_labels_seenid, regression_Gauss_seenid) / len( labels_kNN_seenid) results.mse_svm_seenid = distance_squared_Euclidean(correct_labels_seenid, regression_svm_seenid) / len( labels_kNN_seenid) results.mse2_svm_seenid = distance_squared_Euclidean(correct_labels_seenid, regression2_svm_seenid) / len( labels_kNN_seenid) results.mse3_svm_seenid = distance_squared_Euclidean(correct_labels_seenid, regression3_svm_seenid) / len( labels_kNN_seenid) results.mse_lr_seenid = distance_squared_Euclidean(correct_labels_seenid, regression_lr_seenid) / len( labels_kNN_seenid) results.mae_gauss_seenid = classifiers.mean_average_error(regression_Gauss_seenid, correct_labels_seenid) results.maeOpt_gauss_seenid = classifiers.mean_average_error(correct_labels_seenid, regressionMAE_Gauss_seenid) results.mae_svr_seenid = 
classifiers.mean_average_error(regression_svr_seenid, correct_labels_seenid) results.rmse_ncc_seenid = results.mse_ncc_seenid ** 0.5 results.rmse_kNN_seenid = results.mse_kNN_seenid ** 0.5 results.rmse_gauss_seenid = results.mse_gauss_seenid ** 0.5 results.rmse_svm_seenid = results.mse_svm_seenid ** 0.5 results.rmse2_svm_seenid = results.mse2_svm_seenid ** 0.5 results.rmse3_svm_seenid = results.mse3_svm_seenid ** 0.5 results.rmse_lr_seenid = results.mse_lr_seenid ** 0.5 print(correct_classes_newid.shape, classes_kNN_newid.shape) results.class_ncc_rate_newid = classifiers.classification_rate(correct_classes_newid, classes_ncc_newid) results.class_kNN_rate_newid = classifiers.classification_rate(correct_classes_newid, classes_kNN_newid) results.class_Gauss_rate_newid = classifiers.classification_rate(correct_classes_newid, classes_Gauss_newid) results.class_svm_rate_newid = classifiers.classification_rate(correct_classes_newid, classes_svm_newid) results.mse_ncc_newid = distance_squared_Euclidean(correct_labels_newid, labels_ncc_newid) / len( labels_kNN_newid) results.mse_kNN_newid = distance_squared_Euclidean(correct_labels_newid, labels_kNN_newid) / len( labels_kNN_newid) results.mse_gauss_newid = distance_squared_Euclidean(correct_labels_newid, regression_Gauss_newid) / len( labels_kNN_newid) results.mse_svm_newid = distance_squared_Euclidean(correct_labels_newid, regression_svm_newid) / len( labels_kNN_newid) results.mse2_svm_newid = distance_squared_Euclidean(correct_labels_newid, regression2_svm_newid) / len( labels_kNN_newid) results.mse3_svm_newid = distance_squared_Euclidean(correct_labels_newid, regression3_svm_newid) / len( labels_kNN_newid) results.mse_lr_newid = distance_squared_Euclidean(correct_labels_newid, regression_lr_newid) / len( labels_kNN_newid) results.mae_gauss_newid = classifiers.mean_average_error(correct_labels_newid, regression_Gauss_newid) results.maeOpt_gauss_newid = classifiers.mean_average_error(correct_labels_newid, 
regressionMAE_Gauss_newid) results.mae_svr_newid = classifiers.mean_average_error(regression_svr_newid, correct_labels_newid) results.rmse_ncc_newid = results.mse_ncc_newid ** 0.5 results.rmse_kNN_newid = results.mse_kNN_newid ** 0.5 results.rmse_gauss_newid = results.mse_gauss_newid ** 0.5 results.rmse_svm_newid = results.mse_svm_newid ** 0.5 results.rmse2_svm_newid = results.mse2_svm_newid ** 0.5 results.rmse3_svm_newid = results.mse3_svm_newid ** 0.5 results.rmse_lr_newid = results.mse_lr_newid ** 0.5 print("Comparisson of MAE for RMSE estimation and MAE estimation") print("regression_Gauss_newid[100:150] =", regression_Gauss_newid[100:150]) print("regressionMAE_Gauss_newid[100:150] =", regressionMAE_Gauss_newid[100:150]) print("diff MAE-RMSE= ", regressionMAE_Gauss_newid[100:150] - regression_Gauss_newid[100:150]) worst = numpy.argsort(numpy.abs(regressionMAE_Gauss_newid - regression_Gauss_newid)) print("worst[-50:] diff MAE-RMSE= ", worst[-50:]) print("regression_Gauss_newid[worst[-50:]]=", regression_Gauss_newid[worst[-50:]]) print("regressionMAE_Gauss_newid[worst[-50:]]=", regressionMAE_Gauss_newid[worst[-50:]]) print("correct_labels_newid[worst[-50:]]=", correct_labels_newid[worst[-50:]]) results.maeOpt_gauss_newid = classifiers.mean_average_error(correct_labels_newid, regressionMAE_Gauss_newid) results.mae_gauss_newid = classifiers.mean_average_error(correct_labels_newid, regression_Gauss_newid) print("N1=", classifiers.mean_average_error(correct_labels_newid, regressionMAE_Gauss_newid)) print("N2=", classifiers.mean_average_error(correct_labels_newid, regression_Gauss_newid)) numpy.savetxt("regressionMAE_Gauss_newid.txt", regressionMAE_Gauss_newid) numpy.savetxt("regression_Gauss_newid.txt", regression_Gauss_newid) numpy.savetxt("regression_svr_newid
codeparrot/github-code-clean
"""Tools for spectral analysis. """ from __future__ import division, print_function, absolute_import import numpy as np from scipy import fftpack from . import signaltools from .windows import get_window from ._spectral import _lombscargle from ._arraytools import const_ext, even_ext, odd_ext, zero_ext import warnings from scipy._lib.six import string_types __all__ = ['periodogram', 'welch', 'lombscargle', 'csd', 'coherence', 'spectrogram', 'stft', 'istft', 'check_COLA'] def lombscargle(x, y, freqs, precenter=False, normalize=False): """ lombscargle(x, y, freqs) Computes the Lomb-Scargle periodogram. The Lomb-Scargle periodogram was developed by Lomb [1]_ and further extended by Scargle [2]_ to find, and test the significance of weak periodic signals with uneven temporal sampling. When *normalize* is False (default) the computed periodogram is unnormalized, it takes the value ``(A**2) * N/4`` for a harmonic signal with amplitude A for sufficiently large N. When *normalize* is True the computed periodogram is is normalized by the residuals of the data around a constant reference model (at zero). Input arrays should be one-dimensional and will be cast to float64. Parameters ---------- x : array_like Sample times. y : array_like Measurement values. freqs : array_like Angular frequencies for output periodogram. precenter : bool, optional Pre-center amplitudes by subtracting the mean. normalize : bool, optional Compute normalized periodogram. Returns ------- pgram : array_like Lomb-Scargle periodogram. Raises ------ ValueError If the input arrays `x` and `y` do not have the same shape. Notes ----- This subroutine calculates the periodogram using a slightly modified algorithm due to Townsend [3]_ which allows the periodogram to be calculated using only a single pass through the input arrays for each frequency. The algorithm running time scales roughly as O(x * freqs) or O(N^2) for a large number of samples and frequencies. References ---------- .. [1] N.R. 
Lomb "Least-squares frequency analysis of unequally spaced data", Astrophysics and Space Science, vol 39, pp. 447-462, 1976 .. [2] J.D. Scargle "Studies in astronomical time series analysis. II - Statistical aspects of spectral analysis of unevenly spaced data", The Astrophysical Journal, vol 263, pp. 835-853, 1982 .. [3] R.H.D. Townsend, "Fast calculation of the Lomb-Scargle periodogram using graphics processing units.", The Astrophysical Journal Supplement Series, vol 191, pp. 247-253, 2010 Examples -------- >>> import scipy.signal >>> import matplotlib.pyplot as plt First define some input parameters for the signal: >>> A = 2. >>> w = 1. >>> phi = 0.5 * np.pi >>> nin = 1000 >>> nout = 100000 >>> frac_points = 0.9 # Fraction of points to select Randomly select a fraction of an array with timesteps: >>> r = np.random.rand(nin) >>> x = np.linspace(0.01, 10*np.pi, nin) >>> x = x[r >= frac_points] Plot a sine wave for the selected times: >>> y = A * np.sin(w*x+phi) Define the array of frequencies for which to compute the periodogram: >>> f = np.linspace(0.01, 10, nout) Calculate Lomb-Scargle periodogram: >>> import scipy.signal as signal >>> pgram = signal.lombscargle(x, y, f, normalize=True) Now make a plot of the input data: >>> plt.subplot(2, 1, 1) >>> plt.plot(x, y, 'b+') Then plot the normalized periodogram: >>> plt.subplot(2, 1, 2) >>> plt.plot(f, pgram) >>> plt.show() """ x = np.asarray(x, dtype=np.float64) y = np.asarray(y, dtype=np.float64) freqs = np.asarray(freqs, dtype=np.float64) assert x.ndim == 1 assert y.ndim == 1 assert freqs.ndim == 1 if precenter: pgram = _lombscargle(x, y - y.mean(), freqs) else: pgram = _lombscargle(x, y, freqs) if normalize: pgram *= 2 / np.dot(y, y) return pgram def periodogram(x, fs=1.0, window='boxcar', nfft=None, detrend='constant', return_onesided=True, scaling='density', axis=-1): """ Estimate power spectral density using a periodogram. 
def periodogram(x, fs=1.0, window='boxcar', nfft=None, detrend='constant',
                return_onesided=True, scaling='density', axis=-1):
    """
    Estimate power spectral density using a periodogram.

    Parameters
    ----------
    x : array_like
        Time series of measurement values
    fs : float, optional
        Sampling frequency of the `x` time series. Defaults to 1.0.
    window : str or tuple or array_like, optional
        Desired window to use. If `window` is a string or tuple, it is
        passed to `get_window` to generate the window values, which are
        DFT-even by default. See `get_window` for a list of windows and
        required parameters. If `window` is array_like it will be used
        directly as the window and its length must be nperseg. Defaults
        to 'boxcar'.
    nfft : int, optional
        Length of the FFT used. If `None` the length of `x` will be
        used.
    detrend : str or function or `False`, optional
        Specifies how to detrend each segment. If `detrend` is a
        string, it is passed as the `type` argument to the `detrend`
        function. If it is a function, it takes a segment and returns a
        detrended segment. If `detrend` is `False`, no detrending is
        done. Defaults to 'constant'.
    return_onesided : bool, optional
        If `True`, return a one-sided spectrum for real data. If
        `False` return a two-sided spectrum. Note that for complex
        data, a two-sided spectrum is always returned.
    scaling : { 'density', 'spectrum' }, optional
        Selects between computing the power spectral density ('density')
        where `Pxx` has units of V**2/Hz and computing the power
        spectrum ('spectrum') where `Pxx` has units of V**2, if `x`
        is measured in V and `fs` is measured in Hz. Defaults to
        'density'
    axis : int, optional
        Axis along which the periodogram is computed; the default is
        over the last axis (i.e. ``axis=-1``).

    Returns
    -------
    f : ndarray
        Array of sample frequencies.
    Pxx : ndarray
        Power spectral density or power spectrum of `x`.

    See Also
    --------
    welch: Estimate power spectral density using Welch's method
    lombscargle: Lomb-Scargle periodogram for unevenly sampled data

    Notes
    -----
    .. versionadded:: 0.12.0

    Examples
    --------
    >>> from scipy import signal
    >>> fs = 10e3
    >>> time = np.arange(1e5) / fs
    >>> x = 2*np.sqrt(2)*np.sin(2*np.pi*1234.0*time)
    >>> f, Pxx_den = signal.periodogram(x, fs)
    """
    x = np.asarray(x)

    # Degenerate input: nothing to transform, return empty arrays of the
    # same shape so callers can still unpack (f, Pxx).
    if x.size == 0:
        return np.empty(x.shape), np.empty(x.shape)

    if window is None:
        window = 'boxcar'

    if nfft is None:
        nperseg = x.shape[axis]
    elif nfft == x.shape[axis]:
        nperseg = nfft
    elif nfft > x.shape[axis]:
        # Zero-padded FFT: keep the whole signal as one segment and let
        # `welch` pad it up to `nfft`.
        nperseg = x.shape[axis]
    elif nfft < x.shape[axis]:
        # Truncate the signal along `axis` to the first `nfft` samples.
        s = [np.s_[:]] * len(x.shape)
        s[axis] = np.s_[:nfft]
        # BUGFIX: index with a tuple. Indexing an ndarray with a *list* of
        # slices is deprecated since NumPy 1.15 and raises in later versions.
        x = x[tuple(s)]
        nperseg = nfft
        nfft = None

    # A periodogram is a single-segment Welch estimate (noverlap=0).
    return welch(x, fs, window, nperseg, 0, nfft, detrend, return_onesided,
                 scaling, axis)
If `detrend` is a string, it is passed as the `type` argument to the `detrend` function. If it is a function, it takes a segment and returns a detrended segment. If `detrend` is `False`, no detrending is done. Defaults to 'constant'. return_onesided : bool, optional If `True`, return a one-sided spectrum for real data. If `False` return a two-sided spectrum. Note that for complex data, a two-sided spectrum is always returned. scaling : { 'density', 'spectrum' }, optional Selects between computing the power spectral density ('density') where `Pxx` has units of V**2/Hz and computing the power spectrum ('spectrum') where `Pxx` has units of V**2, if `x` is measured in V and `fs` is measured in Hz. Defaults to 'density' axis : int, optional Axis along which the periodogram is computed; the default is over the last axis (i.e. ``axis=-1``). Returns ------- f : ndarray Array of sample frequencies. Pxx : ndarray Power spectral density or power spectrum of x. See Also -------- periodogram: Simple, optionally modified periodogram lombscargle: Lomb-Scargle periodogram for unevenly sampled data Notes ----- An appropriate amount of overlap will depend on the choice of window and on your requirements. For the default Hann window an overlap of 50% is a reasonable trade off between accurately estimating the signal power, while not over counting any of the data. Narrower windows may require a larger overlap. If `noverlap` is 0, this method is equivalent to Bartlett's method [2]_. .. versionadded:: 0.12.0 References ---------- .. [1] P. Welch, "The use of the fast Fourier transform for the estimation of power spectra: A method based on time averaging over short, modified periodograms", IEEE Trans. Audio Electroacoust. vol. 15, pp. 70-73, 1967. .. [2] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra", Biometrika, vol. 37, pp. 1-16, 1950. 
Examples -------- >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> np.random.seed(1234) Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by 0.001 V**2/Hz of white noise sampled at 10 kHz. >>> fs = 10e3 >>> N = 1e5 >>> amp = 2*np.sqrt(2) >>> freq = 1234.0 >>> noise_power = 0.001 * fs / 2 >>> time = np.arange(N) / fs >>> x = amp*np.sin(2*np.pi*freq*time) >>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape) Compute and plot the power spectral density. >>> f, Pxx_den = signal.welch(x, fs, nperseg=1024) >>> plt.semilogy(f, Pxx_den) >>> plt.ylim([0.5e-3, 1]) >>> plt.xlabel('frequency [Hz]') >>> plt.ylabel('PSD [V**2/Hz]') >>> plt.show() If we average the last half of the spectral density, to exclude the peak, we can recover the noise power on the signal. >>> np.mean(Pxx_den[256:]) 0.0009924865443739191 Now compute and plot the power spectrum. >>> f, Pxx_spec = signal.welch(x, fs, 'flattop', 1024, scaling='spectrum') >>> plt.figure() >>> plt.semilogy(f, np.sqrt(Pxx_spec)) >>> plt.xlabel('frequency [Hz]') >>> plt.ylabel('Linear spectrum [V RMS]') >>> plt.show() The peak height in the power spectrum is an estimate of the RMS amplitude. >>> np.sqrt(Pxx_spec.max()) 2.0077340678640727 """ freqs, Pxx = csd(x, x, fs, window, nperseg, noverlap, nfft, detrend, return_onesided, scaling, axis) return freqs, Pxx.real def csd(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None, detrend='constant', return_onesided=True, scaling='density', axis=-1): r""" Estimate the cross power spectral density, Pxy, using Welch's method. Parameters ---------- x : array_like Time series of measurement values y : array_like Time series of measurement values fs : float, optional Sampling frequency of the `x` and `y` time series. Defaults to 1.0. window : str or tuple or array_like, optional Desired window to use. If `window` is a string or tuple, it is passed to `get_window` to generate the window values, which are DFT-even by default. 
See `get_window` for a list of windows and required parameters. If `window` is array_like it will be used directly as the window and its length must be nperseg. Defaults to a Hann window. nperseg : int, optional Length of each segment. Defaults to None, but if window is str or tuple, is set to 256, and if window is array_like, is set to the length of the window. noverlap: int, optional Number of points to overlap between segments. If `None`, ``noverlap = nperseg // 2``. Defaults to `None`. nfft : int, optional Length of the FFT used, if a zero padded FFT is desired. If `None`, the FFT length is `nperseg`. Defaults to `None`. detrend : str or function or `False`, optional Specifies how to detrend each segment. If `detrend` is a string, it is passed as the `type` argument to the `detrend` function. If it is a function, it takes a segment and returns a detrended segment. If `detrend` is `False`, no detrending is done. Defaults to 'constant'. return_onesided : bool, optional If `True`, return a one-sided spectrum for real data. If `False` return a two-sided spectrum. Note that for complex data, a two-sided spectrum is always returned. scaling : { 'density', 'spectrum' }, optional Selects between computing the cross spectral density ('density') where `Pxy` has units of V**2/Hz and computing the cross spectrum ('spectrum') where `Pxy` has units of V**2, if `x` and `y` are measured in V and `fs` is measured in Hz. Defaults to 'density' axis : int, optional Axis along which the CSD is computed for both inputs; the default is over the last axis (i.e. ``axis=-1``). Returns ------- f : ndarray Array of sample frequencies. Pxy : ndarray Cross spectral density or cross power spectrum of x,y. See Also -------- periodogram: Simple, optionally modified periodogram lombscargle: Lomb-Scargle periodogram for unevenly sampled data welch: Power spectral density by Welch's method. [Equivalent to csd(x,x)] coherence: Magnitude squared coherence by Welch's method. 
Notes -------- By convention, Pxy is computed with the conjugate FFT of X multiplied by the FFT of Y. If the input series differ in length, the shorter series will be zero-padded to match. An appropriate amount of overlap will depend on the choice of window and on your requirements. For the default Hann window an overlap of 50% is a reasonable trade off between accurately estimating the signal power, while not over counting any of the data. Narrower windows may require a larger overlap. .. versionadded:: 0.16.0 References ---------- .. [1] P. Welch, "The use of the fast Fourier transform for the estimation of power spectra: A method based on time averaging over short, modified periodograms", IEEE Trans. Audio Electroacoust. vol. 15, pp. 70-73, 1967. .. [2] Rabiner, Lawrence R., and B. Gold. "Theory and Application of Digital Signal Processing" Prentice-Hall, pp. 414-419, 1975 Examples -------- >>> from scipy import signal >>> import matplotlib.pyplot as plt Generate two test signals with some common features. >>> fs = 10e3 >>> N = 1e5 >>> amp = 20 >>> freq = 1234.0 >>> noise_power = 0.001 * fs / 2 >>> time = np.arange(N) / fs >>> b, a = signal.butter(2, 0.25, 'low') >>> x = np.random.normal(scale=np.sqrt(noise_power), size=time.shape) >>> y = signal.lfilter(b, a, x) >>> x += amp*np.sin(2*np.pi*freq*time) >>> y += np.random.normal(scale=0.1*np.sqrt(noise_power), size=time.shape) Compute and plot the magnitude of the cross spectral density. >>> f, Pxy = signal.csd(x, y, fs, nperseg=1024) >>> plt.semilogy(f, np.abs(Pxy)) >>> plt.xlabel('frequency [Hz]') >>> plt.ylabel('CSD [V**2/Hz]') >>> plt.show() """ freqs, _, Pxy = _spectral_helper(x, y, fs, window, nperseg, noverlap, nfft, detrend, return_onesided, scaling, axis, mode='psd') # Average over windows. 
if len(Pxy.shape) >= 2 and Pxy.size > 0: if Pxy.shape[-1] > 1: Pxy = Pxy.mean(axis=-1) else: Pxy = np.reshape(Pxy, Pxy.shape[:-1]) return freqs, Pxy def spectrogram(x, fs=1.0, window=('tukey',.25), nperseg=None, noverlap=None, nfft=None, detrend='constant', return_onesided=True, scaling='density', axis=-1, mode='psd'): """ Compute a spectrogram with consecutive Fourier transforms. Spectrograms can be used as a way of visualizing the change of a nonstationary signal's frequency content over time. Parameters ---------- x : array_like Time series of measurement values fs : float, optional Sampling frequency of the `x` time series. Defaults to 1.0. window : str or tuple or array_like, optional Desired window to use. If `window` is a string or tuple, it is passed to `get_window` to generate the window values, which are DFT-even by default. See `get_window` for a list of windows and required parameters. If `window` is array_like it will be used directly as the window and its length must be nperseg. Defaults to a Tukey window with shape parameter of 0.25. nperseg : int, optional Length of each segment. Defaults to None, but if window is str or tuple, is set to 256, and if window is array_like, is set to the length of the window. noverlap : int, optional Number of points to overlap between segments. If `None`, ``noverlap = nperseg // 8``. Defaults to `None`. nfft : int, optional Length of the FFT used, if a zero padded FFT is desired. If `None`, the FFT length is `nperseg`. Defaults to `None`. detrend : str or function or `False`, optional Specifies how to detrend each segment. If `detrend` is a string, it is passed as the `type` argument to the `detrend` function. If it is a function, it takes a segment and returns a detrended segment. If `detrend` is `False`, no detrending is done. Defaults to 'constant'. return_onesided : bool, optional If `True`, return a one-sided spectrum for real data. If `False` return a two-sided spectrum. 
Note that for complex data, a two-sided spectrum is always returned. scaling : { 'density', 'spectrum' }, optional Selects between computing the power spectral density ('density') where `Sxx` has units of V**2/Hz and computing the power spectrum ('spectrum') where `Sxx` has units of V**2, if `x` is measured in V and `fs` is measured in Hz. Defaults to 'density'. axis : int, optional Axis along which the spectrogram is computed; the default is over the last axis (i.e. ``axis=-1``). mode : str, optional Defines what kind of return values are expected. Options are ['psd', 'complex', 'magnitude', 'angle', 'phase']. 'complex' is equivalent to the output of `stft` with no padding or boundary extension. 'magnitude' returns the absolute magnitude of the STFT. 'angle' and 'phase' return the complex angle of the STFT, with and without unwrapping, respectively. Returns ------- f : ndarray Array of sample frequencies. t : ndarray Array of segment times. Sxx : ndarray Spectrogram of x. By default, the last axis of Sxx corresponds to the segment times. See Also -------- periodogram: Simple, optionally modified periodogram lombscargle: Lomb-Scargle periodogram for unevenly sampled data welch: Power spectral density by Welch's method. csd: Cross spectral density by Welch's method. Notes ----- An appropriate amount of overlap will depend on the choice of window and on your requirements. In contrast to welch's method, where the entire data stream is averaged over, one may wish to use a smaller overlap (or perhaps none at all) when computing a spectrogram, to maintain some statistical independence between individual segments. It is for this reason that the default window is a Tukey window with 1/8th of a window's length overlap at each end. .. versionadded:: 0.16.0 References ---------- .. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck "Discrete-Time Signal Processing", Prentice Hall, 1999. 
Examples -------- >>> from scipy import signal >>> import matplotlib.pyplot as plt Generate a test signal, a 2 Vrms sine wave whose frequency is slowly modulated around 3kHz, corrupted by white noise of exponentially decreasing magnitude sampled at 10 kHz. >>> fs = 10e3 >>> N = 1e5 >>> amp = 2 * np.sqrt(2) >>> noise_power = 0.01 * fs / 2 >>> time = np.arange(N) / float(fs) >>> mod = 500*np.cos(2*np.pi*0.25*time) >>> carrier = amp * np.sin(2*np.pi*3e3*time + mod) >>> noise = np.random.normal(scale=np.sqrt(noise_power), size=time.shape) >>> noise *= np.exp(-time/5) >>> x = carrier + noise Compute and plot the spectrogram. >>> f, t, Sxx = signal.spectrogram(x, fs) >>> plt.pcolormesh(t, f, Sxx) >>> plt.ylabel('Frequency [Hz]') >>> plt.xlabel('Time [sec]') >>> plt.show() """ modelist = ['psd', 'complex', 'magnitude', 'angle', 'phase'] if mode not in modelist: raise ValueError('unknown value for mode {}, must be one of {}' .format(mode, modelist)) # need to set default for nperseg before setting default for noverlap below window, nperseg = _triage_segments(window, nperseg, input_length=x.shape[axis]) # Less overlap than welch, so samples are more statisically independent if noverlap is None: noverlap = nperseg // 8 if mode == 'psd': freqs, time, Sxx = _spectral_helper(x, x, fs, window, nperseg, noverlap, nfft, detrend, return_onesided, scaling, axis, mode='psd') else: freqs, time, Sxx = _spectral_helper(x, x, fs, window, nperseg, noverlap, nfft, detrend, return_onesided, scaling, axis, mode='stft') if mode == 'magnitude': Sxx = np.abs(Sxx) elif mode in ['angle', 'phase']: Sxx = np.angle(Sxx) if mode == 'phase': # Sxx has one additional dimension for time strides if axis < 0: axis -= 1 Sxx = np.unwrap(Sxx, axis=axis) # mode =='complex' is same as `stft`, doesn't need modification return freqs, time, Sxx def check_COLA(window, nperseg, noverlap, tol=1e-10): r""" Check whether the Constant OverLap Add (COLA) constraint is met Parameters ---------- window : str or tuple or 
array_like Desired window to use. If `window` is a string or tuple, it is passed to `get_window` to generate the window values, which are DFT-even by default. See `get_window` for a list of windows and required parameters. If `window` is array_like it will be used directly as the window and its length must be nperseg. nperseg : int Length of each segment. noverlap : int Number of points to overlap between segments. tol : float, optional The allowed variance of a bin's weighted sum from the median bin sum. Returns ------- verdict : bool `True` if chosen combination satisfies COLA within `tol`, `False` otherwise See Also -------- stft: Short Time Fourier Transform istft: Inverse Short Time Fourier Transform Notes ----- In order to enable inversion of an STFT via the inverse STFT in `istft`, the signal windowing must obey the constraint of "Constant OverLap Add" (COLA). This ensures that every point in the input data is equally weighted, thereby avoiding aliasing and allowing full reconstruction. Some examples of windows that satisfy COLA: - Rectangular window at overlap of 0, 1/2, 2/3, 3/4, ... - Bartlett window at overlap of 1/2, 3/4, 5/6, ... - Hann window at 1/2, 2/3, 3/4, ... - Any Blackman family window at 2/3 overlap - Any window with ``noverlap = nperseg-1`` A very comprehensive list of other windows may be found in [2]_, wherein the COLA condition is satisfied when the "Amplitude Flatness" is unity. .. versionadded:: 0.19.0 References ---------- .. [1] Julius O. Smith III, "Spectral Audio Signal Processing", W3K Publishing, 2011,ISBN 978-0-9745607-3-1. .. [2] G. Heinzel, A. Ruediger and R. 
Schilling, "Spectrum and spectral density estimation by the Discrete Fourier transform (DFT), including a comprehensive list of window functions and some new at-top windows", 2002, http://hdl.handle.net/11858/00-001M-0000-0013-557A-5 Examples -------- >>> from scipy import signal Confirm COLA condition for rectangular window of 75% (3/4) overlap: >>> signal.check_COLA(signal.boxcar(100), 100, 75) True COLA is not true for 25% (1/4) overlap, though: >>> signal.check_COLA(signal.boxcar(100), 100, 25) False "Symmetrical" Hann window (for filter design) is not COLA: >>> signal.check_COLA(signal.hann(120, sym=True), 120, 60) False "Periodic" or "DFT-even" Hann window (for FFT analysis) is COLA for overlap of 1/2, 2/3, 3/4, etc.: >>> signal.check_COLA(signal.hann(120, sym=False), 120, 60) True >>> signal.check_COLA(signal.hann(120, sym=False), 120, 80) True >>> signal.check_COLA(signal.hann(120, sym=False), 120, 90) True """ nperseg = int(nperseg) if nperseg < 1: raise ValueError('nperseg must be a positive integer') if noverlap >= nperseg: raise ValueError('noverlap must be less than nperseg.') noverlap = int(noverlap) if isinstance(window, string_types) or type(window) is tuple: win = get_window(window, nperseg) else: win = np.asarray(window) if len(win.shape) != 1: raise ValueError('window must be 1-D') if win.shape[0] != nperseg: raise ValueError('window must have length of nperseg') step = nperseg - noverlap binsums = np.sum((win[ii*step:(ii+1)*step] for ii in range(nperseg//step)), axis=0) if nperseg % step != 0: binsums[:nperseg % step] += win[-(nperseg % step):] deviation = binsums - np.median(binsums) return np.max(np.abs(deviation)) < tol def stft(x, fs=1.0, window='hann', nperseg=256, noverlap=None, nfft=None, detrend=False, return_onesided=True, boundary='zeros', padded=True, axis=-1): r""" Compute the Short Time Fourier Transform (STFT). STFTs can be used as a way of quantifying the change of a nonstationary signal's frequency and phase content over time. 
Parameters ---------- x : array_like Time series of measurement values fs : float, optional Sampling frequency of the `x` time series. Defaults to 1.0. window : str or tuple or array_like, optional Desired window to use. If `window` is a string or tuple, it is passed to `get_window` to generate the window values, which are DFT-even by default. See `get_window` for a list of windows and required parameters. If `window` is array_like it will be used directly as the window and its length must be nperseg. Defaults to a Hann window. nperseg : int, optional Length of each segment. Defaults to 256. noverlap : int, optional Number of points to overlap between segments. If `None`, ``noverlap = nperseg // 2``. Defaults to `None`. When specified, the COLA constraint must be met (see Notes below). nfft : int, optional Length of the FFT used, if a zero padded FFT is desired. If `None`, the FFT length is `nperseg`. Defaults to `None`. detrend : str or function or `False`, optional Specifies how to detrend each segment. If `detrend` is a string, it is passed as the `type` argument to the `detrend` function. If it is a function, it takes a segment and returns a detrended segment. If `detrend` is `False`, no detrending is done. Defaults to `False`. return_onesided : bool, optional If `True`, return a one-sided spectrum for real data. If `False` return a two-sided spectrum. Note that for complex data, a two-sided spectrum is always returned. Defaults to `True`. boundary : str or None, optional Specifies whether the input signal is extended at both ends, and how to generate the new values, in order to center the first windowed segment on the first input point. This has the benefit of enabling reconstruction of the first input point when the employed window function starts at zero. Valid options are ``['even', 'odd', 'constant', 'zeros', None]``. Defaults to 'zeros', for zero padding extension. I.e. ``[1, 2, 3, 4]`` is extended to ``[0, 1, 2, 3, 4, 0]`` for ``nperseg=3``. 
padded : bool, optional Specifies whether the input signal is zero-padded at the end to make the signal fit exactly into an integer number of window segments, so that all of the signal is included in the output. Defaults to `True`. Padding occurs after boundary extension, if `boundary` is not `None`, and `padded` is `True`, as is the default. axis : int, optional Axis along which the STFT is computed; the default is over the last axis (i.e. ``axis=-1``). Returns ------- f : ndarray Array of sample frequencies. t : ndarray Array of segment times. Zxx : ndarray STFT of `x`. By default, the last axis of `Zxx` corresponds to the segment times. See Also -------- istft: Inverse Short Time Fourier Transform check_COLA: Check whether the Constant OverLap Add (COLA) constraint is met welch: Power spectral density by Welch's method. spectrogram: Spectrogram by Welch's method. csd: Cross spectral density by Welch's method. lombscargle: Lomb-Scargle periodogram for unevenly sampled data Notes ----- In order to enable inversion of an STFT via the inverse STFT in `istft`, the signal windowing must obey the constraint of "Constant OverLap Add" (COLA), and the input signal must have complete windowing coverage (i.e. ``(x.shape[axis] - nperseg) % (nperseg-noverlap) == 0``). The `padded` argument may be used to accomplish this. The COLA constraint ensures that every point in the input data is equally weighted, thereby avoiding aliasing and allowing full reconstruction. Whether a choice of `window`, `nperseg`, and `noverlap` satisfy this constraint can be tested with `check_COLA`. .. versionadded:: 0.19.0 References ---------- .. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck "Discrete-Time Signal Processing", Prentice Hall, 1999. .. [2] Daniel W. Griffin, Jae S. 
Limdt "Signal Estimation from Modified Short Fourier Transform", IEEE 1984, 10.1109/TASSP.1984.1164317 Examples -------- >>> from scipy import signal >>> import matplotlib.pyplot as plt Generate a test signal, a 2 Vrms sine wave whose frequency is slowly modulated around 3kHz, corrupted by white noise of exponentially decreasing magnitude sampled at 10 kHz. >>> fs = 10e3 >>> N = 1e5 >>> amp = 2 * np.sqrt(2) >>> noise_power = 0.01 * fs / 2 >>> time = np.arange(N) / float(fs) >>> mod = 500*np.cos(2*np.pi*0.25*time) >>> carrier = amp * np.sin(2*np.pi*3e3*time + mod) >>> noise = np.random.normal(scale=np.sqrt(noise_power), ... size=time.shape) >>> noise *= np.exp(-time/5) >>> x = carrier + noise Compute and plot the STFT's magnitude. >>> f, t, Zxx = signal.stft(x, fs, nperseg=1000) >>> plt.pcolormesh(t, f, np.abs(Zxx), vmin=0, vmax=amp) >>> plt.title('STFT Magnitude') >>> plt.ylabel('Frequency [Hz]') >>> plt.xlabel('Time [sec]') >>> plt.show() """ freqs, time, Zxx = _spectral_helper(x, x, fs, window, nperseg, noverlap, nfft, detrend, return_onesided, scaling='spectrum', axis=axis, mode='stft', boundary=boundary, padded=padded) return freqs, time, Zxx def istft(Zxx, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None, input_onesided=True, boundary=True, time_axis=-1, freq_axis=-2): r""" Perform the inverse Short Time Fourier transform (iSTFT). Parameters ---------- Zxx : array_like STFT of the signal to be reconstructed. If a purely real array is passed, it will be cast to a complex data type. fs : float, optional Sampling frequency of the time series. Defaults to 1.0. window : str or tuple or array_like, optional Desired window to use. If `window` is a string or tuple, it is passed to `get_window` to generate the window values, which are DFT-even by default. See `get_window` for a list of windows and required parameters. If `window` is array_like it will be used directly as the window and its length must be nperseg. Defaults to a Hann window. 
Must match the window used to generate the STFT for faithful inversion. nperseg : int, optional Number of data points corresponding to each STFT segment. This parameter must be specified if the number of data points per segment is odd, or if the STFT was padded via ``nfft > nperseg``. If `None`, the value depends on the shape of `Zxx` and `input_onesided`. If `input_onesided` is True, ``nperseg=2*(Zxx.shape[freq_axis] - 1)``. Otherwise, ``nperseg=Zxx.shape[freq_axis]``. Defaults to `None`. noverlap : int, optional Number of points to overlap between segments. If `None`, half of the segment length. Defaults to `None`. When specified, the COLA constraint must be met (see Notes below), and should match the parameter used to generate the STFT. Defaults to `None`. nfft : int, optional Number of FFT points corresponding to each STFT segment. This parameter must be specified if the STFT was padded via ``nfft > nperseg``. If `None`, the default values are the same as for `nperseg`, detailed above, with one exception: if `input_onesided` is True and ``nperseg==2*Zxx.shape[freq_axis] - 1``, `nfft` also takes on that value. This case allows the proper inversion of an odd-length unpadded STFT using ``nfft=None``. Defaults to `None`. input_onesided : bool, optional If `True`, interpret the input array as one-sided FFTs, such as is returned by `stft` with ``return_onesided=True`` and `numpy.fft.rfft`. If `False`, interpret the input as a a two-sided FFT. Defaults to `True`. boundary : bool, optional Specifies whether the input signal was extended at its boundaries by supplying a non-`None` ``boundary`` argument to `stft`. Defaults to `True`. time_axis : int, optional Where the time segments of the STFT is located; the default is the last axis (i.e. ``axis=-1``). freq_axis : int, optional Where the frequency axis of the STFT is located; the default is the penultimate axis (i.e. ``axis=-2``). Returns ------- t : ndarray Array of output data times. x : ndarray iSTFT of `Zxx`. 
See Also -------- stft: Short Time Fourier Transform check_COLA: Check whether the Constant OverLap Add (COLA) constraint is met Notes ----- In order to enable inversion of an STFT via the inverse STFT with `istft`, the signal windowing must obey the constraint of "Constant OverLap Add" (COLA). This ensures that every point in the input data is equally weighted, thereby avoiding aliasing and allowing full reconstruction. Whether a choice of `window`, `nperseg`, and `noverlap` satisfy this constraint can be tested with `check_COLA`, by using ``nperseg = Zxx.shape[freq_axis]``. An STFT which has been modified (via masking or otherwise) is not guaranteed to correspond to a exactly realizible signal. This function implements the iSTFT via the least-squares esimation algorithm detailed in [2]_, which produces a signal that minimizes the mean squared error between the STFT of the returned signal and the modified STFT. .. versionadded:: 0.19.0 References ---------- .. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck "Discrete-Time Signal Processing", Prentice Hall, 1999. .. [2] Daniel W. Griffin, Jae S. Limdt "Signal Estimation from Modified Short Fourier Transform", IEEE 1984, 10.1109/TASSP.1984.1164317 Examples -------- >>> from scipy import signal >>> import matplotlib.pyplot as plt Generate a test signal, a 2 Vrms sine wave at 50Hz corrupted by 0.001 V**2/Hz of white noise sampled at 1024 Hz. >>> fs = 1024 >>> N = 10*fs >>> nperseg = 512 >>> amp = 2 * np.sqrt(2) >>> noise_power = 0.001 * fs / 2 >>> time = np.arange(N) / float(fs) >>> carrier = amp * np.sin(2*np.pi*50*time) >>> noise = np.random.normal(scale=np.sqrt(noise_power), ... 
size=time.shape) >>> x = carrier + noise Compute the STFT, and plot its magnitude >>> f, t, Zxx = signal.stft(x, fs=fs, nperseg=nperseg) >>> plt.figure() >>> plt.pcolormesh(t, f, np.abs(Zxx), vmin=0, vmax=amp) >>> plt.ylim([f[1], f[-1]]) >>> plt.title('STFT Magnitude') >>> plt.ylabel('Frequency [Hz]') >>> plt.xlabel('Time [sec]') >>> plt.yscale('log') >>> plt.show() Zero the components that are 10% or less of the carrier magnitude, then convert back to a time series via inverse STFT >>> Zxx = np.where(np.abs(Zxx) >= amp/10, Zxx, 0) >>> _, xrec = signal.istft(Zxx, fs) Compare the cleaned signal with the original and true carrier signals. >>> plt.figure() >>> plt.plot(time, x, time, xrec, time, carrier) >>> plt.xlim([2, 2.1]) >>> plt.xlabel('Time [sec]') >>> plt.ylabel('Signal') >>> plt.legend(['Carrier + Noise', 'Filtered via STFT', 'True Carrier']) >>> plt.show() Note that the cleaned signal does not start as abruptly as the original, since some of the coefficients of the transient were also removed: >>> plt.figure() >>> plt.plot(time, x, time, xrec, time, carrier) >>> plt.xlim([0, 0.1]) >>> plt.xlabel('Time [sec]') >>> plt.ylabel('Signal') >>> plt.legend(['Carrier + Noise', 'Filtered via STFT', 'True Carrier']) >>> plt.show() """ # Make sure input is an ndarray of appropriate complex dtype Zxx = np.asarray(Zxx) + 0j freq_axis = int(freq_axis) time_axis = int(time_axis) if Zxx.ndim < 2: raise ValueError('Input stft must be at least 2d!') if freq_axis == time_axis: raise ValueError('Must specify differing time and frequency axes!') nseg = Zxx.shape[time_axis] if input_onesided: # Assume even segment length n_default = 2*(Zxx.shape[freq_axis] - 1) else: n_default = Zxx.shape[freq_axis] # Check windowing parameters if nperseg is None: nperseg = n_default else: nperseg = int(nperseg) if nperseg < 1: raise ValueError('nperseg must be a positive integer') if nfft is None: if (input_onesided) and (nperseg == n_default + 1): # Odd nperseg, no FFT padding nfft = nperseg 
else: nfft = n_default elif nfft < nperseg: raise ValueError('nfft must be greater than or equal to nperseg.') else: nfft = int(nfft) if noverlap is None: noverlap = nperseg//2 else: noverlap = int(noverlap) if noverlap >= nperseg: raise ValueError('noverlap must be less than nperseg.') nstep = nperseg - noverlap if not check_COLA(window, nperseg, noverlap): raise ValueError('Window, STFT shape and noverlap do not satisfy the ' 'COLA constraint.') # Rearrange axes if neccessary if time_axis != Zxx.ndim-1 or freq_axis != Zxx.ndim-2: # Turn negative indices to positive for the call to transpose if freq_axis < 0: freq_axis = Zxx.ndim + freq_axis if time_axis < 0: time_axis = Zxx.ndim + time_axis zouter = list(range(Zxx.ndim)) for ax in sorted([time_axis, freq_axis], reverse=True): zouter.pop(ax) Zxx = np.transpose(Zxx, zouter+[freq_axis, time_axis]) # Get window as array if isinstance(window, string_types) or type(window) is tuple: win = get_window(window, nperseg) else: win = np.asarray(window) if len(win.shape) != 1: raise ValueError('window must be 1-D') if win.shape[0] != nperseg: raise ValueError('window must have length of {0}'.format(nperseg)) if input_onesided: ifunc = np.fft.irfft else: ifunc = fftpack.ifft xsubs = ifunc(Zxx, axis=-2, n=nfft)[..., :nperseg, :] # Initialize output and normalization arrays outputlength = nperseg + (nseg-1)*nstep x = np.zeros(list(Zxx.shape[:-2])+[outputlength], dtype=xsubs.dtype) norm = np.zeros(outputlength, dtype=xsubs.dtype) if np.result_type(win, xsubs) != xsubs.dtype: win = win.astype(xsubs.dtype) xsubs *= win.sum() # This takes care of the 'spectrum' scaling # Construct the output from the ifft segments # This loop could perhaps be vectorized/strided somehow... 
def coherence(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None,
              nfft=None, detrend='constant', axis=-1):
    r"""
    Estimate the magnitude squared coherence estimate, Cxy, of
    discrete-time signals X and Y using Welch's method.

    ``Cxy = abs(Pxy)**2/(Pxx*Pyy)``, where `Pxx` and `Pyy` are power
    spectral density estimates of X and Y, and `Pxy` is the cross
    spectral density estimate of X and Y.

    Parameters
    ----------
    x : array_like
        Time series of measurement values
    y : array_like
        Time series of measurement values
    fs : float, optional
        Sampling frequency of the `x` and `y` time series. Defaults
        to 1.0.
    window : str or tuple or array_like, optional
        Desired window to use. If `window` is a string or tuple, it is
        passed to `get_window` to generate the window values, which are
        DFT-even by default. See `get_window` for a list of windows and
        required parameters. If `window` is array_like it will be used
        directly as the window and its length must be nperseg. Defaults
        to a Hann window.
    nperseg : int, optional
        Length of each segment. Defaults to None, but if window is str or
        tuple, is set to 256, and if window is array_like, is set to the
        length of the window.
    noverlap : int, optional
        Number of points to overlap between segments. If `None`,
        ``noverlap = nperseg // 2``. Defaults to `None`.
    nfft : int, optional
        Length of the FFT used, if a zero padded FFT is desired. If
        `None`, the FFT length is `nperseg`. Defaults to `None`.
    detrend : str or function or `False`, optional
        Specifies how to detrend each segment. If `detrend` is a
        string, it is passed as the `type` argument to the `detrend`
        function. If it is a function, it takes a segment and returns a
        detrended segment. If `detrend` is `False`, no detrending is
        done. Defaults to 'constant'.
    axis : int, optional
        Axis along which the coherence is computed for both inputs; the
        default is over the last axis (i.e. ``axis=-1``).

    Returns
    -------
    f : ndarray
        Array of sample frequencies.
    Cxy : ndarray
        Magnitude squared coherence of x and y.

    See Also
    --------
    periodogram: Simple, optionally modified periodogram
    lombscargle: Lomb-Scargle periodogram for unevenly sampled data
    welch: Power spectral density by Welch's method.
    csd: Cross spectral density by Welch's method.

    Notes
    -----
    An appropriate amount of overlap will depend on the choice of window
    and on your requirements. For the default Hann window an overlap of
    50% is a reasonable trade off between accurately estimating the
    signal power, while not over counting any of the data. Narrower
    windows may require a larger overlap.

    .. versionadded:: 0.16.0

    References
    ----------
    .. [1] P. Welch, "The use of the fast Fourier transform for the
           estimation of power spectra: A method based on time averaging
           over short, modified periodograms", IEEE Trans. Audio
           Electroacoust. vol. 15, pp. 70-73, 1967.
    .. [2] Stoica, Petre, and Randolph Moses, "Spectral Analysis of
           Signals" Prentice Hall, 2005
    """
    # All three spectral estimates must use identical segmentation
    # parameters so that their frequency bins line up exactly.
    spectral_kw = dict(fs=fs, window=window, nperseg=nperseg,
                       noverlap=noverlap, nfft=nfft, detrend=detrend,
                       axis=axis)

    freqs, Pxx = welch(x, **spectral_kw)       # auto-spectrum of x
    _, Pyy = welch(y, **spectral_kw)           # auto-spectrum of y
    _, Pxy = csd(x, y, **spectral_kw)          # cross-spectrum of x and y

    # Magnitude-squared coherence: |Pxy|^2 / (Pxx * Pyy).
    Cxy = np.abs(Pxy)**2 / Pxx / Pyy

    return freqs, Cxy
def _spectral_helper(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None,
                     nfft=None, detrend='constant', return_onesided=True,
                     scaling='spectrum', axis=-1, mode='psd', boundary=None,
                     padded=False):
    """
    Calculate various forms of windowed FFTs for PSD, CSD, etc.

    This is a helper function that implements the commonality between
    the stft, psd, csd, and spectrogram functions. It is not designed to
    be called externally. The windows are not averaged over; the result
    from each window is returned.

    Parameters
    ----------
    x : array_like
        Array or sequence containing the data to be analyzed.
    y : array_like
        Array or sequence containing the data to be analyzed. If this is
        the same object in memory as `x` (i.e.
        ``_spectral_helper(x, x, ...)``), the extra computations are
        spared.
    fs : float, optional
        Sampling frequency of the time series. Defaults to 1.0.
    window : str or tuple or array_like, optional
        Desired window to use. If `window` is a string or tuple, it is
        passed to `get_window` to generate the window values, which are
        DFT-even by default. See `get_window` for a list of windows and
        required parameters. If `window` is array_like it will be used
        directly as the window and its length must be nperseg. Defaults
        to a Hann window.
    nperseg : int, optional
        Length of each segment. Defaults to None, but if window is str or
        tuple, is set to 256, and if window is array_like, is set to the
        length of the window.
    noverlap : int, optional
        Number of points to overlap between segments. If `None`,
        ``noverlap = nperseg // 2``. Defaults to `None`.
    nfft : int, optional
        Length of the FFT used, if a zero padded FFT is desired. If
        `None`, the FFT length is `nperseg`. Defaults to `None`.
    detrend : str or function or `False`, optional
        Specifies how to detrend each segment. If `detrend` is a
        string, it is passed as the `type` argument to the `detrend`
        function. If it is a function, it takes a segment and returns a
        detrended segment. If `detrend` is `False`, no detrending is
        done. Defaults to 'constant'.
    return_onesided : bool, optional
        If `True`, return a one-sided spectrum for real data. If
        `False` return a two-sided spectrum. Note that for complex
        data, a two-sided spectrum is always returned.
    scaling : { 'density', 'spectrum' }, optional
        Selects between computing the cross spectral density ('density')
        where `Pxy` has units of V**2/Hz and computing the cross
        spectrum ('spectrum') where `Pxy` has units of V**2, if `x`
        and `y` are measured in V and `fs` is measured in Hz.
        Defaults to 'density'
    axis : int, optional
        Axis along which the FFTs are computed; the default is over the
        last axis (i.e. ``axis=-1``).
    mode: str {'psd', 'stft'}, optional
        Defines what kind of return values are expected. Defaults to
        'psd'.
    boundary : str or None, optional
        Specifies whether the input signal is extended at both ends, and
        how to generate the new values, in order to center the first
        windowed segment on the first input point. This has the benefit
        of enabling reconstruction of the first input point when the
        employed window function starts at zero. Valid options are
        ``['even', 'odd', 'constant', 'zeros', None]``. Defaults to
        `None`.
    padded : bool, optional
        Specifies whether the input signal is zero-padded at the end to
        make the signal fit exactly into an integer number of window
        segments, so that all of the signal is included in the output.
        Defaults to `False`. Padding occurs after boundary extension, if
        `boundary` is not `None`, and `padded` is `True`.

    Returns
    -------
    freqs : ndarray
        Array of sample frequencies.
    t : ndarray
        Array of times corresponding to each data segment
    result : ndarray
        Array of output data, contents dependent on *mode* kwarg.

    References
    ----------
    .. [1] Stack Overflow, "Rolling window for 1D arrays in Numpy?",
           http://stackoverflow.com/a/6811241
    .. [2] Stack Overflow, "Using strides for an efficient moving
           average filter", http://stackoverflow.com/a/4947453

    Notes
    -----
    Adapted from matplotlib.mlab

    .. versionadded:: 0.16.0
    """
    if mode not in ['psd', 'stft']:
        raise ValueError("Unknown value for mode %s, must be one of: "
                         "{'psd', 'stft'}" % mode)

    # Map each boundary-extension keyword to its extension function; None
    # means "no extension".
    boundary_funcs = {'even': even_ext,
                      'odd': odd_ext,
                      'constant': const_ext,
                      'zeros': zero_ext,
                      None: None}

    if boundary not in boundary_funcs:
        raise ValueError("Unknown boundary option '{0}', must be one of: {1}"
                         .format(boundary, list(boundary_funcs.keys())))

    # If x and y are the same object we can save ourselves some computation.
    same_data = y is x

    if not same_data and mode != 'psd':
        raise ValueError("x and y must be equal if mode is 'stft'")

    axis = int(axis)

    # Ensure we have np.arrays, get outdtype.  The output is always at
    # least complex64 so the FFT result fits.
    x = np.asarray(x)
    if not same_data:
        y = np.asarray(y)
        outdtype = np.result_type(x, y, np.complex64)
    else:
        outdtype = np.result_type(x, np.complex64)

    if not same_data:
        # Check if we can broadcast the outer axes together
        xouter = list(x.shape)
        youter = list(y.shape)
        xouter.pop(axis)
        youter.pop(axis)
        try:
            outershape = np.broadcast(np.empty(xouter),
                                      np.empty(youter)).shape
        except ValueError:
            raise ValueError('x and y cannot be broadcast together.')

    # Empty inputs short-circuit with matching empty outputs.
    if same_data:
        if x.size == 0:
            return np.empty(x.shape), np.empty(x.shape), np.empty(x.shape)
    else:
        if x.size == 0 or y.size == 0:
            outshape = outershape + (min([x.shape[axis], y.shape[axis]]),)
            emptyout = np.rollaxis(np.empty(outshape), -1, axis)
            return emptyout, emptyout, emptyout

    # Move the analysis axis to the end so all later code can assume
    # axis=-1; it is rolled back just before returning.
    if x.ndim > 1:
        if axis != -1:
            x = np.rollaxis(x, axis, len(x.shape))
            if not same_data and y.ndim > 1:
                y = np.rollaxis(y, axis, len(y.shape))

    # Check if x and y are the same length, zero-pad if necessary
    if not same_data:
        if x.shape[-1] != y.shape[-1]:
            if x.shape[-1] < y.shape[-1]:
                pad_shape = list(x.shape)
                pad_shape[-1] = y.shape[-1] - x.shape[-1]
                x = np.concatenate((x, np.zeros(pad_shape)), -1)
            else:
                pad_shape = list(y.shape)
                pad_shape[-1] = x.shape[-1] - y.shape[-1]
                y = np.concatenate((y, np.zeros(pad_shape)), -1)

    if nperseg is not None:  # if specified by user
        nperseg = int(nperseg)
        if nperseg < 1:
            raise ValueError('nperseg must be a positive integer')

    # parse window; if array like, then set nperseg = win.shape
    win, nperseg = _triage_segments(window, nperseg,
                                    input_length=x.shape[-1])

    if nfft is None:
        nfft = nperseg
    elif nfft < nperseg:
        raise ValueError('nfft must be greater than or equal to nperseg.')
    else:
        nfft = int(nfft)

    if noverlap is None:
        noverlap = nperseg // 2
    else:
        noverlap = int(noverlap)
    if noverlap >= nperseg:
        raise ValueError('noverlap must be less than nperseg.')
    nstep = nperseg - noverlap

    # Padding occurs after boundary extension, so that the extended signal
    # ends in zeros, instead of introducing an impulse at the end.
    # I.e. if x = [..., 3, 2]
    # extend then pad -> [..., 3, 2, 2, 3, 0, 0, 0]
    # pad then extend -> [..., 3, 2, 0, 0, 0, 2, 3]

    if boundary is not None:
        ext_func = boundary_funcs[boundary]
        # Extend by half a segment on each side to center the first and
        # last windows on the first/last input points.
        x = ext_func(x, nperseg // 2, axis=-1)
        if not same_data:
            y = ext_func(y, nperseg // 2, axis=-1)

    if padded:
        # Pad to integer number of windowed segments
        # I.e make x.shape[-1] = nperseg + (nseg-1)*nstep, with integer nseg
        nadd = (-(x.shape[-1] - nperseg) % nstep) % nperseg
        zeros_shape = list(x.shape[:-1]) + [nadd]
        x = np.concatenate((x, np.zeros(zeros_shape)), axis=-1)
        if not same_data:
            zeros_shape = list(y.shape[:-1]) + [nadd]
            y = np.concatenate((y, np.zeros(zeros_shape)), axis=-1)

    # Handle detrending and window functions.  Whatever form `detrend`
    # takes, normalize it to a callable acting on the last axis.
    if not detrend:
        def detrend_func(d):
            return d
    elif not hasattr(detrend, '__call__'):
        # String type: delegate to signaltools.detrend.
        def detrend_func(d):
            return signaltools.detrend(d, type=detrend, axis=-1)
    elif axis != -1:
        # Wrap this function so that it receives a shape that it could
        # reasonably expect to receive.
        def detrend_func(d):
            d = np.rollaxis(d, -1, axis)
            d = detrend(d)
            return np.rollaxis(d, axis, len(d.shape))
    else:
        detrend_func = detrend

    if np.result_type(win, np.complex64) != outdtype:
        win = win.astype(outdtype)

    if scaling == 'density':
        scale = 1.0 / (fs * (win * win).sum())
    elif scaling == 'spectrum':
        scale = 1.0 / win.sum()**2
    else:
        raise ValueError('Unknown scaling: %r' % scaling)

    if mode == 'stft':
        # STFT returns amplitudes, not squared quantities, so take the
        # square root of the PSD-style scale factor.
        scale = np.sqrt(scale)

    # Complex input forces a two-sided spectrum regardless of the
    # caller's request; warn so the caller knows about the override.
    if return_onesided:
        if np.iscomplexobj(x):
            sides = 'twosided'
            warnings.warn('Input data is complex, switching to '
                          'return_onesided=False')
        else:
            sides = 'onesided'
            if not same_data:
                if np.iscomplexobj(y):
                    sides = 'twosided'
                    warnings.warn('Input data is complex, switching to '
                                  'return_onesided=False')
    else:
        sides = 'twosided'

    if sides == 'twosided':
        freqs = fftpack.fftfreq(nfft, 1/fs)
    elif sides == 'onesided':
        freqs = np.fft.rfftfreq(nfft, 1/fs)

    # Perform the windowed FFTs
    result = _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides)

    if not same_data:
        # All the same operations on the y data
        result_y = _fft_helper(y, win, detrend_func, nperseg, noverlap, nfft,
                               sides)
        result = np.conjugate(result) * result_y
    elif mode == 'psd':
        result = np.conjugate(result) * result

    result *= scale
    if sides == 'onesided' and mode == 'psd':
        # Double the one-sided PSD to conserve total power, except at DC
        # and (for even nfft) the unpaired Nyquist bin.
        if nfft % 2:
            result[..., 1:] *= 2
        else:
            # Last point is unpaired Nyquist freq point, don't double
            result[..., 1:-1] *= 2

    # Time stamp of each segment = its center sample / fs.
    time = np.arange(nperseg/2, x.shape[-1] - nperseg/2 + 1,
                     nperseg - noverlap)/float(fs)
    if boundary is not None:
        # Boundary extension shifted the data by nperseg//2; undo that in
        # the reported segment times.
        time -= (nperseg/2) / fs

    result = result.astype(outdtype)

    # All imaginary parts are zero anyways
    if same_data and mode != 'stft':
        result = result.real

    # Output is going to have new last axis for time/window index, so a
    # negative axis index shifts down one
    if axis < 0:
        axis -= 1

    # Roll frequency axis back to axis where the data came from
    result = np.rollaxis(result, -1, axis)

    return freqs, time, result
def detrend_func(d): d = np.rollaxis(d, -1, axis) d = detrend(d) return np.rollaxis(d, axis, len(d.shape)) else: detrend_func = detrend if np.result_type(win,np.complex64) != outdtype: win = win.astype(outdtype) if scaling == 'density': scale = 1.0 / (fs * (win*win).sum()) elif scaling == 'spectrum': scale = 1.0 / win.sum()**2 else: raise ValueError('Unknown scaling: %r' % scaling) if mode == 'stft': scale = np.sqrt(scale) if return_onesided: if np.iscomplexobj(x): sides = 'twosided' warnings.warn('Input data is complex, switching to ' 'return_onesided=False') else: sides = 'onesided' if not same_data: if np.iscomplexobj(y): sides = 'twosided' warnings.warn('Input data is complex, switching to ' 'return_onesided=False') else: sides = 'twosided' if sides == 'twosided': freqs = fftpack.fftfreq(nfft, 1/fs) elif sides == 'onesided': freqs = np.fft.rfftfreq(nfft, 1/fs) # Perform the windowed FFTs result = _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides) if not same_data: # All the same operations on the y data result_y = _fft_helper(y, win, detrend_func, nperseg, noverlap, nfft, sides) result = np.conjugate(result) * result_y elif mode == 'psd': result = np.conjugate(result) * result result *= scale if sides == 'onesided' and mode == 'psd': if nfft % 2: result[..., 1:] *= 2 else: # Last point is unpaired Nyquist freq point, don't double result[..., 1:-1] *= 2 time = np.arange(nperseg/2, x.shape[-1] - nperseg/2 + 1, nperseg - noverlap)/float(fs) if boundary is not None: time -= (nperseg/2) / fs result = result.astype(outdtype) # All imaginary parts are zero anyways if same_data and mode != 'stft': result = result.real # Output is going to have new last axis for time/window index, so a # negative axis index shifts down one if axis < 0: axis -= 1 # Roll frequency axis back to axis where the data came from result = np.rollaxis(result, -1, axis) return freqs, time, result def _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides): """ Calculate 
windowed FFT, for internal use by scipy.signal._spectral_helper This is a helper function that does the main FFT calculation for `_spectral helper`. All input valdiation is performed there, and the data axis is assumed to be the last axis of x. It is not designed to be called externally. The windows are not averaged over; the result from each window is returned. Returns ------- result : ndarray Array of FFT data References ---------- .. [1] Stack Overflow, "Repeat NumPy array without replicating data?", http://stackoverflow.com/a/5568169 Notes ----- Adapted from matplotlib.mlab .. versionadded:: 0.16.0 """ # Created strided array of data segments if nperseg == 1 and noverlap == 0: result = x[..., np.newaxis] else: step = nperseg - noverlap shape = x.shape[:-1]+((x.shape[-1]-noverlap)//step, nperseg) strides = x.strides[:-1]+(step*x.strides[-1], x.strides[-1]) result = np.lib.stride_tricks.as_strided(x, shape=shape, strides=strides) # Detrend each data segment individually result = detrend_func(result) # Apply window by multiplication result = win * result # Perform the fft. Acts on last axis by default. Zero-pads automatically if sides == 'twosided': func = fftpack.fft else: result = result.real func = np.fft.rfft result = func(result, n=nfft) return result def _triage_segments(window, nperseg,input_length): """ Parses window and nperseg arguments for spectrogram and _spectral_helper. This is a helper function, not meant to be called externally. Parameters --------- window : string, tuple, or ndarray If window is specified by a string or tuple and nperseg is not specified, nperseg is set to the default of 256 and returns a window of that length. If instead the window is array_like and nperseg is not specified, then nperseg is set to the length of the window. A ValueError is raised if the user supplies both an array_like window and a value for nperseg but nperseg does not equal the length of the window. 
nperseg : int Length of each segment input_length: int Length of input signal, i.e. x.shape[-1]. Used to test for errors. Returns ------- win : ndarray window. If function was called with string or tuple than this will hold the actual array used as a window. nperseg : int Length of each segment. If window is str or tuple, nperseg is set to 256. If window is array_like, nperseg is set to the length of the 6 window. """ #parse window; if array like, then set nperseg = win.shape if isinstance(window, string_types) or isinstance(window, tuple): # if nperseg not specified if nperseg is None: nperseg = 256 # then change to default if nperseg > input_length: warnings.warn('nperseg = {0:d} is greater than input length ' ' = {1:d}, using nperseg = {1:d}' .format(nperseg, input_length)) nperseg = input_length win = get_window(window, nperseg) else: win = np.asarray(window) if len(win.shape) != 1: raise ValueError('window must be 1-D') if input_length < win.shape[-1]: raise ValueError('window is longer than input signal') if nperseg is None: nperseg = win.shape[0] elif nperseg is not None: if nperseg != win.shape[0]: raise ValueError("value specified for nperseg is different from" " length of window") return win, nperseg
# codeparrot/github-code-clean -- dataset shard separator between two unrelated concatenated source files
from sqlalchemy.testing import eq_, assert_raises, assert_raises_message
import operator
from sqlalchemy import *
from sqlalchemy import exc as sa_exc, util
from sqlalchemy.sql import compiler, table, column
from sqlalchemy.engine import default
from sqlalchemy.orm import *
from sqlalchemy.orm import attributes
# NOTE(review): `eq_` is imported twice (also on the first line) -- harmless
# but redundant.
from sqlalchemy.testing import eq_
import sqlalchemy as sa
from sqlalchemy import testing
from sqlalchemy.testing import AssertsCompiledSQL, engines
from sqlalchemy.testing.schema import Column
from test.orm import _fixtures
from sqlalchemy.testing import fixtures
from sqlalchemy.orm.util import join, outerjoin, with_parent


class QueryTest(_fixtures.FixtureTest):
    """Base fixture for the join tests: maps the standard
    User/Address/Order/Item/Keyword/Node fixture tables once for the whole
    test module.
    """
    run_setup_mappers = 'once'
    run_inserts = 'once'
    run_deletes = None

    @classmethod
    def setup_mappers(cls):
        """Configure the classical mappers for every fixture class.

        The relationship graph wired up here (and relied on by the join
        tests): User 1->* Address, User 1->* Order, Address 1->1 Dingaling,
        Order *->* Item, Order *->1 Address, Item *->* Keyword, and a
        self-referential Node adjacency list.
        """
        Node, composite_pk_table, users, Keyword, items, Dingaling, \
            order_items, item_keywords, Item, User, dingalings, \
            Address, keywords, CompositePk, nodes, Order, orders, \
            addresses = cls.classes.Node, \
            cls.tables.composite_pk_table, cls.tables.users, \
            cls.classes.Keyword, cls.tables.items, \
            cls.classes.Dingaling, cls.tables.order_items, \
            cls.tables.item_keywords, cls.classes.Item, \
            cls.classes.User, cls.tables.dingalings, \
            cls.classes.Address, cls.tables.keywords, \
            cls.classes.CompositePk, cls.tables.nodes, \
            cls.classes.Order, cls.tables.orders, cls.tables.addresses

        mapper(User, users, properties={
            'addresses': relationship(Address, backref='user',
                                      order_by=addresses.c.id),
            'orders': relationship(Order, backref='user',
                                   order_by=orders.c.id),  # o2m, m2o
        })
        mapper(Address, addresses, properties={
            # o2o
            'dingaling': relationship(Dingaling, uselist=False,
                                      backref="address")
        })
        mapper(Dingaling, dingalings)
        mapper(Order, orders, properties={
            # m2m
            'items': relationship(Item, secondary=order_items,
                                  order_by=items.c.id),
            'address': relationship(Address),  # m2o
        })
        mapper(Item, items, properties={
            'keywords': relationship(Keyword, secondary=item_keywords)  # m2m
        })
        mapper(Keyword, keywords)

        # Self-referential adjacency list: children o2m with a m2o
        # 'parent' backref pointing at the remote id column.
        mapper(Node, nodes, properties={
            'children': relationship(Node,
                                     backref=backref(
                                         'parent',
                                         remote_side=[nodes.c.id]))
        })

        mapper(CompositePk, composite_pk_table)

        configure_mappers()
class InheritedJoinTest(fixtures.MappedTest, AssertsCompiledSQL):
    """Joins involving a joined-table inheritance hierarchy
    (Company -> Person -> Engineer/Manager -> Boss), asserting the exact
    SQL that Query.join() renders.
    """
    run_setup_mappers = 'once'

    @classmethod
    def define_tables(cls, metadata):
        """Create the company/person joined-inheritance schema plus the
        machines and paperwork satellite tables."""
        Table('companies', metadata,
              Column('company_id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('name', String(50)))

        Table('people', metadata,
              Column('person_id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('company_id', Integer,
                     ForeignKey('companies.company_id')),
              Column('name', String(50)),
              Column('type', String(30)))

        Table('engineers', metadata,
              Column('person_id', Integer, ForeignKey('people.person_id'),
                     primary_key=True),
              Column('status', String(30)),
              Column('engineer_name', String(50)),
              Column('primary_language', String(50)))

        Table('machines', metadata,
              Column('machine_id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('name', String(50)),
              Column('engineer_id', Integer,
                     ForeignKey('engineers.person_id')))

        Table('managers', metadata,
              Column('person_id', Integer, ForeignKey('people.person_id'),
                     primary_key=True),
              Column('status', String(30)),
              Column('manager_name', String(50)))

        Table('boss', metadata,
              Column('boss_id', Integer, ForeignKey('managers.person_id'),
                     primary_key=True),
              Column('golf_swing', String(30)))

        Table('paperwork', metadata,
              Column('paperwork_id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('description', String(50)),
              Column('person_id', Integer, ForeignKey('people.person_id')))

    @classmethod
    def setup_classes(cls):
        """Define the mapped classes and the joined-table inheritance
        mappers: Engineer and Manager inherit Person; Boss inherits
        Manager."""
        paperwork, people, companies, boss, managers, machines, engineers = (
            cls.tables.paperwork,
            cls.tables.people,
            cls.tables.companies,
            cls.tables.boss,
            cls.tables.managers,
            cls.tables.machines,
            cls.tables.engineers)

        class Company(cls.Comparable):
            pass

        class Person(cls.Comparable):
            pass

        class Engineer(Person):
            pass

        class Manager(Person):
            pass

        class Boss(Manager):
            pass

        class Machine(cls.Comparable):
            pass

        class Paperwork(cls.Comparable):
            pass

        mapper(Company, companies, properties={
            'employees': relationship(Person, order_by=people.c.person_id)
        })
        mapper(Machine, machines)
        mapper(Person, people,
               polymorphic_on=people.c.type,
               polymorphic_identity='person',
               order_by=people.c.person_id,
               properties={
                   'paperwork': relationship(
                       Paperwork, order_by=paperwork.c.paperwork_id)
               })
        mapper(Engineer, engineers, inherits=Person,
               polymorphic_identity='engineer',
               properties={
                   'machines': relationship(
                       Machine, order_by=machines.c.machine_id)
               })
        mapper(Manager, managers, inherits=Person,
               polymorphic_identity='manager')
        mapper(Boss, boss, inherits=Manager, polymorphic_identity='boss')
        mapper(Paperwork, paperwork)

    def test_single_prop(self):
        """Joining on a relationship to the inheritance base renders a
        plain join to the base (people) table only."""
        Company = self.classes.Company

        sess = create_session()
        self.assert_compile(
            sess.query(Company).join(Company.employees),
            "SELECT companies.company_id AS companies_company_id, companies.name AS companies_name "
            "FROM companies JOIN people ON companies.company_id = people.company_id",
            use_default_dialect=True
        )

    def test_force_via_select_from(self):
        """Without select_from(), filtering on a subclass yields a plain
        comma-separated FROM list; select_from() forces the inheritance
        join to be rendered."""
        Company, Engineer = self.classes.Company, self.classes.Engineer

        sess = create_session()
        self.assert_compile(
            sess.query(Company).
            filter(Company.company_id == Engineer.company_id).
            filter(Engineer.primary_language == 'java'),
            "SELECT companies.company_id AS companies_company_id, companies.name AS companies_name "
            "FROM companies, people, engineers "
            "WHERE companies.company_id = people.company_id AND engineers.primary_language "
            "= :primary_language_1",
            use_default_dialect=True
        )

        self.assert_compile(
            sess.query(Company).select_from(Company, Engineer).
            filter(Company.company_id == Engineer.company_id).
            filter(Engineer.primary_language == 'java'),
            "SELECT companies.company_id AS companies_company_id, companies.name AS companies_name "
            "FROM companies, people JOIN engineers ON people.person_id = engineers.person_id "
            "WHERE companies.company_id = "
            "people.company_id AND engineers.primary_language ="
            " :primary_language_1",
            use_default_dialect=True
        )

    def test_single_prop_of_type(self):
        """join() with of_type(Engineer) renders the nested
        (people JOIN engineers) inheritance join as the join target."""
        Company, Engineer = self.classes.Company, self.classes.Engineer

        sess = create_session()
        self.assert_compile(
            sess.query(Company).join(Company.employees.of_type(Engineer)),
            "SELECT companies.company_id AS companies_company_id, "
            "companies.name AS companies_name "
            "FROM companies JOIN "
            "(people JOIN engineers ON people.person_id = engineers.person_id) "
            "ON companies.company_id = people.company_id",
            use_default_dialect=True
        )

    def test_prop_with_polymorphic_1(self):
        """with_polymorphic(Manager) adds the LEFT OUTER JOIN to managers
        while a relationship join() is rendered on top of it."""
        Person, Manager, Paperwork = (self.classes.Person,
                                      self.classes.Manager,
                                      self.classes.Paperwork)

        sess = create_session()

        self.assert_compile(
            sess.query(Person).with_polymorphic(Manager).
            join('paperwork').
            filter(Paperwork.description.like('%review%')),
            "SELECT people.person_id AS people_person_id, people.company_id AS"
            " people_company_id, "
            "people.name AS people_name, people.type AS people_type, managers.person_id "
            "AS managers_person_id, "
            "managers.status AS managers_status, managers.manager_name AS "
            "managers_manager_name FROM people "
            "LEFT OUTER JOIN managers ON people.person_id = managers.person_id JOIN "
            "paperwork ON people.person_id = "
            "paperwork.person_id WHERE paperwork.description LIKE :description_1 "
            "ORDER BY people.person_id",
            use_default_dialect=True
        )

    def test_prop_with_polymorphic_2(self):
        """Same as _1 but with aliased=True: the joined table gets an
        anonymous alias (paperwork_1) and the filter is adapted to it."""
        Person, Manager, Paperwork = (self.classes.Person,
                                      self.classes.Manager,
                                      self.classes.Paperwork)

        sess = create_session()

        self.assert_compile(
            sess.query(Person).with_polymorphic(Manager).
            join('paperwork', aliased=True).
            filter(Paperwork.description.like('%review%')),
            "SELECT people.person_id AS people_person_id, people.company_id AS people_company_id, "
            "people.name AS people_name, people.type AS people_type, managers.person_id "
            "AS managers_person_id, "
            "managers.status AS managers_status, managers.manager_name AS managers_manager_name "
            "FROM people LEFT OUTER JOIN managers ON people.person_id = managers.person_id JOIN "
            "paperwork AS paperwork_1 ON people.person_id = paperwork_1.person_id "
            "WHERE paperwork_1.description LIKE :description_1 ORDER BY people.person_id",
            use_default_dialect=True
        )

    def test_explicit_polymorphic_join_one(self):
        """Joining directly to a subclass entity renders the nested
        inheritance join, with the ON clause inferred."""
        Company, Engineer = self.classes.Company, self.classes.Engineer

        sess = create_session()
        self.assert_compile(
            sess.query(Company).join(Engineer).
            filter(Engineer.engineer_name == 'vlad'),
            "SELECT companies.company_id AS companies_company_id, companies.name AS "
            "companies_name "
            "FROM companies JOIN (people JOIN engineers "
            "ON people.person_id = engineers.person_id) "
            "ON "
            "companies.company_id = people.company_id "
            "WHERE engineers.engineer_name = :engineer_name_1",
            use_default_dialect=True
        )

    def test_explicit_polymorphic_join_two(self):
        """Same as _one but with an explicit ON criterion supplied to
        join()."""
        Company, Engineer = self.classes.Company, self.classes.Engineer

        sess = create_session()
        self.assert_compile(
            sess.query(Company).join(
                Engineer, Company.company_id == Engineer.company_id).
            filter(Engineer.engineer_name == 'vlad'),
            "SELECT companies.company_id AS companies_company_id, companies.name "
            "AS companies_name "
            "FROM companies JOIN "
            "(people JOIN engineers ON people.person_id = engineers.person_id) "
            "ON "
            "companies.company_id = people.company_id "
            "WHERE engineers.engineer_name = :engineer_name_1",
            use_default_dialect=True
        )

    def test_multiple_adaption(self):
        """test that multiple filter() adapters get chained together
        and work correctly within a multiple-entry join()."""

        people, Company, Machine, engineers, machines, Engineer = (
            self.tables.people,
            self.classes.Company,
            self.classes.Machine,
            self.tables.engineers,
            self.tables.machines,
            self.classes.Engineer)

        sess = create_session()

        self.assert_compile(
            sess.query(Company).join(people.join(engineers),
                                     Company.employees).
            filter(Engineer.name == 'dilbert'),
            "SELECT companies.company_id AS companies_company_id, companies.name AS "
            "companies_name "
            "FROM companies JOIN (people "
            "JOIN engineers ON people.person_id = "
            "engineers.person_id) ON companies.company_id = "
            "people.company_id WHERE people.name = :name_1",
            use_default_dialect=True
        )

        # A second join target that is itself a SELECT: gets wrapped as an
        # anonymous subquery (anon_1) and Machine.name is adapted to it.
        mach_alias = machines.select()
        self.assert_compile(
            sess.query(Company).join(people.join(engineers),
                                     Company.employees).
            join(mach_alias, Engineer.machines, from_joinpoint=True).
            filter(Engineer.name == 'dilbert').filter(Machine.name == 'foo'),
            "SELECT companies.company_id AS companies_company_id, companies.name AS "
            "companies_name "
            "FROM companies JOIN (people "
            "JOIN engineers ON people.person_id = "
            "engineers.person_id) ON companies.company_id = "
            "people.company_id JOIN "
            "(SELECT machines.machine_id AS machine_id, machines.name AS name, "
            "machines.engineer_id AS engineer_id "
            "FROM machines) AS anon_1 ON engineers.person_id = anon_1.engineer_id "
            "WHERE people.name = :name_1 AND anon_1.name = :name_2",
            use_default_dialect=True
        )
filter(Engineer.name=='dilbert').filter(Machine.name=='foo'), "SELECT companies.company_id AS companies_company_id, companies.name AS " "companies_name " "FROM companies JOIN (people " "JOIN engineers ON people.person_id = " "engineers.person_id) ON companies.company_id = " "people.company_id JOIN " "(SELECT machines.machine_id AS machine_id, machines.name AS name, " "machines.engineer_id AS engineer_id " "FROM machines) AS anon_1 ON engineers.person_id = anon_1.engineer_id " "WHERE people.name = :name_1 AND anon_1.name = :name_2" , use_default_dialect = True ) class JoinTest(QueryTest, AssertsCompiledSQL): __dialect__ = 'default' def test_single_name(self): User = self.classes.User sess = create_session() self.assert_compile( sess.query(User).join("orders"), "SELECT users.id AS users_id, users.name AS users_name " "FROM users JOIN orders ON users.id = orders.user_id" ) assert_raises( sa_exc.InvalidRequestError, sess.query(User).join, "user", ) self.assert_compile( sess.query(User).join("orders", "items"), "SELECT users.id AS users_id, users.name AS users_name FROM users " "JOIN orders ON users.id = orders.user_id JOIN order_items AS order_items_1 " "ON orders.id = order_items_1.order_id JOIN items ON items.id = order_items_1.item_id" ) # test overlapping paths. User->orders is used by both joins, but rendered once. 
self.assert_compile( sess.query(User).join("orders", "items").join("orders", "address"), "SELECT users.id AS users_id, users.name AS users_name FROM users JOIN orders " "ON users.id = orders.user_id JOIN order_items AS order_items_1 ON orders.id = " "order_items_1.order_id JOIN items ON items.id = order_items_1.item_id JOIN addresses " "ON addresses.id = orders.address_id" ) def test_join_on_synonym(self): class User(object): pass class Address(object): pass users, addresses = (self.tables.users, self.tables.addresses) mapper(User, users, properties={ 'addresses':relationship(Address), 'ad_syn':synonym("addresses") }) mapper(Address, addresses) self.assert_compile( Session().query(User).join(User.ad_syn), "SELECT users.id AS users_id, users.name AS users_name " "FROM users JOIN addresses ON users.id = addresses.user_id" ) def test_multi_tuple_form(self): """test the 'tuple' form of join, now superseded by the two-element join() form. Not deprecating this style as of yet. """ Item, Order, User = (self.classes.Item, self.classes.Order, self.classes.User) sess = create_session() #assert_raises( # sa.exc.SADeprecationWarning, # sess.query(User).join, (Order, User.id==Order.user_id) #) self.assert_compile( sess.query(User).join((Order, User.id == Order.user_id)), "SELECT users.id AS users_id, users.name AS users_name " "FROM users JOIN orders ON users.id = orders.user_id", ) self.assert_compile( sess.query(User).join( (Order, User.id == Order.user_id), (Item, Order.items)), "SELECT users.id AS users_id, users.name AS users_name " "FROM users JOIN orders ON users.id = orders.user_id " "JOIN order_items AS order_items_1 ON orders.id = " "order_items_1.order_id JOIN items ON items.id = " "order_items_1.item_id", ) # the old "backwards" form self.assert_compile( sess.query(User).join(("orders", Order)), "SELECT users.id AS users_id, users.name AS users_name " "FROM users JOIN orders ON users.id = orders.user_id", ) def test_single_prop_1(self): Item, Order, User, Address = 
(self.classes.Item, self.classes.Order, self.classes.User, self.classes.Address) sess = create_session() self.assert_compile( sess.query(User).join(User.orders), "SELECT users.id AS users_id, users.name AS users_name " "FROM users JOIN orders ON users.id = orders.user_id" ) def test_single_prop_2(self): Item, Order, User, Address = (self.classes.Item, self.classes.Order, self.classes.User, self.classes.Address) sess = create_session() self.assert_compile( sess.query(User).join(Order.user), "SELECT users.id AS users_id, users.name AS users_name " "FROM orders JOIN users ON users.id = orders.user_id" ) def test_single_prop_3(self): Item, Order, User, Address = (self.classes.Item, self.classes.Order, self.classes.User, self.classes.Address) sess = create_session() oalias1 = aliased(Order) self.assert_compile( sess.query(User).join(oalias1.user), "SELECT users.id AS users_id, users.name AS users_name " "FROM orders AS orders_1 JOIN users ON users.id = orders_1.user_id" ) def test_single_prop_4(self): Item, Order, User, Address = (self.classes.Item, self.classes.Order, self.classes.User, self.classes.Address) sess = create_session() oalias1 = aliased(Order) oalias2 = aliased(Order) # another nonsensical query. (from [ticket:1537]). 
        # in this case, the contract of "left to right" is honored
        self.assert_compile(
            sess.query(User).join(oalias1.user).join(oalias2.user),
            "SELECT users.id AS users_id, users.name AS users_name "
            "FROM orders AS orders_1 JOIN users ON users.id = orders_1.user_id, "
            "orders AS orders_2 JOIN users ON users.id = orders_2.user_id"
        )

    def test_single_prop_5(self):
        # multi-prop join path in one call: User.orders -> Order.items.
        Item, Order, User, Address = (self.classes.Item,
                                      self.classes.Order,
                                      self.classes.User,
                                      self.classes.Address)

        sess = create_session()
        self.assert_compile(
            sess.query(User).join(User.orders, Order.items),
            "SELECT users.id AS users_id, users.name AS users_name FROM users "
            "JOIN orders ON users.id = orders.user_id JOIN order_items AS order_items_1 "
            "ON orders.id = order_items_1.order_id JOIN items ON items.id = order_items_1.item_id"
        )

    def test_single_prop_6(self):
        # query against an aliased(User); join starts from the alias.
        Item, Order, User, Address = (self.classes.Item,
                                      self.classes.Order,
                                      self.classes.User,
                                      self.classes.Address)

        sess = create_session()
        ualias = aliased(User)
        self.assert_compile(
            sess.query(ualias).join(ualias.orders),
            "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name "
            "FROM users AS users_1 JOIN orders ON users_1.id = orders.user_id"
        )

    def test_single_prop_7(self):
        Item, Order, User, Address = (self.classes.Item,
                                      self.classes.Order,
                                      self.classes.User,
                                      self.classes.Address)

        sess = create_session()
        # this query is somewhat nonsensical.  the old system didn't render a correct
        # query for this.   In this case its the most faithful to what was asked -
        # there's no linkage between User.orders and "oalias", so two FROM elements
        # are generated.
        oalias = aliased(Order)
        self.assert_compile(
            sess.query(User).join(User.orders, oalias.items),
            "SELECT users.id AS users_id, users.name AS users_name FROM users "
            "JOIN orders ON users.id = orders.user_id, "
            "orders AS orders_1 JOIN order_items AS order_items_1 ON orders_1.id = order_items_1.order_id "
            "JOIN items ON items.id = order_items_1.item_id"
        )

    def test_single_prop_8(self):
        Item, Order, User, Address = (self.classes.Item,
                                      self.classes.Order,
                                      self.classes.User,
                                      self.classes.Address)

        sess = create_session()
        # same as before using an aliased() for User as well
        ualias = aliased(User)
        oalias = aliased(Order)
        self.assert_compile(
            sess.query(ualias).join(ualias.orders, oalias.items),
            "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name FROM users AS users_1 "
            "JOIN orders ON users_1.id = orders.user_id, "
            "orders AS orders_1 JOIN order_items AS order_items_1 ON orders_1.id = order_items_1.order_id "
            "JOIN items ON items.id = order_items_1.item_id"
        )

    def test_single_prop_9(self):
        # join applied after from_self(): join is against the anon subquery.
        Item, Order, User, Address = (self.classes.Item,
                                      self.classes.Order,
                                      self.classes.User,
                                      self.classes.Address)

        sess = create_session()
        self.assert_compile(
            sess.query(User).filter(User.name == 'ed').from_self().
                join(User.orders),
            "SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name AS anon_1_users_name "
            "FROM (SELECT users.id AS users_id, users.name AS users_name "
            "FROM users "
            "WHERE users.name = :name_1) AS anon_1 JOIN orders ON anon_1.users_id = orders.user_id"
        )

    def test_single_prop_10(self):
        # aliased=True: filter criteria are adapted to the generated alias.
        Item, Order, User, Address = (self.classes.Item,
                                      self.classes.Order,
                                      self.classes.User,
                                      self.classes.Address)

        sess = create_session()
        self.assert_compile(
            sess.query(User).join(User.addresses, aliased=True).
                filter(Address.email_address == 'foo'),
            "SELECT users.id AS users_id, users.name AS users_name "
            "FROM users JOIN addresses AS addresses_1 ON users.id = addresses_1.user_id "
            "WHERE addresses_1.email_address = :email_address_1"
        )

    def test_single_prop_11(self):
        # aliased=True across a two-step path; both hops get aliases.
        Item, Order, User, Address = (self.classes.Item,
                                      self.classes.Order,
                                      self.classes.User,
                                      self.classes.Address)

        sess = create_session()
        self.assert_compile(
            sess.query(User).join(User.orders, Order.items, aliased=True).
                filter(Item.id == 10),
            "SELECT users.id AS users_id, users.name AS users_name "
            "FROM users JOIN orders AS orders_1 ON users.id = orders_1.user_id "
            "JOIN order_items AS order_items_1 ON orders_1.id = order_items_1.order_id "
            "JOIN items AS items_1 ON items_1.id = order_items_1.item_id "
            "WHERE items_1.id = :id_1"
        )

    def test_single_prop_12(self):
        Item, Order, User, Address = (self.classes.Item,
                                      self.classes.Order,
                                      self.classes.User,
                                      self.classes.Address)

        sess = create_session()
        oalias1 = aliased(Order)
        # test #1 for [ticket:1706]
        ualias = aliased(User)
        self.assert_compile(
            sess.query(ualias).
                join(oalias1, ualias.orders).\
                join(Address, ualias.addresses),
            "SELECT users_1.id AS users_1_id, users_1.name AS "
            "users_1_name FROM users AS users_1 JOIN orders AS orders_1 "
            "ON users_1.id = orders_1.user_id JOIN addresses ON users_1.id "
            "= addresses.user_id"
        )

    def test_single_prop_13(self):
        Item, Order, User, Address = (self.classes.Item,
                                      self.classes.Order,
                                      self.classes.User,
                                      self.classes.Address)

        sess = create_session()
        # test #2 for [ticket:1706]
        ualias = aliased(User)
        ualias2 = aliased(User)
        self.assert_compile(
            sess.query(ualias).
                join(Address, ualias.addresses).
                join(ualias2, Address.user).
                join(Order, ualias.orders),
            "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name FROM users "
            "AS users_1 JOIN addresses ON users_1.id = addresses.user_id JOIN users AS users_2 "
            "ON users_2.id = addresses.user_id JOIN orders ON users_1.id = orders.user_id"
        )

    def test_overlapping_paths(self):
        User = self.classes.User

        for aliased in (True,False):
            # load a user who has an order that contains item id 3 and address id 1 (order 3, owned by jack)
            result = create_session().query(User).join('orders', 'items', aliased=aliased).\
                filter_by(id=3).join('orders','address', aliased=aliased).filter_by(id=1).all()
            assert [User(id=7, name='jack')] == result

    def test_overlapping_paths_multilevel(self):
        # repeated join() over previously-joined paths extends, not duplicates.
        User = self.classes.User

        s = Session()
        q = s.query(User).\
            join('orders').\
            join('addresses').\
            join('orders', 'items').\
            join('addresses', 'dingaling')
        self.assert_compile(
            q,
            "SELECT users.id AS users_id, users.name AS users_name "
            "FROM users JOIN orders ON users.id = orders.user_id "
            "JOIN addresses ON users.id = addresses.user_id "
            "JOIN order_items AS order_items_1 ON orders.id = "
            "order_items_1.order_id "
            "JOIN items ON items.id = order_items_1.item_id "
            "JOIN dingalings ON addresses.id = dingalings.address_id"
        )

    def test_overlapping_paths_outerjoin(self):
        # same as test_overlapping_paths but with outerjoin().
        User = self.classes.User

        result = create_session().query(User).outerjoin('orders', 'items').\
            filter_by(id=3).outerjoin('orders','address').filter_by(id=1).all()
        assert [User(id=7, name='jack')] == result

    def test_from_joinpoint(self):
        Item, User, Order = (self.classes.Item,
                             self.classes.User,
                             self.classes.Order)

        sess = create_session()

        for oalias,ialias in [(True, True), (False, False), (True, False), (False, True)]:
            eq_(
                sess.query(User).join('orders', aliased=oalias).\
                    join('items', from_joinpoint=True, aliased=ialias).\
                    filter(Item.description == 'item 4').all(),
                [User(name='jack')]
            )

            # use middle criterion
            eq_(
                sess.query(User).join('orders', aliased=oalias).\
                    filter(Order.user_id==9).\
                    join('items',
                         from_joinpoint=True, aliased=ialias).\
                    filter(Item.description=='item 4').all(),
                []
            )

        orderalias = aliased(Order)
        itemalias = aliased(Item)
        eq_(
            sess.query(User).join(orderalias, 'orders').
                join(itemalias, 'items', from_joinpoint=True).
                filter(itemalias.description == 'item 4').all(),
            [User(name='jack')]
        )
        eq_(
            sess.query(User).join(orderalias, 'orders').
                join(itemalias, 'items', from_joinpoint=True).
                filter(orderalias.user_id==9).\
                filter(itemalias.description=='item 4').all(),
            []
        )

    def test_join_nonmapped_column(self):
        """test that the search for a 'left' doesn't trip on non-mapped cols"""

        Order, User = self.classes.Order, self.classes.User

        sess = create_session()

        # intentionally join() with a non-existent "left" side
        self.assert_compile(
            sess.query(User.id, literal_column('foo')).join(Order.user),
            "SELECT users.id AS users_id, foo FROM "
            "orders JOIN users ON users.id = orders.user_id"
        )

    def test_backwards_join(self):
        User, Address = self.classes.User, self.classes.Address

        # a more controversial feature.  join from
        # User->Address, but the onclause is Address.user.

        sess = create_session()

        eq_(
            sess.query(User).join(Address.user).\
                filter(Address.email_address=='ed@wood.com').all(),
            [User(id=8,name='ed')]
        )

        # its actually not so controversial if you view it in terms
        # of multiple entities.
        eq_(
            sess.query(User, Address).join(Address.user).filter(Address.email_address=='ed@wood.com').all(),
            [(User(id=8,name='ed'), Address(email_address='ed@wood.com'))]
        )

        # this was the controversial part.  now, raise an error if the feature is abused.
        # before the error raise was added, this would silently work.....
        assert_raises(
            sa_exc.InvalidRequestError,
            sess.query(User).join, Address, Address.user,
        )

        # but this one would silently fail
        adalias = aliased(Address)
        assert_raises(
            sa_exc.InvalidRequestError,
            sess.query(User).join, adalias, Address.user,
        )

    def test_multiple_with_aliases(self):
        # two aliased joins along the same relationship, OR'ed criteria.
        Order, User = self.classes.Order, self.classes.User

        sess = create_session()

        ualias = aliased(User)
        oalias1 = aliased(Order)
        oalias2 = aliased(Order)
        self.assert_compile(
            sess.query(ualias).join(oalias1, ualias.orders).
                join(oalias2, ualias.orders).
                filter(or_(oalias1.user_id==9, oalias2.user_id==7)),
            "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name FROM users AS users_1 "
            "JOIN orders AS orders_1 ON users_1.id = orders_1.user_id JOIN orders AS orders_2 ON "
            "users_1.id = orders_2.user_id WHERE orders_1.user_id = :user_id_1 OR orders_2.user_id = :user_id_2",
            use_default_dialect=True
        )

    def test_select_from_orm_joins(self):
        # exercises orm.join() directly and via select_from().
        User, Order = self.classes.User, self.classes.Order

        sess = create_session()

        ualias = aliased(User)
        oalias1 = aliased(Order)
        oalias2 = aliased(Order)

        self.assert_compile(
            join(User, oalias2, User.id==oalias2.user_id),
            "users JOIN orders AS orders_1 ON users.id = orders_1.user_id",
            use_default_dialect=True
        )

        self.assert_compile(
            join(ualias, oalias1, ualias.orders),
            "users AS users_1 JOIN orders AS orders_1 ON users_1.id = orders_1.user_id",
            use_default_dialect=True
        )

        self.assert_compile(
            sess.query(ualias).select_from(join(ualias, oalias1, ualias.orders)),
            "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name FROM users AS users_1 "
            "JOIN orders AS orders_1 ON users_1.id = orders_1.user_id",
            use_default_dialect=True
        )

        self.assert_compile(
            sess.query(User, ualias).select_from(join(ualias, oalias1, ualias.orders)),
            "SELECT users.id AS users_id, users.name AS users_name, users_1.id AS users_1_id, "
            "users_1.name AS users_1_name FROM users, users AS users_1 JOIN orders AS orders_1 ON users_1.id = orders_1.user_id",
            use_default_dialect=True
        )

        # this fails
        # (and we cant quite fix right now).
        if False:
            self.assert_compile(
                sess.query(User, ualias).\
                    join(oalias1, ualias.orders).\
                    join(oalias2, User.id==oalias2.user_id).\
                    filter(or_(oalias1.user_id==9, oalias2.user_id==7)),
                "SELECT users.id AS users_id, users.name AS users_name, users_1.id AS users_1_id, users_1.name AS "
                "users_1_name FROM users JOIN orders AS orders_2 ON users.id = orders_2.user_id, "
                "users AS users_1 JOIN orders AS orders_1 ON users_1.id = orders_1.user_id "
                "WHERE orders_1.user_id = :user_id_1 OR orders_2.user_id = :user_id_2",
                use_default_dialect=True
            )

        # this is the same thing using explicit orm.join() (which now offers multiple again)
        self.assert_compile(
            sess.query(User, ualias).\
                select_from(
                    join(ualias, oalias1, ualias.orders),
                    join(User, oalias2, User.id==oalias2.user_id),
                ).\
                filter(or_(oalias1.user_id==9, oalias2.user_id==7)),
            "SELECT users.id AS users_id, users.name AS users_name, users_1.id AS users_1_id, users_1.name AS "
            "users_1_name FROM users AS users_1 JOIN orders AS orders_1 ON users_1.id = orders_1.user_id, "
            "users JOIN orders AS orders_2 ON users.id = orders_2.user_id "
            "WHERE orders_1.user_id = :user_id_1 OR orders_2.user_id = :user_id_2",
            use_default_dialect=True
        )

    def test_overlapping_backwards_joins(self):
        User, Order = self.classes.User, self.classes.Order

        sess = create_session()

        oalias1 = aliased(Order)
        oalias2 = aliased(Order)

        # this is invalid SQL - joins from orders_1/orders_2 to User twice.
        # but that is what was asked for so they get it !
        self.assert_compile(
            sess.query(User).join(oalias1.user).join(oalias2.user),
            "SELECT users.id AS users_id, users.name AS users_name FROM orders AS orders_1 "
            "JOIN users ON users.id = orders_1.user_id, orders AS orders_2 JOIN users ON users.id = orders_2.user_id",
            use_default_dialect=True,
        )

    def test_replace_multiple_from_clause(self):
        """test adding joins onto multiple FROM clauses"""

        User, Order, Address = (self.classes.User,
                                self.classes.Order,
                                self.classes.Address)

        sess = create_session()

        self.assert_compile(
            sess.query(Address, User).join(Address.dingaling).join(User.orders, Order.items),
            "SELECT addresses.id AS addresses_id, addresses.user_id AS addresses_user_id, "
            "addresses.email_address AS addresses_email_address, users.id AS users_id, "
            "users.name AS users_name FROM addresses JOIN dingalings ON addresses.id = dingalings.address_id, "
            "users JOIN orders ON users.id = orders.user_id JOIN order_items AS order_items_1 "
            "ON orders.id = order_items_1.order_id JOIN items ON items.id = order_items_1.item_id",
            use_default_dialect = True
        )

    def test_multiple_adaption(self):
        # aliased=True path where multiple filter() criteria are adapted.
        Item, Order, User = (self.classes.Item,
                             self.classes.Order,
                             self.classes.User)

        sess = create_session()

        self.assert_compile(
            sess.query(User).join(User.orders, Order.items, aliased=True).filter(Order.id==7).filter(Item.id==8),
            "SELECT users.id AS users_id, users.name AS users_name FROM users JOIN orders AS orders_1 "
            "ON users.id = orders_1.user_id JOIN order_items AS order_items_1 ON orders_1.id = order_items_1.order_id "
            "JOIN items AS items_1 ON items_1.id = order_items_1.item_id WHERE orders_1.id = :id_1 AND items_1.id = :id_2",
            use_default_dialect=True
        )

    def test_onclause_conditional_adaption(self):
        Item, Order, orders, order_items, User = (self.classes.Item,
                                                  self.classes.Order,
                                                  self.tables.orders,
                                                  self.tables.order_items,
                                                  self.classes.User)

        sess = create_session()

        # this is now a very weird test, nobody should really
        # be using the aliased flag in this way.
        self.assert_compile(
            sess.query(User).join(User.orders, aliased=True).
                join(Item,
                     and_(Order.id==order_items.c.order_id, order_items.c.item_id==Item.id),
                     from_joinpoint=True, aliased=True
                ),
            "SELECT users.id AS users_id, users.name AS users_name FROM users JOIN "
            "orders AS orders_1 ON users.id = orders_1.user_id JOIN items AS items_1 "
            "ON orders_1.id = order_items.order_id AND order_items.item_id = items_1.id",
            use_default_dialect=True
        )

        oalias = orders.select()
        self.assert_compile(
            sess.query(User).join(oalias, User.orders).
                join(Item,
                     and_(Order.id==order_items.c.order_id, order_items.c.item_id==Item.id),
                     from_joinpoint=True
                ),
            "SELECT users.id AS users_id, users.name AS users_name FROM users JOIN "
            "(SELECT orders.id AS id, orders.user_id AS user_id, orders.address_id AS address_id, orders.description "
            "AS description, orders.isopen AS isopen FROM orders) AS anon_1 ON users.id = anon_1.user_id JOIN items "
            "ON anon_1.id = order_items.order_id AND order_items.item_id = items.id",
            use_default_dialect=True
        )

        # query.join(<stuff>, aliased=True).join(target, sql_expression)
        # or: query.join(path_to_some_joined_table_mapper).join(target, sql_expression)

    def test_pure_expression_error(self):
        # query against a plain Table can still join() to another Table.
        addresses, users = self.tables.addresses, self.tables.users

        sess = create_session()

        self.assert_compile(
            sess.query(users).join(addresses),
            "SELECT users.id AS users_id, users.name AS users_name "
            "FROM users JOIN addresses ON users.id = addresses.user_id"
        )

    def test_orderby_arg_bug(self):
        User, users, Order = (self.classes.User,
                              self.tables.users,
                              self.classes.Order)

        sess = create_session()
        # no arg error
        result = sess.query(User).join('orders', aliased=True).order_by(Order.id).reset_joinpoint().order_by(users.c.id).all()

    def test_no_onclause(self):
        # join() without an explicit onclause infers it from the mappings.
        Item, User, Order = (self.classes.Item,
                             self.classes.User,
                             self.classes.Order)

        sess = create_session()

        eq_(
            sess.query(User).select_from(join(User, Order).join(Item, Order.items)).filter(Item.description == 'item 4').all(),
            [User(name='jack')]
        )

        eq_(
            sess.query(User.name).select_from(join(User, Order).join(Item, Order.items)).filter(Item.description == 'item 4').all(),
            [('jack',)]
        )

        eq_(
            sess.query(User).join(Order).join(Item, Order.items)
                .filter(Item.description == 'item 4').all(),
            [User(name='jack')]
        )

    def test_clause_onclause(self):
        # explicit SQL-expression onclauses, including through from_self()
        # and select_entity_from(), where the onclause must be adapted.
        Item, Order, users, order_items, User = (self.classes.Item,
                                                 self.classes.Order,
                                                 self.tables.users,
                                                 self.tables.order_items,
                                                 self.classes.User)

        sess = create_session()

        eq_(
            sess.query(User).join(Order, User.id==Order.user_id).
                join(order_items, Order.id==order_items.c.order_id).
                join(Item, order_items.c.item_id==Item.id).
                filter(Item.description == 'item 4').all(),
            [User(name='jack')]
        )

        eq_(
            sess.query(User.name).join(Order, User.id==Order.user_id).
                join(order_items, Order.id==order_items.c.order_id).
                join(Item, order_items.c.item_id==Item.id).
                filter(Item.description == 'item 4').all(),
            [('jack',)]
        )

        ualias = aliased(User)
        eq_(
            sess.query(ualias.name).join(Order, ualias.id==Order.user_id).
                join(order_items, Order.id==order_items.c.order_id).
                join(Item, order_items.c.item_id==Item.id).
                filter(Item.description == 'item 4').all(),
            [('jack',)]
        )

        # explicit onclause with from_self(), means
        # the onclause must be aliased against the query's custom
        # FROM object
        eq_(
            sess.query(User).order_by(User.id).offset(2).
                from_self().
                join(Order, User.id==Order.user_id).
                all(),
            [User(name='fred')]
        )

        # same with an explicit select_from()
        eq_(
            sess.query(User).select_entity_from(select([users]).
                order_by(User.id).offset(2).alias()).
                join(Order, User.id==Order.user_id).
                all(),
            [User(name='fred')]
        )

    def test_aliased_classes(self):
        # aliased(Address) used as an added entity, as a select_from()
        # target, and as the source of a "backwards" join.
        User, Address = self.classes.User, self.classes.Address

        sess = create_session()

        (user7, user8, user9, user10) = sess.query(User).all()
        (address1, address2, address3, address4, address5) = sess.query(Address).all()
        expected = [(user7, address1),
            (user8, address2),
            (user8, address3),
            (user8, address4),
            (user9, address5),
            (user10, None)]

        q = sess.query(User)
        AdAlias = aliased(Address)
        q = q.add_entity(AdAlias).select_from(outerjoin(User, AdAlias))
        l = q.order_by(User.id, AdAlias.id).all()
        eq_(l, expected)

        sess.expunge_all()

        q = sess.query(User).add_entity(AdAlias)
        l = q.select_from(outerjoin(User, AdAlias)).filter(AdAlias.email_address=='ed@bettyboop.com').all()
        eq_(l, [(user8, address3)])

        l = q.select_from(outerjoin(User, AdAlias, 'addresses')).filter(AdAlias.email_address=='ed@bettyboop.com').all()
        eq_(l, [(user8, address3)])

        l = q.select_from(outerjoin(User, AdAlias, User.id==AdAlias.user_id)).filter(AdAlias.email_address=='ed@bettyboop.com').all()
        eq_(l, [(user8, address3)])

        # this is the first test where we are joining "backwards" - from AdAlias to User even though
        # the query is against User
        q = sess.query(User, AdAlias)
        l = q.join(AdAlias.user).filter(User.name=='ed').order_by(User.id, AdAlias.id)
        eq_(l.all(), [(user8, address2),(user8, address3),(user8, address4),])

        q = sess.query(User, AdAlias).select_from(join(AdAlias, User, AdAlias.user)).filter(User.name=='ed')
        eq_(l.all(), [(user8, address2),(user8, address3),(user8, address4),])

    def test_expression_onclauses(self):
        # join a mapped entity to a subquery() via a plain SQL expression.
        Order, User = self.classes.Order, self.classes.User

        sess = create_session()

        subq = sess.query(User).subquery()

        self.assert_compile(
            sess.query(User).join(subq, User.name==subq.c.name),
            "SELECT users.id AS users_id, users.name AS users_name "
            "FROM users JOIN (SELECT users.id AS id, users.name "
            "AS name FROM users) AS anon_1 ON users.name = anon_1.name",
            use_default_dialect=True
        )

        subq = sess.query(Order).subquery()
        self.assert_compile(
            sess.query(User).join(subq, User.id==subq.c.user_id),
            "SELECT users.id AS users_id, users.name AS users_name FROM "
            "users JOIN (SELECT orders.id AS id, orders.user_id AS user_id, "
            "orders.address_id AS address_id, orders.description AS "
            "description, orders.isopen AS isopen FROM orders) AS "
            "anon_1 ON users.id = anon_1.user_id",
            use_default_dialect=True
        )

        self.assert_compile(
            sess.query(User).join(Order, User.id==Order.user_id),
            "SELECT users.id AS users_id, users.name AS users_name "
            "FROM users JOIN orders ON users.id = orders.user_id",
            use_default_dialect=True
        )

    def test_implicit_joins_from_aliases(self):
        # string-named joins starting from an aliased() entity.
        Item, User, Order = (self.classes.Item,
                             self.classes.User,
                             self.classes.Order)

        sess = create_session()
        OrderAlias = aliased(Order)

        eq_(
            sess.query(OrderAlias).join('items').filter_by(description='item 3').\
                order_by(OrderAlias.id).all(),
            [
                Order(address_id=1,description='order 1',isopen=0,user_id=7,id=1),
                Order(address_id=4,description='order 2',isopen=0,user_id=9,id=2),
                Order(address_id=1,description='order 3',isopen=1,user_id=7,id=3)
            ]
        )

        eq_(
            sess.query(User, OrderAlias, Item.description).
                join(OrderAlias, 'orders').
                join('items', from_joinpoint=True).
                filter_by(description='item 3').\
                order_by(User.id, OrderAlias.id).all(),
            [
                (User(name='jack',id=7), Order(address_id=1,description='order 1',isopen=0,user_id=7,id=1), 'item 3'),
                (User(name='jack',id=7), Order(address_id=1,description='order 3',isopen=1,user_id=7,id=3), 'item 3'),
                (User(name='fred',id=9), Order(address_id=4,description='order 2',isopen=0,user_id=9,id=2), 'item 3')
            ]
        )

    def test_aliased_classes_m2m(self):
        # aliased() entity across a many-to-many (order_items) association.
        Item, Order = self.classes.Item, self.classes.Order

        sess = create_session()

        (order1, order2, order3, order4, order5) = sess.query(Order).all()
        (item1, item2, item3, item4, item5) = sess.query(Item).all()
        expected = [
            (order1, item1),
            (order1, item2),
            (order1, item3),
            (order2, item1),
            (order2, item2),
            (order2, item3),
            (order3, item3),
            (order3, item4),
            (order3, item5),
            (order4, item1),
            (order4, item5),
            (order5, item5),
        ]

        q = sess.query(Order)
        q = q.add_entity(Item).select_from(join(Order, Item, 'items')).order_by(Order.id, Item.id)
        l = q.all()
        eq_(l, expected)

        IAlias = aliased(Item)
        q = sess.query(Order, IAlias).select_from(join(Order, IAlias, 'items')).filter(IAlias.description=='item 3')
        l = q.all()
        eq_(l,
            [
                (order1, item3),
                (order2, item3),
                (order3, item3),
            ]
        )

    def test_joins_from_adapted_entities(self):
        User = self.classes.User

        # test for #1853

        session = create_session()
        first = session.query(User)
        second = session.query(User)
        unioned = first.union(second)
        subquery = session.query(User.id).subquery()
        join = subquery, subquery.c.id == User.id
        joined = unioned.outerjoin(*join)
        self.assert_compile(joined,
                            'SELECT anon_1.users_id AS '
                            'anon_1_users_id, anon_1.users_name AS '
                            'anon_1_users_name FROM (SELECT users.id '
                            'AS users_id, users.name AS users_name '
                            'FROM users UNION SELECT users.id AS '
                            'users_id, users.name AS users_name FROM '
                            'users) AS anon_1 LEFT OUTER JOIN (SELECT '
                            'users.id AS id FROM users) AS anon_2 ON '
                            'anon_2.id = anon_1.users_id',
                            use_default_dialect=True)

        first = session.query(User.id)
        second = session.query(User.id)
        unioned = first.union(second)
        subquery = session.query(User.id).subquery()
        join = subquery, subquery.c.id == User.id
        joined = unioned.outerjoin(*join)
        self.assert_compile(joined,
                            'SELECT anon_1.users_id AS anon_1_users_id '
                            'FROM (SELECT users.id AS users_id FROM '
                            'users UNION SELECT users.id AS users_id '
                            'FROM users) AS anon_1 LEFT OUTER JOIN '
                            '(SELECT users.id AS id FROM users) AS '
                            'anon_2 ON anon_2.id = anon_1.users_id',
                            use_default_dialect=True)

    def test_reset_joinpoint(self):
        User = self.classes.User

        for aliased in (True, False):
            # load a user who has an order that contains item id 3 and address id 1 (order 3, owned by jack)
            result = create_session().query(User).join('orders', 'items', aliased=aliased).filter_by(id=3).reset_joinpoint().join('orders','address', aliased=aliased).filter_by(id=1).all()
            assert [User(id=7, name='jack')] == result

            result = create_session().query(User).outerjoin('orders', 'items', aliased=aliased).filter_by(id=3).reset_joinpoint().outerjoin('orders','address', aliased=aliased).filter_by(id=1).all()
            assert [User(id=7, name='jack')] == result

    def test_overlap_with_aliases(self):
        # select_from() a core alias join, then string-path join() on top.
        orders, User, users = (self.tables.orders,
                               self.classes.User,
                               self.tables.users)

        oalias = orders.alias('oalias')

        result = create_session().query(User).select_from(users.join(oalias)).filter(oalias.c.description.in_(["order 1", "order 2", "order 3"])).join('orders', 'items').order_by(User.id).all()
        assert [User(id=7, name='jack'), User(id=9, name='fred')] == result

        result = create_session().query(User).select_from(users.join(oalias)).filter(oalias.c.description.in_(["order 1", "order 2", "order 3"])).join('orders', 'items').filter_by(id=4).all()
        assert [User(id=7, name='jack')] == result

    def test_aliased(self):
        """test automatic generation of aliased joins."""

        Item, Order, User, Address = (self.classes.Item,
                                      self.classes.Order,
                                      self.classes.User,
                                      self.classes.Address)

        sess = create_session()

        # test a basic aliasized path
        q = sess.query(User).join('addresses',
                                  aliased=True).filter_by(email_address='jack@bean.com')
        assert [User(id=7)] == q.all()

        q = sess.query(User).join('addresses', aliased=True).filter(Address.email_address=='jack@bean.com')
        assert [User(id=7)] == q.all()

        q = sess.query(User).join('addresses', aliased=True).filter(or_(Address.email_address=='jack@bean.com', Address.email_address=='fred@fred.com'))
        assert [User(id=7), User(id=9)] == q.all()

        # test two aliasized paths, one to 'orders' and the other to 'orders','items'.
        # one row is returned because user 7 has order 3 and also has order 1 which has item 1
        # this tests a o2m join and a m2m join.
        q = sess.query(User).join('orders', aliased=True).filter(Order.description=="order 3").join('orders', 'items', aliased=True).filter(Item.description=="item 1")
        assert q.count() == 1
        assert [User(id=7)] == q.all()

        # test the control version - same joins but not aliased.  rows are not returned because order 3 does not have item 1
        q = sess.query(User).join('orders').filter(Order.description=="order 3").join('orders', 'items').filter(Item.description=="item 1")
        assert [] == q.all()
        assert q.count() == 0

        # the left half of the join condition of the any() is aliased.
        q = sess.query(User).join('orders', aliased=True).filter(Order.items.any(Item.description=='item 4'))
        assert [User(id=7)] == q.all()

        # test that aliasing gets reset when join() is called
        q = sess.query(User).join('orders', aliased=True).filter(Order.description=="order 3").join('orders', aliased=True).filter(Order.description=="order 5")
        assert q.count() == 1
        assert [User(id=7)] == q.all()

    def test_aliased_order_by(self):
        # self-join via aliased(User); ORDER BY mixes alias and base cols.
        User = self.classes.User

        sess = create_session()

        ualias = aliased(User)
        eq_(
            sess.query(User, ualias).filter(User.id > ualias.id).order_by(desc(ualias.id), User.name).all(),
            [
                (User(id=10,name='chuck'), User(id=9,name='fred')),
                (User(id=10,name='chuck'), User(id=8,name='ed')),
                (User(id=9,name='fred'), User(id=8,name='ed')),
                (User(id=10,name='chuck'), User(id=7,name='jack')),
                (User(id=8,name='ed'), User(id=7,name='jack')),
                (User(id=9,name='fred'), User(id=7,name='jack'))
            ]
        )

    def test_plain_table(self):
        # join() from a mapped entity directly to a core Table.
        addresses, User = self.tables.addresses, self.classes.User

        sess = create_session()

        eq_(
            sess.query(User.name).join(addresses, User.id==addresses.c.user_id).order_by(User.id).all(),
            [('jack',), ('ed',), ('ed',), ('ed',), ('fred',)]
        )

    def test_no_joinpoint_expr(self):
        User, users = self.classes.User, self.tables.users

        sess = create_session()

        # these are consistent regardless of
        # select_from() being present.
        assert_raises_message(
            sa_exc.InvalidRequestError,
            "Can't join table/selectable 'users' to itself",
            sess.query(users.c.id).join, User
        )

        assert_raises_message(
            sa_exc.InvalidRequestError,
            "Can't join table/selectable 'users' to itself",
            sess.query(users.c.id).select_from(users).join, User
        )

    def test_select_from(self):
        """Test that the left edge of the join can be set reliably with select_from()."""

        Item, Order, User = (self.classes.Item,
                             self.classes.Order,
                             self.classes.User)

        sess = create_session()
        self.assert_compile(
            sess.query(Item.id).select_from(User).join(User.orders).join(Order.items),
            "SELECT items.id AS items_id FROM users JOIN orders ON "
            "users.id = orders.user_id JOIN order_items AS order_items_1 "
            "ON orders.id = order_items_1.order_id JOIN items ON items.id = "
            "order_items_1.item_id",
            use_default_dialect=True
        )

        # here, the join really wants to add a second FROM clause
        # for "Item".  but select_from disallows that
        self.assert_compile(
            sess.query(Item.id).select_from(User).join(Item, User.id==Item.id),
            "SELECT items.id AS items_id FROM users JOIN items ON users.id = items.id",
            use_default_dialect=True
        )

    def test_from_self_resets_joinpaths(self):
        """test a join from from_self() doesn't confuse joins inside the subquery
        with the outside.
        """

        Item, Keyword = self.classes.Item, self.classes.Keyword

        sess = create_session()

        self.assert_compile(
            sess.query(Item).join(Item.keywords).from_self(Keyword).join(Item.keywords),
            "SELECT keywords.id AS keywords_id, keywords.name AS keywords_name FROM "
            "(SELECT items.id AS items_id, items.description AS items_description "
            "FROM items JOIN item_keywords AS item_keywords_1 ON items.id = "
            "item_keywords_1.item_id JOIN keywords ON keywords.id = item_keywords_1.keyword_id) "
            "AS anon_1 JOIN item_keywords AS item_keywords_2 ON "
            "anon_1.items_id = item_keywords_2.item_id "
            "JOIN keywords ON "
            "keywords.id = item_keywords_2.keyword_id",
            use_default_dialect=True
        )


class JoinFromSelectableTest(fixtures.MappedTest, AssertsCompiledSQL):
    # tests join() where one side is a subquery/selectable rather than
    # a mapped entity, in every left/right combination.
    __dialect__ = 'default'
    run_setup_mappers = 'once'

    @classmethod
    def define_tables(cls, metadata):
        Table('table1', metadata,
            Column('id', Integer, primary_key=True)
        )
        Table('table2', metadata,
            Column('id', Integer, primary_key=True),
            Column('t1_id', Integer)
        )

    @classmethod
    def setup_classes(cls):
        table1, table2 = cls.tables.table1, cls.tables.table2
        class T1(cls.Comparable):
            pass

        class T2(cls.Comparable):
            pass

        mapper(T1, table1)
        mapper(T2, table2)

    def test_select_mapped_to_mapped_explicit_left(self):
        T1, T2 = self.classes.T1, self.classes.T2

        sess = Session()
        subq = sess.query(T2.t1_id, func.count(T2.id).label('count')).\
            group_by(T2.t1_id).subquery()

        self.assert_compile(
            sess.query(subq.c.count, T1.id).select_from(subq).join(T1, subq.c.t1_id==T1.id),
            "SELECT anon_1.count AS anon_1_count, table1.id AS table1_id "
            "FROM (SELECT table2.t1_id AS t1_id, "
            "count(table2.id) AS count FROM table2 "
            "GROUP BY table2.t1_id) AS anon_1 JOIN table1 ON anon_1.t1_id = table1.id"
        )

    def test_select_mapped_to_mapped_implicit_left(self):
        T1, T2 = self.classes.T1, self.classes.T2

        sess = Session()
        subq = sess.query(T2.t1_id, func.count(T2.id).label('count')).\
            group_by(T2.t1_id).subquery()

        self.assert_compile(
            sess.query(subq.c.count, T1.id).join(T1,
                                                 subq.c.t1_id==T1.id),
            "SELECT anon_1.count AS anon_1_count, table1.id AS table1_id "
            "FROM (SELECT table2.t1_id AS t1_id, "
            "count(table2.id) AS count FROM table2 "
            "GROUP BY table2.t1_id) AS anon_1 JOIN table1 ON anon_1.t1_id = table1.id"
        )

    def test_select_mapped_to_select_explicit_left(self):
        T1, T2 = self.classes.T1, self.classes.T2

        sess = Session()
        subq = sess.query(T2.t1_id, func.count(T2.id).label('count')).\
            group_by(T2.t1_id).subquery()

        self.assert_compile(
            sess.query(subq.c.count, T1.id).select_from(T1).join(subq, subq.c.t1_id==T1.id),
            "SELECT anon_1.count AS anon_1_count, table1.id AS table1_id "
            "FROM table1 JOIN (SELECT table2.t1_id AS t1_id, "
            "count(table2.id) AS count FROM table2 GROUP BY table2.t1_id) "
            "AS anon_1 ON anon_1.t1_id = table1.id"
        )

    def test_select_mapped_to_select_implicit_left(self):
        # no usable implicit left here: must raise.
        T1, T2 = self.classes.T1, self.classes.T2

        sess = Session()
        subq = sess.query(T2.t1_id, func.count(T2.id).label('count')).\
            group_by(T2.t1_id).subquery()

        assert_raises_message(
            sa_exc.InvalidRequestError,
            r"Can't construct a join from ",
            sess.query(subq.c.count, T1.id).join, subq, subq.c.t1_id==T1.id,
        )

    def test_mapped_select_to_mapped_implicit_left(self):
        T1, T2 = self.classes.T1, self.classes.T2

        sess = Session()
        subq = sess.query(T2.t1_id, func.count(T2.id).label('count')).\
            group_by(T2.t1_id).subquery()

        assert_raises_message(
            sa_exc.InvalidRequestError,
            "Can't join table/selectable 'table1' to itself",
            sess.query(T1.id, subq.c.count).join, T1, subq.c.t1_id == T1.id
        )

        self.assert_compile(
            sess.query(T1.id, subq.c.count).select_from(subq).\
                join(T1, subq.c.t1_id == T1.id),
            "SELECT table1.id AS table1_id, anon_1.count AS anon_1_count "
            "FROM (SELECT table2.t1_id AS t1_id, count(table2.id) AS count "
            "FROM table2 GROUP BY table2.t1_id) AS anon_1 "
            "JOIN table1 ON anon_1.t1_id = table1.id"
        )

    def test_mapped_select_to_mapped_explicit_left(self):
        T1, T2 = self.classes.T1, self.classes.T2

        sess = Session()
        subq = sess.query(T2.t1_id, func.count(T2.id).label('count')).\
            group_by(T2.t1_id).subquery()

        self.assert_compile(
            sess.query(T1.id, subq.c.count).select_from(subq).join(T1, subq.c.t1_id==T1.id),
            "SELECT table1.id AS table1_id, anon_1.count AS anon_1_count "
            "FROM (SELECT table2.t1_id AS t1_id, count(table2.id) AS count "
            "FROM table2 GROUP BY table2.t1_id) AS anon_1 JOIN table1 "
            "ON anon_1.t1_id = table1.id"
        )

    def test_mapped_select_to_select_explicit_left(self):
        T1, T2 = self.classes.T1, self.classes.T2

        sess = Session()
        subq = sess.query(T2.t1_id, func.count(T2.id).label('count')).\
            group_by(T2.t1_id).subquery()

        self.assert_compile(
            sess.query(T1.id, subq.c.count).select_from(T1).join(subq, subq.c.t1_id==T1.id),
            "SELECT table1.id AS table1_id, anon_1.count AS anon_1_count "
            "FROM table1 JOIN (SELECT table2.t1_id AS t1_id, count(table2.id) AS count "
            "FROM table2 GROUP BY table2.t1_id) AS anon_1 "
            "ON anon_1.t1_id = table1.id"
        )

    def test_mapped_select_to_select_implicit_left(self):
        T1, T2 = self.classes.T1, self.classes.T2

        sess = Session()
        subq = sess.query(T2.t1_id, func.count(T2.id).label('count')).\
            group_by(T2.t1_id).subquery()

        self.assert_compile(
            sess.query(T1.id, subq.c.count).join(subq, subq.c.t1_id==T1.id),
            "SELECT table1.id AS table1_id, anon_1.count AS anon_1_count "
            "FROM table1 JOIN (SELECT table2.t1_id AS t1_id, count(table2.id) AS count "
            "FROM table2 GROUP BY table2.t1_id) AS anon_1 "
            "ON anon_1.t1_id = table1.id"
        )


class MultiplePathTest(fixtures.MappedTest, AssertsCompiledSQL):
    # two distinct m2m association tables between the same pair of
    # mapped tables; each join path must use its own association alias.
    @classmethod
    def define_tables(cls, metadata):
        t1 = Table('t1', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('data', String(30))
        )
        t2 = Table('t2', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('data', String(30))
        )

        t1t2_1 = Table('t1t2_1', metadata,
            Column('t1id', Integer, ForeignKey('t1.id')),
            Column('t2id', Integer, ForeignKey('t2.id'))
        )

        t1t2_2 = Table('t1t2_2', metadata,
            Column('t1id', Integer, ForeignKey('t1.id')),
            Column('t2id', Integer, ForeignKey('t2.id'))
        )

    def test_basic(self):
        t2, t1t2_1, t1t2_2, t1 = (self.tables.t2,
                                  self.tables.t1t2_1,
                                  self.tables.t1t2_2,
                                  self.tables.t1)

        class T1(object):
            pass

        class T2(object):
            pass

        mapper(T1, t1, properties={
            't2s_1': relationship(T2, secondary=t1t2_1),
            't2s_2': relationship(T2, secondary=t1t2_2),
        })
        mapper(T2, t2)

        q = create_session().query(T1).join('t2s_1').filter(t2.c.id==5).reset_joinpoint().join('t2s_2')
        self.assert_compile(
            q,
            "SELECT t1.id AS t1_id, t1.data AS t1_data FROM t1 JOIN t1t2_1 AS t1t2_1_1 "
            "ON t1.id = t1t2_1_1.t1id JOIN t2 ON t2.id = t1t2_1_1.t2id JOIN t1t2_2 AS t1t2_2_1 "
            "ON t1.id = t1t2_2_1.t1id JOIN t2 ON t2.id = t1t2_2_1.t2id WHERE t2.id = :id_1"
            , use_default_dialect=True
        )


class SelfRefMixedTest(fixtures.MappedTest, AssertsCompiledSQL):
    # self-referential Node mapping plus an o2m and an m2m relationship,
    # for testing joins that mix aliased self-joins with other paths.
    run_setup_mappers = 'once'
    __dialect__ = default.DefaultDialect()

    @classmethod
    def define_tables(cls, metadata):
        nodes = Table('nodes', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('parent_id', Integer, ForeignKey('nodes.id'))
        )

        sub_table = Table('sub_table', metadata,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('node_id', Integer, ForeignKey('nodes.id')),
        )

        assoc_table = Table('assoc_table', metadata,
            Column('left_id', Integer, ForeignKey('nodes.id')),
            Column('right_id', Integer, ForeignKey('nodes.id'))
        )

    @classmethod
    def setup_classes(cls):
        nodes, assoc_table, sub_table = (cls.tables.nodes,
                                         cls.tables.assoc_table,
                                         cls.tables.sub_table)

        class Node(cls.Comparable):
            pass

        class Sub(cls.Comparable):
            pass

        mapper(Node, nodes, properties={
            'children': relationship(Node, lazy='select', join_depth=3,
                backref=backref('parent', remote_side=[nodes.c.id])
            ),
            'subs': relationship(Sub),
            'assoc': relationship(Node,
                secondary=assoc_table,
                primaryjoin=nodes.c.id==assoc_table.c.left_id,
                secondaryjoin=nodes.c.id==assoc_table.c.right_id)
        })
        mapper(Sub, sub_table)

    # NOTE(review): method body continues past this chunk; header reproduced as-is.
    def test_o2m_aliased_plus_o2m(self):
Node, Sub = self.classes.Node, self.classes.Sub sess = create_session() n1 = aliased(Node) self.assert_compile( sess.query(Node).join(n1, Node.children).join(Sub, n1.subs), "SELECT nodes.id AS nodes_id, nodes.parent_id AS nodes_parent_id " "FROM nodes JOIN nodes AS nodes_1 ON nodes.id = nodes_1.parent_id " "JOIN sub_table ON nodes_1.id = sub_table.node_id" ) self.assert_compile( sess.query(Node).join(n1, Node.children).join(Sub, Node.subs), "SELECT nodes.id AS nodes_id, nodes.parent_id AS nodes_parent_id " "FROM nodes JOIN nodes AS nodes_1 ON nodes.id = nodes_1.parent_id " "JOIN sub_table ON nodes.id = sub_table.node_id" ) def test_m2m_aliased_plus_o2m(self): Node, Sub = self.classes.Node, self.classes.Sub sess = create_session() n1 = aliased(Node) self.assert_compile( sess.query(Node).join(n1, Node.assoc).join(Sub, n1.subs), "SELECT nodes.id AS nodes_id, nodes.parent_id AS nodes_parent_id " "FROM nodes JOIN assoc_table AS assoc_table_1 ON nodes.id = " "assoc_table_1.left_id JOIN nodes AS nodes_1 ON nodes_1.id = " "assoc_table_1.right_id JOIN sub_table ON nodes_1.id = sub_table.node_id", ) self.assert_compile( sess.query(Node).join(n1, Node.assoc).join(Sub, Node.subs), "SELECT nodes.id AS nodes_id, nodes.parent_id AS nodes_parent_id " "FROM nodes JOIN assoc_table AS assoc_table_1 ON nodes.id = " "assoc_table_1.left_id JOIN nodes AS nodes_1 ON nodes_1.id = " "assoc_table_1.right_id JOIN sub_table ON nodes.id = sub_table.node_id", ) class CreateJoinsTest(fixtures.ORMTest, AssertsCompiledSQL): __dialect__ = 'default' def _inherits_fixture(self): m = MetaData() base = Table('base', m, Column('id', Integer, primary_key=True)) a = Table('a', m, Column('id', Integer, ForeignKey('base.id'), primary_key=True), Column('b_id', Integer, ForeignKey('b.id'))) b = Table('b', m, Column('id', Integer, ForeignKey('base.id'), primary_key=True), Column('c_id', Integer, ForeignKey('c.id'))) c = Table('c', m, Column('id', Integer, ForeignKey('base.id'), primary_key=True)) class 
Base(object): pass class A(Base): pass class B(Base): pass class C(Base): pass mapper(Base, base) mapper(A, a, inherits=Base, properties={'b':relationship(B, primaryjoin=a.c.b_id==b.c.id)}) mapper(B, b, inherits=Base, properties={'c':relationship(C, primaryjoin=b.c.c_id==c.c.id)}) mapper(C, c, inherits=Base) return A, B, C, Base def test_double_level_aliased_exists(self): A, B, C, Base = self._inherits_fixture() s = Session() self.assert_compile( s.query(A).filter(A.b.has(B.c.has(C.id==5))), "SELECT a.id AS a_id, base.id AS base_id, a.b_id AS a_b_id " "FROM base JOIN a ON base.id = a.id WHERE " "EXISTS (SELECT 1 FROM (SELECT base.id AS base_id, b.id AS " "b_id, b.c_id AS b_c_id FROM base JOIN b ON base.id = b.id) " "AS anon_1 WHERE a.b_id = anon_1.b_id AND (EXISTS " "(SELECT 1 FROM (SELECT base.id AS base_id, c.id AS c_id " "FROM base JOIN c ON base.id = c.id) AS anon_2 " "WHERE anon_1.b_c_id = anon_2.c_id AND anon_2.c_id = :id_1" ")))" ) class JoinToNonPolyAliasesTest(fixtures.MappedTest, AssertsCompiledSQL): """test joins to an aliased selectable and that we can refer to that aliased selectable in filter criteria. Basically testing that the aliasing Query applies to with_polymorphic targets doesn't leak into non-polymorphic mappers. 
""" __dialect__ = 'default' run_create_tables = None run_deletes = None @classmethod def define_tables(cls, metadata): Table("parent", metadata, Column('id', Integer, primary_key=True), Column('data', String(50)), ) Table("child", metadata, Column('id', Integer, primary_key=True), Column('parent_id', Integer, ForeignKey('parent.id')), Column('data', String(50)) ) @classmethod def setup_mappers(cls): parent, child = cls.tables.parent, cls.tables.child class Parent(cls.Comparable): pass class Child(cls.Comparable): pass mp = mapper(Parent, parent) mapper(Child, child) derived = select([child]).alias() npc = mapper(Child, derived, non_primary=True) cls.npc = npc cls.derived = derived mp.add_property("npc", relationship(npc)) def test_join_parent_child(self): Parent = self.classes.Parent npc = self.npc sess = Session() self.assert_compile( sess.query(Parent).join(Parent.npc).filter(self.derived.c.data == 'x'), "SELECT parent.id AS parent_id, parent.data AS parent_data " "FROM parent JOIN (SELECT child.id AS id, child.parent_id AS parent_id, " "child.data AS data " "FROM child) AS anon_1 ON parent.id = anon_1.parent_id " "WHERE anon_1.data = :data_1" ) def test_join_parent_child_select_from(self): Parent = self.classes.Parent npc = self.npc sess = Session() self.assert_compile( sess.query(npc).select_from(Parent).join(Parent.npc).\ filter(self.derived.c.data == 'x'), "SELECT anon_1.id AS anon_1_id, anon_1.parent_id " "AS anon_1_parent_id, anon_1.data AS anon_1_data " "FROM parent JOIN (SELECT child.id AS id, child.parent_id AS " "parent_id, child.data AS data FROM child) AS anon_1 ON " "parent.id = anon_1.parent_id WHERE anon_1.data = :data_1" ) def test_join_select_parent_child(self): Parent = self.classes.Parent npc = self.npc sess = Session() self.assert_compile( sess.query(Parent, npc).join(Parent.npc).filter( self.derived.c.data == 'x'), "SELECT parent.id AS parent_id, parent.data AS parent_data, " "anon_1.id AS anon_1_id, anon_1.parent_id AS anon_1_parent_id, " 
"anon_1.data AS anon_1_data FROM parent JOIN " "(SELECT child.id AS id, child.parent_id AS parent_id, " "child.data AS data FROM child) AS anon_1 ON parent.id = " "anon_1.parent_id WHERE anon_1.data = :data_1" ) class SelfReferentialTest(fixtures.MappedTest, AssertsCompiledSQL): run_setup_mappers = 'once' run_inserts = 'once' run_deletes = None @classmethod def define_tables(cls, metadata): Table('nodes', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('parent_id', Integer, ForeignKey('nodes.id')), Column('data', String(30))) @classmethod def setup_classes(cls): class Node(cls.Comparable): def append(self, node): self.children.append(node) @classmethod def setup_mappers(cls): Node, nodes = cls.classes.Node, cls.tables.nodes mapper(Node, nodes, properties={ 'children':relationship(Node, lazy='select', join_depth=3, backref=backref('parent', remote_side=[nodes.c.id]) ), }) @classmethod def insert_data(cls): Node = cls.classes.Node sess = create_session() n1 = Node(data='n1') n1.append(Node(data='n11')) n1.append(Node(data='n12')) n1.append(Node(data='n13')) n1.children[1].append(Node(data='n121')) n1.children[1].append(Node(data='n122')) n1.children[1].append(Node(data='n123')) sess.add(n1) sess.flush() sess.close() def test_join_1(self): Node = self.classes.Node sess = create_session() node = sess.query(Node).join('children', aliased=True).filter_by(data='n122').first() assert node.data=='n12' def test_join_2(self): Node = self.classes.Node sess = create_session() ret = sess.query(Node.data).join(Node.children, aliased=True).filter_by(data='n122').all() assert ret == [('n12',)] def test_join_3(self): Node = self.classes.Node sess = create_session() node = sess.query(Node).join('children', 'children', aliased=True).filter_by(data='n122').first() assert node.data=='n1' def test_join_4(self): Node = self.classes.Node sess = create_session() node = sess.query(Node).filter_by(data='n122').join('parent', 
aliased=True).filter_by(data='n12').\ join('parent', aliased=True, from_joinpoint=True).filter_by(data='n1').first() assert node.data == 'n122' def test_string_or_prop_aliased(self): """test that join('foo') behaves the same as join(Cls.foo) in a self referential scenario. """ Node = self.classes.Node sess = create_session() nalias = aliased(Node, sess.query(Node).filter_by(data='n1').subquery()) q1 = sess.query(nalias).join(nalias.children, aliased=True).\ join(Node.children, from_joinpoint=True) q2 = sess.query(nalias).join(nalias.children, aliased=True).\ join("children", from_joinpoint=True) for q in (q1, q2): self.assert_compile( q, "SELECT anon_1.id AS anon_1_id, anon_1.parent_id AS " "anon_1_parent_id, anon_1.data AS anon_1_data FROM " "(SELECT nodes.id AS id, nodes.parent_id AS parent_id, " "nodes.data AS data FROM nodes WHERE nodes.data = :data_1) " "AS anon_1 JOIN nodes AS nodes_1 ON anon_1.id = " "nodes_1.parent_id JOIN nodes ON nodes_1.id = nodes.parent_id", use_default_dialect=True ) q1 = sess.query(Node).join(nalias.children, aliased=True).\ join(Node.children, aliased=True, from_joinpoint=True).\ join(Node.children, from_joinpoint=True) q2 = sess.query(Node).join(nalias.children, aliased=True).\ join("children", aliased=True, from_joinpoint=True).\ join("children", from_joinpoint=True) for q in (q1, q2): self.assert_compile( q, "SELECT nodes.id AS nodes_id, nodes.parent_id AS " "nodes_parent_id, nodes.data AS nodes_data FROM (SELECT " "nodes.id AS id, nodes.parent_id AS parent_id, nodes.data " "AS data FROM nodes WHERE nodes.data = :data_1) AS anon_1 " "JOIN nodes AS nodes_1 ON anon_1.id = nodes_1.parent_id " "JOIN nodes AS nodes_2 ON nodes_1.id = nodes_2.parent_id " "JOIN nodes ON nodes_2.id = nodes.parent_id", use_default_dialect=True ) def test_from_self_inside_excludes_outside(self): """test the propagation of aliased() from inside to outside on a from_self().. 
""" Node = self.classes.Node sess = create_session() n1 = aliased(Node) # n1 is not inside the from_self(), so all cols must be maintained # on the outside self.assert_compile( sess.query(Node).filter(Node.data=='n122').from_self(n1, Node.id), "SELECT nodes_1.id AS nodes_1_id, nodes_1.parent_id AS nodes_1_parent_id, " "nodes_1.data AS nodes_1_data, anon_1.nodes_id AS anon_1_nodes_id " "FROM nodes AS nodes_1, (SELECT nodes.id AS nodes_id, " "nodes.parent_id AS nodes_parent_id, nodes.data AS nodes_data FROM " "nodes WHERE nodes.data = :data_1) AS anon_1", use_default_dialect=True ) parent = aliased(Node) grandparent = aliased(Node) q = sess.query(Node, parent, grandparent).\ join(parent, Node.parent).\ join(grandparent, parent.parent).\ filter(Node.data=='n122').filter(parent.data=='n12').\ filter(grandparent.data=='n1').from_self().limit(1) # parent, grandparent *are* inside the from_self(), so they # should get aliased to the outside. self.assert_compile( q, "SELECT anon_1.nodes_id AS anon_1_nodes_id, " "anon_1.nodes_parent_id AS anon_1_nodes_parent_id, " "anon_1.nodes_data AS anon_1_nodes_data, " "anon_1.nodes_1_id AS anon_1_nodes_1_id, " "anon_1.nodes_1_parent_id AS anon_1_nodes_1_parent_id, " "anon_1.nodes_1_data AS anon_1_nodes_1_data, " "anon_1.nodes_2_id AS anon_1_nodes_2_id, " "anon_1.nodes_2_parent_id AS anon_1_nodes_2_parent_id, " "anon_1.nodes_2_data AS anon_1_nodes_2_data " "FROM (SELECT nodes.id AS nodes_id, nodes.parent_id " "AS nodes_parent_id, nodes.data AS nodes_data, " "nodes_1.id AS nodes_1_id, nodes_1.parent_id AS nodes_1_parent_id, " "nodes_1.data AS nodes_1_data, nodes_2.id AS nodes_2_id, " "nodes_2.parent_id AS nodes_2_parent_id, nodes_2.data AS " "nodes_2_data FROM nodes JOIN nodes AS nodes_1 ON " "nodes_1.id = nodes.parent_id JOIN nodes AS nodes_2 " "ON nodes_2.id = nodes_1.parent_id " "WHERE nodes.data = :data_1 AND nodes_1.data = :data_2 AND " "nodes_2.data = :data_3) AS anon_1 LIMIT :param_1", {'param_1':1}, use_default_dialect=True ) def 
test_explicit_join_1(self): Node = self.classes.Node n1 = aliased(Node) n2 = aliased(Node) self.assert_compile( join(Node, n1, 'children').join(n2, 'children'), "nodes JOIN nodes AS nodes_1 ON nodes.id = nodes_1.parent_id " "JOIN nodes AS nodes_2 ON nodes_1.id = nodes_2.parent_id", use_default_dialect=True ) def test_explicit_join_2(self): Node = self.classes.Node n1 = aliased(Node) n2 = aliased(Node) self.assert_compile( join(Node, n1, Node.children).join(n2, n1.children), "nodes JOIN nodes AS nodes_1 ON nodes.id = nodes_1.parent_id " "JOIN nodes AS nodes_2 ON nodes_1.id = nodes_2.parent_id", use_default_dialect=True ) def test_explicit_join_3(self): Node = self.classes.Node n1 = aliased(Node) n2 = aliased(Node) # the join_to_left=False here is unfortunate. the default on this flag should # be False. self.assert_compile( join(Node, n1, Node.children).join(n2, Node.children, join_to_left=False), "nodes JOIN nodes AS nodes_1 ON nodes.id = nodes_1.parent_id " "JOIN nodes AS nodes_2 ON nodes.id = nodes_2.parent_id", use_default_dialect=True ) def test_explicit_join_4(self): Node = self.classes.Node sess = create_session() n1 = aliased(Node) n2 = aliased(Node) self.assert_compile( sess.query(Node).join(n1, Node.children).join(n2, n1.children), "SELECT nodes.id AS nodes_id, nodes.parent_id AS nodes_parent_id, nodes.data AS " "nodes_data FROM nodes JOIN nodes AS nodes_1 ON nodes.id = nodes_1.parent_id " "JOIN nodes AS nodes_2 ON nodes_1.id = nodes_2.parent_id", use_default_dialect=True ) def test_explicit_join_5(self): Node = self.classes.Node sess = create_session() n1 = aliased(Node) n2 = aliased(Node) self.assert_compile( sess.query(Node).join(n1, Node.children).join(n2, Node.children), "SELECT nodes.id AS nodes_id, nodes.parent_id AS nodes_parent_id, nodes.data AS " "nodes_data FROM nodes JOIN nodes AS nodes_1 ON nodes.id = nodes_1.parent_id " "JOIN nodes AS nodes_2 ON nodes.id = nodes_2.parent_id", use_default_dialect=True ) def test_explicit_join_6(self): Node = 
self.classes.Node sess = create_session() n1 = aliased(Node) node = sess.query(Node).select_from(join(Node, n1, 'children')).\ filter(n1.data == 'n122').first() assert node.data == 'n12' def test_explicit_join_7(self): Node = self.classes.Node sess = create_session() n1 = aliased(Node) n2 = aliased(Node) node = sess.query(Node).select_from( join(Node, n1, 'children').join(n2, 'children')).\ filter(n2.data == 'n122').first() assert node.data == 'n1' def test_explicit_join_8(self): Node = self.classes.Node sess = create_session() n1 = aliased(Node) n2 = aliased(Node) # mix explicit and named onclauses node = sess.query(Node).select_from( join(Node, n1, Node.id == n1.parent_id).join(n2, 'children')).\ filter(n2.data == 'n122').first() assert node.data == 'n1' def test_explicit_join_9(self): Node = self.classes.Node sess = create_session() n1 = aliased(Node) n2 = aliased(Node) node = sess.query(Node).select_from(join(Node, n1, 'parent').join(n2, 'parent')).\ filter(and_(Node.data == 'n122', n1.data == 'n12', n2.data == 'n1')).first() assert node.data == 'n122' def test_explicit_join_10(self): Node = self.classes.Node sess = create_session() n1 = aliased(Node) n2 = aliased(Node) eq_( list(sess.query(Node).select_from(join(Node, n1, 'parent').join(n2, 'parent')).\ filter(and_(Node.data == 'n122', n1.data == 'n12', n2.data == 'n1')).values(Node.data, n1.data, n2.data)), [('n122', 'n12', 'n1')]) def test_join_to_nonaliased(self): Node = self.classes.Node sess = create_session() n1 = aliased(Node) # using 'n1.parent' implicitly joins to unaliased Node eq_( sess.query(n1).join(n1.parent).filter(Node.data=='n1').all(), [Node(parent_id=1,data='n11',id=2), Node(parent_id=1,data='n12',id=3), Node(parent_id=1,data='n13',id=4)] ) # explicit (new syntax) eq_( sess.query(n1).join(Node, n1.parent).filter(Node.data=='n1').all(), [Node(parent_id=1,data='n11',id=2), Node(parent_id=1,data='n12',id=3), Node(parent_id=1,data='n13',id=4)] ) def test_multiple_explicit_entities_one(self): 
Node = self.classes.Node sess = create_session() parent = aliased(Node) grandparent = aliased(Node) eq_( sess.query(Node, parent, grandparent).\ join(parent, Node.parent).\ join(grandparent, parent.parent).\ filter(Node.data=='n122').filter(parent.data=='n12').\ filter(grandparent.data=='n1').first(), (Node(data='n122'), Node(data='n12'), Node(data='n1')) ) def test_multiple_explicit_entities_two(self): Node = self.classes.Node sess = create_session() parent = aliased(Node) grandparent = aliased(Node) eq_( sess.query(Node, parent, grandparent).\ join(parent, Node.parent).\ join(grandparent, parent.parent).\ filter(Node.data == 'n122').filter(parent.data == 'n12').\ filter(grandparent.data == 'n1').from_self().first(), (Node(data='n122'), Node(data='n12'), Node(data='n1')) ) def test_multiple_explicit_entities_three(self): Node = self.classes.Node sess = create_session() parent = aliased(Node) grandparent = aliased(Node) # same, change order around eq_( sess.query(parent, grandparent, Node).\ join(parent, Node.parent).\ join(grandparent, parent.parent).\ filter(Node.data == 'n122').filter(parent.data == 'n12').\ filter(grandparent.data == 'n1').from_self().first(), (Node(data='n12'), Node(data='n1'), Node(data='n122')) ) def test_multiple_explicit_entities_four(self): Node = self.classes.Node sess = create_session() parent = aliased(Node) grandparent = aliased(Node) eq_( sess.query(Node, parent, grandparent).\ join(parent, Node.parent).\ join(grandparent, parent.parent).\ filter(Node.data=='n122').filter(parent.data=='n12').\ filter(grandparent.data=='n1').\ options(joinedload(Node.children)).first(), (Node(data='n122'), Node(data='n12'), Node(data='n1')) ) def test_multiple_explicit_entities_five(self): Node = self.classes.Node sess = create_session() parent = aliased(Node) grandparent = aliased(Node) eq_( sess.query(Node, parent, grandparent).\ join(parent, Node.parent).\ join(grandparent, parent.parent).\ filter(Node.data=='n122').filter(parent.data=='n12').\ 
filter(grandparent.data=='n1').from_self().\ options(joinedload(Node.children)).first(), (Node(data='n122'), Node(data='n12'), Node(data='n1')) ) def test_any(self): Node = self.classes.Node sess = create_session() eq_(sess.query(Node).filter(Node.children.any(Node.data=='n1')).all(), []) eq_(sess.query(Node).filter(Node.children.any(Node.data=='n12')).all(), [Node(data='n1')]) eq_(sess.query(Node).filter(~Node.children.any()).order_by(Node.id).all(), [Node(data='n11'), Node(data='n13'),Node(data='n121'),Node(data='n122'),Node(data='n123'),]) def test_has(self): Node = self.classes.Node sess = create_session() eq_(sess.query(Node).filter(Node.parent.has(Node.data=='n12')).order_by(Node.id).all(), [Node(data='n121'),Node(data='n122'),Node(data='n123')]) eq_(sess.query(Node).filter(Node.parent.has(Node.data=='n122')).all(), []) eq_(sess.query(Node).filter(~Node.parent.has()).all(), [Node(data='n1')]) def test_contains(self): Node = self.classes.Node sess = create_session() n122 = sess.query(Node).filter(Node.data=='n122').one() eq_(sess.query(Node).filter(Node.children.contains(n122)).all(), [Node(data='n12')]) n13 = sess.query(Node).filter(Node.data=='n13').one() eq_(sess.query(Node).filter(Node.children.contains(n13)).all(), [Node(data='n1')]) def test_eq_ne(self): Node = self.classes.Node sess = create_session() n12 = sess.query(Node).filter(Node.data=='n12').one() eq_(sess.query(Node).filter(Node.parent==n12).all(), [Node(data='n121'),Node(data='n122'),Node(data='n123')]) eq_(sess.query(Node).filter(Node.parent != n12).all(), [Node(data='n1'), Node(data='n11'), Node(data='n12'), Node(data='n13')]) class SelfReferentialM2MTest(fixtures.MappedTest): run_setup_mappers = 'once' run_inserts = 'once' run_deletes = None @classmethod def define_tables(cls, metadata): nodes = Table('nodes', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(30))) node_to_nodes =Table('node_to_nodes', metadata, Column('left_node_id', 
Integer, ForeignKey('nodes.id'),primary_key=True), Column('right_node_id', Integer, ForeignKey('nodes.id'),primary_key=True), ) @classmethod def setup_classes(cls): class Node(cls.Comparable): pass @classmethod def insert_data(cls): Node, nodes, node_to_nodes = (cls.classes.Node, cls.tables.nodes, cls.tables.node_to_nodes) mapper(Node, nodes, properties={ 'children':relationship(Node, lazy='select', secondary=node_to_nodes, primaryjoin=nodes.c.id==node_to_nodes.c.left_node_id, secondaryjoin=nodes.c.id==node_to_nodes.c.right_node_id, ) }) sess = create_session() n1 = Node(data='n1') n2 = Node(data='n2') n3 = Node(data='n3') n4 = Node(data='n4') n5 = Node(data='n5') n6 = Node(data='n6') n7 = Node(data='n7') n1.children = [n2, n3, n4] n2.children = [n3, n6, n7] n3.children = [n5, n4] sess.add(n1) sess.add(n2) sess.add(n3) sess.add(n4) sess.flush() sess.close() def test_any(self): Node = self.classes.Node sess = create_session() eq_(sess.query(Node).filter(Node.children.any(Node.data == 'n3' )).order_by(Node.data).all(), [Node(data='n1'), Node(data='n2')]) def test_contains(self): Node = self.classes.Node sess = create_session() n4 = sess.query(Node).filter_by(data='n4').one() eq_(sess.query(Node).filter(Node.children.contains(n4)).order_by(Node.data).all(), [Node(data='n1'), Node(data='n3')]) eq_(sess.query(Node).filter(not_(Node.children.contains(n4))).order_by(Node.data).all(), [Node(data='n2'), Node(data='n4'), Node(data='n5'), Node(data='n6'), Node(data='n7')]) def test_explicit_join(self): Node = self.classes.Node sess = create_session() n1 = aliased(Node) eq_( sess.query(Node).select_from(join(Node, n1, 'children' )).filter(n1.data.in_(['n3', 'n7' ])).order_by(Node.id).all(), [Node(data='n1'), Node(data='n2')] )
codeparrot/github-code-clean
# -*- coding: utf-8 -*- from sympy.matrices import Matrix from sympy.core import Add, diff, Symbol from sympy.simplify import simplify from tensor_analysis.arraypy import Arraypy, TensorArray, matrix2arraypy, \ matrix2tensor, list2arraypy, list2tensor from tensor_analysis.tensor_methods import is_symmetric from tensor_analysis.helper_functions import check_vector_of_arguments, \ check_metric_tensor, check_the_vector_field, replace_index_to_k, \ check_the_christoffel_symbols_2 """Module riemannian_geometry contains functions for work with tensor fields: - the calculation of the scalar product; - the Christoffel symbols of the first and second kind; - the covariant derivative of the curvature tensor; - the Ricci tensor; - scalar and sectional curvature; - the covariant derivative the tensor field; - the covariant divergence of a tensor field; - the Riemann curvature tensor and sectional curvature for left-invariant metric; - the product of Kulkarni-Nomizu; - the Gaussian curvature; - the second quadratic form. To implement the functions used modules: matrices and tensor (with classes arraypy and tensor). All functions take arguments, the types of which may be such as list, matrix, or array Arraypy tensor. Some functions have optional parameter indicating the type of the function result. Starting index of arguments with type Arraypy or TensorArray is not necessarily and by default equal to 0. The function determines the range of the index in array to return the object with the same range of index. Functions are work with multidimensional arrays Arraypy and tensors, classes and methods are contained in the module Arraypy. """ def scal_prod(X, Y, g): """Returns scalar product of vectors g(X,Y). Examples: ========= >>> from tensor_analysis.riemannian_geometry import scal_prod >>> from sympy import symbols, cos >>> from tensor_analysis.arraypy import Arraypy, TensorArray >>> x1, x2 = symbols('x1, x2') X, Y it's a vector or a vector field. 
They can be a list, one-dimensional arraypy or TensorArray with valence of indices (+1): >>> X = [1, 2] >>> Y = [3, 4] g it's a metric tensor must be symmetric matrix, array of arraypy or covariant tensor with valence of indices (-1, -1): >>> A = Arraypy((2, 2)) >>> g = TensorArray(A,(-1, -1)) >>> g[0,0] = cos(x2)**2 >>> g[0,1] = 0 >>> g[1,0] = 0 >>> g[1,1] = 1 The scalar product: >>> sc = scal_prod(X, Y, g) >>> print(sc) 3*cos(x2)**2 + 8 """ # Handling of a input argument - metric tensor g check_metric_tensor(g) if isinstance(g, (Arraypy, TensorArray)): g = g.to_matrix() if not g.is_symmetric(): raise ValueError('The metric tensor must be symmetric.') # Handling of a input arguments - vector or vector fields X check_the_vector_field(X) if isinstance(X, (TensorArray, Arraypy)): X = X.to_list() # Handling of a input arguments - vector or vector fields Y check_the_vector_field(Y) if isinstance(Y, (TensorArray, Arraypy)): Y = Y.to_list() if not len(X) == len(Y): raise ValueError('The vectors must be identical length') elif len(X) != g.rows: raise ValueError( 'The vector fields and dimension of metric tensor must be identical length') # Calculation indices = range(len(X)) scal = sum([g[i, j] * X[i] * Y[j] for i in indices for j in indices]) # Output return scal def christoffel_1(g, var, type_output='t'): """Return the (-1,-1,-1) - tensor of Christoffel symbols for the given metric. This returns the Christoffel symbol of first kind that represents the Levi-Civita connection for the given metric. Examples: ========= >>> from tensor_analysis.riemannian_geometry import christoffel_1 >>> from tensor_analysis.arraypy import Arraypy, TensorArray >>> from sympy import symbols, cos >>> x1, x2 = symbols('x1, x2') var is a list of symbolic arguments. 
May be a list, one-dimensional arraypy or one-dimensional tensor with valence of indices (+1): >>> var = [x1, x2] >>> A = Arraypy((2, 2)) >>> g = TensorArray(A,(-1, -1)) >>> g[0,0] = cos(x2)**2 >>> g[0,1] = 0 >>> g[1,0] = 0 >>> g[1,1] = 1 type_output it's optional parameter function, indicating the type of calculation result and receiving the character or string value: - symbol 't' means that the type of the result will match TensorArray; - symbol 'a' means that the type of the result will be Arraypy; - default function takes a parameter 't', so that the result will be a TensorArray. The Christoffel symbols of the first kind: >>> ch_1 = christoffel_1(g, var, 't') >>> print(ch_1) 0 sin(x2)*cos(x2) -sin(x2)*cos(x2) 0 -sin(x2)*cos(x2) 0 0 0 >>> ch_1.type_pq (0, 3) """ # Handling of input vector of arguments - var check_vector_of_arguments(var) if isinstance(var, (TensorArray, Arraypy)): var = var.to_list() # Definition of number of variables n = len(var) # Handling of a input argument - metric tensor g check_metric_tensor(g) if isinstance(g, (Arraypy, TensorArray)): if not (g.start_index[0] == g.start_index[1]): raise ValueError( 'The starting indices of metric tensor must be identical') idx_start = g.start_index[0] elif isinstance(g, Matrix): if not g.is_symmetric(): raise ValueError('The metric tensor must be symmetric.') idx_start = 0 # The definition of diapason changes in an index [n1, n2] = g.shape if not n == n1: raise ValueError( 'The rank of the metric tensor does not coincide with the number of variables.') indices = range(idx_start, idx_start + n) # Creating of output array with new indices Ch = Arraypy([3, n, idx_start]) # Calculation for i in indices: for j in indices: for k in indices: Ch[i, j, k] = (diff(g[j, k], var[i - idx_start]) + diff(g[i, k], var[j - idx_start]) - diff(g[i, j], var[k - idx_start])) / 2 # Handling of an output array if type_output == str('t') or type_output == Symbol('t'): christoffel_1 = Ch.to_tensor((-1, -1, -1)) elif type_output 
== str('a') or type_output == Symbol('a'): christoffel_1 = Ch else: raise ValueError( "The parameter of type output result must 'a' - Arraypy or 't' and None - TensorArray.") # Output return christoffel_1 def christoffel_2(g, var, type_output='t'): """Return the (1, -1, -1) - tensor of Christoffel symbols for the given metric. This returns the Christoffel symbol of second kind that represents the Levi-Civita connection for the given metric. Examples: ========= >>> from tensor_analysis.riemannian_geometry import christoffel_2 >>> from tensor_analysis.arraypy import Arraypy, TensorArray >>> from sympy import symbols, cos >>> x1, x2 = symbols('x1, x2') var it's a list of symbolic arguments. May be a list, one-dimensional arraypy or one-dimensional tensor with valence of indices (+1): >>> var = [x1, x2] g it's a metric tensor must be symmetric matrix, arraypy or tensor with valence indices (-1, -1): >>> A = Arraypy((2, 2)) >>> g = TensorArray(A,(-1, -1)) >>> g[0,0] = cos(x2)**2 >>> g[0,1] = 0 >>> g[1,0] = 0 >>> g[1,1] = 1 type_output it's optional parameter function, indicating the type of calculation result and receiving the character or string value: - symbol 't' means that the type of the result will match TensorArray; - symbol 'a' means that the type of the result will be Arraypy; - default function takes a parameter 't', so that the result will be a TensorArray. 
The Christoffel symbols of the second kind: >>> ch_2 = christoffel_2(g, var, 'a') >>> print(ch_2) 0 sin(x2)*cos(x2) -sin(x2)/cos(x2) 0 -sin(x2)/cos(x2) 0 0 0 """ # Handling of input vector of arguments - var check_vector_of_arguments(var) if isinstance(var, (TensorArray, Arraypy)): var = var.to_list() # Definition of number of variables n = len(var) # Handling of a input argument - metric tensor g check_metric_tensor(g) if isinstance(g, (Arraypy, TensorArray)): if not (g.start_index[0] == g.start_index[1]): raise ValueError( 'The starting indices of metric tensor must be identical') idx_start = g.start_index[0] g_inv = (g.to_matrix()).inv() elif isinstance(g, Matrix): if not g.is_symmetric(): raise ValueError('The metric tensor must be symmetric.') idx_start = 0 g_inv = g.inv() # The definition of diapason changes in an index [n1, n2] = g.shape if not n == n1: raise ValueError( 'The rank of the metric tensor does not coincide with the number of variables.') indices = range(idx_start, idx_start + n) # Creating of output array with new indices Ch = Arraypy([3, n, idx_start]) # Calculation for i in indices: for j in indices: for k in indices: Ch[i, j, k] = Add(*[g_inv[k - idx_start, l - idx_start] * (diff(g[j, l], var[i - idx_start]) + diff(g[i, l], var[j - idx_start]) - diff(g[i, j], var[l - idx_start])) / 2 for l in indices]) # Other variant calculation # christ_1 = christoffel_1(g, var) # for i in indices: # for j in indices: # for k in indices: # Ch[i, # j, # k] = Add(*[g_inv[k, # l] *christ_1[i, # j, # l] for l in indices]) # Handling of an output array if type_output == str('t') or type_output == Symbol('t'): christoffel_2 = Ch.to_tensor((1, -1, -1)) elif type_output == str('a') or type_output == Symbol('a'): christoffel_2 = Ch else: raise ValueError( "The parameter of type output result must 'a' - Arraypy or 't' and None - TensorArray.") # Output return christoffel_2 def covar_der(X, g, var, type_output='t'): """Return the covariant derivative the vector field. 
Examples: ========= >>> from tensor_analysis.riemannian_geometry import covar_der >>> from tensor_analysis.arraypy import Arraypy, TensorArray >>> from sympy import symbols, cos >>> x1, x2 = symbols('x1, x2') var it's a list of symbolic arguments. May be a list, one-dimensional arraypy or one-dimensional tensor with valence of indices (+1): >>> var = [x1, x2] g it's a metric tensor must be symmetric matrix, arraypy or tensor with valence indices (-1, -1): >>> A = Arraypy((2, 2)) >>> g = TensorArray(A,(-1, -1)) >>> g[0,0] = cos(x2)**2 >>> g[0,1] = 0 >>> g[1,0] = 0 >>> g[1,1] = 1 X it's vector field can be a list, one-dimensional arraypy, or one-dimensional tensor with valences of indices (+1): >>> X = [x1 * x2**3, x1 - cos(x2)] type_output it's optional parameter function, indicating the type of calculation result and receiving the character or string value: - symbol 't' means that the type of the result will match TensorArray; - symbol 'a' means that the type of the result will be Arraypy; - default function takes a parameter 't', so that the result will be a TensorArray. 
The covariant derivative: >>> c_v = covar_der(X, g, var, 't') >>> print(c_v) x2**3 - (x1 - cos(x2))*sin(x2)/cos(x2) x1*x2**3*sin(x2)*cos(x2) + 1 -x1*x2**3*sin(x2)/cos(x2) + 3*x1*x2**2 sin(x2) >>> c_v.type_pq (1, 1) """ # Handling of input vector of arguments - var check_vector_of_arguments(var) if isinstance(var, (TensorArray, Arraypy)): var = var.to_list() # Definition of number of variables n = len(var) # Handling of a input argument - metric tensor g check_metric_tensor(g) if isinstance(g, (Arraypy, TensorArray)): if not (g.start_index[0] == g.start_index[1]): raise ValueError( 'The starting indices of metric tensor must be identical') idx_g = g.start_index[0] elif isinstance(g, Matrix): if not g.is_symmetric(): raise ValueError('The metric tensor must be symmetric.') idx_g = 0 # Handling of a input argument - vector field X check_the_vector_field(X) if isinstance(X, (Arraypy, TensorArray)): idx_X = X.start_index[0] elif isinstance(X, list): idx_X = 0 # The definition of diapason changes in an index [n1, n2] = g.shape if not n == n1: raise ValueError( 'The rank of the metric tensor does not coincide with the number of variables.') if (idx_g != idx_X): raise ValueError( 'The start index of the metric tensor and vector field must be equal') else: idx_start = idx_g indices = range(idx_start, idx_start + n) # Creating of output array with new indices cov = Arraypy([2, n, idx_start]) ch_2 = christoffel_2(g, var) # Calculation for i in indices: for j in indices: cov[i, j] = diff(X[j], var[i - idx_start]) + \ Add(*[ch_2[k, i, j] * X[k] for k in indices]) # Handling of an output array if type_output == str('t') or type_output == Symbol('t'): cov_der = cov.to_tensor((1, -1)) elif type_output == str('a') or type_output == Symbol('a'): cov_der = cov else: raise ValueError( "The parameter of type output result must 'a' - Arraypy or 't' and None - TensorArray.") # Output return cov_der def covar_der_xy(X, Y, g, var, type_output='t'): """Return the covariant derivative the 
vector field along another field. Examples: ========= >>> from tensor_analysis.riemannian_geometry import covar_der_xy >>> from tensor_analysis.arraypy import Arraypy, TensorArray >>> from sympy import symbols, cos >>> x1, x2 = symbols('x1, x2') var it's a list of symbolic arguments. May be a list, one-dimensional arraypy or one-dimensional tensor with valence of indices (+1): >>> var = [x1, x2] g it's a metric tensor must be symmetric matrix, arraypy or tensor with valences indices (-1, -1): >>> A = Arraypy((2, 2)) >>> g = TensorArray(A,(-1, -1)) >>> g[0,0] = cos(x2)**2 >>> g[0,1] = 0 >>> g[1,0] = 0 >>> g[1,1] = 1 X, Y it's vector fields may be lists, one-dimensional arraypy, or one-dimensional tensor indices with valences (+ 1): >>> X = [x1 * x2**3, x1 - cos(x2)] >>> Y = [1, 2] type_output it's optional parameter function, indicating the type of calculation result and receiving the character or string value: - symbol 't' means that the type of the result will match TensorArray; - symbol 'a' means that the type of the result will be Arraypy; - default function takes a parameter 't', so that the result will be a TensorArray. 
The covariant derivative along another vector field: >>> c_v_XY = covar_der_xy(X, Y, g, var, 't') >>> print(c_v_XY) -2*x1*x2**3*sin(x2)/cos(x2) + 6*x1*x2**2 + x2**3 - (x1 - cos(x2))*sin(x2)/cos(x2) \ x1*x2**3*sin(x2)*cos(x2) + 2*sin(x2) + 1 """ # Handling of input vector of arguments - var check_vector_of_arguments(var) if isinstance(var, (TensorArray, Arraypy)): var = var.to_list() # Definition of number of variables n = len(var) # Handling of a input argument - metric tensor g check_metric_tensor(g) if isinstance(g, (Arraypy, TensorArray)): if not (g.start_index[0] == g.start_index[1]): raise ValueError( 'The starting indices of metric tensor must be identical') idx_g = g.start_index[0] elif isinstance(g, Matrix): if not g.is_symmetric(): raise ValueError('The metric tensor must be symmetric.') idx_g = 0 # Handling of a input argument - vector field X check_the_vector_field(X) if isinstance(X, (Arraypy, TensorArray)): idx_X = X.start_index[0] elif isinstance(X, list): idx_X = 0 # Handling of a input argument - vector field Y check_the_vector_field(Y) if isinstance(Y, (Arraypy, TensorArray)): idx_Y = Y.start_index[0] elif isinstance(Y, list): idx_Y = 0 [n1, n2] = g.shape if not len(X) == len(Y): raise ValueError('The vectors must be identical length') elif not idx_X == idx_Y: raise ValueError('The start index of vector fields must be equal') elif not(idx_g == idx_X): raise ValueError( 'The start index of the metric tensor and vector field must be equal') else: idx_start = idx_g if len(X) != n1: raise ValueError( 'The vector fields and dimension of metric tensor must be identical length') # The definition of diapason changes in an index if not n == n1: raise ValueError( 'The rank of the metric tensor does not concide with the number of variables.') indices = range(idx_start, idx_start + n) # Creating of output array with new indices nabla_XY = Arraypy([1, n, idx_start]) nabla_X = covar_der(X, g, var) # Calculation for j in indices: nabla_XY[j] = sum([nabla_X[i, j] 
* Y[i] for i in indices]) # Handling of an output array if type_output == str('t') or type_output == Symbol('t'): cov_der_XY = nabla_XY.to_tensor((1)) elif type_output == str('a') or type_output == Symbol('a'): cov_der_XY = nabla_XY else: raise ValueError( "The parameter of type output result must 'a' - Arraypy or 't' and None - TensorArray.") # Output return cov_der_XY def riemann(g, var, type_output='t'): """Return the Riemann curvature tensor of type (1, -1, -1, -1) for the given metric tensor. Examples: ========= >>> from tensor_analysis.riemannian_geometry import riemann >>> from tensor_analysis.arraypy import Arraypy, TensorArray >>> from sympy import symbols, cos >>> x1, x2 = symbols('x1, x2') var it's a list of symbolic arguments. May be a list, one-dimensional arraypy or one-dimensional tensor with valence of indices (+1): >>> var = [x1, x2] g it's a metric tensor must be symmetric matrix, arraypy or tensor with valence indices (-1, -1): >>> A = Arraypy((2, 2)) >>> g = TensorArray(A,(-1, -1)) >>> g[0,0] = cos(x2)**2 >>> g[0,1] = 0 >>> g[1,0] = 0 >>> g[1,1] = 1 type_output it's optional parameter function, indicating the type of calculation result and receiving the character or string value: - symbol 't' means that the type of the result will match TensorArray; - symbol 'a' means that the type of the result will be Arraypy; - default function takes a parameter 't', so that the result will be a TensorArray. 
The curvature tensor: >>> r = riemann(g, var, 'a') >>> print(r) 0 0 0 0 0 -cos(x2)**2 1 0 0 cos(x2)**2 -1 0 0 0 0 0 """ # Handling of input vector of arguments - var check_vector_of_arguments(var) if isinstance(var, (TensorArray, Arraypy)): var = var.to_list() # Definition of number of variables n = len(var) # Handling of a input argument - metric tensor g check_metric_tensor(g) if isinstance(g, (Arraypy, TensorArray)): if not (g.start_index[0] == g.start_index[1]): raise ValueError( 'The starting indices of metric tensor must be identical') idx_start = g.start_index[0] elif isinstance(g, Matrix): if not g.is_symmetric(): raise ValueError('The metric tensor must be symmetric.') idx_start = 0 # The definition of diapason changes in an index [n1, n2] = g.shape if not n == n1: raise ValueError( 'The rank of the metric tensor does not coincide with the number of variables.') indices = range(idx_start, idx_start + n) # Creating of output array with new indices R = Arraypy([4, n, idx_start]) ch_2 = christoffel_2(g, var) # Calculation for i in indices: for j in indices: for k in indices: for l in indices: R[i, j, k, l] = diff(ch_2[j, k, l], var[i - idx_start]) - diff(ch_2[i, k, l], var[j - idx_start]) + sum([ch_2[i, p, l] * ch_2[j, k, p] - ch_2[j, p, l] * ch_2[i, k, p] for p in indices]) # Handling of an output array if type_output == str('t') or type_output == Symbol('t'): riemann = R.to_tensor((1, -1, -1, -1)) elif type_output == str('a') or type_output == Symbol('a'): riemann = R else: raise ValueError( "The parameter of type output result must 'a' - Arraypy or 't' and None - TensorArray.") # Output return riemann def ricci(riemann, var, type_output='t'): """Return the tensor Ricci of type (-1, -1), is symmetric tensor for given Riemann curvature tensor. 
Examples: ========= >>> from tensor_analysis.riemannian_geometry import ricci, riemann >>> from tensor_analysis.arraypy import Arraypy, TensorArray >>> from sympy import symbols, cos >>> x1, x2 = symbols('x1, x2') var it's a list of symbolic arguments. May be a list, one-dimensional arraypy or one-dimensional tensor with valence of indices (+1): >>> var = [x1, x2] g it's a metric tensor must be symmetric matrix, arraypy or tensor with valence indices (-1, -1): >>> A = Arraypy((2,2)) >>> g = TensorArray(A,(-1,-1)) >>> g[0,0] = cos(x2)**2 >>> g[0,1] = 0 >>> g[1,0] = 0 >>> g[1,1] = 1 riemann it's a Riemann curvature tensor must be symmetric matrix, arraypy or tensor with valences indices (-1, -1, -1, 1): >>> cur = riemann(g, var, 't') type_output it's optional parameter function, indicating the type of calculation result and receiving the character or string value: - symbol 't' means that the type of the result will match TensorArray; - symbol 'a' means that the type of the result will be Arraypy; - default function takes a parameter 't', so that the result will be a TensorArray. 
The Ricci tensor: >>> r = ricci(cur, var, 't') >>> print(r) cos(x2)**2 0 0 1 >>> r.type_pq (0, 2) """ # Handling of input vector of arguments - var check_vector_of_arguments(var) if isinstance(var, (TensorArray, Arraypy)): var = var.to_list() # Definition of number of variables n = len(var) # Handling of a input argument Riemann curvature tensor - riemann if not isinstance(riemann, (Matrix, Arraypy, TensorArray)): raise TypeError( 'The type of Riemann curvature tensor must be Matrix, Arraypy or TensorArray') else: if isinstance(riemann, (Arraypy, TensorArray)): if isinstance(riemann, TensorArray): if not riemann.type_pq == (1, 3): raise ValueError( 'The valence of Riemann curvature tensor must be (1, -1, -1, -1)') if not ( riemann.start_index.count( riemann.start_index[0]) == 4): raise ValueError( 'The starting indices of Riemann curvature tensor must be identical') idx_start = riemann.start_index[0] else: idx_start = 0 # The definition of diapason changes in an index [n1, n2, n3, n4] = riemann.shape if not n == n1: raise ValueError( 'The rank of the Riemann curvature tensor does not coincide with the number of variables.') indices = range(idx_start, idx_start + n) # Creating of output array with new indices Ri = Arraypy([2, n, idx_start]) # Calculation for j in indices: for k in indices: Ri[j, k] = sum([riemann[i, j, k, i] for i in indices]) # Handling of an output array if type_output == str('t') or type_output == Symbol('t'): ricci = Ri.to_tensor((-1, -1)) elif type_output == str('a') or type_output == Symbol('a'): ricci = Ri else: raise ValueError( "The parameter of type output result must 'a' - Arraypy or 't' and None - TensorArray.") # Output return ricci def scal_curv(g, ricci, var): """The scalar curvature (or the Ricci scalar) is the simplest curvature invariant of a Riemannian manifold. 
Examples: ========= >>> from tensor_analysis.riemannian_geometry import scal_curv, ricci, riemann >>> from tensor_analysis.arraypy import Arraypy, TensorArray >>> from sympy import symbols, cos >>> x1, x2 = symbols('x1, x2') var it's a list of symbolic arguments. May be a list, one-dimensional arraypy or one-dimensional tensor with valence of indices (+1): >>> var = [x1, x2] g it's a metric tensor must be symmetric matrix, arraypy or tensor with valence indices (-1, -1): >>> A = Arraypy((2,2)) >>> g = TensorArray(A,(-1,-1)) >>> g[0,0] = cos(x2)**2 >>> g[0,1] = 0 >>> g[1,0] = 0 >>> g[1,1] = 1 riemann it's a Riemann curvature tensor must be symmetric matrix, arraypy or tensor with valences indices (-1, -1, -1, 1): >>> cur = riemann(g, var, 't') ricci it's Ricci tensor must be a matrix, arraypy or valences with tensor indices (-1, -1): >>> r = ricci(cur, var, 't') The Ricci tensor for the Riemann curvature tensor: >>> sc_c = scal_curv(g, r, var) >>> print(sc_c) 1 """ # Handling of input vector of arguments - var check_vector_of_arguments(var) if isinstance(var, (TensorArray, Arraypy)): var = var.to_list() # Definition of number of variables n = len(var) # Handling of a input argument - metric tensor g check_metric_tensor(g) if isinstance(g, (Arraypy, TensorArray)): g = g.to_matrix() if not g.is_symmetric(): raise ValueError('The metric tensor must be symmetric.') # The definition of inverse matrix of the metric tensor g_inv = g.inv() # Handling of a input argument tensor Ricci - ricci if not isinstance(ricci, (Matrix, Arraypy, TensorArray)): raise TypeError( 'The type of tensor Ricci must be Matrix, TensorArray or Arraypy') else: if isinstance(ricci, (Arraypy, TensorArray)): if isinstance(ricci, TensorArray): if not ricci.type_pq == (0, 2): raise ValueError( 'The valence of tensor Ricci must be (-1,-1)') ricci = ricci.to_matrix() if not ricci.is_symmetric(): raise ValueError('The Ricci tensor must be symmetric.') if not (g.shape == ricci.shape): raise ValueError( 'The 
rank of the metric tensor does not coincide with the rank of tensor Ricci.') # The definition of diapason changes in an index [n1, n2] = g.shape if not n == n1: raise ValueError( 'The rank of the metric tensor does not coincide with the number of variables.') # Calculation indices = range(n) for i in indices: for j in indices: scal_curv = g_inv[i, j] * ricci[i, j] # Output return scal_curv def k_sigma(X, Y, R, g, var): """Return Sectional curvature of thу Riemannian space in the direction за two-dimensional area formed by vectors X, Y for the given metric tensor. Examples: ========= >>> from tensor_analysis.riemannian_geometry import k_sigma, riemann >>> from tensor_analysis.arraypy import Arraypy, TensorArray >>> from sympy import symbols, cos >>> x1, x2 = symbols('x1, x2') var it's a list of symbolic arguments. May be a list, one-dimensional arraypy or one-dimensional tensor with valence of indices (+1): >>> var = [x1, x2] X, Y it's a vector or a vector field. They can be a list, one-dimensional arraypy or tensor with valence of indices (+1): >>> X = [1, 2] >>> Y = [3, 4] g it's a metric tensor must be symmetric matrix, arraypy or tensor with valence indices (-1, -1): >>> A = Arraypy((2, 2)) >>> g = TensorArray(A,(-1, -1)) >>> g[0,0] = cos(x2)**2 >>> g[0,1] = 0 >>> g[1,0] = 0 >>> g[1,1] = 1 R it's a Riemann curvature tensor must be symmetric matrix, arraypy or tensor with valences indices (1, -1, -1, -1): >>> R = riemann(g, var) The sectional curvature: >>> k_sig = k_sigma(X, Y, R, g, var) >>> print(k_sig) 1 """ # Handling of input vector of arguments - var check_vector_of_arguments(var) if isinstance(var, (TensorArray, Arraypy)): var = var.to_list() # Definition of number of variables n = len(var) # Handling of a input argument - metric tensor g check_metric_tensor(g) if isinstance(g, (Arraypy, TensorArray)): g = g.to_matrix() if not g.is_symmetric(): raise ValueError('The metric tensor must be symmetric.') # Handling of a input arguments - vector or vector 
fields X check_the_vector_field(X) if isinstance(X, (TensorArray, Arraypy)): X = X.to_list() # Handling of a input arguments - vector or vector fields Y check_the_vector_field(Y) if isinstance(Y, (TensorArray, Arraypy)): Y = Y.to_list() if not len(X) == len(Y): raise ValueError('The vectors must be identical length') elif len(X) != g.rows: raise ValueError( 'The vector fields and dimension of metric tensor must be identical length') # Handling of a input argument Riemann curvature tensor - R if not isinstance(R, (Matrix, Arraypy, TensorArray)): raise TypeError( 'The type of Riemann curvature tensor must be Matrix, Arraypy or TensorArray') else: if isinstance(R, (Arraypy, TensorArray)): if isinstance(R, TensorArray): if not R.type_pq == (1, 3): raise ValueError( 'The valence of Riemann curvature tensor must be (1, -1,- 1, -1)') if not (R.start_index[0] == R.start_index[1]): raise ValueError( 'The starting indices of Riemann curtivate tensor must be identical') idx_R = R.start_index[0] # The definition of diapason changes in an index [n1, n2] = g.shape if not n == n1: raise ValueError( 'The rank of the metric tensor does not coincide with the number of variables.') [n1, n2, n3, n4] = R.shape if not n == n1: raise ValueError( 'The rank of the Riemann curvature tensor does not concide with the number of variables.') indices = range(len(X)) # Calculation Sc_pr = scal_prod(X, X, g) * scal_prod(Y, Y, g) - scal_prod(X, Y, g)**2 if (Sc_pr == 0): raise ValueError('The two-dimensional area is a degenerate!') numerator = sum([g[r, s] * R[i + idx_R, j + idx_R, k + idx_R, r + idx_R] * X[i] * Y[j] * Y[k] * X[s] for i in indices for j in indices for k in indices for r in indices for s in indices]) k_sigma = simplify(numerator / Sc_pr) # Output return k_sigma def nabla(T, ch_2, var): """Return the covariant derivative the tensor field. 
Examples: ========= >>> from tensor_analysis.riemannian_geometry import nabla >>> from tensor_analysis.arraypy import Arraypy >>> from sympy import symbols, cos, sin >>> x1, x2 = symbols('x1, x2') var it's a list of symbolic arguments. May be a list, one-dimensional arraypy or one-dimensional tensor with valence of indices (+1): >>> var = [x1, x2] T it's a tensor field must be tensor: >>> T = Arraypy([2, 2, 0]).to_tensor((1, -1)) >>> T[0,0] = x2 >>> T[0,1] = -x2 >>> T[1,0] = -x1 >>> T[1,1] = x1 ch_2 it's a Christoffel symbol of second kind must be arraypy or tensor with valence indices (1, -1, -1): >>> ch_2 = Arraypy([3, 2, 0]).to_tensor((1, -1, -1)) >>> ch_2[0,0,0] = 0 >>> ch_2[0,0,1] = sin(x2)*cos(x2) >>> ch_2[0,1,1] = 0 >>> ch_2[1,1,1] = 0 >>> ch_2[1,0,1] = 0 >>> ch_2[1,1,0] = 0 >>> ch_2[1,0,0] = -sin(x2)*cos(x2) >>> ch_2[0,1,0] = -sin(x2)*cos(x2) The covariant derivative of tensor field: >>> nabla_t = nabla(T, ch_2, var) >>> print(nabla_t) -x1*sin(x2)*cos(x2) + x2*sin(x2)*cos(x2) 0 x1*sin(x2)*cos(x2) + x2*sin(x2)*cos(x2) x2*sin(x2)*cos(x2) - 1 -x1*sin(x2)*cos(x2) - x2*sin(x2)*cos(x2) -x1*sin(x2)*cos(x2) - 1 -x1*sin(x2)*cos(x2) + x2*sin(x2)*cos(x2) 0 """ # Handling of a input argument - var check_vector_of_arguments(var) if isinstance(var, (TensorArray, Arraypy)): var = var.to_list() # Handling of a input argument - Christoffel symbol of second kind check_the_christoffel_symbols_2(ch_2) idx_ch = ch_2.start_index[0] # Handling of a input argument - tensor field T if not isinstance(T, TensorArray): raise TypeError( 'The type of tensor field must be TensorArray') idx_start_T = T.start_index[0] if (idx_start_T != idx_ch): raise ValueError( 'The start index of the tensor field and Christoffel symbol \ of second kind must be equal') # The definition of diapason changes in an index # The number of upper indices p = T.type_pq[0] # The dimension of the input array n = T.shape[0] # The rank of the input array rank_T = len(T.shape) # The definition of the start index 
idx_char_T = T.ind_char idx_char_nabla_T = list(idx_char_T) + [-1] # upper_idx_numbers it is a list with the positions on which are the upper # indices upper_idx_numbers = [ k for k in range(len(idx_char_T)) if idx_char_T[k] == 1] # low_idx_numbers it is a list with the positions on which are the lower # indices low_idx_numbers = [ k for k in range(len(idx_char_T)) if idx_char_T[k] == -1] # Creating the output array in accordance with the start index nabla_T = Arraypy([rank_T + 1, n, idx_start_T]).to_tensor(idx_char_nabla_T) index_nabla_T = nabla_T.index_list # Calculation for index in index_nabla_T: index_T = list(index) del index_T[n] index_T = tuple(index_T) s = index[rank_T] dt = diff(T[index_T], var[index[s]]) k = idx_start_T nabla_T_up = 0 nabla_T_lo = 0 while k < n + idx_start_T: for i in upper_idx_numbers: index_T_ik = replace_index_to_k(index_T, i, k) nabla_T_up += T[index_T_ik] * ch_2[index_T[i], s, k] for j in low_idx_numbers: index_T_jk = replace_index_to_k(index_T, j, k) nabla_T_lo += T[index_T_jk] * ch_2[index_T[j], s, k] k = k + 1 nabla_T[index] = dt + nabla_T_up - nabla_T_lo # Output return nabla_T def nabla_x(T, ch_2, X, var): """Return the covariant derivative the tensor field along another vector field. Examples: ========= >>> from tensor_analysis.riemannian_geometry import nabla_x >>> from tensor_analysis.arraypy import Arraypy >>> from sympy import symbols, cos, sin >>> x1, x2 = symbols('x1, x2') var it's a list of symbolic arguments. 
May be a list, one-dimensional arraypy or one-dimensional tensor with valence of indices (+1): >>> var = [x1, x2] T it's a tensor field must be tensor: >>> T = Arraypy([2, 2, 0]).to_tensor((1, -1)) >>> T[0,0] = x2 >>> T[0,1] = -x2 >>> T[1,0] = -x1 >>> T[1,1] = x1 ch_2 it's a Christoffel symbol of second kind must be arraypy or tensor with valence indices (1, -1, -1): >>> ch_2 = Arraypy([3, 2, 0]).to_tensor((1, -1, -1)) >>> ch_2[0,0,0] = 0 >>> ch_2[0,0,1] = sin(x2)*cos(x2) >>> ch_2[0,1,1] = 0 >>> ch_2[1,1,1] = 0 >>> ch_2[1,0,1] = 0 >>> ch_2[1,1,0] = 0 >>> ch_2[1,0,0] = -sin(x2)*cos(x2) >>> ch_2[0,1,0] = -sin(x2)*cos(x2) X it's vector field can be a list, one-dimensional arraypy, or one-dimensional tensor with valences of indices (+1): >>> X = [x1 * x2**3, x1 - cos(x2)] The covariant derivative of tensor field along another vector field: >>> nabla_xt = nabla_x(T, ch_2, X, var) >>> print(nabla_xt) x1*x2**3*(-x1*sin(x2)*cos(x2) + x2*sin(x2)*cos(x2)) x1*x2**3*(x1*sin(x2)*cos(x2) + \ x2*sin(x2)*cos(x2)) + (x1 - cos(x2))*(x2*sin(x2)*cos(x2) - 1) x1*x2**3*(-x1*sin(x2)*cos(x2) - x2*sin(x2)*cos(x2)) + \ (x1 - cos(x2))*(-x1*sin(x2)*cos(x2) - 1) x1*x2**3*(-x1*sin(x2)*cos(x2) + x2*sin(x2)*cos(x2)) """ # Handling of a input argument - var check_vector_of_arguments(var) if isinstance(var, (TensorArray, Arraypy)): var = var.to_list() # Handling of a input argument - Christoffel symbol of second kind check_the_christoffel_symbols_2(ch_2) idx_ch = ch_2.start_index[0] # Handling of a input argument - vector field X check_the_vector_field(X) if isinstance(X, (Arraypy, TensorArray)): idx_X = X.start_index[0] elif isinstance(X, list): idx_X = 0 # Handling of a input argument - tensor field T if not isinstance(T, TensorArray): raise TypeError( 'The type of tensor field must be TensorArray') idx_start_T = T.start_index[0] if (idx_start_T != idx_ch != idx_X): raise ValueError( 'The start index of the tensor field and Christoffel symbol \ of second kind and vector field must be equal') # 
The definition of diapason changes in an index # The number of upper indices p = T.type_pq[0] # The dimension of the input array n = T.shape[0] # The rank of the input array rank_T = len(T.shape) # The definition of the start index idx_char_T = T.ind_char # Creating the output array in accordance with the start index nabla_TX = Arraypy([rank_T, n, idx_start_T]).to_tensor(idx_char_T) index_nabla_TX = nabla_TX.index_list nabla_T = nabla(T, ch_2, var) # Calculation for index in index_nabla_TX: k = idx_start_T while k < n + idx_start_T: idx_nabla_T = tuple(list(index) + [k]) nabla_TX[index] += nabla_T[idx_nabla_T] * X[k] k = k + 1 # Output return nabla_TX def delta(T, g, ch_2, var): """Return the covariant divergence of a tensor field T. Examples: ========= >>> from tensor_analysis.riemannian_geometry import delta >>> from tensor_analysis.arraypy import Arraypy >>> from sympy import symbols, cos, sin >>> x1, x2 = symbols('x1, x2') var it's a list of symbolic arguments. May be a list, one-dimensional arraypy or one-dimensional tensor with valence of indices (+1): >>> var = [x1, x2] T it's a tensor field must be tensor: >>> T = Arraypy([2, 2, 0]).to_tensor((1, -1)) >>> T[0,0] = x2 >>> T[0,1] = -x2 >>> T[1,0] = -x1 >>> T[1,1] = x1 ch_2 it's a Christoffel symbol of second kind must be arraypy or tensor with valence indices (1, -1, -1): >>> ch_2 = Arraypy([3, 2, 0]).to_tensor((1, -1, -1)) >>> ch_2[0,0,0] = 0 >>> ch_2[0,0,1] = sin(x2)*cos(x2) >>> ch_2[0,1,1] = 0 >>> ch_2[1,1,1] = 0 >>> ch_2[1,0,1] = 0 >>> ch_2[1,1,0] = 0 >>> ch_2[1,0,0] = -sin(x2)*cos(x2) >>> ch_2[0,1,0] = -sin(x2)*cos(x2) g it's a metric tensor must be symmetric matrix, arraypy or tensor with valence indices (-1, -1): >>> g = Arraypy((2, 2)).to_tensor((-1, -1)) >>> g[0,0] = cos(x2)**2 >>> g[0,1] = 0 >>> g[1,0] = 0 >>> g[1,1] = 1 The covariant divergence of a tensor field: >>> delta_T = delta(T, g, ch_2, var) >>> print(delta_T) x1*sin(x2)*cos(x2) + 1 0 """ # Handling of a input argument - var 
check_vector_of_arguments(var) if isinstance(var, (TensorArray, Arraypy)): var = var.to_list() # Handling of a input argument - metric tensor g check_metric_tensor(g) if isinstance(g, (Arraypy, TensorArray)): g = g.to_matrix() if not g.is_symmetric(): raise ValueError('The metric tensor must be symmetric.') # Handling of a input argument - Christoffel symbol of second kind check_the_christoffel_symbols_2(ch_2) idx_ch = ch_2.start_index[0] # Handling of a input argument - tensor field T if not isinstance(T, TensorArray): raise TypeError( 'The type of vector field must be TensorArray') idx_start_T = T.start_index[0] # The definition of inverse matrix of the metric tensor g_inv = g.inv() # The definition of diapason changes in an index # The dimension of the input array n = T.shape[0] # The rank of the input array rank_T = len(T.shape) index_T = T.index_list idx_char_delta_T = [(-1) for i in range(rank_T - 1)] nabla_T = nabla(T, ch_2, var) # Creating the output array in accordance with the start index delta_T = Arraypy([rank_T - 1, n, idx_start_T]).to_tensor(idx_char_delta_T) # Calculation for index in index_T: k = idx_start_T while k < n + idx_start_T: for j in range(n): idx_nabla_T = tuple(list(index) + [k]) idx_delta_T = list(index) del idx_delta_T[0] idx_delta_T = tuple(idx_delta_T) delta_T[idx_delta_T] = (-1) * \ nabla_T[idx_nabla_T] * g_inv[k, j] k = k + 1 # Output return delta_T def riemann_li(C, g, var, type_output='t'): """Return the Riemann curvature tensor of type (1, -1, -1, -1) for the given left-invariant metric tensor. Examples: ========= >>> from tensor_analysis.riemannian_geometry import riemann_li >>> from tensor_analysis.arraypy import Arraypy >>> from sympy import symbols, cos, sin >>> x1, x2 = symbols('x1, x2') var it's a list of symbolic arguments. 
May be a list, one-dimensional arraypy or one-dimensional tensor with valence of indices (+1): >>> var = [x1, x2] C it's a structural constant must be tensor with valence indices (1,-1,-1): >>> C = Arraypy([3, 2, 0]).to_tensor((1, -1, -1)) >>> C[0,0,0] = 0 >>> C[0,0,1] = sin(x2)*cos(x2) >>> C[0,1,1] = 0 >>> C[1,1,1] = 0 >>> C[1,0,1] = 0 >>> C[1,1,0] = 0 >>> C[1,0,0] = -sin(x2)*cos(x2) >>> C[0,1,0] = -sin(x2)*cos(x2) g it's a left-invariant metric tensor must be symmetric matrix, arraypy or tensor with valence indices (-1, -1): >>> g = Arraypy((2, 2)).to_tensor((-1, -1)) >>> g[0,0] = cos(x2)**2 >>> g[0,1] = 0 >>> g[1,0] = 0 >>> g[1,1] = 1 type_output it's optional parameter function, indicating the type of calculation result and receiving the character or string value: - symbol 't' means that the type of the result will match TensorArray; - symbol 'a' means that the type of the result will be Arraypy; - default function takes a parameter 't', so that the result will be a TensorArray. The curvature tensor: >>> r_li = riemann_li(C, g, var, 'a') >>> print(r_li) -0.25*sin(x2)**2*cos(x2)**2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 """ # Handling of input vector arguments var check_vector_of_arguments(var) if isinstance(var, (TensorArray, Arraypy)): var = var.to_list() # Definition of number of variables n = len(var) # Handling of a input argument - metric tensor g check_metric_tensor(g) if isinstance(g, (Arraypy, TensorArray)): if not (g.start_index[0] == g.start_index[1]): raise ValueError( 'The starting indices of metric tensor must be identical') idx_g = g.start_index[0] g_inv = (g.to_matrix()).inv() elif isinstance(g, Matrix): if not g.is_symmetric(): raise ValueError('The metric tensor must be symmetric.') idx_g = 0 g_inv = g.inv() # Handling of a input argument - structure constant if not isinstance(C, TensorArray): raise TypeError( 'The type of must be TensorArray') else: if isinstance(C, TensorArray): if not C.type_pq == (1, 2): raise ValueError( 'The valence or ind_char of 
must be (1,-1,-1)') idx_c = C.start_index[0] # The definition of diapason changes in an index [n1, n2] = g.shape if not n == n1: raise ValueError( 'The rank of the metric tensor does not coincide with the number of variables.') if (idx_g != idx_c): raise ValueError( 'The start index of the tensor field and Christoffel symbol \ of second kind must be equal') else: idx_start = idx_g indices = range(idx_start, idx_start + n) gamma = Arraypy([3, n, idx_start]) for p in indices: for i in indices: for j in indices: for s in indices: for k in indices: gamma[p, i, j] = 0.5 * (C[p, i, j] + g[j, s] * C[s, k, i] * g_inv[ k, p] + g[i, s] * C[s, k, j] * g_inv[k, p]) # Creating the output array in accordance with the start index R = Arraypy([4, n, idx_start]) # Calculation for s in indices: for i in indices: for j in indices: for k in indices: for p in indices: R[i, j, k, s] = gamma[s, i, p] * gamma[p, j, k] - gamma[s, j, p] * gamma[p, i, k] - \ gamma[s, p, k] * gamma[p, i, j] # Handling of an output array if type_output == str('t') or type_output == Symbol('t'): riemann = R.to_tensor((1, -1, -1, -1)) elif type_output == str('a') or type_output == Symbol('a'): riemann = R else: raise ValueError( "The parameter of type output result must 'a' - Arraypy or 't' and None - TensorArray.") # Output return riemann def k_sigma_li(R, g, var): """Return Sectional curvature in the direction of coordinate areas. Examples: ========= >>> from tensor_analysis.riemannian_geometry import k_sigma_li, riemann_li >>> from tensor_analysis.arraypy import Arraypy, TensorArray >>> from sympy import symbols, cos, sin >>> x1, x2 = symbols('x1, x2') var it's a list of symbolic arguments. 
May be a list, one-dimensional arraypy or one-dimensional tensor with valence of indices (+1): >>> var = [x1, x2] g it's a metric tensor must be symmetric matrix, arraypy or tensor with valence indices (-1, -1): >>> g = Arraypy((2, 2)).to_tensor((-1, -1)) >>> g[0,0] = cos(x2)**2 >>> g[0,1] = 0 >>> g[1,0] = 0 >>> g[1,1] = 1 C it's a structural constant must be tensor with valence indices (1,-1,-1): >>> C = Arraypy([3, 2, 0]).to_tensor((1, -1, -1)) >>> C[0,0,0] = 0 >>> C[0,0,1] = sin(x2) >>> C[0,1,1] = cos(x2) >>> C[1,1,1] = cos(x2) >>> C[1,0,1] = cos(x2) >>> C[1,1,0] = 0 >>> C[1,0,0] = -sin(x2) >>> C[0,1,0] = -sin(x2) R it's a Riemann curvature tensor must be symmetric matrix, arraypy or tensor with valences indices (1, -1, -1, -1): >>> R = riemann_li(C, g, var, 't') The sectional curvature: >>> k_sig_li = k_sigma_li(R, g, var) >>> print(k_sig_li) Division by zero! """ # Handling of input vector arguments var check_vector_of_arguments(var) if isinstance(var, (TensorArray, Arraypy)): var = var.to_list() # Definition of number of variables n = len(var) # Handling of a input argument - metric tensor g if isinstance(g, (Arraypy, TensorArray)): if not (g.start_index[0] == g.start_index[1]): raise ValueError( 'The starting indices of metric tensor must be identical') idx_start = g.start_index[0] elif isinstance(g, Matrix): if not g.is_symmetric(): raise ValueError('The metric tensor must be symmetric.') idx_start = 0 # Handling of a input argument Riemann curvature tensor - R if not isinstance(R, (Matrix, Arraypy, TensorArray)): raise TypeError( 'The type of Riemann curvature tensor must be Matrix, Arraypy or TensorArray') else: if isinstance(R, (Arraypy, TensorArray)): if isinstance(R, TensorArray): if not R.type_pq == (1, 3): raise ValueError( 'The valence or ind_char of Riemann curvature tensor must be (-1,-1,-1,+1)') if not (R.start_index[0] == R.start_index[1]): raise ValueError( 'The starting indices of Riemann curtivate tensor must be identical') idx_R = 
R.start_index[0] # The definition of diapason changes in an index [n1, n2] = g.shape if not n == n1: raise ValueError( 'The rank of the metric tensor does not coincide with the number of variables.') [n1, n2, n3, n4] = R.shape if not n == n1: raise ValueError( 'The rank of the Riemann curvature tensor does not concide with the number of variables.') indices = range(n) k_sig_li = Arraypy([2, n, idx_start]) # Calculation for i in indices: for j in indices: for k in indices: if (g[i, j] * g[j, j] - g[i, j]**2) == 0: raise ValueError('Division by zero!') else: k_sig_li = sum( (g[k, i] * R[k, i, j, j]) / (g[i, i] * g[j, j] - g[i, j]**2)) # Output return k_sig_li def kulkarni_nomizu(h, k, var, type_output='t'): """Return the product of Kulkarni-Nomizu of type (-1, -1, -1, -1) for the given two symmetric tensor. Examples: ========= >>> from tensor_analysis.riemannian_geometry import kulkarni_nomizu >>> from tensor_analysis.arraypy import Arraypy >>> from sympy import symbols, cos >>> x1, x2 = symbols('x1, x2') var it's a list of symbolic arguments. May be a list, one-dimensional arraypy or one-dimensional tensor with valence of indices (+1): >>> var = [x1, x2] h,k it's a tensor must be symmetric arraypy or tensor with valence indices (-1, -1): >>> h = Arraypy((2, 2)).to_tensor((-1, -1)) >>> h[0,0] = x1 >>> h[0,1] = 0 >>> h[1,0] = 0 >>> h[1,1] = x2 >>> k = Arraypy((2, 2)).to_tensor((-1, -1)) >>> k[0,0] = x2 >>> k[0,1] = 0 >>> k[1,0] = 0 >>> k[1,1] = x1 type_output it's optional parameter function, indicating the type of calculation result and receiving the character or string value: - symbol 't' means that the type of the result will match TensorArray; - symbol 'a' means that the type of the result will be Arraypy; - default function takes a parameter 't', so that the result will be a TensorArray. 
The curvature tensor: >>> k_n = kulkarni_nomizu(h, k, var, 'a') >>> print(k_n) 0 0 0 0 0 x1**2 + x2**2 -x1**2 - x2**2 0 0 -x1**2 - x2**2 x1**2 + x2**2 0 0 0 0 0 """ # Handling of input vector arguments var check_vector_of_arguments(var) if isinstance(var, (TensorArray, Arraypy)): var = var.to_list() # Handling of input symmetric tensor h if not isinstance(h, TensorArray): raise TypeError( 'The type of input tensor must be a TensorArray') if isinstance(h, TensorArray): if not h.type_pq == (0, 2): raise ValueError( 'The valence or ind_char of tensor must be (-1,-1)') if not (h.to_matrix()).is_symmetric(): raise ValueError('The tensor must be symmetric.') # Handling of input symmetric tensor k if not isinstance(k, TensorArray): raise TypeError( 'The type of input tensor must be a TensorArray') if isinstance(k, TensorArray): if not k.type_pq == (0, 2): raise ValueError( 'The valence or ind_char of tensor must be (-1,-1)') if not (k.to_matrix()).is_symmetric(): raise ValueError('The tensor must be symmetric.') if (h.start_index[0] != k.start_index[0]): raise ValueError( 'The start index of the tensors must be equal') else: idx_start = h.start_index[0] # Definition of number of variables n = len(var) kul_nom = Arraypy([4, n, idx_start]) indices = range(idx_start, idx_start + n) # Calculation for i in indices: for j in indices: for t in indices: for l in indices: kul_nom[i, j, t, l] = ( h[i, t] * k[j, l] - h[i, l] * k[j, t]) - (h[j, t] * k[i, l] - h[j, l] * k[i, t]) # Handling of an output array if type_output == str('t') or type_output == Symbol('t'): K = kul_nom.to_tensor((-1, -1, -1, -1)) elif type_output == str('a') or type_output == Symbol('a'): K = kul_nom else: raise ValueError( "The parameter of type output result must 'a' - Arraypy or 't' and None - TensorArray.") # Output return K def second_surf(surf, var, type_output='t'): """Return the second quadratic form. 
Examples: ========= >>> from sympy import symbols >>> from tensor_analysis.riemannian_geometry import second_surf >>> x1, x2 = symbols('x1, x2') var it's a list of symbolic arguments. May be a list, one-dimensional arraypy or one-dimensional tensor with valence of indices (+1): >>> var = [x1, x2] surf it's list of functions, must be consist of one or three functions. type_output it's optional parameter function, indicating the type of calculation result and receiving the character or string value: - symbol 't' means that the type of the result will match TensorArray; - symbol 'a' means that the type of the result will be Arraypy; - default function takes a parameter 't', so that the result will be a TensorArray. The the second quadratic form. >>> surf3 = [x1+x2, 2*x1**2-3*x2, (1+x2)*x1+x2-4] >>> print(second_surf(surf3, var, 't')) (-x1 + x2)/(3*x1) -(4*x1 + 3)/((x1 + 1)*(x2 + 1)) -(4*x1 + 3)/((x1 + 1)*(x2 + 1)) 0 >>> surf1 = [x1 + 4*x2**2] >>> print(second_surf(surf1, var, 't')) 0 0 0 8 """ # The definition symbols i, j, k i = Symbol('i') j = Symbol('j') k = Symbol('k') b = Arraypy((2, 2)) # Calculation if (len(surf) == 1): b[0, 0] = diff(diff(surf[0], var[0]), var[0]) b[0, 1] = b[1, 0] = diff((diff(surf[0], var[0])), var[1]) b[1, 1] = diff((diff(surf[0], var[1])), var[1]) elif (len(surf) == 3): # The first partial derivatives r_u = diff(surf[0], var[0]) * i + diff(surf[1], var[0]) * j +\ diff(surf[2], var[0]) * k r_v = diff(surf[0], var[1]) * i + diff(surf[1], var[1]) * j +\ diff(surf[2], var[1]) * k # The vector product vect_prod = (r_u.coeff(j) * r_v.coeff(k) - r_v.coeff(j) * r_u.coeff(k)) * i - \ (r_u.coeff(k) * r_v.coeff(i) - r_v.coeff(k) * r_u.coeff(i)) * j + \ (r_u.coeff(i) * r_v.coeff(j) - r_v.coeff(i) * r_u.coeff(j)) * k # The length of vector product len_r_uv = r_u.coeff(i) * r_v.coeff(i) * i + r_u.coeff(j) * r_v.coeff(j) * j + \ r_u.coeff(k) * r_v.coeff(k) * k if (len_r_uv == 0): raise ValueError('The two-dimensional area is a degenerate!') # The 
components of the normal vector n = (simplify(vect_prod.coeff(i) / len_r_uv.coeff(i)) * i + simplify(vect_prod.coeff(j) / len_r_uv.coeff(j)) * j + simplify(vect_prod.coeff(k) / len_r_uv.coeff(k)) * k) # The second partial derivatives r_uu = diff(r_u.coeff(i), var[0]) * i + diff(r_u.coeff(j), var[0]) * j + \ diff(r_u.coeff(k), var[0]) * k r_uv = diff(r_u.coeff(i), var[1]) * i + diff(r_u.coeff(j), var[1]) * j + \ diff(r_u.coeff(k), var[1]) * k r_vv = diff(r_v.coeff(i), var[1]) * i + diff(r_v.coeff(j), var[1]) * j + \ diff(r_v.coeff(k), var[1]) * k b[0, 0] = r_uu.coeff(i) * n.coeff(i) + r_uu.coeff(j) * n.coeff(j) + \ r_uu.coeff(k) * n.coeff(k) b[0, 1] = b[1, 0] = r_uv.coeff(i) * n.coeff(i) + r_uv.coeff(j) * n.coeff(j) + \ r_uv.coeff(k) * n.coeff(k) b[1, 1] = r_vv.coeff(i) * n.coeff(i) + r_vv.coeff(j) * n.coeff(j) + \ r_vv.coeff(k) * n.coeff(k) else: raise ValueError( "The argument surf must be consist one function or three functions") # Handling of an output array if type_output == str('t') or type_output == Symbol('t'): b = b.to_tensor((-1, -1)) elif type_output == str('a') or type_output == Symbol('a'): b = b elif type_output == str('m') or type_output == Symbol('m'): b = b.to_matrix() else: raise ValueError( "The parameter of type output result must 'a' - Arraypy or 'm' - Matrix\ 't' and None - TensorArray.") # Output return b def k_surf(surf, var): """Return the Gaussian curvature. Examples: ========= >>> from sympy import symbols >>> from tensor_analysis.riemannian_geometry import k_surf >>> x1, x2 = symbols('x1, x2') var it's a list of symbolic arguments. May be a list, one-dimensional arraypy or one-dimensional tensor with valence of indices (+1): >>> var = [x1, x2] surf it's list of functions, must be consist of one or three functions. 
The Gaussian curvature: >>> surf3 = [x1+x2, 2*x1**2-3*x2, (1+x2)*x1+x2-4] >>> print(k_surf(surf3, var)) -(4*x1 + 3)**2/((x1 + 1)**2*(x2 + 1)**2*(((x1 + 1)**2 + 10)* \ (16*x1**2 + (x2 + 1)**2 + 1) - (-12*x1 + (x1 + 1)*(x2 + 1) + 1)**2)) >>> surf1 = [x1 + 4*x2**2] >>> print(k_surf(surf1, var)) 0 """ # Calculation if (len(surf) == 1): K = diff(diff(surf[0], var[0]), var[0]) * diff(diff(surf[0], var[1]), var[1]) -\ (diff(diff(surf[0], var[0]), var[1]))**2 / \ (1 + diff(surf[0], var[0])**2 + diff(surf[0], var[1])**2)**2 elif (len(surf) == 3): g = Arraypy((2, 2)) g[0, 0] = diff(surf[0], var[0])**2 + \ diff(surf[1], var[0])**2 + diff(surf[2], var[0])**2 g[0, 1] = g[1, 0] = diff(surf[0], var[0]) * diff(surf[0], var[1]) + diff( surf[1], var[0]) * diff(surf[1], var[1]) + diff(surf[2], var[0]) * diff(surf[2], var[1]) g[1, 1] = diff(surf[0], var[1])**2 + \ diff(surf[1], var[1])**2 + diff(surf[2], var[1])**2 b = second_surf(surf3, var, 't') K = simplify( (b[0, 0] * b[1, 1] - b[0, 1]**2) / (g[0, 0] * g[1, 1] - g[0, 1]**2)) else: raise ValueError( "The argument surf must be consist one function or three functions") # Output return K
codeparrot/github-code-clean
# Auto-generated pybindgen binding script for the ns-3 'dsdv' module.
# Builds Python wrappers for the ns-3 DSDV routing classes and the
# core/network/internet types they depend on.
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers


import pybindgen.settings
import warnings


class ErrorHandler(pybindgen.settings.ErrorHandler):
    # Wrapper-generation error handler: downgrades per-wrapper failures to
    # warnings so a single broken wrapper does not abort code generation.
    def handle_error(self, wrapper, exception, traceback_):
        warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
        # True tells pybindgen the error was handled; it skips the wrapper.
        return True
pybindgen.settings.error_handler = ErrorHandler()


import sys

def module_init():
    # Create the root pybindgen Module for the 'ns.dsdv' extension,
    # mapping to the C++ namespace ::ns3.
    root_module = Module('ns.dsdv', cpp_namespace='::ns3')
    return root_module

def register_types(module):
    # Register every C++ class/enum used by the dsdv bindings.
    # NOTE: generated code -- one add_class/add_enum call per C++ type,
    # each preceded by a comment naming the originating header.
    root_module = module.get_root()

    ## address.h (module 'network'): ns3::Address [class]
    module.add_class('Address', import_from_module='ns.network')
    ## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration]
    module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
    module.add_class('AttributeConstructionList', import_from_module='ns.core')
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
    module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
    ## buffer.h (module 'network'): ns3::Buffer [class]
    module.add_class('Buffer', import_from_module='ns.network')
    ## buffer.h (module 'network'): ns3::Buffer::Iterator [class]
    module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
    ## packet.h (module 'network'): ns3::ByteTagIterator [class]
    module.add_class('ByteTagIterator', import_from_module='ns.network')
    ## packet.h (module 'network'): ns3::ByteTagIterator::Item [class]
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList [class]
    module.add_class('ByteTagList', import_from_module='ns.network')
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList']) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator']) ## callback.h (module 'core'): ns3::CallbackBase [class] module.add_class('CallbackBase', import_from_module='ns.core') ## event-id.h (module 'core'): ns3::EventId [class] module.add_class('EventId', import_from_module='ns.core') ## int-to-type.h (module 'core'): ns3::IntToType<0> [struct] module.add_class('IntToType', import_from_module='ns.core', template_parameters=['0']) ## int-to-type.h (module 'core'): ns3::IntToType<0>::v_e [enumeration] module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 0 >'], import_from_module='ns.core') ## int-to-type.h (module 'core'): ns3::IntToType<1> [struct] module.add_class('IntToType', import_from_module='ns.core', template_parameters=['1']) ## int-to-type.h (module 'core'): ns3::IntToType<1>::v_e [enumeration] module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 1 >'], import_from_module='ns.core') ## int-to-type.h (module 'core'): ns3::IntToType<2> [struct] module.add_class('IntToType', import_from_module='ns.core', template_parameters=['2']) ## int-to-type.h (module 'core'): ns3::IntToType<2>::v_e [enumeration] module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 2 >'], import_from_module='ns.core') ## int-to-type.h (module 'core'): ns3::IntToType<3> [struct] module.add_class('IntToType', import_from_module='ns.core', template_parameters=['3']) ## int-to-type.h (module 'core'): ns3::IntToType<3>::v_e [enumeration] module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 3 >'], import_from_module='ns.core') ## int-to-type.h (module 'core'): ns3::IntToType<4> [struct] module.add_class('IntToType', import_from_module='ns.core', 
template_parameters=['4']) ## int-to-type.h (module 'core'): ns3::IntToType<4>::v_e [enumeration] module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 4 >'], import_from_module='ns.core') ## int-to-type.h (module 'core'): ns3::IntToType<5> [struct] module.add_class('IntToType', import_from_module='ns.core', template_parameters=['5']) ## int-to-type.h (module 'core'): ns3::IntToType<5>::v_e [enumeration] module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 5 >'], import_from_module='ns.core') ## int-to-type.h (module 'core'): ns3::IntToType<6> [struct] module.add_class('IntToType', import_from_module='ns.core', template_parameters=['6']) ## int-to-type.h (module 'core'): ns3::IntToType<6>::v_e [enumeration] module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 6 >'], import_from_module='ns.core') ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class] module.add_class('Ipv4Address', import_from_module='ns.network') ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class] root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address']) ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress [class] module.add_class('Ipv4InterfaceAddress', import_from_module='ns.internet') ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e [enumeration] module.add_enum('InterfaceAddressScope_e', ['HOST', 'LINK', 'GLOBAL'], outer_class=root_module['ns3::Ipv4InterfaceAddress'], import_from_module='ns.internet') ## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class] module.add_class('Ipv4Mask', import_from_module='ns.network') ## ipv4-routing-helper.h (module 'internet'): ns3::Ipv4RoutingHelper [class] module.add_class('Ipv4RoutingHelper', allow_subclassing=True, import_from_module='ns.internet') ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class] module.add_class('Ipv6Address', 
import_from_module='ns.network') ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class] root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address']) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class] module.add_class('Ipv6Prefix', import_from_module='ns.network') ## node-container.h (module 'network'): ns3::NodeContainer [class] module.add_class('NodeContainer', import_from_module='ns.network') ## object-base.h (module 'core'): ns3::ObjectBase [class] module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core') ## object.h (module 'core'): ns3::ObjectDeleter [struct] module.add_class('ObjectDeleter', import_from_module='ns.core') ## object-factory.h (module 'core'): ns3::ObjectFactory [class] module.add_class('ObjectFactory', import_from_module='ns.core') ## packet-metadata.h (module 'network'): ns3::PacketMetadata [class] module.add_class('PacketMetadata', import_from_module='ns.network') ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata']) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration] module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network') ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class] module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata']) ## packet.h (module 'network'): ns3::PacketTagIterator [class] module.add_class('PacketTagIterator', import_from_module='ns.network') ## packet.h (module 'network'): ns3::PacketTagIterator::Item [class] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator']) ## packet-tag-list.h (module 'network'): ns3::PacketTagList [class] module.add_class('PacketTagList', 
import_from_module='ns.network') ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct] module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList']) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simulator.h (module 'core'): ns3::Simulator [class] module.add_class('Simulator', destructor_visibility='private', import_from_module='ns.core') ## tag.h (module 'network'): ns3::Tag [class] module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase']) ## tag-buffer.h (module 'network'): ns3::TagBuffer [class] module.add_class('TagBuffer', import_from_module='ns.network') ## timer.h (module 'core'): ns3::Timer [class] module.add_class('Timer', import_from_module='ns.core') ## timer.h (module 'core'): ns3::Timer::DestroyPolicy [enumeration] module.add_enum('DestroyPolicy', ['CANCEL_ON_DESTROY', 'REMOVE_ON_DESTROY', 'CHECK_ON_DESTROY'], outer_class=root_module['ns3::Timer'], import_from_module='ns.core') ## timer.h (module 'core'): ns3::Timer::State [enumeration] module.add_enum('State', ['RUNNING', 'EXPIRED', 'SUSPENDED'], outer_class=root_module['ns3::Timer'], import_from_module='ns.core') ## timer-impl.h (module 'core'): ns3::TimerImpl [class] module.add_class('TimerImpl', allow_subclassing=True, import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId [class] module.add_class('TypeId', import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration] module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 
'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct] module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct] module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## empty.h (module 'core'): ns3::empty [class] module.add_class('empty', import_from_module='ns.core') ## int64x64-double.h (module 'core'): ns3::int64x64_t [class] module.add_class('int64x64_t', import_from_module='ns.core') ## chunk.h (module 'network'): ns3::Chunk [class] module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase']) ## dsdv-helper.h (module 'dsdv'): ns3::DsdvHelper [class] module.add_class('DsdvHelper', parent=root_module['ns3::Ipv4RoutingHelper']) ## header.h (module 'network'): ns3::Header [class] module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk']) ## ipv4-header.h (module 'internet'): ns3::Ipv4Header [class] module.add_class('Ipv4Header', import_from_module='ns.internet', parent=root_module['ns3::Header']) ## ipv4-header.h (module 'internet'): ns3::Ipv4Header::DscpType [enumeration] module.add_enum('DscpType', ['DscpDefault', 'CS1', 'AF11', 'AF12', 'AF13', 'CS2', 'AF21', 'AF22', 'AF23', 'CS3', 'AF31', 'AF32', 'AF33', 'CS4', 'AF41', 'AF42', 'AF43', 'CS5', 'EF', 'CS6', 'CS7'], outer_class=root_module['ns3::Ipv4Header'], import_from_module='ns.internet') ## ipv4-header.h (module 'internet'): ns3::Ipv4Header::EcnType [enumeration] module.add_enum('EcnType', ['NotECT', 'ECT1', 'ECT0', 'CE'], outer_class=root_module['ns3::Ipv4Header'], import_from_module='ns.internet') ## object.h (module 'core'): ns3::Object [class] module.add_class('Object', import_from_module='ns.core', 
parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >']) ## object.h (module 'core'): ns3::Object::AggregateIterator [class] module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object']) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class] 
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Ipv4MulticastRoute', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Ipv4MulticastRoute>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Ipv4Route', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Ipv4Route>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, 
ns3::DefaultDeleter<ns3::NixVector> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::OutputStreamWrapper', 'ns3::empty', 'ns3::DefaultDeleter<ns3::OutputStreamWrapper>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## socket.h (module 'network'): ns3::Socket [class] 
module.add_class('Socket', import_from_module='ns.network', parent=root_module['ns3::Object']) ## socket.h (module 'network'): ns3::Socket::SocketErrno [enumeration] module.add_enum('SocketErrno', ['ERROR_NOTERROR', 'ERROR_ISCONN', 'ERROR_NOTCONN', 'ERROR_MSGSIZE', 'ERROR_AGAIN', 'ERROR_SHUTDOWN', 'ERROR_OPNOTSUPP', 'ERROR_AFNOSUPPORT', 'ERROR_INVAL', 'ERROR_BADF', 'ERROR_NOROUTETOHOST', 'ERROR_NODEV', 'ERROR_ADDRNOTAVAIL', 'ERROR_ADDRINUSE', 'SOCKET_ERRNO_LAST'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network') ## socket.h (module 'network'): ns3::Socket::SocketType [enumeration] module.add_enum('SocketType', ['NS3_SOCK_STREAM', 'NS3_SOCK_SEQPACKET', 'NS3_SOCK_DGRAM', 'NS3_SOCK_RAW'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network') ## socket.h (module 'network'): ns3::SocketAddressTag [class] module.add_class('SocketAddressTag', import_from_module='ns.network', parent=root_module['ns3::Tag']) ## socket.h (module 'network'): ns3::SocketIpTtlTag [class] module.add_class('SocketIpTtlTag', import_from_module='ns.network', parent=root_module['ns3::Tag']) ## socket.h (module 'network'): ns3::SocketSetDontFragmentTag [class] module.add_class('SocketSetDontFragmentTag', import_from_module='ns.network', parent=root_module['ns3::Tag']) ## nstime.h (module 'core'): ns3::Time [class] module.add_class('Time', import_from_module='ns.core') ## nstime.h (module 'core'): ns3::Time::Unit [enumeration] module.add_enum('Unit', ['S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core') ## nstime.h (module 'core'): ns3::Time [class] root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t']) ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class] module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >']) 
## trailer.h (module 'network'): ns3::Trailer [class] module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk']) ## attribute.h (module 'core'): ns3::AttributeAccessor [class] module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >']) ## attribute.h (module 'core'): ns3::AttributeChecker [class] module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >']) ## attribute.h (module 'core'): ns3::AttributeValue [class] module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >']) ## callback.h (module 'core'): ns3::CallbackChecker [class] module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## callback.h (module 'core'): ns3::CallbackImplBase [class] module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >']) ## callback.h (module 'core'): ns3::CallbackValue [class] module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## attribute.h (module 'core'): ns3::EmptyAttributeValue [class] module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## event-impl.h (module 'core'): ns3::EventImpl [class] module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, 
ns3::DefaultDeleter<ns3::EventImpl> >']) ## ipv4.h (module 'internet'): ns3::Ipv4 [class] module.add_class('Ipv4', import_from_module='ns.internet', parent=root_module['ns3::Object']) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class] module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class] module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv4-interface.h (module 'internet'): ns3::Ipv4Interface [class] module.add_class('Ipv4Interface', import_from_module='ns.internet', parent=root_module['ns3::Object']) ## ipv4-l3-protocol.h (module 'internet'): ns3::Ipv4L3Protocol [class] module.add_class('Ipv4L3Protocol', import_from_module='ns.internet', parent=root_module['ns3::Ipv4']) ## ipv4-l3-protocol.h (module 'internet'): ns3::Ipv4L3Protocol::DropReason [enumeration] module.add_enum('DropReason', ['DROP_TTL_EXPIRED', 'DROP_NO_ROUTE', 'DROP_BAD_CHECKSUM', 'DROP_INTERFACE_DOWN', 'DROP_ROUTE_ERROR', 'DROP_FRAGMENT_TIMEOUT'], outer_class=root_module['ns3::Ipv4L3Protocol'], import_from_module='ns.internet') ## ipv4-l4-protocol.h (module 'internet'): ns3::Ipv4L4Protocol [class] module.add_class('Ipv4L4Protocol', import_from_module='ns.internet', parent=root_module['ns3::Object']) ## ipv4-l4-protocol.h (module 'internet'): ns3::Ipv4L4Protocol::RxStatus [enumeration] module.add_enum('RxStatus', ['RX_OK', 'RX_CSUM_FAILED', 'RX_ENDPOINT_CLOSED', 'RX_ENDPOINT_UNREACH'], outer_class=root_module['ns3::Ipv4L4Protocol'], import_from_module='ns.internet') ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class] module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class] module.add_class('Ipv4MaskValue', import_from_module='ns.network', 
parent=root_module['ns3::AttributeValue']) ## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute [class] module.add_class('Ipv4MulticastRoute', import_from_module='ns.internet', parent=root_module['ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >']) ## ipv4-route.h (module 'internet'): ns3::Ipv4Route [class] module.add_class('Ipv4Route', import_from_module='ns.internet', parent=root_module['ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >']) ## ipv4-routing-protocol.h (module 'internet'): ns3::Ipv4RoutingProtocol [class] module.add_class('Ipv4RoutingProtocol', import_from_module='ns.internet', parent=root_module['ns3::Object']) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class] module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class] module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class] module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class] module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## net-device.h (module 'network'): ns3::NetDevice [class] module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object']) ## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration] module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network') ## nix-vector.h 
(module 'network'): ns3::NixVector [class] module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >']) ## node.h (module 'network'): ns3::Node [class] module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object']) ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class] module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class] module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper [class] module.add_class('OutputStreamWrapper', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >']) ## packet.h (module 'network'): ns3::Packet [class] module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >']) ## nstime.h (module 'core'): ns3::TimeChecker [class] module.add_class('TimeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## nstime.h (module 'core'): ns3::TimeValue [class] module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## type-id.h (module 'core'): ns3::TypeIdChecker [class] module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## type-id.h (module 'core'): ns3::TypeIdValue [class] module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## address.h (module 'network'): ns3::AddressChecker [class] module.add_class('AddressChecker', import_from_module='ns.network', 
parent=root_module['ns3::AttributeChecker'])
    ## address.h (module 'network'): ns3::AddressValue [class]
    module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    module.add_container('std::map< unsigned int, unsigned int >', ('unsigned int', 'unsigned int'), container_type='map')
    ## Register a nested module for the namespace FatalImpl
    nested_module = module.add_cpp_namespace('FatalImpl')
    register_types_ns3_FatalImpl(nested_module)
    ## Register a nested module for the namespace dsdv
    nested_module = module.add_cpp_namespace('dsdv')
    register_types_ns3_dsdv(nested_module)

# NOTE(review): this file appears to be PyBindGen-generated ns-3 binding code —
# presumably regenerated by the build; confirm before hand-editing anything below.
def register_types_ns3_FatalImpl(module):
    """Register types for the nested ns3::FatalImpl namespace (none exported here)."""
    root_module = module.get_root()

def register_types_ns3_dsdv(module):
    """Register the ns3::dsdv (DSDV routing) enums, classes and containers."""
    root_module = module.get_root()
    ## dsdv-rtable.h (module 'dsdv'): ns3::dsdv::RouteFlags [enumeration]
    module.add_enum('RouteFlags', ['VALID', 'INVALID'])
    ## dsdv-packet.h (module 'dsdv'): ns3::dsdv::DsdvHeader [class]
    module.add_class('DsdvHeader', parent=root_module['ns3::Header'])
    ## dsdv-packet-queue.h (module 'dsdv'): ns3::dsdv::PacketQueue [class]
    module.add_class('PacketQueue')
    ## dsdv-packet-queue.h (module 'dsdv'): ns3::dsdv::QueueEntry [class]
    module.add_class('QueueEntry')
    ## dsdv-routing-protocol.h (module 'dsdv'): ns3::dsdv::RoutingProtocol [class]
    module.add_class('RoutingProtocol', parent=root_module['ns3::Ipv4RoutingProtocol'])
    ## dsdv-rtable.h (module 'dsdv'): ns3::dsdv::RoutingTable [class]
    module.add_class('RoutingTable')
    ## dsdv-rtable.h (module 'dsdv'): ns3::dsdv::RoutingTableEntry [class]
    module.add_class('RoutingTableEntry')
    module.add_container('std::map< ns3::Ipv4Address, ns3::dsdv::RoutingTableEntry >', ('ns3::Ipv4Address', 'ns3::dsdv::RoutingTableEntry'), container_type='map')

def register_methods(root_module):
    """Invoke the per-class method/constructor/operator registration helpers below."""
    register_Ns3Address_methods(root_module, root_module['ns3::Address'])
    register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
    register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
    register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
    register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
    register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
    register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
    register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
    register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
    register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
    register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
    register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
    register_Ns3IntToType__0_methods(root_module, root_module['ns3::IntToType< 0 >'])
    register_Ns3IntToType__1_methods(root_module, root_module['ns3::IntToType< 1 >'])
    register_Ns3IntToType__2_methods(root_module, root_module['ns3::IntToType< 2 >'])
    register_Ns3IntToType__3_methods(root_module, root_module['ns3::IntToType< 3 >'])
    register_Ns3IntToType__4_methods(root_module, root_module['ns3::IntToType< 4 >'])
    register_Ns3IntToType__5_methods(root_module, root_module['ns3::IntToType< 5 >'])
    register_Ns3IntToType__6_methods(root_module, root_module['ns3::IntToType< 6 >'])
    register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
    register_Ns3Ipv4InterfaceAddress_methods(root_module, root_module['ns3::Ipv4InterfaceAddress'])
    register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
    register_Ns3Ipv4RoutingHelper_methods(root_module, root_module['ns3::Ipv4RoutingHelper'])
    register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
    register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
    register_Ns3NodeContainer_methods(root_module,
root_module['ns3::NodeContainer'])
    # Per-class method registration continues (value/helper types first).
    register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
    register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
    register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
    register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
    register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
    register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
    register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
    register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
    register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
    register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
    register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
    register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
    register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
    register_Ns3Timer_methods(root_module, root_module['ns3::Timer'])
    register_Ns3TimerImpl_methods(root_module, root_module['ns3::TimerImpl'])
    register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
    register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
    register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
    register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
    register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
    register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
    register_Ns3DsdvHelper_methods(root_module, root_module['ns3::DsdvHelper'])
    register_Ns3Header_methods(root_module, root_module['ns3::Header'])
    register_Ns3Ipv4Header_methods(root_module, root_module['ns3::Ipv4Header'])
    register_Ns3Object_methods(root_module, root_module['ns3::Object'])
    register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
    # SimpleRefCount template instantiations (reference-counted base wrappers).
    register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    register_Ns3SimpleRefCount__Ns3Ipv4MulticastRoute_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4MulticastRoute__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >'])
    register_Ns3SimpleRefCount__Ns3Ipv4Route_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4Route__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >'])
    register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
    register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
    register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
    register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    register_Ns3Socket_methods(root_module, root_module['ns3::Socket'])
    register_Ns3SocketAddressTag_methods(root_module, root_module['ns3::SocketAddressTag'])
    register_Ns3SocketIpTtlTag_methods(root_module, root_module['ns3::SocketIpTtlTag'])
    register_Ns3SocketSetDontFragmentTag_methods(root_module, root_module['ns3::SocketSetDontFragmentTag'])
    register_Ns3Time_methods(root_module, root_module['ns3::Time'])
    register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
    register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
    register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
    register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
    register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
    register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
    register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
    register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
    register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
    register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
    register_Ns3Ipv4_methods(root_module, root_module['ns3::Ipv4'])
    register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
    register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
    register_Ns3Ipv4Interface_methods(root_module, root_module['ns3::Ipv4Interface'])
    register_Ns3Ipv4L3Protocol_methods(root_module, root_module['ns3::Ipv4L3Protocol'])
    register_Ns3Ipv4L4Protocol_methods(root_module, root_module['ns3::Ipv4L4Protocol'])
    register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
    register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
    register_Ns3Ipv4MulticastRoute_methods(root_module, root_module['ns3::Ipv4MulticastRoute'])
    register_Ns3Ipv4Route_methods(root_module, root_module['ns3::Ipv4Route'])
    register_Ns3Ipv4RoutingProtocol_methods(root_module, root_module['ns3::Ipv4RoutingProtocol'])
    register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
    register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
    register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
    register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
    register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
    register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
    register_Ns3Node_methods(root_module, root_module['ns3::Node'])
    register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
    register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
    register_Ns3OutputStreamWrapper_methods(root_module,
root_module['ns3::OutputStreamWrapper'])
    register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
    register_Ns3TimeChecker_methods(root_module, root_module['ns3::TimeChecker'])
    register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
    register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
    register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
    register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
    register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
    # DSDV-specific classes registered by register_types_ns3_dsdv.
    register_Ns3DsdvDsdvHeader_methods(root_module, root_module['ns3::dsdv::DsdvHeader'])
    register_Ns3DsdvPacketQueue_methods(root_module, root_module['ns3::dsdv::PacketQueue'])
    register_Ns3DsdvQueueEntry_methods(root_module, root_module['ns3::dsdv::QueueEntry'])
    register_Ns3DsdvRoutingProtocol_methods(root_module, root_module['ns3::dsdv::RoutingProtocol'])
    register_Ns3DsdvRoutingTable_methods(root_module, root_module['ns3::dsdv::RoutingTable'])
    register_Ns3DsdvRoutingTableEntry_methods(root_module, root_module['ns3::dsdv::RoutingTableEntry'])
    return

def register_Ns3Address_methods(root_module, cls):
    """Bind ns3::Address: comparison/stream operators, constructors and members."""
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## address.h (module 'network'): ns3::Address::Address() [constructor]
    cls.add_constructor([])
    ## address.h (module 'network'): ns3::Address::Address(uint8_t type, uint8_t const * buffer, uint8_t len) [constructor]
    cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): ns3::Address::Address(ns3::Address const & address) [copy constructor]
    cls.add_constructor([param('ns3::Address const &', 'address')])
    ## address.h (module 'network'): bool ns3::Address::CheckCompatible(uint8_t type, uint8_t len) const [member function]
    cls.add_method('CheckCompatible', 'bool',
[param('uint8_t', 'type'), param('uint8_t', 'len')], is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::CopyAllFrom(uint8_t const * buffer, uint8_t len) [member function]
    cls.add_method('CopyAllFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): uint32_t ns3::Address::CopyAllTo(uint8_t * buffer, uint8_t len) const [member function]
    cls.add_method('CopyAllTo', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint8_t', 'len')], is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::CopyFrom(uint8_t const * buffer, uint8_t len) [member function]
    cls.add_method('CopyFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): uint32_t ns3::Address::CopyTo(uint8_t * buffer) const [member function]
    cls.add_method('CopyTo', 'uint32_t', [param('uint8_t *', 'buffer')], is_const=True)
    ## address.h (module 'network'): void ns3::Address::Deserialize(ns3::TagBuffer buffer) [member function]
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'buffer')])
    ## address.h (module 'network'): uint8_t ns3::Address::GetLength() const [member function]
    cls.add_method('GetLength', 'uint8_t', [], is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## address.h (module 'network'): bool ns3::Address::IsInvalid() const [member function]
    cls.add_method('IsInvalid', 'bool', [], is_const=True)
    ## address.h (module 'network'): bool ns3::Address::IsMatchingType(uint8_t type) const [member function]
    cls.add_method('IsMatchingType', 'bool', [param('uint8_t', 'type')], is_const=True)
    ## address.h (module 'network'): static uint8_t ns3::Address::Register() [member function]
    cls.add_method('Register', 'uint8_t', [], is_static=True)
    ## address.h (module 'network'): void ns3::Address::Serialize(ns3::TagBuffer buffer) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'buffer')], is_const=True)
    return

def register_Ns3AttributeConstructionList_methods(root_module, cls):
    """Bind ns3::AttributeConstructionList: constructors, Add/Begin/End/Find."""
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
    cls.add_method('Add', 'void', [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
    ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function]
    cls.add_method('Begin', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True)
    ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function]
    cls.add_method('End', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True)
    ## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('Find', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True)
    return

def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
    """Bind ns3::AttributeConstructionList::Item: constructors and public fields."""
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
    cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
    return

def register_Ns3Buffer_methods(root_module, cls):
    """Bind ns3::Buffer: constructors and byte-buffer manipulation members."""
    ## buffer.h (module 'network'): ns3::Buffer::Buffer() [constructor]
    cls.add_constructor([])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize) [constructor]
    cls.add_constructor([param('uint32_t', 'dataSize')])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor]
    cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(ns3::Buffer const & o) [copy constructor]
    cls.add_constructor([param('ns3::Buffer const &', 'o')])
    ## buffer.h (module 'network'): bool ns3::Buffer::AddAtEnd(uint32_t end) [member function]
    cls.add_method('AddAtEnd', 'bool', [param('uint32_t', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function]
    cls.add_method('AddAtEnd', 'void', [param('ns3::Buffer const &', 'o')])
    ## buffer.h (module 'network'): bool ns3::Buffer::AddAtStart(uint32_t start) [member function]
    cls.add_method('AddAtStart', 'bool',
[param('uint32_t', 'start')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function]
    cls.add_method('Begin', 'ns3::Buffer::Iterator', [], is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function]
    cls.add_method('CopyData', 'void', [param('std::ostream *', 'os'), param('uint32_t', 'size')], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function]
    cls.add_method('CopyData', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')], is_const=True)
    ## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function]
    cls.add_method('CreateFragment', 'ns3::Buffer', [param('uint32_t', 'start'), param('uint32_t', 'length')], is_const=True)
    ## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFullCopy() const [member function]
    cls.add_method('CreateFullCopy', 'ns3::Buffer', [], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::End() const [member function]
    cls.add_method('End', 'ns3::Buffer::Iterator', [], is_const=True)
    ## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentEndOffset() const [member function]
    cls.add_method('GetCurrentEndOffset', 'int32_t', [], is_const=True)
    ## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentStartOffset() const [member function]
    cls.add_method('GetCurrentStartOffset', 'int32_t', [], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSize() const [member function]
    cls.add_method('GetSize', 'uint32_t', [], is_const=True)
    ## buffer.h (module 'network'): uint8_t const * ns3::Buffer::PeekData() const [member function]
    cls.add_method('PeekData', 'uint8_t const *', [], is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function]
    cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtStart(uint32_t start) [member function]
    cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'start')])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True)
    return

def register_Ns3BufferIterator_methods(root_module, cls):
    """Bind ns3::Buffer::Iterator: cursor movement plus typed read/write members."""
    ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator(ns3::Buffer::Iterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator() [constructor]
    cls.add_constructor([])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size) [member function]
    cls.add_method('CalculateIpChecksum', 'uint16_t', [param('uint16_t', 'size')])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size, uint32_t initialChecksum) [member function]
    cls.add_method('CalculateIpChecksum', 'uint16_t', [param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetDistanceFrom(ns3::Buffer::Iterator const & o) const [member function]
    cls.add_method('GetDistanceFrom', 'uint32_t', [param('ns3::Buffer::Iterator const &', 'o')], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetSize() const [member function]
    cls.add_method('GetSize', 'uint32_t', [], is_const=True)
    ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsEnd() const [member function]
    cls.add_method('IsEnd', 'bool', [], is_const=True)
    ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsStart() const [member function]
    cls.add_method('IsStart', 'bool', [], is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next() [member function]
    cls.add_method('Next', 'void', [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next(uint32_t delta) [member function]
    cls.add_method('Next', 'void', [param('uint32_t', 'delta')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev() [member function]
    cls.add_method('Prev', 'void', [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev(uint32_t delta) [member function]
    cls.add_method('Prev', 'void', [param('uint32_t', 'delta')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(uint8_t * buffer, uint32_t size) [member function]
    cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadLsbtohU16() [member function]
    cls.add_method('ReadLsbtohU16', 'uint16_t', [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadLsbtohU32() [member function]
    cls.add_method('ReadLsbtohU32', 'uint32_t', [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadLsbtohU64() [member function]
    cls.add_method('ReadLsbtohU64', 'uint64_t', [])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadNtohU16() [member function]
    cls.add_method('ReadNtohU16', 'uint16_t', [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadNtohU32() [member function]
    cls.add_method('ReadNtohU32', 'uint32_t', [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadNtohU64() [member function]
    cls.add_method('ReadNtohU64', 'uint64_t', [])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadU16() [member function]
    cls.add_method('ReadU16', 'uint16_t', [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadU32() [member function]
    cls.add_method('ReadU32', 'uint32_t', [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadU64() [member function]
    cls.add_method('ReadU64', 'uint64_t', [])
    ## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::ReadU8() [member function]
    cls.add_method('ReadU8', 'uint8_t', [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
    cls.add_method('Write', 'void', [param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU16(uint16_t data) [member function]
    cls.add_method('WriteHtolsbU16', 'void', [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU32(uint32_t data) [member function]
    cls.add_method('WriteHtolsbU32', 'void', [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU64(uint64_t data) [member function]
    cls.add_method('WriteHtolsbU64', 'void', [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU16(uint16_t data) [member function]
    cls.add_method('WriteHtonU16', 'void', [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU32(uint32_t data) [member function]
    cls.add_method('WriteHtonU32', 'void', [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU64(uint64_t data) [member function]
    cls.add_method('WriteHtonU64', 'void', [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU16(uint16_t data) [member function]
    cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU32(uint32_t data) [member function]
    cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU64(uint64_t data) [member function]
    cls.add_method('WriteU64', 'void', [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data) [member function]
    cls.add_method('WriteU8', 'void', [param('uint8_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data, uint32_t len) [member function]
    cls.add_method('WriteU8', 'void', [param('uint8_t', 'data'), param('uint32_t', 'len')])
    return

def register_Ns3ByteTagIterator_methods(root_module, cls):
    """Bind ns3::ByteTagIterator: copy constructor, HasNext and Next."""
    ## packet.h (module 'network'): ns3::ByteTagIterator::ByteTagIterator(ns3::ByteTagIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')])
    ## packet.h (module 'network'): bool ns3::ByteTagIterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## packet.h (module 'network'): ns3::ByteTagIterator::Item ns3::ByteTagIterator::Next() [member function]
    cls.add_method('Next', 'ns3::ByteTagIterator::Item', [])
    return

def register_Ns3ByteTagIteratorItem_methods(root_module, cls):
    """Bind ns3::ByteTagIterator::Item: tag range/type accessors."""
    ## packet.h (module 'network'): ns3::ByteTagIterator::Item::Item(ns3::ByteTagIterator::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')])
    ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetEnd() const [member function]
    cls.add_method('GetEnd', 'uint32_t', [], is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetStart() const [member function]
    cls.add_method('GetStart', 'uint32_t', [], is_const=True)
    ## packet.h (module 'network'): void ns3::ByteTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
    cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True)
    ## packet.h (module 'network'): ns3::TypeId ns3::ByteTagIterator::Item::GetTypeId() const [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    return

def register_Ns3ByteTagList_methods(root_module, cls):
    """Bind ns3::ByteTagList: constructors and tag add/adjust members."""
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList() [constructor]
    cls.add_constructor([])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList(ns3::ByteTagList const & o) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagList const &', 'o')])
    ## byte-tag-list.h (module 'network'): ns3::TagBuffer ns3::ByteTagList::Add(ns3::TypeId tid, uint32_t bufferSize, int32_t start, int32_t end) [member function]
    cls.add_method('Add', 'ns3::TagBuffer', [param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Add(ns3::ByteTagList const & o) [member function]
    cls.add_method('Add', 'void', [param('ns3::ByteTagList const &', 'o')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtEnd(int32_t adjustment, int32_t appendOffset) [member function]
    cls.add_method('AddAtEnd', 'void', [param('int32_t', 'adjustment'), param('int32_t', 'appendOffset')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtStart(int32_t adjustment, int32_t prependOffset) [member function]
    cls.add_method('AddAtStart', 'void', [param('int32_t', 'adjustment'), param('int32_t', 'prependOffset')])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator ns3::ByteTagList::Begin(int32_t offsetStart, int32_t offsetEnd) const [member function]
    cls.add_method('Begin',
'ns3::ByteTagList::Iterator', [param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')], is_const=True) ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::RemoveAll() [member function] cls.add_method('RemoveAll', 'void', []) return def register_Ns3ByteTagListIterator_methods(root_module, cls): ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Iterator(ns3::ByteTagList::Iterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')]) ## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Iterator::GetOffsetStart() const [member function] cls.add_method('GetOffsetStart', 'uint32_t', [], is_const=True) ## byte-tag-list.h (module 'network'): bool ns3::ByteTagList::Iterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item ns3::ByteTagList::Iterator::Next() [member function] cls.add_method('Next', 'ns3::ByteTagList::Iterator::Item', []) return def register_Ns3ByteTagListIteratorItem_methods(root_module, cls): ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::ByteTagList::Iterator::Item const & arg0) [copy constructor] cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')]) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::TagBuffer buf) [constructor] cls.add_constructor([param('ns3::TagBuffer', 'buf')]) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::buf [variable] cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::end [variable] cls.add_instance_attribute('end', 'int32_t', is_const=False) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::size [variable] cls.add_instance_attribute('size', 'uint32_t', is_const=False) ## byte-tag-list.h 
(module 'network'): ns3::ByteTagList::Iterator::Item::start [variable] cls.add_instance_attribute('start', 'int32_t', is_const=False) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::tid [variable] cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False) return def register_Ns3CallbackBase_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')]) ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function] cls.add_method('GetImpl', 'ns3::Ptr< ns3::CallbackImplBase >', [], is_const=True) ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')], visibility='protected') ## callback.h (module 'core'): static std::string ns3::CallbackBase::Demangle(std::string const & mangled) [member function] cls.add_method('Demangle', 'std::string', [param('std::string const &', 'mangled')], is_static=True, visibility='protected') return def register_Ns3EventId_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_binary_comparison_operator('==') ## event-id.h (module 'core'): ns3::EventId::EventId(ns3::EventId const & arg0) [copy constructor] cls.add_constructor([param('ns3::EventId const &', 'arg0')]) ## event-id.h (module 'core'): ns3::EventId::EventId() [constructor] cls.add_constructor([]) ## event-id.h (module 'core'): ns3::EventId::EventId(ns3::Ptr<ns3::EventImpl> const & impl, uint64_t ts, uint32_t context, uint32_t uid) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'), param('uint64_t', 'ts'), param('uint32_t', 'context'), param('uint32_t', 'uid')]) ## 
event-id.h (module 'core'): void ns3::EventId::Cancel() [member function] cls.add_method('Cancel', 'void', []) ## event-id.h (module 'core'): uint32_t ns3::EventId::GetContext() const [member function] cls.add_method('GetContext', 'uint32_t', [], is_const=True) ## event-id.h (module 'core'): uint64_t ns3::EventId::GetTs() const [member function] cls.add_method('GetTs', 'uint64_t', [], is_const=True) ## event-id.h (module 'core'): uint32_t ns3::EventId::GetUid() const [member function] cls.add_method('GetUid', 'uint32_t', [], is_const=True) ## event-id.h (module 'core'): bool ns3::EventId::IsExpired() const [member function] cls.add_method('IsExpired', 'bool', [], is_const=True) ## event-id.h (module 'core'): bool ns3::EventId::IsRunning() const [member function] cls.add_method('IsRunning', 'bool', [], is_const=True) ## event-id.h (module 'core'): ns3::EventImpl * ns3::EventId::PeekEventImpl() const [member function] cls.add_method('PeekEventImpl', 'ns3::EventImpl *', [], is_const=True) return def register_Ns3IntToType__0_methods(root_module, cls): ## int-to-type.h (module 'core'): ns3::IntToType<0>::IntToType() [constructor] cls.add_constructor([]) ## int-to-type.h (module 'core'): ns3::IntToType<0>::IntToType(ns3::IntToType<0> const & arg0) [copy constructor] cls.add_constructor([param('ns3::IntToType< 0 > const &', 'arg0')]) return def register_Ns3IntToType__1_methods(root_module, cls): ## int-to-type.h (module 'core'): ns3::IntToType<1>::IntToType() [constructor] cls.add_constructor([]) ## int-to-type.h (module 'core'): ns3::IntToType<1>::IntToType(ns3::IntToType<1> const & arg0) [copy constructor] cls.add_constructor([param('ns3::IntToType< 1 > const &', 'arg0')]) return def register_Ns3IntToType__2_methods(root_module, cls): ## int-to-type.h (module 'core'): ns3::IntToType<2>::IntToType() [constructor] cls.add_constructor([]) ## int-to-type.h (module 'core'): ns3::IntToType<2>::IntToType(ns3::IntToType<2> const & arg0) [copy constructor] 
cls.add_constructor([param('ns3::IntToType< 2 > const &', 'arg0')]) return def register_Ns3IntToType__3_methods(root_module, cls): ## int-to-type.h (module 'core'): ns3::IntToType<3>::IntToType() [constructor] cls.add_constructor([]) ## int-to-type.h (module 'core'): ns3::IntToType<3>::IntToType(ns3::IntToType<3> const & arg0) [copy constructor] cls.add_constructor([param('ns3::IntToType< 3 > const &', 'arg0')]) return def register_Ns3IntToType__4_methods(root_module, cls): ## int-to-type.h (module 'core'): ns3::IntToType<4>::IntToType() [constructor] cls.add_constructor([]) ## int-to-type.h (module 'core'): ns3::IntToType<4>::IntToType(ns3::IntToType<4> const & arg0) [copy constructor] cls.add_constructor([param('ns3::IntToType< 4 > const &', 'arg0')]) return def register_Ns3IntToType__5_methods(root_module, cls): ## int-to-type.h (module 'core'): ns3::IntToType<5>::IntToType() [constructor] cls.add_constructor([]) ## int-to-type.h (module 'core'): ns3::IntToType<5>::IntToType(ns3::IntToType<5> const & arg0) [copy constructor] cls.add_constructor([param('ns3::IntToType< 5 > const &', 'arg0')]) return def register_Ns3IntToType__6_methods(root_module, cls): ## int-to-type.h (module 'core'): ns3::IntToType<6>::IntToType() [constructor] cls.add_constructor([]) ## int-to-type.h (module 'core'): ns3::IntToType<6>::IntToType(ns3::IntToType<6> const & arg0) [copy constructor] cls.add_constructor([param('ns3::IntToType< 6 > const &', 'arg0')]) return def register_Ns3Ipv4Address_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(ns3::Ipv4Address const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 
'network'): ns3::Ipv4Address::Ipv4Address(uint32_t address) [constructor] cls.add_constructor([param('uint32_t', 'address')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(char const * address) [constructor] cls.add_constructor([param('char const *', 'address')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::CombineMask(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('CombineMask', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::ConvertFrom(ns3::Address const & address) [member function] cls.add_method('ConvertFrom', 'ns3::Ipv4Address', [param('ns3::Address const &', 'address')], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::Deserialize(uint8_t const * buf) [member function] cls.add_method('Deserialize', 'ns3::Ipv4Address', [param('uint8_t const *', 'buf')], is_static=True) ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Address::Get() const [member function] cls.add_method('Get', 'uint32_t', [], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetAny() [member function] cls.add_method('GetAny', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetBroadcast() [member function] cls.add_method('GetBroadcast', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::GetSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('GetSubnetDirectedBroadcast', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 
'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsBroadcast() const [member function] cls.add_method('IsBroadcast', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsEqual(ns3::Ipv4Address const & other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Address const &', 'other')], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalMulticast() const [member function] cls.add_method('IsLocalMulticast', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): static bool ns3::Ipv4Address::IsMatchingType(ns3::Address const & address) [member function] cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsMulticast() const [member function] cls.add_method('IsMulticast', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('IsSubnetDirectedBroadcast', 'bool', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Serialize(uint8_t * buf) const [member function] cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(uint32_t address) [member function] cls.add_method('Set', 'void', [param('uint32_t', 'address')]) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(char const * address) [member function] cls.add_method('Set', 
'void', [param('char const *', 'address')]) return def register_Ns3Ipv4InterfaceAddress_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::Ipv4InterfaceAddress() [constructor] cls.add_constructor([]) ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::Ipv4InterfaceAddress(ns3::Ipv4Address local, ns3::Ipv4Mask mask) [constructor] cls.add_constructor([param('ns3::Ipv4Address', 'local'), param('ns3::Ipv4Mask', 'mask')]) ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::Ipv4InterfaceAddress(ns3::Ipv4InterfaceAddress const & o) [copy constructor] cls.add_constructor([param('ns3::Ipv4InterfaceAddress const &', 'o')]) ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4InterfaceAddress::GetBroadcast() const [member function] cls.add_method('GetBroadcast', 'ns3::Ipv4Address', [], is_const=True) ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4InterfaceAddress::GetLocal() const [member function] cls.add_method('GetLocal', 'ns3::Ipv4Address', [], is_const=True) ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4Mask ns3::Ipv4InterfaceAddress::GetMask() const [member function] cls.add_method('GetMask', 'ns3::Ipv4Mask', [], is_const=True) ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e ns3::Ipv4InterfaceAddress::GetScope() const [member function] cls.add_method('GetScope', 'ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e', [], is_const=True) ## ipv4-interface-address.h (module 'internet'): bool ns3::Ipv4InterfaceAddress::IsSecondary() const [member function] cls.add_method('IsSecondary', 'bool', [], is_const=True) ## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetBroadcast(ns3::Ipv4Address broadcast) [member function] 
cls.add_method('SetBroadcast', 'void', [param('ns3::Ipv4Address', 'broadcast')]) ## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetLocal(ns3::Ipv4Address local) [member function] cls.add_method('SetLocal', 'void', [param('ns3::Ipv4Address', 'local')]) ## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetMask(ns3::Ipv4Mask mask) [member function] cls.add_method('SetMask', 'void', [param('ns3::Ipv4Mask', 'mask')]) ## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetPrimary() [member function] cls.add_method('SetPrimary', 'void', []) ## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetScope(ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e scope) [member function] cls.add_method('SetScope', 'void', [param('ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e', 'scope')]) ## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetSecondary() [member function] cls.add_method('SetSecondary', 'void', []) return def register_Ns3Ipv4Mask_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(ns3::Ipv4Mask const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(uint32_t mask) [constructor] cls.add_constructor([param('uint32_t', 'mask')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(char const * mask) [constructor] cls.add_constructor([param('char const *', 'mask')]) ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::Get() const [member function] cls.add_method('Get', 'uint32_t', [], is_const=True) ## ipv4-address.h (module 'network'): uint32_t 
ns3::Ipv4Mask::GetInverse() const [member function] cls.add_method('GetInverse', 'uint32_t', [], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv4Mask', [], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetOnes() [member function] cls.add_method('GetOnes', 'ns3::Ipv4Mask', [], is_static=True) ## ipv4-address.h (module 'network'): uint16_t ns3::Ipv4Mask::GetPrefixLength() const [member function] cls.add_method('GetPrefixLength', 'uint16_t', [], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv4Mask', [], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsEqual(ns3::Ipv4Mask other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Mask', 'other')], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsMatch(ns3::Ipv4Address a, ns3::Ipv4Address b) const [member function] cls.add_method('IsMatch', 'bool', [param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Set(uint32_t mask) [member function] cls.add_method('Set', 'void', [param('uint32_t', 'mask')]) return def register_Ns3Ipv4RoutingHelper_methods(root_module, cls): ## ipv4-routing-helper.h (module 'internet'): ns3::Ipv4RoutingHelper::Ipv4RoutingHelper() [constructor] cls.add_constructor([]) ## ipv4-routing-helper.h (module 'internet'): ns3::Ipv4RoutingHelper::Ipv4RoutingHelper(ns3::Ipv4RoutingHelper const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4RoutingHelper const &', 'arg0')]) ## 
ipv4-routing-helper.h (module 'internet'): ns3::Ipv4RoutingHelper * ns3::Ipv4RoutingHelper::Copy() const [member function] cls.add_method('Copy', 'ns3::Ipv4RoutingHelper *', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4-routing-helper.h (module 'internet'): ns3::Ptr<ns3::Ipv4RoutingProtocol> ns3::Ipv4RoutingHelper::Create(ns3::Ptr<ns3::Node> node) const [member function] cls.add_method('Create', 'ns3::Ptr< ns3::Ipv4RoutingProtocol >', [param('ns3::Ptr< ns3::Node >', 'node')], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4-routing-helper.h (module 'internet'): void ns3::Ipv4RoutingHelper::PrintRoutingTableAllAt(ns3::Time printTime, ns3::Ptr<ns3::OutputStreamWrapper> stream) const [member function] cls.add_method('PrintRoutingTableAllAt', 'void', [param('ns3::Time', 'printTime'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')], is_const=True) ## ipv4-routing-helper.h (module 'internet'): void ns3::Ipv4RoutingHelper::PrintRoutingTableAllEvery(ns3::Time printInterval, ns3::Ptr<ns3::OutputStreamWrapper> stream) const [member function] cls.add_method('PrintRoutingTableAllEvery', 'void', [param('ns3::Time', 'printInterval'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')], is_const=True) ## ipv4-routing-helper.h (module 'internet'): void ns3::Ipv4RoutingHelper::PrintRoutingTableAt(ns3::Time printTime, ns3::Ptr<ns3::Node> node, ns3::Ptr<ns3::OutputStreamWrapper> stream) const [member function] cls.add_method('PrintRoutingTableAt', 'void', [param('ns3::Time', 'printTime'), param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')], is_const=True) ## ipv4-routing-helper.h (module 'internet'): void ns3::Ipv4RoutingHelper::PrintRoutingTableEvery(ns3::Time printInterval, ns3::Ptr<ns3::Node> node, ns3::Ptr<ns3::OutputStreamWrapper> stream) const [member function] cls.add_method('PrintRoutingTableEvery', 'void', [param('ns3::Time', 'printInterval'), param('ns3::Ptr< ns3::Node >', 'node'), 
param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')], is_const=True) return def register_Ns3Ipv6Address_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(char const * address) [constructor] cls.add_constructor([param('char const *', 'address')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(uint8_t * address) [constructor] cls.add_constructor([param('uint8_t *', 'address')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const & addr) [copy constructor] cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const * addr) [constructor] cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6Address::CombinePrefix(ns3::Ipv6Prefix const & prefix) [member function] cls.add_method('CombinePrefix', 'ns3::Ipv6Address', [param('ns3::Ipv6Prefix const &', 'prefix')]) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::ConvertFrom(ns3::Address const & address) [member function] cls.add_method('ConvertFrom', 'ns3::Ipv6Address', [param('ns3::Address const &', 'address')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::Deserialize(uint8_t const * buf) [member function] cls.add_method('Deserialize', 'ns3::Ipv6Address', [param('uint8_t const *', 'buf')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllHostsMulticast() [member function] cls.add_method('GetAllHostsMulticast', 'ns3::Ipv6Address', [], is_static=True) ## 
ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllNodesMulticast() [member function] cls.add_method('GetAllNodesMulticast', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllRoutersMulticast() [member function] cls.add_method('GetAllRoutersMulticast', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAny() [member function] cls.add_method('GetAny', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::GetBytes(uint8_t * buf) const [member function] cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetOnes() [member function] cls.add_method('GetOnes', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllHostsMulticast() const [member function] cls.add_method('IsAllHostsMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllNodesMulticast() const [member function] cls.add_method('IsAllNodesMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllRoutersMulticast() const [member function] cls.add_method('IsAllRoutersMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAny() const [member function] cls.add_method('IsAny', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool 
ns3::Ipv6Address::IsEqual(ns3::Ipv6Address const & other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv6Address const &', 'other')], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocal() const [member function] cls.add_method('IsLinkLocal', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLocalhost() const [member function] cls.add_method('IsLocalhost', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): static bool ns3::Ipv6Address::IsMatchingType(ns3::Address const & address) [member function] cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsMulticast() const [member function] cls.add_method('IsMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsSolicitedMulticast() const [member function] cls.add_method('IsSolicitedMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac48Address addr, ns3::Ipv6Address prefix) [member function] cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac48Address mac) [member function] cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'mac')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeSolicitedAddress(ns3::Ipv6Address addr) [member function] cls.add_method('MakeSolicitedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv6Address', 'addr')], is_static=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Print(std::ostream & 
os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Serialize(uint8_t * buf) const [member function] cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(char const * address) [member function] cls.add_method('Set', 'void', [param('char const *', 'address')]) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(uint8_t * address) [member function] cls.add_method('Set', 'void', [param('uint8_t *', 'address')]) return def register_Ns3Ipv6Prefix_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix) [constructor] cls.add_constructor([param('uint8_t *', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix) [constructor] cls.add_constructor([param('char const *', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t prefix) [constructor] cls.add_constructor([param('uint8_t', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const & prefix) [copy constructor] cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const * prefix) [constructor] cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')]) ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::GetBytes(uint8_t * buf) const [member function] cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix 
ns3::Ipv6Prefix::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv6Prefix', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetOnes() [member function] cls.add_method('GetOnes', 'ns3::Ipv6Prefix', [], is_static=True) ## ipv6-address.h (module 'network'): uint8_t ns3::Ipv6Prefix::GetPrefixLength() const [member function] cls.add_method('GetPrefixLength', 'uint8_t', [], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv6Prefix', [], is_static=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsEqual(ns3::Ipv6Prefix const & other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv6Prefix const &', 'other')], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsMatch(ns3::Ipv6Address a, ns3::Ipv6Address b) const [member function] cls.add_method('IsMatch', 'bool', [param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')], is_const=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) return def register_Ns3NodeContainer_methods(root_module, cls): ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & arg0) [copy constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'arg0')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer() [constructor] cls.add_constructor([]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::Ptr<ns3::Node> node) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'node')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(std::string nodeName) [constructor] cls.add_constructor([param('std::string', 'nodeName')]) ## 
node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b) [constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c) [constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d) [constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d, ns3::NodeContainer const & e) [constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd'), param('ns3::NodeContainer const &', 'e')]) ## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::NodeContainer other) [member function] cls.add_method('Add', 'void', [param('ns3::NodeContainer', 'other')]) ## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::Ptr<ns3::Node> node) [member function] cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::Node >', 'node')]) ## node-container.h (module 'network'): void ns3::NodeContainer::Add(std::string nodeName) [member function] cls.add_method('Add', 'void', [param('std::string', 'nodeName')]) ## 
node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::Begin() const [member function] cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >', [], is_const=True) ## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n) [member function] cls.add_method('Create', 'void', [param('uint32_t', 'n')]) ## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n, uint32_t systemId) [member function] cls.add_method('Create', 'void', [param('uint32_t', 'n'), param('uint32_t', 'systemId')]) ## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::End() const [member function] cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >', [], is_const=True) ## node-container.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NodeContainer::Get(uint32_t i) const [member function] cls.add_method('Get', 'ns3::Ptr< ns3::Node >', [param('uint32_t', 'i')], is_const=True) ## node-container.h (module 'network'): static ns3::NodeContainer ns3::NodeContainer::GetGlobal() [member function] cls.add_method('GetGlobal', 'ns3::NodeContainer', [], is_static=True) ## node-container.h (module 'network'): uint32_t ns3::NodeContainer::GetN() const [member function] cls.add_method('GetN', 'uint32_t', [], is_const=True) return def register_Ns3ObjectBase_methods(root_module, cls): ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor] cls.add_constructor([]) ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')]) ## object-base.h 
(module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function] cls.add_method('GetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')], is_const=True) ## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & attribute) const [member function] cls.add_method('GetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue &', 'attribute')], is_const=True) ## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function] cls.add_method('SetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function] cls.add_method('SetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceConnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceConnectWithoutContext', 'bool', [param('std::string', 'name'), 
param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceDisconnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceDisconnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function] cls.add_method('ConstructSelf', 'void', [param('ns3::AttributeConstructionList const &', 'attributes')], visibility='protected') ## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function] cls.add_method('NotifyConstructionCompleted', 'void', [], visibility='protected', is_virtual=True) return def register_Ns3ObjectDeleter_methods(root_module, cls): ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor] cls.add_constructor([]) ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')]) ## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function] cls.add_method('Delete', 'void', [param('ns3::Object *', 'object')], is_static=True) return def register_Ns3ObjectFactory_methods(root_module, cls): cls.add_output_stream_operator() ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(ns3::ObjectFactory const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')]) ## object-factory.h (module 'core'): 
ns3::ObjectFactory::ObjectFactory() [constructor] cls.add_constructor([]) ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(std::string typeId) [constructor] cls.add_constructor([param('std::string', 'typeId')]) ## object-factory.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectFactory::Create() const [member function] cls.add_method('Create', 'ns3::Ptr< ns3::Object >', [], is_const=True) ## object-factory.h (module 'core'): ns3::TypeId ns3::ObjectFactory::GetTypeId() const [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True) ## object-factory.h (module 'core'): void ns3::ObjectFactory::Set(std::string name, ns3::AttributeValue const & value) [member function] cls.add_method('Set', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')]) ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(ns3::TypeId tid) [member function] cls.add_method('SetTypeId', 'void', [param('ns3::TypeId', 'tid')]) ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(char const * tid) [member function] cls.add_method('SetTypeId', 'void', [param('char const *', 'tid')]) ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(std::string tid) [member function] cls.add_method('SetTypeId', 'void', [param('std::string', 'tid')]) return def register_Ns3PacketMetadata_methods(root_module, cls): ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(uint64_t uid, uint32_t size) [constructor] cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(ns3::PacketMetadata const & o) [copy constructor] cls.add_constructor([param('ns3::PacketMetadata const &', 'o')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddAtEnd(ns3::PacketMetadata const & o) [member function] cls.add_method('AddAtEnd', 'void', [param('ns3::PacketMetadata const 
&', 'o')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddHeader(ns3::Header const & header, uint32_t size) [member function] cls.add_method('AddHeader', 'void', [param('ns3::Header const &', 'header'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddPaddingAtEnd(uint32_t end) [member function] cls.add_method('AddPaddingAtEnd', 'void', [param('uint32_t', 'end')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddTrailer(ns3::Trailer const & trailer, uint32_t size) [member function] cls.add_method('AddTrailer', 'void', [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::PacketMetadata::BeginItem(ns3::Buffer buffer) const [member function] cls.add_method('BeginItem', 'ns3::PacketMetadata::ItemIterator', [param('ns3::Buffer', 'buffer')], is_const=True) ## packet-metadata.h (module 'network'): ns3::PacketMetadata ns3::PacketMetadata::CreateFragment(uint32_t start, uint32_t end) const [member function] cls.add_method('CreateFragment', 'ns3::PacketMetadata', [param('uint32_t', 'start'), param('uint32_t', 'end')], is_const=True) ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Deserialize(uint8_t const * buffer, uint32_t size) [member function] cls.add_method('Deserialize', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::Enable() [member function] cls.add_method('Enable', 'void', [], is_static=True) ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::EnableChecking() [member function] cls.add_method('EnableChecking', 'void', [], is_static=True) ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## 
packet-metadata.h (module 'network'): uint64_t ns3::PacketMetadata::GetUid() const [member function] cls.add_method('GetUid', 'uint64_t', [], is_const=True) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtEnd(uint32_t end) [member function] cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'end')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtStart(uint32_t start) [member function] cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'start')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveHeader(ns3::Header const & header, uint32_t size) [member function] cls.add_method('RemoveHeader', 'void', [param('ns3::Header const &', 'header'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveTrailer(ns3::Trailer const & trailer, uint32_t size) [member function] cls.add_method('RemoveTrailer', 'void', [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function] cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True) return def register_Ns3PacketMetadataItem_methods(root_module, cls): ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item() [constructor] cls.add_constructor([]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item(ns3::PacketMetadata::Item const & arg0) [copy constructor] cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::current [variable] cls.add_instance_attribute('current', 'ns3::Buffer::Iterator', is_const=False) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentSize [variable] cls.add_instance_attribute('currentSize', 'uint32_t', is_const=False) ## 
packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromEnd [variable] cls.add_instance_attribute('currentTrimedFromEnd', 'uint32_t', is_const=False) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromStart [variable] cls.add_instance_attribute('currentTrimedFromStart', 'uint32_t', is_const=False) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::isFragment [variable] cls.add_instance_attribute('isFragment', 'bool', is_const=False) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::tid [variable] cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False) return def register_Ns3PacketMetadataItemIterator_methods(root_module, cls): ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata::ItemIterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata const * metadata, ns3::Buffer buffer) [constructor] cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')]) ## packet-metadata.h (module 'network'): bool ns3::PacketMetadata::ItemIterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item ns3::PacketMetadata::ItemIterator::Next() [member function] cls.add_method('Next', 'ns3::PacketMetadata::Item', []) return def register_Ns3PacketTagIterator_methods(root_module, cls): ## packet.h (module 'network'): ns3::PacketTagIterator::PacketTagIterator(ns3::PacketTagIterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')]) ## packet.h (module 'network'): bool ns3::PacketTagIterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], 
is_const=True) ## packet.h (module 'network'): ns3::PacketTagIterator::Item ns3::PacketTagIterator::Next() [member function] cls.add_method('Next', 'ns3::PacketTagIterator::Item', []) return def register_Ns3PacketTagIteratorItem_methods(root_module, cls): ## packet.h (module 'network'): ns3::PacketTagIterator::Item::Item(ns3::PacketTagIterator::Item const & arg0) [copy constructor] cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')]) ## packet.h (module 'network'): void ns3::PacketTagIterator::Item::GetTag(ns3::Tag & tag) const [member function] cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True) ## packet.h (module 'network'): ns3::TypeId ns3::PacketTagIterator::Item::GetTypeId() const [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True) return def register_Ns3PacketTagList_methods(root_module, cls): ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList() [constructor] cls.add_constructor([]) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList(ns3::PacketTagList const & o) [copy constructor] cls.add_constructor([param('ns3::PacketTagList const &', 'o')]) ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::Add(ns3::Tag const & tag) const [member function] cls.add_method('Add', 'void', [param('ns3::Tag const &', 'tag')], is_const=True) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData const * ns3::PacketTagList::Head() const [member function] cls.add_method('Head', 'ns3::PacketTagList::TagData const *', [], is_const=True) ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Peek(ns3::Tag & tag) const [member function] cls.add_method('Peek', 'bool', [param('ns3::Tag &', 'tag')], is_const=True) ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Remove(ns3::Tag & tag) [member function] cls.add_method('Remove', 'bool', [param('ns3::Tag &', 'tag')]) ## packet-tag-list.h (module 'network'): 
void ns3::PacketTagList::RemoveAll() [member function] cls.add_method('RemoveAll', 'void', []) return def register_Ns3PacketTagListTagData_methods(root_module, cls): ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData() [constructor] cls.add_constructor([]) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData(ns3::PacketTagList::TagData const & arg0) [copy constructor] cls.add_constructor([
codeparrot/github-code-clean
# encoding=UTF8 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for the DB API.""" import copy import datetime import uuid as stdlib_uuid import iso8601 import mock import netaddr from oslo_config import cfg from oslo_db import api as oslo_db_api from oslo_db import exception as db_exc from oslo_db.sqlalchemy import test_base from oslo_db.sqlalchemy import update_match from oslo_db.sqlalchemy import utils as sqlalchemyutils from oslo_serialization import jsonutils from oslo_utils import timeutils from oslo_utils import uuidutils import six from six.moves import range from sqlalchemy import Column from sqlalchemy.dialects import sqlite from sqlalchemy.exc import OperationalError from sqlalchemy.exc import SQLAlchemyError from sqlalchemy import inspect from sqlalchemy import Integer from sqlalchemy import MetaData from sqlalchemy.orm import query from sqlalchemy import sql from sqlalchemy import Table from nova import block_device from nova.compute import arch from nova.compute import task_states from nova.compute import vm_states from nova import context from nova import db from nova.db.sqlalchemy import api as sqlalchemy_api from nova.db.sqlalchemy import models from nova.db.sqlalchemy import types as col_types from nova.db.sqlalchemy import utils as db_utils from nova import exception from nova import objects 
from nova.objects import fields from nova import quota from nova import test from nova.tests.unit import matchers from nova import utils CONF = cfg.CONF CONF.import_opt('reserved_host_memory_mb', 'nova.compute.resource_tracker') CONF.import_opt('reserved_host_disk_mb', 'nova.compute.resource_tracker') get_engine = sqlalchemy_api.get_engine get_session = sqlalchemy_api.get_session def _reservation_get(context, uuid): result = sqlalchemy_api.model_query(context, models.Reservation, read_deleted="no").filter_by(uuid=uuid).first() if not result: raise exception.ReservationNotFound(uuid=uuid) return result def _quota_reserve(context, project_id, user_id): """Create sample Quota, QuotaUsage and Reservation objects. There is no method db.quota_usage_create(), so we have to use db.quota_reserve() for creating QuotaUsage objects. Returns reservations uuids. """ def get_sync(resource, usage): def sync(elevated, project_id, user_id, session): return {resource: usage} return sync quotas = {} user_quotas = {} resources = {} deltas = {} for i in range(3): resource = 'resource%d' % i if i == 2: # test for project level resources resource = 'fixed_ips' quotas[resource] = db.quota_create(context, project_id, resource, i + 2).hard_limit user_quotas[resource] = quotas[resource] else: quotas[resource] = db.quota_create(context, project_id, resource, i + 1).hard_limit user_quotas[resource] = db.quota_create(context, project_id, resource, i + 1, user_id=user_id).hard_limit sync_name = '_sync_%s' % resource resources[resource] = quota.ReservableResource( resource, sync_name, 'quota_res_%d' % i) deltas[resource] = i setattr(sqlalchemy_api, sync_name, get_sync(resource, i)) sqlalchemy_api.QUOTA_SYNC_FUNCTIONS[sync_name] = getattr( sqlalchemy_api, sync_name) return db.quota_reserve(context, resources, quotas, user_quotas, deltas, timeutils.utcnow(), CONF.until_refresh, datetime.timedelta(days=1), project_id, user_id) class DbTestCase(test.TestCase): def setUp(self): super(DbTestCase, 
self).setUp() self.user_id = 'fake' self.project_id = 'fake' self.context = context.RequestContext(self.user_id, self.project_id) def create_instance_with_args(self, **kwargs): args = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1', 'node': 'node1', 'project_id': self.project_id, 'vm_state': 'fake'} if 'context' in kwargs: ctxt = kwargs.pop('context') args['project_id'] = ctxt.project_id else: ctxt = self.context args.update(kwargs) return db.instance_create(ctxt, args) def fake_metadata(self, content): meta = {} for i in range(0, 10): meta["foo%i" % i] = "this is %s item %i" % (content, i) return meta def create_metadata_for_instance(self, instance_uuid): meta = self.fake_metadata('metadata') db.instance_metadata_update(self.context, instance_uuid, meta, False) sys_meta = self.fake_metadata('system_metadata') db.instance_system_metadata_update(self.context, instance_uuid, sys_meta, False) return meta, sys_meta class DecoratorTestCase(test.TestCase): def _test_decorator_wraps_helper(self, decorator): def test_func(): """Test docstring.""" decorated_func = decorator(test_func) self.assertEqual(test_func.__name__, decorated_func.__name__) self.assertEqual(test_func.__doc__, decorated_func.__doc__) self.assertEqual(test_func.__module__, decorated_func.__module__) def test_require_context_decorator_wraps_functions_properly(self): self._test_decorator_wraps_helper(sqlalchemy_api.require_context) def test_require_deadlock_retry_wraps_functions_properly(self): self._test_decorator_wraps_helper( oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)) def _get_fake_aggr_values(): return {'name': 'fake_aggregate'} def _get_fake_aggr_metadata(): return {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2', 'availability_zone': 'fake_avail_zone'} def _get_fake_aggr_hosts(): return ['foo.openstack.org'] def _create_aggregate(context=context.get_admin_context(), values=_get_fake_aggr_values(), metadata=_get_fake_aggr_metadata()): return 
db.aggregate_create(context, values, metadata) def _create_aggregate_with_hosts(context=context.get_admin_context(), values=_get_fake_aggr_values(), metadata=_get_fake_aggr_metadata(), hosts=_get_fake_aggr_hosts()): result = _create_aggregate(context=context, values=values, metadata=metadata) for host in hosts: db.aggregate_host_add(context, result['id'], host) return result @mock.patch.object(sqlalchemy_api, '_get_regexp_op_for_connection', return_value='LIKE') class UnsupportedDbRegexpTestCase(DbTestCase): def test_instance_get_all_by_filters_paginate(self, mock_get_regexp): test1 = self.create_instance_with_args(display_name='test1') test2 = self.create_instance_with_args(display_name='test2') test3 = self.create_instance_with_args(display_name='test3') result = db.instance_get_all_by_filters(self.context, {'display_name': '%test%'}, marker=None) self.assertEqual(3, len(result)) result = db.instance_get_all_by_filters(self.context, {'display_name': '%test%'}, sort_dir="asc", marker=test1['uuid']) self.assertEqual(2, len(result)) result = db.instance_get_all_by_filters(self.context, {'display_name': '%test%'}, sort_dir="asc", marker=test2['uuid']) self.assertEqual(1, len(result)) result = db.instance_get_all_by_filters(self.context, {'display_name': '%test%'}, sort_dir="asc", marker=test3['uuid']) self.assertEqual(0, len(result)) self.assertRaises(exception.MarkerNotFound, db.instance_get_all_by_filters, self.context, {'display_name': '%test%'}, marker=str(stdlib_uuid.uuid4())) def _assert_equals_inst_order(self, correct_order, filters, sort_keys=None, sort_dirs=None, limit=None, marker=None, match_keys=['uuid', 'vm_state', 'display_name', 'id']): '''Retrieves instances based on the given filters and sorting information and verifies that the instances are returned in the correct sorted order by ensuring that the supplied keys match. 
''' result = db.instance_get_all_by_filters_sort( self.context, filters, limit=limit, marker=marker, sort_keys=sort_keys, sort_dirs=sort_dirs) self.assertEqual(len(correct_order), len(result)) for inst1, inst2 in zip(result, correct_order): for key in match_keys: self.assertEqual(inst1.get(key), inst2.get(key)) return result def test_instance_get_all_by_filters_sort_keys(self, mock_get_regexp): '''Verifies sort order and direction for multiple instances.''' # Instances that will reply to the query test1_active = self.create_instance_with_args( display_name='test1', vm_state=vm_states.ACTIVE) test1_error = self.create_instance_with_args( display_name='test1', vm_state=vm_states.ERROR) test1_error2 = self.create_instance_with_args( display_name='test1', vm_state=vm_states.ERROR) test2_active = self.create_instance_with_args( display_name='test2', vm_state=vm_states.ACTIVE) test2_error = self.create_instance_with_args( display_name='test2', vm_state=vm_states.ERROR) test2_error2 = self.create_instance_with_args( display_name='test2', vm_state=vm_states.ERROR) # Other instances in the DB, will not match name filter other_error = self.create_instance_with_args( display_name='other', vm_state=vm_states.ERROR) other_active = self.create_instance_with_args( display_name='other', vm_state=vm_states.ACTIVE) filters = {'display_name': '%test%'} # Verify different sort key/direction combinations sort_keys = ['display_name', 'vm_state', 'created_at'] sort_dirs = ['asc', 'asc', 'asc'] correct_order = [test1_active, test1_error, test1_error2, test2_active, test2_error, test2_error2] self._assert_equals_inst_order(correct_order, filters, sort_keys=sort_keys, sort_dirs=sort_dirs) sort_dirs = ['asc', 'desc', 'asc'] correct_order = [test1_error, test1_error2, test1_active, test2_error, test2_error2, test2_active] self._assert_equals_inst_order(correct_order, filters, sort_keys=sort_keys, sort_dirs=sort_dirs) sort_dirs = ['desc', 'desc', 'asc'] correct_order = [test2_error, 
test2_error2, test2_active, test1_error, test1_error2, test1_active] self._assert_equals_inst_order(correct_order, filters, sort_keys=sort_keys, sort_dirs=sort_dirs) # created_at is added by default if not supplied, descending order sort_keys = ['display_name', 'vm_state'] sort_dirs = ['desc', 'desc'] correct_order = [test2_error2, test2_error, test2_active, test1_error2, test1_error, test1_active] self._assert_equals_inst_order(correct_order, filters, sort_keys=sort_keys, sort_dirs=sort_dirs) # Now created_at should be in ascending order (defaults to the first # sort dir direction) sort_dirs = ['asc', 'asc'] correct_order = [test1_active, test1_error, test1_error2, test2_active, test2_error, test2_error2] self._assert_equals_inst_order(correct_order, filters, sort_keys=sort_keys, sort_dirs=sort_dirs) # Remove name filter, get all instances correct_order = [other_active, other_error, test1_active, test1_error, test1_error2, test2_active, test2_error, test2_error2] self._assert_equals_inst_order(correct_order, {}, sort_keys=sort_keys, sort_dirs=sort_dirs) # Default sorting, 'created_at' then 'id' in desc order correct_order = [other_active, other_error, test2_error2, test2_error, test2_active, test1_error2, test1_error, test1_active] self._assert_equals_inst_order(correct_order, {}) def test_instance_get_all_by_filters_sort_keys_paginate(self, mock_get_regexp): '''Verifies sort order with pagination.''' # Instances that will reply to the query test1_active = self.create_instance_with_args( display_name='test1', vm_state=vm_states.ACTIVE) test1_error = self.create_instance_with_args( display_name='test1', vm_state=vm_states.ERROR) test1_error2 = self.create_instance_with_args( display_name='test1', vm_state=vm_states.ERROR) test2_active = self.create_instance_with_args( display_name='test2', vm_state=vm_states.ACTIVE) test2_error = self.create_instance_with_args( display_name='test2', vm_state=vm_states.ERROR) test2_error2 = self.create_instance_with_args( 
display_name='test2', vm_state=vm_states.ERROR) # Other instances in the DB, will not match name filter self.create_instance_with_args(display_name='other') self.create_instance_with_args(display_name='other') filters = {'display_name': '%test%'} # Common sort information for every query sort_keys = ['display_name', 'vm_state', 'created_at'] sort_dirs = ['asc', 'desc', 'asc'] # Overall correct instance order based on the sort keys correct_order = [test1_error, test1_error2, test1_active, test2_error, test2_error2, test2_active] # Limits of 1, 2, and 3, verify that the instances returned are in the # correct sorted order, update the marker to get the next correct page for limit in range(1, 4): marker = None # Include the maximum number of instances (ie, 6) to ensure that # the last query (with marker pointing to the last instance) # returns 0 servers for i in range(0, 7, limit): if i == len(correct_order): correct = [] else: correct = correct_order[i:i + limit] insts = self._assert_equals_inst_order( correct, filters, sort_keys=sort_keys, sort_dirs=sort_dirs, limit=limit, marker=marker) if correct: marker = insts[-1]['uuid'] self.assertEqual(correct[-1]['uuid'], marker) def test_instance_get_deleted_by_filters_sort_keys_paginate(self, mock_get_regexp): '''Verifies sort order with pagination for deleted instances.''' ctxt = context.get_admin_context() # Instances that will reply to the query test1_active = self.create_instance_with_args( display_name='test1', vm_state=vm_states.ACTIVE) db.instance_destroy(ctxt, test1_active['uuid']) test1_error = self.create_instance_with_args( display_name='test1', vm_state=vm_states.ERROR) db.instance_destroy(ctxt, test1_error['uuid']) test1_error2 = self.create_instance_with_args( display_name='test1', vm_state=vm_states.ERROR) db.instance_destroy(ctxt, test1_error2['uuid']) test2_active = self.create_instance_with_args( display_name='test2', vm_state=vm_states.ACTIVE) db.instance_destroy(ctxt, test2_active['uuid']) test2_error = 
self.create_instance_with_args( display_name='test2', vm_state=vm_states.ERROR) db.instance_destroy(ctxt, test2_error['uuid']) test2_error2 = self.create_instance_with_args( display_name='test2', vm_state=vm_states.ERROR) db.instance_destroy(ctxt, test2_error2['uuid']) # Other instances in the DB, will not match name filter self.create_instance_with_args(display_name='other') self.create_instance_with_args(display_name='other') filters = {'display_name': '%test%', 'deleted': True} # Common sort information for every query sort_keys = ['display_name', 'vm_state', 'created_at'] sort_dirs = ['asc', 'desc', 'asc'] # Overall correct instance order based on the sort keys correct_order = [test1_error, test1_error2, test1_active, test2_error, test2_error2, test2_active] # Limits of 1, 2, and 3, verify that the instances returned are in the # correct sorted order, update the marker to get the next correct page for limit in range(1, 4): marker = None # Include the maximum number of instances (ie, 6) to ensure that # the last query (with marker pointing to the last instance) # returns 0 servers for i in range(0, 7, limit): if i == len(correct_order): correct = [] else: correct = correct_order[i:i + limit] insts = self._assert_equals_inst_order( correct, filters, sort_keys=sort_keys, sort_dirs=sort_dirs, limit=limit, marker=marker) if correct: marker = insts[-1]['uuid'] self.assertEqual(correct[-1]['uuid'], marker) class ModelQueryTestCase(DbTestCase): def test_model_query_invalid_arguments(self): # read_deleted shouldn't accept invalid values self.assertRaises(ValueError, sqlalchemy_api.model_query, self.context, models.Instance, read_deleted=False) self.assertRaises(ValueError, sqlalchemy_api.model_query, self.context, models.Instance, read_deleted="foo") # Check model is a valid model self.assertRaises(TypeError, sqlalchemy_api.model_query, self.context, "") @mock.patch.object(sqlalchemy_api, 'get_session') def test_model_query_use_slave_false(self, mock_get_session): 
        sqlalchemy_api.model_query(self.context, models.Instance,
                                   use_slave=False)
        mock_get_session.assert_called_once_with(use_slave=False)

    @mock.patch.object(sqlalchemy_api, 'get_session')
    def test_model_query_use_slave_no_slave_connection(self, mock_get_session):
        '''use_slave=True is ignored when no slave connection is configured.'''
        self.flags(slave_connection='', group='database')
        sqlalchemy_api.model_query(self.context, models.Instance,
                                   use_slave=True)
        # Falls back to the master connection.
        mock_get_session.assert_called_once_with(use_slave=False)

    @mock.patch.object(sqlalchemy_api, 'get_session')
    def test_model_query_use_slave_true(self, mock_get_session):
        '''use_slave=True is honored when a slave connection is configured.'''
        self.flags(slave_connection='foo://bar', group='database')
        sqlalchemy_api.model_query(self.context, models.Instance,
                                   use_slave=True)
        mock_get_session.assert_called_once_with(use_slave=True)

    @mock.patch.object(sqlalchemy_api, 'get_session')
    def test_model_query_lazy_session_default(self, mock_get_session):
        '''An explicitly passed session suppresses the get_session() call.'''
        sqlalchemy_api.model_query(self.context, models.Instance,
                                   session=mock.MagicMock())
        self.assertFalse(mock_get_session.called)

    @mock.patch.object(sqlalchemy_api, 'get_session')
    @mock.patch.object(sqlalchemyutils, 'model_query')
    def test_model_query_use_context_session(self, mock_model_query,
                                             mock_get_session):
        '''model_query reuses the session carried by an enginefacade context.'''
        @sqlalchemy_api.main_context_manager.reader
        def fake_method(context):
            session = context.session
            sqlalchemy_api.model_query(context, models.Instance)
            return session

        session = fake_method(self.context)
        # No new session should be created; the context's session is used.
        self.assertFalse(mock_get_session.called)
        mock_model_query.assert_called_once_with(models.Instance, session,
                                                 None, deleted=False)


class EngineFacadeTestCase(DbTestCase):
    @mock.patch.object(sqlalchemy_api, 'get_session')
    def test_use_single_context_session_writer(self, mock_get_session):
        # Checks that session in context would not be overwritten by
        # annotation @sqlalchemy_api.main_context_manager.writer if annotation
        # is used twice.
        @sqlalchemy_api.main_context_manager.writer
        def fake_parent_method(context):
            session = context.session
            return fake_child_method(context), session

        @sqlalchemy_api.main_context_manager.writer
        def fake_child_method(context):
            session = context.session
            sqlalchemy_api.model_query(context, models.Instance)
            return session

        parent_session, child_session = fake_parent_method(self.context)
        # Nested writer blocks must share one session and never open a new one.
        self.assertFalse(mock_get_session.called)
        self.assertEqual(parent_session, child_session)

    @mock.patch.object(sqlalchemy_api, 'get_session')
    def test_use_single_context_session_reader(self, mock_get_session):
        # Checks that session in context would not be overwritten by
        # annotation @sqlalchemy_api.main_context_manager.reader if annotation
        # is used twice.
        @sqlalchemy_api.main_context_manager.reader
        def fake_parent_method(context):
            session = context.session
            return fake_child_method(context), session

        @sqlalchemy_api.main_context_manager.reader
        def fake_child_method(context):
            session = context.session
            sqlalchemy_api.model_query(context, models.Instance)
            return session

        parent_session, child_session = fake_parent_method(self.context)
        # Nested reader blocks must share one session and never open a new one.
        self.assertFalse(mock_get_session.called)
        self.assertEqual(parent_session, child_session)


class AggregateDBApiTestCase(test.TestCase):
    '''Tests for the aggregate portion of the DB API.'''

    def setUp(self):
        super(AggregateDBApiTestCase, self).setUp()
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)

    def test_aggregate_create_no_metadata(self):
        '''An aggregate can be created without any metadata.'''
        result = _create_aggregate(metadata=None)
        self.assertEqual(result['name'], 'fake_aggregate')

    def test_aggregate_create_avoid_name_conflict(self):
        '''A deleted aggregate's name can be reused by a new aggregate.'''
        r1 = _create_aggregate(metadata=None)
        db.aggregate_delete(context.get_admin_context(), r1['id'])
        values = {'name': r1['name']}
        metadata = {'availability_zone': 'new_zone'}
        r2 = _create_aggregate(values=values, metadata=metadata)
        self.assertEqual(r2['name'], values['name'])
        self.assertEqual(r2['availability_zone'],
                         metadata['availability_zone'])

    def test_aggregate_create_raise_exist_exc(self):
        '''Creating a duplicate aggregate name raises AggregateNameExists.'''
        _create_aggregate(metadata=None)
        self.assertRaises(exception.AggregateNameExists,
                          _create_aggregate, metadata=None)

    def test_aggregate_get_raise_not_found(self):
        ctxt = context.get_admin_context()
        # this does not exist!
        aggregate_id = 1
        self.assertRaises(exception.AggregateNotFound,
                          db.aggregate_get,
                          ctxt, aggregate_id)

    def test_aggregate_metadata_get_raise_not_found(self):
        ctxt = context.get_admin_context()
        # this does not exist!
        aggregate_id = 1
        self.assertRaises(exception.AggregateNotFound,
                          db.aggregate_metadata_get,
                          ctxt, aggregate_id)

    def test_aggregate_create_with_metadata(self):
        '''Metadata supplied at create time is retrievable afterwards.'''
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt)
        expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
        self.assertThat(expected_metadata,
                        matchers.DictMatches(_get_fake_aggr_metadata()))

    def test_aggregate_create_delete_create_with_metadata(self):
        # test for bug 1052479
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt)
        expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
        self.assertThat(expected_metadata,
                        matchers.DictMatches(_get_fake_aggr_metadata()))
        db.aggregate_delete(ctxt, result['id'])
        # Re-creating after delete must not resurrect the old metadata.
        result = _create_aggregate(metadata={'availability_zone':
            'fake_avail_zone'})
        expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
        self.assertEqual(expected_metadata, {'availability_zone':
            'fake_avail_zone'})

    def test_aggregate_get(self):
        '''aggregate_get returns the hosts and metadetails of an aggregate.'''
        ctxt = context.get_admin_context()
        result = _create_aggregate_with_hosts(context=ctxt)
        expected = db.aggregate_get(ctxt, result['id'])
        self.assertEqual(_get_fake_aggr_hosts(), expected['hosts'])
        self.assertEqual(_get_fake_aggr_metadata(), expected['metadetails'])

    def test_aggregate_get_by_host(self):
        '''Only aggregates currently containing the host are returned.'''
        ctxt = context.get_admin_context()
        values2 = {'name': 'fake_aggregate2'}
        values3 = {'name': 'fake_aggregate3'}
        values4 = {'name': 'fake_aggregate4'}
        values5 = {'name': 'fake_aggregate5'}
        a1 = _create_aggregate_with_hosts(context=ctxt)
        a2 = _create_aggregate_with_hosts(context=ctxt, values=values2)
        # a3 has no hosts and should not be in the results.
        _create_aggregate(context=ctxt, values=values3)
        # a4 has no matching hosts.
        _create_aggregate_with_hosts(context=ctxt, values=values4,
                hosts=['foo4.openstack.org'])
        # a5 has no matching hosts after deleting the only matching host.
        a5 = _create_aggregate_with_hosts(context=ctxt, values=values5,
                hosts=['foo5.openstack.org', 'foo.openstack.org'])
        db.aggregate_host_delete(ctxt, a5['id'],
                                 'foo.openstack.org')
        r1 = db.aggregate_get_by_host(ctxt, 'foo.openstack.org')
        self.assertEqual([a1['id'], a2['id']], [x['id'] for x in r1])

    def test_aggregate_get_by_host_with_key(self):
        '''The key argument restricts results to aggregates with that key.'''
        ctxt = context.get_admin_context()
        values2 = {'name': 'fake_aggregate2'}
        values3 = {'name': 'fake_aggregate3'}
        values4 = {'name': 'fake_aggregate4'}
        a1 = _create_aggregate_with_hosts(context=ctxt,
                                          metadata={'goodkey': 'good'})
        _create_aggregate_with_hosts(context=ctxt, values=values2)
        _create_aggregate(context=ctxt, values=values3)
        _create_aggregate_with_hosts(context=ctxt, values=values4,
                hosts=['foo4.openstack.org'], metadata={'goodkey': 'bad'})
        # filter result by key
        r1 = db.aggregate_get_by_host(ctxt, 'foo.openstack.org', key='goodkey')
        self.assertEqual([a1['id']], [x['id'] for x in r1])

    def test_aggregate_metadata_get_by_host(self):
        '''Metadata is collected only from aggregates containing the host.'''
        ctxt = context.get_admin_context()
        values = {'name': 'fake_aggregate2'}
        values2 = {'name': 'fake_aggregate3'}
        _create_aggregate_with_hosts(context=ctxt)
        _create_aggregate_with_hosts(context=ctxt, values=values)
        _create_aggregate_with_hosts(context=ctxt, values=values2,
                hosts=['bar.openstack.org'], metadata={'badkey': 'bad'})
        r1 = db.aggregate_metadata_get_by_host(ctxt, 'foo.openstack.org')
        self.assertEqual(r1['fake_key1'], set(['fake_value1']))
        self.assertNotIn('badkey', r1)

    def test_aggregate_metadata_get_by_host_with_key(self):
        '''Values for one key are merged across a host's aggregates.'''
        ctxt = context.get_admin_context()
        values2 = {'name': 'fake_aggregate12'}
        values3 = {'name': 'fake_aggregate23'}
        a2_hosts = ['foo1.openstack.org', 'foo2.openstack.org']
        a2_metadata = {'good': 'value12', 'bad': 'badvalue12'}
        a3_hosts = ['foo2.openstack.org', 'foo3.openstack.org']
        a3_metadata = {'good': 'value23', 'bad': 'badvalue23'}
        _create_aggregate_with_hosts(context=ctxt)
        _create_aggregate_with_hosts(context=ctxt, values=values2,
                hosts=a2_hosts, metadata=a2_metadata)
        a3 = _create_aggregate_with_hosts(context=ctxt, values=values3,
                hosts=a3_hosts, metadata=a3_metadata)
        # foo2 belongs to both a2 and a3, so both 'good' values are returned.
        r1 = db.aggregate_metadata_get_by_host(ctxt, 'foo2.openstack.org',
                                               key='good')
        self.assertEqual(r1['good'], set(['value12', 'value23']))
        self.assertNotIn('fake_key1', r1)
        self.assertNotIn('bad', r1)
        # Delete metadata
        db.aggregate_metadata_delete(ctxt, a3['id'], 'good')
        r2 = db.aggregate_metadata_get_by_host(ctxt, 'foo3.openstack.org',
                                               key='good')
        self.assertNotIn('good', r2)

    def test_aggregate_get_by_host_not_found(self):
        '''An unknown host yields an empty result, not an error.'''
        ctxt = context.get_admin_context()
        _create_aggregate_with_hosts(context=ctxt)
        self.assertEqual([], db.aggregate_get_by_host(ctxt, 'unknown_host'))

    def test_aggregate_delete_raise_not_found(self):
        ctxt = context.get_admin_context()
        # this does not exist!
        aggregate_id = 1
        self.assertRaises(exception.AggregateNotFound,
                          db.aggregate_delete,
                          ctxt, aggregate_id)

    def test_aggregate_delete(self):
        '''Deleting an aggregate soft-deletes it (deleted == id).'''
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt, metadata=None)
        db.aggregate_delete(ctxt, result['id'])
        expected = db.aggregate_get_all(ctxt)
        self.assertEqual(0, len(expected))
        # The row is still visible with read_deleted='yes'.
        aggregate = db.aggregate_get(ctxt.elevated(read_deleted='yes'),
                result['id'])
        self.assertEqual(aggregate['deleted'], result['id'])

    def test_aggregate_update(self):
        '''aggregate_update can change the availability zone.'''
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt, metadata={'availability_zone':
            'fake_avail_zone'})
        self.assertEqual(result['availability_zone'], 'fake_avail_zone')
        new_values = _get_fake_aggr_values()
        new_values['availability_zone'] = 'different_avail_zone'
        updated = db.aggregate_update(ctxt, result['id'], new_values)
        self.assertNotEqual(result['availability_zone'],
                            updated['availability_zone'])

    def test_aggregate_update_with_metadata(self):
        '''Metadata passed to aggregate_update is stored with the aggregate.'''
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt, metadata=None)
        values = _get_fake_aggr_values()
        values['metadata'] = _get_fake_aggr_metadata()
        values['availability_zone'] = 'different_avail_zone'
        expected_metadata = copy.deepcopy(values['metadata'])
        # The availability zone is surfaced through the metadata as well.
        expected_metadata['availability_zone'] = values['availability_zone']
        db.aggregate_update(ctxt, result['id'], values)
        metadata = db.aggregate_metadata_get(ctxt, result['id'])
        updated = db.aggregate_get(ctxt, result['id'])
        self.assertThat(metadata, matchers.DictMatches(expected_metadata))
        self.assertNotEqual(result['availability_zone'],
                            updated['availability_zone'])

    def test_aggregate_update_with_existing_metadata(self):
        '''Updating overwrites existing metadata keys with new values.'''
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt)
        values = _get_fake_aggr_values()
        values['metadata'] = _get_fake_aggr_metadata()
        values['metadata']['fake_key1'] = 'foo'
        expected_metadata = copy.deepcopy(values['metadata'])
        db.aggregate_update(ctxt, result['id'], values)
        metadata = db.aggregate_metadata_get(ctxt, result['id'])
        self.assertThat(metadata, matchers.DictMatches(expected_metadata))

    def test_aggregate_update_zone_with_existing_metadata(self):
        '''Changing the zone preserves the aggregate's other metadata.'''
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt)
        new_zone = {'availability_zone': 'fake_avail_zone_2'}
        metadata = _get_fake_aggr_metadata()
        metadata.update(new_zone)
        db.aggregate_update(ctxt, result['id'], new_zone)
        expected = db.aggregate_metadata_get(ctxt, result['id'])
        self.assertThat(metadata, matchers.DictMatches(expected))

    def test_aggregate_update_raise_not_found(self):
        ctxt = context.get_admin_context()
        # this does not exist!
        aggregate_id = 1
        new_values = _get_fake_aggr_values()
        self.assertRaises(exception.AggregateNotFound,
                          db.aggregate_update, ctxt, aggregate_id, new_values)

    def test_aggregate_update_raise_name_exist(self):
        '''Renaming to an existing aggregate name raises.'''
        ctxt = context.get_admin_context()
        _create_aggregate(context=ctxt, values={'name': 'test1'},
                          metadata={'availability_zone': 'fake_avail_zone'})
        _create_aggregate(context=ctxt, values={'name': 'test2'},
                          metadata={'availability_zone': 'fake_avail_zone'})
        aggregate_id = 1
        new_values = {'name': 'test2'}
        self.assertRaises(exception.AggregateNameExists,
                          db.aggregate_update, ctxt, aggregate_id, new_values)

    def test_aggregate_get_all(self):
        '''aggregate_get_all returns every created aggregate.'''
        ctxt = context.get_admin_context()
        counter = 3
        for c in range(counter):
            _create_aggregate(context=ctxt,
                              values={'name': 'fake_aggregate_%d' % c},
                              metadata=None)
        results = db.aggregate_get_all(ctxt)
        self.assertEqual(len(results), counter)

    def test_aggregate_get_all_non_deleted(self):
        '''Deleted aggregates are excluded from aggregate_get_all.'''
        ctxt = context.get_admin_context()
        add_counter = 5
        remove_counter = 2
        aggregates = []
        for c in range(1, add_counter):
            values = {'name': 'fake_aggregate_%d' % c}
            aggregates.append(_create_aggregate(context=ctxt,
                                                values=values, metadata=None))
        for c in range(1, remove_counter):
            db.aggregate_delete(ctxt, aggregates[c - 1]['id'])
        results = db.aggregate_get_all(ctxt)
        self.assertEqual(len(results), add_counter - remove_counter)

    def test_aggregate_metadata_add(self):
        '''Metadata added after creation is retrievable.'''
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt, metadata=None)
        metadata = _get_fake_aggr_metadata()
        db.aggregate_metadata_add(ctxt, result['id'], metadata)
        expected = db.aggregate_metadata_get(ctxt, result['id'])
        self.assertThat(metadata, matchers.DictMatches(expected))

    def test_aggregate_metadata_add_empty_metadata(self):
        '''Adding an empty metadata dict is a no-op that does not raise.'''
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt, metadata=None)
        metadata = {}
        db.aggregate_metadata_add(ctxt, result['id'], metadata)
        expected = db.aggregate_metadata_get(ctxt, result['id'])
        self.assertThat(metadata, matchers.DictMatches(expected))

    def test_aggregate_metadata_add_and_update(self):
        '''aggregate_metadata_add both updates existing keys and adds new.'''
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt)
        metadata = _get_fake_aggr_metadata()
        key = list(metadata.keys())[0]
        new_metadata = {key: 'foo',
                        'fake_new_key': 'fake_new_value'}
        metadata.update(new_metadata)
        db.aggregate_metadata_add(ctxt, result['id'], new_metadata)
        expected = db.aggregate_metadata_get(ctxt, result['id'])
        self.assertThat(metadata, matchers.DictMatches(expected))

    def test_aggregate_metadata_add_retry(self):
        '''The duplicate-entry retry loop gives up after max_retries.'''
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt, metadata=None)

        def counted():
            # Stub query that always raises DBDuplicateEntry and counts calls.
            def get_query(context, id, session, read_deleted):
                get_query.counter += 1
                raise db_exc.DBDuplicateEntry
            get_query.counter = 0
            return get_query

        get_query = counted()
        self.stubs.Set(sqlalchemy_api,
                       '_aggregate_metadata_get_query', get_query)
        self.assertRaises(db_exc.DBDuplicateEntry, sqlalchemy_api.
                          aggregate_metadata_add, ctxt, result['id'], {},
                          max_retries=5)
        self.assertEqual(get_query.counter, 5)

    def test_aggregate_metadata_update(self):
        '''A key can be deleted and re-added with a new value.'''
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt)
        metadata = _get_fake_aggr_metadata()
        key = list(metadata.keys())[0]
        db.aggregate_metadata_delete(ctxt, result['id'], key)
        new_metadata = {key: 'foo'}
        db.aggregate_metadata_add(ctxt, result['id'], new_metadata)
        expected = db.aggregate_metadata_get(ctxt, result['id'])
        metadata[key] = 'foo'
        self.assertThat(metadata, matchers.DictMatches(expected))

    def test_aggregate_metadata_delete(self):
        '''Deleting one metadata key leaves the others intact.'''
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt, metadata=None)
        metadata = _get_fake_aggr_metadata()
        db.aggregate_metadata_add(ctxt, result['id'], metadata)
        db.aggregate_metadata_delete(ctxt, result['id'],
                                     list(metadata.keys())[0])
        expected = db.aggregate_metadata_get(ctxt, result['id'])
        del metadata[list(metadata.keys())[0]]
        self.assertThat(metadata, matchers.DictMatches(expected))

    def test_aggregate_remove_availability_zone(self):
        '''Deleting the availability_zone key clears the aggregate's zone.'''
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt, metadata={'availability_zone':
            'fake_avail_zone'})
        db.aggregate_metadata_delete(ctxt, result['id'], 'availability_zone')
        expected = db.aggregate_metadata_get(ctxt, result['id'])
        aggregate = db.aggregate_get(ctxt, result['id'])
        self.assertIsNone(aggregate['availability_zone'])
        self.assertThat({}, matchers.DictMatches(expected))

    def test_aggregate_metadata_delete_raise_not_found(self):
        '''Deleting a nonexistent key raises AggregateMetadataNotFound.'''
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt)
        self.assertRaises(exception.AggregateMetadataNotFound,
                          db.aggregate_metadata_delete,
                          ctxt, result['id'], 'foo_key')

    def test_aggregate_host_add(self):
        '''Hosts supplied at create time are retrievable afterwards.'''
        ctxt = context.get_admin_context()
        result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
        expected = db.aggregate_host_get_all(ctxt, result['id'])
        self.assertEqual(_get_fake_aggr_hosts(), expected)

    def test_aggregate_host_re_add(self):
        '''A host removed from an aggregate can be added back again.'''
        ctxt = context.get_admin_context()
        result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
        host = _get_fake_aggr_hosts()[0]
        db.aggregate_host_delete(ctxt, result['id'], host)
        db.aggregate_host_add(ctxt, result['id'], host)
        expected = db.aggregate_host_get_all(ctxt, result['id'])
        self.assertEqual(len(expected), 1)

    def test_aggregate_host_add_duplicate_works(self):
        '''The same host may belong to two different aggregates.'''
        ctxt = context.get_admin_context()
        r1 = _create_aggregate_with_hosts(context=ctxt, metadata=None)
        r2 = _create_aggregate_with_hosts(ctxt,
                          values={'name': 'fake_aggregate2'},
                          metadata={'availability_zone': 'fake_avail_zone2'})
        h1 = db.aggregate_host_get_all(ctxt, r1['id'])
        h2 = db.aggregate_host_get_all(ctxt, r2['id'])
        self.assertEqual(h1, h2)

    def test_aggregate_host_add_duplicate_raise_exist_exc(self):
        '''Adding a host twice to one aggregate raises AggregateHostExists.'''
        ctxt = context.get_admin_context()
        result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
        self.assertRaises(exception.AggregateHostExists,
                          db.aggregate_host_add,
                          ctxt, result['id'], _get_fake_aggr_hosts()[0])

    def test_aggregate_host_add_raise_not_found(self):
        ctxt = context.get_admin_context()
        # this does not exist!
        aggregate_id = 1
        host = _get_fake_aggr_hosts()[0]
        self.assertRaises(exception.AggregateNotFound,
                          db.aggregate_host_add,
                          ctxt, aggregate_id, host)

    def test_aggregate_host_delete(self):
        '''A host removed from an aggregate no longer appears in its list.'''
        ctxt = context.get_admin_context()
        result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
        db.aggregate_host_delete(ctxt, result['id'],
                                 _get_fake_aggr_hosts()[0])
        expected = db.aggregate_host_get_all(ctxt, result['id'])
        self.assertEqual(0, len(expected))

    def test_aggregate_host_delete_raise_not_found(self):
        '''Removing a host not in the aggregate raises AggregateHostNotFound.'''
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt)
        self.assertRaises(exception.AggregateHostNotFound,
                          db.aggregate_host_delete,
                          ctxt, result['id'], _get_fake_aggr_hosts()[0])


class SqlAlchemyDbApiNoDbTestCase(test.NoDBTestCase):
    """No-DB test class for simple test cases that do not require a backend."""

    def test_manual_join_columns_immutable_list(self):
        # Tests that _manual_join_columns doesn't modify the list passed in.
        columns_to_join = ['system_metadata', 'test']
        manual_joins, columns_to_join2 = (
            sqlalchemy_api._manual_join_columns(columns_to_join))
        self.assertEqual(['system_metadata'], manual_joins)
        self.assertEqual(['test'], columns_to_join2)
        # The input list must be unchanged.
        self.assertEqual(['system_metadata', 'test'], columns_to_join)

    def test_convert_objects_related_datetimes(self):
        '''Only the named keys are converted; others are left untouched.'''
        t1 = timeutils.utcnow()
        t2 = t1 + datetime.timedelta(seconds=10)
        t3 = t2 + datetime.timedelta(hours=1)
        t2_utc = t2.replace(tzinfo=iso8601.iso8601.Utc())
        t3_utc = t3.replace(tzinfo=iso8601.iso8601.Utc())
        datetime_keys = ('created_at', 'deleted_at')

        test1 = {'created_at': t1, 'deleted_at': t2, 'updated_at': t3}
        expected_dict = {'created_at': t1, 'deleted_at': t2, 'updated_at': t3}
        sqlalchemy_api.convert_objects_related_datetimes(test1, *datetime_keys)
        self.assertEqual(test1, expected_dict)

        # tz-aware values under the named keys are converted to naive.
        test2 = {'created_at': t1, 'deleted_at': t2_utc, 'updated_at': t3}
        expected_dict = {'created_at': t1, 'deleted_at': t2, 'updated_at': t3}
        sqlalchemy_api.convert_objects_related_datetimes(test2,
                                                         *datetime_keys)
        self.assertEqual(test2, expected_dict)

        # 'updated_at' is not in datetime_keys, so it keeps its tzinfo.
        test3 = {'deleted_at': t2_utc, 'updated_at': t3_utc}
        expected_dict = {'deleted_at': t2, 'updated_at': t3_utc}
        sqlalchemy_api.convert_objects_related_datetimes(test3, *datetime_keys)
        self.assertEqual(test3, expected_dict)

    def test_convert_objects_related_datetimes_with_strings(self):
        '''String timestamps are parsed and normalized to naive datetimes.'''
        t1 = '2015-05-28T17:15:53.000000'
        t2 = '2012-04-21T18:25:43-05:00'
        t3 = '2012-04-23T18:25:43.511Z'

        datetime_keys = ('created_at', 'deleted_at', 'updated_at')
        test1 = {'created_at': t1, 'deleted_at': t2, 'updated_at': t3}
        expected_dict = {
        'created_at': timeutils.parse_strtime(t1).replace(tzinfo=None),
        'deleted_at': timeutils.parse_isotime(t2).replace(tzinfo=None),
        'updated_at': timeutils.parse_isotime(t3).replace(tzinfo=None)}

        # With no explicit keys, the default key set is used.
        sqlalchemy_api.convert_objects_related_datetimes(test1)
        self.assertEqual(test1, expected_dict)

        sqlalchemy_api.convert_objects_related_datetimes(test1, *datetime_keys)
        self.assertEqual(test1, expected_dict)

    def test_get_regexp_op_for_database_sqlite(self):
        op = sqlalchemy_api._get_regexp_op_for_connection('sqlite:///')
        self.assertEqual('REGEXP', op)

    def test_get_regexp_op_for_database_mysql(self):
        op = sqlalchemy_api._get_regexp_op_for_connection(
                'mysql+pymysql://root@localhost')
        self.assertEqual('REGEXP', op)

    def test_get_regexp_op_for_database_postgresql(self):
        op = sqlalchemy_api._get_regexp_op_for_connection(
                'postgresql://localhost')
        self.assertEqual('~', op)

    def test_get_regexp_op_for_database_unknown(self):
        # Unrecognized backends fall back to LIKE.
        op = sqlalchemy_api._get_regexp_op_for_connection('notdb:///')
        self.assertEqual('LIKE', op)

    @mock.patch.object(sqlalchemy_api.main_context_manager._factory,
                       'get_legacy_facade')
    def test_get_engine(self, mock_create_facade):
        '''get_engine uses the main facade with use_slave=False.'''
        mock_facade = mock.MagicMock()
        mock_create_facade.return_value = mock_facade

        sqlalchemy_api.get_engine()
        mock_create_facade.assert_called_once_with()
        mock_facade.get_engine.assert_called_once_with(use_slave=False)

    @mock.patch.object(sqlalchemy_api.api_context_manager._factory,
                       'get_legacy_facade')
    def test_get_api_engine(self, mock_create_facade):
        '''get_api_engine uses the API facade (no slave support).'''
        mock_facade = mock.MagicMock()
        mock_create_facade.return_value = mock_facade

        sqlalchemy_api.get_api_engine()
        mock_create_facade.assert_called_once_with()
        mock_facade.get_engine.assert_called_once_with()

    @mock.patch.object(sqlalchemy_api.main_context_manager._factory,
                       'get_legacy_facade')
    def test_get_session(self, mock_create_facade):
        '''get_session uses the main facade with use_slave=False.'''
        mock_facade = mock.MagicMock()
        mock_create_facade.return_value = mock_facade

        sqlalchemy_api.get_session()
        mock_create_facade.assert_called_once_with()
        mock_facade.get_session.assert_called_once_with(use_slave=False)

    @mock.patch.object(sqlalchemy_api.api_context_manager._factory,
                       'get_legacy_facade')
    def test_get_api_session(self, mock_create_facade):
        '''get_api_session uses the API facade (no slave support).'''
        mock_facade = mock.MagicMock()
        mock_create_facade.return_value = mock_facade

        sqlalchemy_api.get_api_session()
        mock_create_facade.assert_called_once_with()
        mock_facade.get_session.assert_called_once_with()

    @mock.patch.object(sqlalchemy_api, '_instance_get_by_uuid')
    @mock.patch.object(sqlalchemy_api, '_instances_fill_metadata')
    @mock.patch('oslo_db.sqlalchemy.utils.paginate_query')
    def test_instance_get_all_by_filters_paginated_allows_deleted_marker(
            self, mock_paginate, mock_fill, mock_get):
        '''The pagination marker is looked up with read_deleted='yes'.'''
        ctxt = mock.MagicMock()
        ctxt.elevated.return_value = mock.sentinel.elevated
        sqlalchemy_api.instance_get_all_by_filters_sort(ctxt, {}, marker='foo')
        mock_get.assert_called_once_with(mock.sentinel.elevated, 'foo',
                                         session=mock.ANY)
        ctxt.elevated.assert_called_once_with(read_deleted='yes')


class SqlAlchemyDbApiTestCase(DbTestCase):
    def test_instance_get_all_by_host(self):
        ctxt = context.get_admin_context()

        self.create_instance_with_args()
        self.create_instance_with_args()
        self.create_instance_with_args(host='host2')
        result = sqlalchemy_api._instance_get_all_uuids_by_host(ctxt, 'host1')
        self.assertEqual(2, len(result))

    def test_instance_get_all_uuids_by_host(self):
        ctxt = context.get_admin_context()
        self.create_instance_with_args()
        self.create_instance_with_args()
        self.create_instance_with_args(host='host2')
        result = sqlalchemy_api._instance_get_all_uuids_by_host(ctxt, 'host1')
        self.assertEqual(2, len(result))
        # Returned values are unicode strings, not model objects.
        self.assertEqual(six.text_type, type(result[0]))

    def test_instance_get_active_by_window_joined(self):
        '''Window filtering and columns_to_join behave as requested.'''
        now = datetime.datetime(2013, 10, 10, 17, 16, 37, 156701)
        start_time = now - datetime.timedelta(minutes=10)
        now1 = now + datetime.timedelta(minutes=1)
        now2 = now + datetime.timedelta(minutes=2)
        now3 = now + datetime.timedelta(minutes=3)
        ctxt = context.get_admin_context()
        # used for testing columns_to_join
        network_info = jsonutils.dumps({'ckey': 'cvalue'})
        sample_data = {
            'metadata': {'mkey1': 'mval1', 'mkey2': 'mval2'},
            'system_metadata': {'smkey1': 'smval1', 'smkey2': 'smval2'},
            'info_cache': {'network_info': network_info},
        }
        self.create_instance_with_args(launched_at=now, **sample_data)
        self.create_instance_with_args(launched_at=now1, terminated_at=now2,
                                       **sample_data)
        self.create_instance_with_args(launched_at=now2, terminated_at=now3,
                                       **sample_data)
        self.create_instance_with_args(launched_at=now3, terminated_at=None,
                                       **sample_data)

        result = sqlalchemy_api.instance_get_active_by_window_joined(
            ctxt, begin=now)
        self.assertEqual(4, len(result))
        # verify that all default columns are joined
        meta = utils.metadata_to_dict(result[0]['metadata'])
        self.assertEqual(sample_data['metadata'], meta)
        sys_meta = utils.metadata_to_dict(result[0]['system_metadata'])
        self.assertEqual(sample_data['system_metadata'], sys_meta)
        self.assertIn('info_cache', result[0])

        result = sqlalchemy_api.instance_get_active_by_window_joined(
            ctxt, begin=now3, columns_to_join=['info_cache'])
        self.assertEqual(2, len(result))
        # verify that only info_cache is loaded
        meta = utils.metadata_to_dict(result[0]['metadata'])
        self.assertEqual({}, meta)
        self.assertIn('info_cache', result[0])

        result = sqlalchemy_api.instance_get_active_by_window_joined(
            ctxt, begin=start_time, end=now)
        self.assertEqual(0, len(result))

        result = sqlalchemy_api.instance_get_active_by_window_joined(
            ctxt, begin=start_time, end=now2,
            columns_to_join=['system_metadata'])
        self.assertEqual(2, len(result))
        # verify that only system_metadata is loaded
        meta = utils.metadata_to_dict(result[0]['metadata'])
        self.assertEqual({}, meta)
        sys_meta = utils.metadata_to_dict(result[0]['system_metadata'])
        self.assertEqual(sample_data['system_metadata'], sys_meta)
        self.assertNotIn('info_cache', result[0])

        result = sqlalchemy_api.instance_get_active_by_window_joined(
            ctxt, begin=now2, end=now3,
            columns_to_join=['metadata', 'info_cache'])
        self.assertEqual(2, len(result))
        # verify that only metadata and info_cache are loaded
        meta = utils.metadata_to_dict(result[0]['metadata'])
        self.assertEqual(sample_data['metadata'], meta)
        sys_meta = utils.metadata_to_dict(result[0]['system_metadata'])
        self.assertEqual({}, sys_meta)
        self.assertIn('info_cache', result[0])
        self.assertEqual(network_info,
                         result[0]['info_cache']['network_info'])

    @mock.patch('nova.db.sqlalchemy.api.instance_get_all_by_filters_sort')
    def test_instance_get_all_by_filters_calls_sort(self,
                                                    mock_get_all_filters_sort):
        '''Verifies instance_get_all_by_filters calls the sort function.'''
        # sort parameters should be wrapped in a list, all other parameters
        # should be passed through
        ctxt = context.get_admin_context()
        sqlalchemy_api.instance_get_all_by_filters(ctxt, {'foo': 'bar'},
            'sort_key', 'sort_dir', limit=100, marker='uuid',
            columns_to_join='columns', use_slave=True)
        mock_get_all_filters_sort.assert_called_once_with(ctxt, {'foo': 'bar'},
            limit=100, marker='uuid', columns_to_join='columns',
            use_slave=True, sort_keys=['sort_key'], sort_dirs=['sort_dir'])

    def test_instance_get_all_by_filters_sort_key_invalid(self):
        '''InvalidSortKey raised if an invalid key is given.'''
        for keys in [['foo'], ['uuid', 'foo']]:
            self.assertRaises(exception.InvalidSortKey,
                              db.instance_get_all_by_filters_sort,
                              self.context,
                              filters={},
                              sort_keys=keys)


class ProcessSortParamTestCase(test.TestCase):
    def test_process_sort_params_defaults(self):
        '''Verifies default sort parameters.'''
        # Both empty lists and None fall back to the default keys/dirs.
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params([], [])
        self.assertEqual(['created_at', 'id'], sort_keys)
        self.assertEqual(['asc', 'asc'], sort_dirs)

        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(None, None)
        self.assertEqual(['created_at', 'id'], sort_keys)
        self.assertEqual(['asc', 'asc'], sort_dirs)

    def test_process_sort_params_override_default_keys(self):
        '''Verifies that the default keys can be overridden.'''
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            [], [], default_keys=['key1', 'key2', 'key3'])
        self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
        self.assertEqual(['asc', 'asc', 'asc'], sort_dirs)

    def test_process_sort_params_override_default_dir(self):
        '''Verifies that the default direction can be overridden.'''
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            [], [], default_dir='dir1')
        self.assertEqual(['created_at', 'id'], sort_keys)
        self.assertEqual(['dir1', 'dir1'], sort_dirs)

    def test_process_sort_params_override_default_key_and_dir(self):
        '''Verifies that the default key and dir can be overridden.'''
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            [], [], default_keys=['key1', 'key2', 'key3'],
            default_dir='dir1')
        self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
        self.assertEqual(['dir1', 'dir1', 'dir1'], sort_dirs)

        # Empty default_keys yields empty results regardless of default_dir.
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            [], [], default_keys=[], default_dir='dir1')
        self.assertEqual([], sort_keys)
        self.assertEqual([], sort_dirs)

    def test_process_sort_params_non_default(self):
        '''Verifies that non-default keys are added correctly.'''
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            ['key1', 'key2'], ['asc', 'desc'])
        # Default keys are appended after the caller-supplied ones.
        self.assertEqual(['key1', 'key2', 'created_at', 'id'], sort_keys)
        # First sort_dir in list is used when adding the default keys
        self.assertEqual(['asc', 'desc', 'asc', 'asc'], sort_dirs)

    def test_process_sort_params_default(self):
        '''Verifies that default keys are added correctly.'''
        # 'id' supplied by the caller is not duplicated when defaults are
        # appended; only the missing 'created_at' is added.
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            ['id', 'key2'], ['asc', 'desc'])
        self.assertEqual(['id', 'key2', 'created_at'], sort_keys)
        self.assertEqual(['asc', 'desc', 'asc'], sort_dirs)

        # Include default key value, rely on default direction
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            ['id', 'key2'], [])
        self.assertEqual(['id', 'key2', 'created_at'], sort_keys)
        self.assertEqual(['asc', 'asc', 'asc'], sort_dirs)

    def test_process_sort_params_default_dir(self):
        '''Verifies that the default dir is applied to all keys.'''
        # Direction is set, ignore default dir
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            ['id', 'key2'], ['desc'], default_dir='dir')
        self.assertEqual(['id', 'key2', 'created_at'], sort_keys)
        self.assertEqual(['desc', 'desc', 'desc'], sort_dirs)

        # But should be used if no direction is set
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            ['id', 'key2'], [], default_dir='dir')
        self.assertEqual(['id', 'key2', 'created_at'], sort_keys)
        self.assertEqual(['dir', 'dir', 'dir'], sort_dirs)

    def test_process_sort_params_unequal_length(self):
        '''Verifies that a sort direction list is applied correctly.'''
        # A single direction is propagated to every remaining key.
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            ['id', 'key2', 'key3'], ['desc'])
        self.assertEqual(['id', 'key2', 'key3', 'created_at'], sort_keys)
        self.assertEqual(['desc', 'desc', 'desc', 'desc'], sort_dirs)

        # Default direction is the first key in the list
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            ['id', 'key2', 'key3'], ['desc', 'asc'])
        self.assertEqual(['id', 'key2', 'key3', 'created_at'], sort_keys)
        self.assertEqual(['desc', 'asc', 'desc', 'desc'], sort_dirs)

        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            ['id', 'key2', 'key3'], ['desc', 'asc', 'asc'])
        self.assertEqual(['id', 'key2', 'key3', 'created_at'], sort_keys)
        self.assertEqual(['desc', 'asc', 'asc', 'desc'], sort_dirs)

    def test_process_sort_params_extra_dirs_lengths(self):
        '''InvalidInput raised if more directions are given.'''
        self.assertRaises(exception.InvalidInput,
                          sqlalchemy_api.process_sort_params,
                          ['key1', 'key2'],
                          ['asc', 'desc', 'desc'])

    def test_process_sort_params_invalid_sort_dir(self):
        '''InvalidInput raised if invalid directions are given.'''
        for dirs in [['foo'], ['asc', 'foo'], ['asc', 'desc', 'foo']]:
            self.assertRaises(exception.InvalidInput,
                              sqlalchemy_api.process_sort_params,
                              ['key'], dirs)


class MigrationTestCase(test.TestCase):
    """Tests for db.migration_* methods.

    setUp seeds a fixed population of migrations so each test can assert
    exact counts: two default 'migrating' rows between host1/host2, one row
    each in 'reverted'/'confirmed'/'error'/'accepted' states, one reversed
    host2->host1 row, and two extra rows (host2->host3, host3->host4).
    """

    def setUp(self):
        super(MigrationTestCase, self).setUp()
        self.ctxt = context.get_admin_context()

        self._create()
        self._create()
        self._create(status='reverted')
        self._create(status='confirmed')
        self._create(status='error')
        self._create(status='accepted')
        self._create(source_compute='host2', source_node='b',
                     dest_compute='host1', dest_node='a')
        self._create(source_compute='host2', dest_compute='host3')
        self._create(source_compute='host3', dest_compute='host4')

    def _create(self, status='migrating', source_compute='host1',
                source_node='a', dest_compute='host2', dest_node='b',
                system_metadata=None, migration_type=None):
        # Helper: create a backing instance (on the source host) and a
        # migration row pointing at it, so joins against 'instance' resolve.
        values = {'host': source_compute}
        instance = db.instance_create(self.ctxt, values)
        if system_metadata:
            db.instance_system_metadata_update(self.ctxt, instance['uuid'],
                                               system_metadata, False)

        values = {'status': status, 'source_compute': source_compute,
                  'source_node': source_node, 'dest_compute': dest_compute,
                  'dest_node': dest_node, 'instance_uuid': instance['uuid'],
                  'migration_type': migration_type}
        db.migration_create(self.ctxt, values)

    def _assert_in_progress(self, migrations):
        # "In progress" means none of the four terminal states below.
        for migration in migrations:
            self.assertNotEqual('confirmed', migration['status'])
            self.assertNotEqual('reverted', migration['status'])
            self.assertNotEqual('error', migration['status'])
            self.assertNotEqual('accepted', migration['status'])

    def test_migration_get_in_progress_joins(self):
        # Verify that the instance (and its system_metadata) is joined in.
        self._create(source_compute='foo', system_metadata={'foo': 'bar'})
        # (continues test_migration_get_in_progress_joins from above)
        migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
                                                                   'foo', 'a')
        system_metadata = migrations[0]['instance']['system_metadata'][0]
        self.assertEqual(system_metadata['key'], 'foo')
        self.assertEqual(system_metadata['value'], 'bar')

    def test_in_progress_host1_nodea(self):
        migrations = db.migration_get_in_progress_by_host_and_node(
            self.ctxt, 'host1', 'a')
        # 2 as source + 1 as dest
        self.assertEqual(3, len(migrations))
        self._assert_in_progress(migrations)

    def test_in_progress_host1_nodeb(self):
        migrations = db.migration_get_in_progress_by_host_and_node(
            self.ctxt, 'host1', 'b')
        # some migrations are to/from host1, but none with a node 'b'
        self.assertEqual(0, len(migrations))

    def test_in_progress_host2_nodeb(self):
        migrations = db.migration_get_in_progress_by_host_and_node(
            self.ctxt, 'host2', 'b')
        # 2 as dest, 1 as source
        self.assertEqual(3, len(migrations))
        self._assert_in_progress(migrations)

    def test_instance_join(self):
        # Each migration row carries its joined instance record.
        migrations = db.migration_get_in_progress_by_host_and_node(
            self.ctxt, 'host2', 'b')
        for migration in migrations:
            instance = migration['instance']
            self.assertEqual(migration['instance_uuid'], instance['uuid'])

    def test_get_migrations_by_filters(self):
        filters = {"status": "migrating", "host": "host3",
                   "migration_type": None, "hidden": False}
        migrations = db.migration_get_all_by_filters(self.ctxt, filters)
        self.assertEqual(2, len(migrations))
        for migration in migrations:
            self.assertEqual(filters["status"], migration['status'])
            # 'host' matches either side of the migration.
            hosts = [migration['source_compute'], migration['dest_compute']]
            self.assertIn(filters["host"], hosts)

    def test_get_migrations_by_filters_with_type(self):
        self._create(status="special", source_compute="host9",
                     migration_type="evacuation")
        self._create(status="special", source_compute="host9",
                     migration_type="live-migration")
        filters = {"status": "special", "host": "host9",
                   "migration_type": "evacuation", "hidden": False}
        migrations = db.migration_get_all_by_filters(self.ctxt, filters)
        # Only the 'evacuation' row matches; 'live-migration' is excluded.
        self.assertEqual(1, len(migrations))

    def test_get_migrations_by_filters_source_compute(self):
        filters = {'source_compute': 'host2'}
        migrations = db.migration_get_all_by_filters(self.ctxt, filters)
        self.assertEqual(2, len(migrations))
        sources = [x['source_compute'] for x in migrations]
        self.assertEqual(['host2', 'host2'], sources)
        dests = [x['dest_compute'] for x in migrations]
        self.assertEqual(['host1', 'host3'], dests)

    def test_migration_get_unconfirmed_by_dest_compute(self):
        # Ensure no migrations are returned.
        results = db.migration_get_unconfirmed_by_dest_compute(
            self.ctxt, 10, 'fake_host')
        self.assertEqual(0, len(results))

        # Ensure no migrations are returned.
        results = db.migration_get_unconfirmed_by_dest_compute(
            self.ctxt, 10, 'fake_host2')
        self.assertEqual(0, len(results))

        updated_at = datetime.datetime(2000, 1, 1, 12, 0, 0)
        values = {"status": "finished", "updated_at": updated_at,
                  "dest_compute": "fake_host2"}
        migration = db.migration_create(self.ctxt, values)

        # Ensure different host is not returned
        results = db.migration_get_unconfirmed_by_dest_compute(
            self.ctxt, 10, 'fake_host')
        self.assertEqual(0, len(results))

        # Ensure one migration older than 10 seconds is returned.
        results = db.migration_get_unconfirmed_by_dest_compute(
            self.ctxt, 10, 'fake_host2')
        self.assertEqual(1, len(results))
        db.migration_update(self.ctxt, migration['id'],
                            {"status": "CONFIRMED"})

        # Ensure the new migration is not returned.
        updated_at = timeutils.utcnow()
        values = {"status": "finished", "updated_at": updated_at,
                  "dest_compute": "fake_host2"}
        migration = db.migration_create(self.ctxt, values)
        results = db.migration_get_unconfirmed_by_dest_compute(
            self.ctxt, 10, "fake_host2")
        self.assertEqual(0, len(results))
        db.migration_update(self.ctxt, migration['id'],
                            {"status": "CONFIRMED"})

    def test_migration_update_not_found(self):
        self.assertRaises(exception.MigrationNotFound,
                          db.migration_update, self.ctxt, 42, {})


class ModelsObjectComparatorMixin(object):
    """Assertion helpers comparing DB model objects as plain dicts."""

    def _dict_from_object(self, obj, ignored_keys):
        # Flatten a dict or model object to a dict, dropping ignored_keys.
        if ignored_keys is None:
            ignored_keys = []
        if isinstance(obj, dict):
            obj_items = obj.items()
        else:
            # NOTE(review): iteritems() is Python 2-only; presumably the
            # model base class provides it here. On Python 3 this branch
            # would need obj.items() — TODO confirm before porting.
            obj_items = obj.iteritems()
        return {k: v for k, v in obj_items
                if k not in ignored_keys}

    def _assertEqualObjects(self, obj1, obj2, ignored_keys=None):
        obj1 = self._dict_from_object(obj1, ignored_keys)
        obj2 = self._dict_from_object(obj2, ignored_keys)

        # Length check first so a key mismatch produces a readable message.
        self.assertEqual(len(obj1),
                         len(obj2),
                         "Keys mismatch: %s" %
                         str(set(obj1.keys()) ^ set(obj2.keys())))
        for key, value in obj1.items():
            self.assertEqual(value, obj2[key])

    def _assertEqualListsOfObjects(self, objs1, objs2, ignored_keys=None):
        # Order-insensitive comparison: convert then sort by all values.
        obj_to_dict = lambda o: self._dict_from_object(o, ignored_keys)
        sort_key = lambda d: [d[k] for k in sorted(d)]
        conv_and_sort = lambda obj: sorted(map(obj_to_dict, obj),
                                           key=sort_key)

        self.assertEqual(conv_and_sort(objs1), conv_and_sort(objs2))

    def _assertEqualOrderedListOfObjects(self, objs1, objs2,
                                         ignored_keys=None):
        # Order-sensitive variant of the list comparison above.
        obj_to_dict = lambda o: self._dict_from_object(o, ignored_keys)
        conv = lambda objs: [obj_to_dict(obj) for obj in objs]

        self.assertEqual(conv(objs1), conv(objs2))

    def _assertEqualListsOfPrimitivesAsSets(self, primitives1, primitives2):
        # Set-like equality for primitives (tolerates duplicates only if
        # mirrored on both sides, since lengths must match).
        self.assertEqual(len(primitives1), len(primitives2))
        for primitive in primitives1:
            self.assertIn(primitive, primitives2)

        for primitive in primitives2:
            self.assertIn(primitive, primitives1)


class InstanceSystemMetadataTestCase(test.TestCase):
    """Tests for
db.api.instance_system_metadata_* methods."""

    def setUp(self):
        super(InstanceSystemMetadataTestCase, self).setUp()
        # One instance with a single seed system_metadata pair.
        values = {'host': 'h1', 'project_id': 'p1',
                  'system_metadata': {'key': 'value'}}
        self.ctxt = context.get_admin_context()
        self.instance = db.instance_create(self.ctxt, values)

    def test_instance_system_metadata_get(self):
        metadata = db.instance_system_metadata_get(self.ctxt,
                                                   self.instance['uuid'])
        self.assertEqual(metadata, {'key': 'value'})

    def test_instance_system_metadata_update_new_pair(self):
        # delete=False: existing pairs are preserved, new pair is added.
        db.instance_system_metadata_update(
            self.ctxt, self.instance['uuid'],
            {'new_key': 'new_value'}, False)
        metadata = db.instance_system_metadata_get(self.ctxt,
                                                   self.instance['uuid'])
        self.assertEqual(metadata, {'key': 'value', 'new_key': 'new_value'})

    def test_instance_system_metadata_update_existent_pair(self):
        db.instance_system_metadata_update(
            self.ctxt, self.instance['uuid'],
            {'key': 'new_value'}, True)
        metadata = db.instance_system_metadata_get(self.ctxt,
                                                   self.instance['uuid'])
        self.assertEqual(metadata, {'key': 'new_value'})

    def test_instance_system_metadata_update_delete_true(self):
        # delete=True: pairs not present in the update dict are removed.
        db.instance_system_metadata_update(
            self.ctxt, self.instance['uuid'],
            {'new_key': 'new_value'}, True)
        metadata = db.instance_system_metadata_get(self.ctxt,
                                                   self.instance['uuid'])
        self.assertEqual(metadata, {'new_key': 'new_value'})

    @test.testtools.skip("bug 1189462")
    def test_instance_system_metadata_update_nonexistent(self):
        self.assertRaises(exception.InstanceNotFound,
                          db.instance_system_metadata_update,
                          self.ctxt, 'nonexistent-uuid',
                          {'key': 'value'}, True)


class ReservationTestCase(test.TestCase, ModelsObjectComparatorMixin):
    """Tests for db.api.reservation_* methods."""

    def setUp(self):
        super(ReservationTestCase, self).setUp()
        self.ctxt = context.get_admin_context()

        # _quota_reserve seeds reservations/usages for project1/user1;
        # presumably defined earlier in this module — the expected
        # reserved/in_use numbers below come from that fixture.
        self.reservations = _quota_reserve(self.ctxt, 'project1', 'user1')
        usage = db.quota_usage_get(self.ctxt, 'project1', 'resource1',
                                   'user1')

        self.values = {'uuid': 'sample-uuid',
                       'project_id': 'project1',
                       'user_id': 'user1',
                       'resource': 'resource1',
                       'delta': 42,
                       'expire': timeutils.utcnow() +
                           datetime.timedelta(days=1),
                       'usage': {'id': usage.id}}

    def test_reservation_commit(self):
        # Before commit: reservations held (reserved > 0).
        expected = {'project_id': 'project1', 'user_id': 'user1',
                    'resource0': {'reserved': 0, 'in_use': 0},
                    'resource1': {'reserved': 1, 'in_use': 1},
                    'fixed_ips': {'reserved': 2, 'in_use': 2}}
        self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
                            self.ctxt, 'project1', 'user1'))
        _reservation_get(self.ctxt, self.reservations[0])
        db.reservation_commit(self.ctxt, self.reservations, 'project1',
                              'user1')
        # Committed reservations are deleted...
        self.assertRaises(exception.ReservationNotFound,
                          _reservation_get, self.ctxt, self.reservations[0])
        # ...and their reserved amounts are folded into in_use.
        expected = {'project_id': 'project1', 'user_id': 'user1',
                    'resource0': {'reserved': 0, 'in_use': 0},
                    'resource1': {'reserved': 0, 'in_use': 2},
                    'fixed_ips': {'reserved': 0, 'in_use': 4}}
        self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
                            self.ctxt, 'project1', 'user1'))

    def test_reservation_rollback(self):
        # Before rollback: reservations held (reserved > 0).
        expected = {'project_id': 'project1', 'user_id': 'user1',
                    'resource0': {'reserved': 0, 'in_use': 0},
                    'resource1': {'reserved': 1, 'in_use': 1},
                    'fixed_ips': {'reserved': 2, 'in_use': 2}}
        self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
                            self.ctxt, 'project1', 'user1'))
        _reservation_get(self.ctxt, self.reservations[0])
        db.reservation_rollback(self.ctxt, self.reservations, 'project1',
                                'user1')
        # Rolled-back reservations are deleted...
        self.assertRaises(exception.ReservationNotFound,
                          _reservation_get, self.ctxt, self.reservations[0])
        # ...and the reserved amounts are released without touching in_use.
        expected = {'project_id': 'project1', 'user_id': 'user1',
                    'resource0': {'reserved': 0, 'in_use': 0},
                    'resource1': {'reserved': 0, 'in_use': 1},
                    'fixed_ips': {'reserved': 0, 'in_use': 2}}
        self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
                            self.ctxt, 'project1', 'user1'))

    def test_reservation_expire(self):
        # Expiring behaves like a rollback for stale reservations.
        db.reservation_expire(self.ctxt)

        expected = {'project_id': 'project1', 'user_id': 'user1',
                    'resource0': {'reserved': 0, 'in_use': 0},
                    'resource1': {'reserved': 0, 'in_use': 1},
                    'fixed_ips': {'reserved': 0, 'in_use': 2}}
        self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
                            self.ctxt, 'project1', 'user1'))


class SecurityGroupRuleTestCase(test.TestCase, ModelsObjectComparatorMixin):
    """Tests for db.api.security_group_rule_* methods."""

    def setUp(self):
        super(SecurityGroupRuleTestCase, self).setUp()
        self.ctxt = context.get_admin_context()

    def _get_base_values(self):
        # Default values for a security group.
        return {
            'name': 'fake_sec_group',
            'description': 'fake_sec_group_descr',
            'user_id': 'fake',
            'project_id': 'fake',
            'instances': []
            }

    def _get_base_rule_values(self):
        # Default values for a security group rule.
        return {
            'protocol': "tcp",
            'from_port': 80,
            'to_port': 8080,
            'cidr': None,
            'deleted': 0,
            'deleted_at': None,
            'grantee_group': None,
            'updated_at': None
            }

    def _create_security_group(self, values):
        v = self._get_base_values()
        v.update(values)
        return db.security_group_create(self.ctxt, v)

    def _create_security_group_rule(self, values):
        v = self._get_base_rule_values()
        v.update(values)
        return db.security_group_rule_create(self.ctxt, v)

    def test_security_group_rule_create(self):
        security_group_rule = self._create_security_group_rule({})
        self.assertIsNotNone(security_group_rule['id'])
        for key, value in self._get_base_rule_values().items():
            self.assertEqual(value, security_group_rule[key])

    def _test_security_group_rule_get_by_security_group(self, columns=None):
        # Shared body: columns=None exercises the default joins,
        # columns=[] verifies that no relations are loaded.
        instance = db.instance_create(self.ctxt,
                                      {'system_metadata': {'foo': 'bar'}})
        security_group = self._create_security_group({
                'instances': [instance]})
        security_group_rule = self._create_security_group_rule(
            {'parent_group': security_group, 'grantee_group': security_group})
        security_group_rule1 = self._create_security_group_rule(
            {'parent_group': security_group, 'grantee_group': security_group})
        found_rules = db.security_group_rule_get_by_security_group(
            self.ctxt, security_group['id'], columns_to_join=columns)
        self.assertEqual(len(found_rules), 2)
        rules_ids = [security_group_rule['id'], security_group_rule1['id']]
        for rule in found_rules:
            if columns is None:
                # Default joins: grantee_group, its instances, and their
                # system_metadata are all eagerly loaded.
                self.assertIn('grantee_group', dict(rule))
                self.assertIn('instances',
                              dict(rule.grantee_group))
                self.assertIn(
                    'system_metadata',
                    dict(rule.grantee_group.instances[0]))
                self.assertIn(rule['id'], rules_ids)
            else:
                # Explicit empty columns_to_join: nothing is loaded.
                self.assertNotIn('grantee_group', dict(rule))

    def test_security_group_rule_get_by_security_group(self):
        self._test_security_group_rule_get_by_security_group()

    def test_security_group_rule_get_by_security_group_no_joins(self):
        self._test_security_group_rule_get_by_security_group(columns=[])

    def test_security_group_rule_destroy(self):
        self._create_security_group({'name': 'fake1'})
        self._create_security_group({'name': 'fake2'})
        security_group_rule1 = self._create_security_group_rule({})
        security_group_rule2 = self._create_security_group_rule({})
        db.security_group_rule_destroy(self.ctxt, security_group_rule1['id'])
        # Destroyed rule is gone; the sibling rule is untouched.
        self.assertRaises(exception.SecurityGroupNotFound,
                          db.security_group_rule_get,
                          self.ctxt, security_group_rule1['id'])
        self._assertEqualObjects(db.security_group_rule_get(
                                     self.ctxt,
                                     security_group_rule2['id']),
                                 security_group_rule2, ['grantee_group'])

    def test_security_group_rule_destroy_not_found_exception(self):
        self.assertRaises(exception.SecurityGroupNotFound,
                          db.security_group_rule_destroy, self.ctxt, 100500)

    def test_security_group_rule_get(self):
        security_group_rule1 = (
                self._create_security_group_rule({}))
        self._create_security_group_rule({})
        real_security_group_rule = db.security_group_rule_get(
            self.ctxt, security_group_rule1['id'])
        self._assertEqualObjects(security_group_rule1,
                                 real_security_group_rule, ['grantee_group'])

    def test_security_group_rule_get_not_found_exception(self):
        self.assertRaises(exception.SecurityGroupNotFound,
                          db.security_group_rule_get, self.ctxt, 100500)

    def test_security_group_rule_count_by_group(self):
        sg1 = self._create_security_group({'name': 'fake1'})
        sg2 = self._create_security_group({'name': 'fake2'})
        rules_by_group = {sg1: [], sg2: []}
        # Create 10 rules per group, then destroy one of sg1's rules.
        for group in rules_by_group:
            rules = rules_by_group[group]
            for i in range(0, 10):
                rules.append(
                    self._create_security_group_rule(
                        {'parent_group_id': group['id']}))
        db.security_group_rule_destroy(self.ctxt,
                                       rules_by_group[sg1][0]['id'])

        counted_groups = [db.security_group_rule_count_by_group(
                              self.ctxt, group['id'])
                          for group in [sg1, sg2]]
        # Destroyed rules are excluded from the count.
        expected = [9, 10]
        self.assertEqual(counted_groups, expected)


class SecurityGroupTestCase(test.TestCase, ModelsObjectComparatorMixin):
    """Tests for db.api.security_group_* methods."""

    def setUp(self):
        super(SecurityGroupTestCase, self).setUp()
        self.ctxt = context.get_admin_context()

    def _get_base_values(self):
        # Default values for a security group.
        return {
            'name': 'fake_sec_group',
            'description': 'fake_sec_group_descr',
            'user_id': 'fake',
            'project_id': 'fake',
            'instances': []
            }

    def _create_security_group(self, values):
        v = self._get_base_values()
        v.update(values)
        return db.security_group_create(self.ctxt, v)

    def test_security_group_create(self):
        security_group = self._create_security_group({})
        self.assertIsNotNone(security_group['id'])
        for key, value in self._get_base_values().items():
            self.assertEqual(value, security_group[key])

    def test_security_group_destroy(self):
        security_group1 = self._create_security_group({})
        security_group2 = \
            self._create_security_group({'name': 'fake_sec_group2'})
        db.security_group_destroy(self.ctxt, security_group1['id'])
        # Destroyed group is gone; the sibling group is untouched.
        self.assertRaises(exception.SecurityGroupNotFound,
                          db.security_group_get,
                          self.ctxt, security_group1['id'])
        self._assertEqualObjects(db.security_group_get(
                                     self.ctxt, security_group2['id'],
                                     columns_to_join=['instances']),
                                 security_group2)

    def test_security_group_get(self):
        security_group1 = self._create_security_group({})
        self._create_security_group({'name': 'fake_sec_group2'})
        real_security_group = db.security_group_get(
            self.ctxt, security_group1['id'],
            columns_to_join=['instances'])
        self._assertEqualObjects(security_group1,
                                 real_security_group)

    def test_security_group_get_with_instance_columns(self):
        # 'instances.system_metadata' joins a column of the nested relation.
        instance = db.instance_create(self.ctxt,
                                      {'system_metadata': {'foo': 'bar'}})
        secgroup = self._create_security_group({'instances': [instance]})
        secgroup = db.security_group_get(
            self.ctxt, secgroup['id'],
            columns_to_join=['instances.system_metadata'])
        inst = secgroup.instances[0]
        self.assertIn('system_metadata', dict(inst).keys())

    def test_security_group_get_no_instances(self):
        instance = db.instance_create(self.ctxt, {})
        sid = self._create_security_group({'instances': [instance]})['id']

        # 'instances' only populated when explicitly joined.
        security_group = db.security_group_get(self.ctxt, sid,
                                               columns_to_join=['instances'])
        self.assertIn('instances', security_group.__dict__)

        security_group = db.security_group_get(self.ctxt, sid)
        self.assertNotIn('instances', security_group.__dict__)

    def test_security_group_get_not_found_exception(self):
        self.assertRaises(exception.SecurityGroupNotFound,
                          db.security_group_get, self.ctxt, 100500)

    def test_security_group_get_by_name(self):
        security_group1 = self._create_security_group({'name': 'fake1'})
        security_group2 = self._create_security_group({'name': 'fake2'})

        real_security_group1 = db.security_group_get_by_name(
            self.ctxt,
            security_group1['project_id'],
            security_group1['name'],
            columns_to_join=None)
        real_security_group2 = db.security_group_get_by_name(
            self.ctxt,
            security_group2['project_id'],
            security_group2['name'],
            columns_to_join=None)
        self._assertEqualObjects(security_group1, real_security_group1)
        self._assertEqualObjects(security_group2, real_security_group2)

    def test_security_group_get_by_project(self):
        security_group1 = self._create_security_group(
                {'name': 'fake1', 'project_id': 'fake_proj1'})
        security_group2 = self._create_security_group(
                {'name': 'fake2', 'project_id': 'fake_proj2'})

        real1 = db.security_group_get_by_project(
            self.ctxt, security_group1['project_id'])
        real2 = db.security_group_get_by_project(
            self.ctxt, security_group2['project_id'])
        expected1, expected2 = [security_group1], [security_group2]

        self._assertEqualListsOfObjects(expected1, real1,
                                        ignored_keys=['instances'])
        self._assertEqualListsOfObjects(expected2, real2,
                                        ignored_keys=['instances'])

    def
test_security_group_get_by_instance(self):
        # Only groups actually attached to the instance are returned.
        instance = db.instance_create(self.ctxt, dict(host='foo'))
        values = [
            {'name': 'fake1', 'instances': [instance]},
            {'name': 'fake2', 'instances': [instance]},
            {'name': 'fake3', 'instances': []},
        ]
        security_groups = [self._create_security_group(vals)
                           for vals in values]

        real = db.security_group_get_by_instance(self.ctxt,
                                                 instance['uuid'])
        expected = security_groups[:2]
        self._assertEqualListsOfObjects(expected, real,
                                        ignored_keys=['instances'])

    def test_security_group_get_all(self):
        values = [
            {'name': 'fake1', 'project_id': 'fake_proj1'},
            {'name': 'fake2', 'project_id': 'fake_proj2'},
        ]
        security_groups = [self._create_security_group(vals)
                           for vals in values]

        real = db.security_group_get_all(self.ctxt)

        self._assertEqualListsOfObjects(security_groups, real,
                                        ignored_keys=['instances'])

    def test_security_group_in_use(self):
        # A group is "in use" iff at least one instance references it.
        instance = db.instance_create(self.ctxt, dict(host='foo'))
        values = [
            {'instances': [instance],
             'name': 'fake_in_use'},
            {'instances': []},
        ]

        security_groups = [self._create_security_group(vals)
                           for vals in values]

        real = []
        for security_group in security_groups:
            in_use = db.security_group_in_use(self.ctxt,
                                              security_group['id'])
            real.append(in_use)
        expected = [True, False]

        self.assertEqual(expected, real)

    def test_security_group_ensure_default(self):
        self.ctxt.project_id = 'fake'
        self.ctxt.user_id = 'fake'
        self.assertEqual(0, len(db.security_group_get_by_project(
                                    self.ctxt,
                                    self.ctxt.project_id)))

        db.security_group_ensure_default(self.ctxt)

        security_groups = db.security_group_get_by_project(
                            self.ctxt,
                            self.ctxt.project_id)

        self.assertEqual(1, len(security_groups))
        self.assertEqual("default", security_groups[0]["name"])

        # Creating the default group also charges quota usage.
        usage = db.quota_usage_get(self.ctxt,
                                   self.ctxt.project_id,
                                   'security_groups',
                                   self.ctxt.user_id)
        self.assertEqual(1, usage.in_use)

    def test_security_group_ensure_default_until_refresh(self):
        # The quota usage row honors the until_refresh config option.
        self.flags(until_refresh=2)
        self.ctxt.project_id = 'fake'
        self.ctxt.user_id = 'fake'
        db.security_group_ensure_default(self.ctxt)
        usage = db.quota_usage_get(self.ctxt,
                                   self.ctxt.project_id,
                                   'security_groups',
                                   self.ctxt.user_id)
        self.assertEqual(2, usage.until_refresh)

    @mock.patch.object(db.sqlalchemy.api, '_security_group_get_by_names')
    def test_security_group_ensure_default_called_concurrently(self, sg_mock):
        # make sure NotFound is always raised here to trick Nova to insert
        # the duplicate security group entry
        sg_mock.side_effect = exception.NotFound

        # create the first db entry
        self.ctxt.project_id = 1
        db.security_group_ensure_default(self.ctxt)
        security_groups = db.security_group_get_by_project(
                            self.ctxt,
                            self.ctxt.project_id)
        self.assertEqual(1, len(security_groups))

        # create the second one and ensure the exception is handled properly
        default_group = db.security_group_ensure_default(self.ctxt)
        self.assertEqual('default', default_group.name)

    def test_security_group_update(self):
        security_group = self._create_security_group({})
        new_values = {
                    'name': 'sec_group1',
                    'description': 'sec_group_descr1',
                    'user_id': 'fake_user1',
                    'project_id': 'fake_proj1',
        }

        updated_group = db.security_group_update(
            self.ctxt, security_group['id'], new_values,
            columns_to_join=['rules.grantee_group'])
        for key, value in new_values.items():
            self.assertEqual(updated_group[key], value)
        self.assertEqual(updated_group['rules'], [])

    def test_security_group_update_to_duplicate(self):
        # (name, project_id) must stay unique across groups.
        self._create_security_group(
                {'name': 'fake1', 'project_id': 'fake_proj1'})
        security_group2 = self._create_security_group(
                {'name': 'fake1', 'project_id': 'fake_proj2'})

        self.assertRaises(exception.SecurityGroupExists,
                          db.security_group_update,
                          self.ctxt, security_group2['id'],
                          {'project_id': 'fake_proj1'})


class InstanceTestCase(test.TestCase, ModelsObjectComparatorMixin):
    """Tests for db.api.instance_* methods."""

    # Common creation values shared by create_instance_with_args().
    sample_data = {
        'project_id': 'project1',
        'hostname': 'example.com',
        'host': 'h1',
        'node': 'n1',
        'metadata': {'mkey1': 'mval1', 'mkey2': 'mval2'},
        'system_metadata': {'smkey1': 'smval1',
                            'smkey2': 'smval2'},
        'info_cache': {'ckey': 'cvalue'},
    }

    def setUp(self):
        super(InstanceTestCase, self).setUp()
        self.ctxt = context.get_admin_context()

    def _assertEqualInstances(self, instance1, instance2):
        self._assertEqualObjects(instance1, instance2,
                ignored_keys=['metadata', 'system_metadata',
                              'info_cache', 'extra'])

    def _assertEqualListsOfInstances(self, list1, list2):
        self._assertEqualListsOfObjects(list1, list2,
                ignored_keys=['metadata', 'system_metadata',
                              'info_cache', 'extra'])

    def create_instance_with_args(self, **kwargs):
        # 'context' kwarg overrides the admin context; remaining kwargs
        # override sample_data entries.
        if 'context' in kwargs:
            context = kwargs.pop('context')
        else:
            context = self.ctxt
        args = self.sample_data.copy()
        args.update(kwargs)
        return db.instance_create(context, args)

    def test_instance_create(self):
        instance = self.create_instance_with_args()
        self.assertTrue(uuidutils.is_uuid_like(instance['uuid']))

    def test_instance_create_with_object_values(self):
        # IPAddress and tz-aware datetime inputs are coerced on create.
        values = {
            'access_ip_v4': netaddr.IPAddress('1.2.3.4'),
            'access_ip_v6': netaddr.IPAddress('::1'),
            }
        dt_keys = ('created_at', 'deleted_at', 'updated_at',
                   'launched_at', 'terminated_at')
        dt = timeutils.utcnow()
        dt_utc = dt.replace(tzinfo=iso8601.iso8601.Utc())
        for key in dt_keys:
            values[key] = dt_utc
        inst = db.instance_create(self.ctxt, values)
        self.assertEqual(inst['access_ip_v4'], '1.2.3.4')
        self.assertEqual(inst['access_ip_v6'], '::1')
        for key in dt_keys:
            # Stored values come back naive (tzinfo stripped).
            self.assertEqual(inst[key], dt)

    def test_instance_update_with_object_values(self):
        # Same coercions as create, applied through instance_update.
        values = {
            'access_ip_v4': netaddr.IPAddress('1.2.3.4'),
            'access_ip_v6': netaddr.IPAddress('::1'),
            }
        dt_keys = ('created_at', 'deleted_at', 'updated_at',
                   'launched_at', 'terminated_at')
        dt = timeutils.utcnow()
        dt_utc = dt.replace(tzinfo=iso8601.iso8601.Utc())
        for key in dt_keys:
            values[key] = dt_utc
        inst = db.instance_create(self.ctxt, {})
        inst = db.instance_update(self.ctxt, inst['uuid'], values)
        self.assertEqual(inst['access_ip_v4'], '1.2.3.4')
        self.assertEqual(inst['access_ip_v6'], '::1')
        for key in dt_keys:
            self.assertEqual(inst[key], dt)

    def
test_instance_update_no_metadata_clobber(self):
        # Updating one metadata family must not wipe the other.
        meta = {'foo': 'bar'}
        sys_meta = {'sfoo': 'sbar'}

        values = {
            'metadata': meta,
            'system_metadata': sys_meta,
            }

        inst = db.instance_create(self.ctxt, {})
        inst = db.instance_update(self.ctxt, inst['uuid'], values)
        self.assertEqual(meta, utils.metadata_to_dict(inst['metadata']))
        self.assertEqual(sys_meta,
                         utils.metadata_to_dict(inst['system_metadata']))

    def test_instance_get_all_with_meta(self):
        self.create_instance_with_args()
        for inst in db.instance_get_all(self.ctxt):
            meta = utils.metadata_to_dict(inst['metadata'])
            self.assertEqual(meta, self.sample_data['metadata'])
            sys_meta = utils.metadata_to_dict(inst['system_metadata'])
            self.assertEqual(sys_meta, self.sample_data['system_metadata'])

    def test_instance_update(self):
        instance = self.create_instance_with_args()
        metadata = {'host': 'bar', 'key2': 'wuff'}
        system_metadata = {'original_image_ref': 'baz'}
        # Update the metadata
        db.instance_update(self.ctxt, instance['uuid'],
                           {'metadata': metadata,
                            'system_metadata': system_metadata})
        # Retrieve the user-provided metadata to ensure it was successfully
        # updated
        self.assertEqual(metadata,
                db.instance_metadata_get(self.ctxt, instance['uuid']))
        self.assertEqual(system_metadata,
                db.instance_system_metadata_get(self.ctxt, instance['uuid']))

    def test_instance_update_bad_str_dates(self):
        # Unparseable date strings are rejected with ValueError.
        instance = self.create_instance_with_args()
        values = {'created_at': '123'}
        self.assertRaises(ValueError,
                          db.instance_update,
                          self.ctxt, instance['uuid'], values)

    def test_instance_update_good_str_dates(self):
        # ISO-format date strings are parsed into datetime objects.
        instance = self.create_instance_with_args()
        values = {'created_at': '2011-01-31T00:00:00.0'}
        actual = db.instance_update(self.ctxt, instance['uuid'], values)
        expected = datetime.datetime(2011, 1, 31)
        self.assertEqual(expected, actual["created_at"])

    def test_create_instance_unique_hostname(self):
        context1 = context.RequestContext('user1', 'p1')
        context2 = context.RequestContext('user2', 'p2')
        self.create_instance_with_args(hostname='h1',
                                       project_id='p1')

        # With scope 'global' any duplicate should fail, be it this project:
        self.flags(osapi_compute_unique_server_name_scope='global')
        self.assertRaises(exception.InstanceExists,
                          self.create_instance_with_args,
                          context=context1,
                          hostname='h1', project_id='p3')
        # or another:
        self.assertRaises(exception.InstanceExists,
                          self.create_instance_with_args,
                          context=context2,
                          hostname='h1', project_id='p2')
        # With scope 'project' a duplicate in the project should fail:
        self.flags(osapi_compute_unique_server_name_scope='project')
        self.assertRaises(exception.InstanceExists,
                          self.create_instance_with_args,
                          context=context1,
                          hostname='h1', project_id='p1')
        # With scope 'project' a duplicate in a different project should work:
        self.flags(osapi_compute_unique_server_name_scope='project')
        self.create_instance_with_args(context=context2, hostname='h2')
        self.flags(osapi_compute_unique_server_name_scope=None)

    def test_instance_get_all_by_filters_empty_list_filter(self):
        # An empty uuid list matches nothing (no SQL error).
        filters = {'uuid': []}
        instances = db.instance_get_all_by_filters_sort(self.ctxt, filters)
        self.assertEqual([], instances)

    @mock.patch('nova.db.sqlalchemy.api.undefer')
    @mock.patch('nova.db.sqlalchemy.api.joinedload')
    def test_instance_get_all_by_filters_extra_columns(self,
                                                       mock_joinedload,
                                                       mock_undefer):
        # 'extra.*' columns use undefer; plain relations use joinedload.
        db.instance_get_all_by_filters_sort(
            self.ctxt, {},
            columns_to_join=['info_cache', 'extra.pci_requests'])
        mock_joinedload.assert_called_once_with('info_cache')
        mock_undefer.assert_called_once_with('extra.pci_requests')

    @mock.patch('nova.db.sqlalchemy.api.undefer')
    @mock.patch('nova.db.sqlalchemy.api.joinedload')
    def test_instance_get_active_by_window_extra_columns(self,
                                                         mock_joinedload,
                                                         mock_undefer):
        # Same join/undefer split for the active-by-window query.
        now = datetime.datetime(2013, 10, 10, 17, 16, 37, 156701)
        db.instance_get_active_by_window_joined(
            self.ctxt, now,
            columns_to_join=['info_cache', 'extra.pci_requests'])
        mock_joinedload.assert_called_once_with('info_cache')
        mock_undefer.assert_called_once_with('extra.pci_requests')

    def test_instance_get_all_by_filters_with_meta(self):
        self.create_instance_with_args()
        for inst in db.instance_get_all_by_filters(self.ctxt, {}):
            meta = utils.metadata_to_dict(inst['metadata'])
            self.assertEqual(meta, self.sample_data['metadata'])
            sys_meta = utils.metadata_to_dict(inst['system_metadata'])
            self.assertEqual(sys_meta, self.sample_data['system_metadata'])

    def test_instance_get_all_by_filters_without_meta(self):
        # columns_to_join=[] suppresses metadata loading entirely.
        self.create_instance_with_args()
        result = db.instance_get_all_by_filters(self.ctxt, {},
                                                columns_to_join=[])
        for inst in result:
            meta = utils.metadata_to_dict(inst['metadata'])
            self.assertEqual(meta, {})
            sys_meta = utils.metadata_to_dict(inst['system_metadata'])
            self.assertEqual(sys_meta, {})

    def test_instance_get_all_by_filters(self):
        instances = [self.create_instance_with_args() for i in range(3)]
        filtered_instances = db.instance_get_all_by_filters(self.ctxt, {})
        self._assertEqualListsOfInstances(instances, filtered_instances)

    def test_instance_get_all_by_filters_zero_limit(self):
        # limit=0 means "return nothing", not "no limit".
        self.create_instance_with_args()
        instances = db.instance_get_all_by_filters(self.ctxt, {}, limit=0)
        self.assertEqual([], instances)

    def test_instance_metadata_get_multi(self):
        uuids = [self.create_instance_with_args()['uuid'] for i in range(3)]
        meta = sqlalchemy_api._instance_metadata_get_multi(self.ctxt, uuids)
        for row in meta:
            self.assertIn(row['instance_uuid'], uuids)

    def test_instance_metadata_get_multi_no_uuids(self):
        # No uuids -> the query must short-circuit without filtering;
        # mox raises if Query.filter is ever called.
        self.mox.StubOutWithMock(query.Query, 'filter')
        self.mox.ReplayAll()
        sqlalchemy_api._instance_metadata_get_multi(self.ctxt, [])

    def test_instance_system_system_metadata_get_multi(self):
        uuids = [self.create_instance_with_args()['uuid'] for i in range(3)]
        sys_meta = sqlalchemy_api._instance_system_metadata_get_multi(
            self.ctxt, uuids)
        for row in sys_meta:
            self.assertIn(row['instance_uuid'], uuids)

    def test_instance_system_metadata_get_multi_no_uuids(self):
        # Same short-circuit check for the system_metadata variant.
        self.mox.StubOutWithMock(query.Query, 'filter')
        self.mox.ReplayAll()
        sqlalchemy_api._instance_system_metadata_get_multi(self.ctxt, [])

    def test_instance_get_all_by_filters_regex(self):
        # display_name filter values are treated as regular expressions.
        i1 = self.create_instance_with_args(display_name='test1')
        i2 = self.create_instance_with_args(display_name='teeeest2')
        self.create_instance_with_args(display_name='diff')
        result = db.instance_get_all_by_filters(self.ctxt,
                                                {'display_name': 't.*st.'})
        self._assertEqualListsOfInstances(result, [i1, i2])

    def test_instance_get_all_by_filters_changes_since(self):
        i1 = self.create_instance_with_args(updated_at=
                                            '2013-12-05T15:03:25.000000')
        i2 = self.create_instance_with_args(updated_at=
                                            '2013-12-05T15:03:26.000000')
        changes_since = iso8601.parse_date('2013-12-05T15:03:25.000000')
        # 'changes-since' is inclusive of the given timestamp.
        result = db.instance_get_all_by_filters(self.ctxt,
                                                {'changes-since':
                                                 changes_since})
        self._assertEqualListsOfInstances([i1, i2], result)

        changes_since = iso8601.parse_date('2013-12-05T15:03:26.000000')
        result = db.instance_get_all_by_filters(self.ctxt,
                                                {'changes-since':
                                                 changes_since})
        self._assertEqualListsOfInstances([i2], result)

        # A deleted instance may still serve as the pagination marker
        # when 'changes-since' is present.
        db.instance_destroy(self.ctxt, i1['uuid'])
        filters = {}
        filters['changes-since'] = changes_since
        filters['marker'] = i1['uuid']
        result = db.instance_get_all_by_filters(self.ctxt,
                                                filters)
        self._assertEqualListsOfInstances([i2], result)

    def test_instance_get_all_by_filters_exact_match(self):
        # 'host' is an exact-match filter, not a prefix match.
        instance = self.create_instance_with_args(host='host1')
        self.create_instance_with_args(host='host12')
        result = db.instance_get_all_by_filters(self.ctxt,
                                                {'host': 'host1'})
        self._assertEqualListsOfInstances([instance], result)

    def test_instance_get_all_by_filters_metadata(self):
        instance = self.create_instance_with_args(metadata={'foo': 'bar'})
        self.create_instance_with_args()
        result = db.instance_get_all_by_filters(self.ctxt,
                                                {'metadata':
                                                 {'foo': 'bar'}})
        self._assertEqualListsOfInstances([instance], result)

    def test_instance_get_all_by_filters_system_metadata(self):
        instance = self.create_instance_with_args(
                system_metadata={'foo': 'bar'})
        self.create_instance_with_args()
        result = db.instance_get_all_by_filters(self.ctxt,
                                                {'system_metadata':
                                                 {'foo': 'bar'}})
        self._assertEqualListsOfInstances([instance], result)

    def test_instance_get_all_by_filters_unicode_value(self):
        # Non-ASCII display_name values must filter correctly.
        i1 = self.create_instance_with_args(display_name=u'test♥')
        i2 = self.create_instance_with_args(display_name=u'test')
        i3 = self.create_instance_with_args(display_name=u'test♥test')
        self.create_instance_with_args(display_name='diff')
        result = db.instance_get_all_by_filters(self.ctxt,
                                                {'display_name': u'test'})
        self._assertEqualListsOfInstances([i1, i2, i3], result)

        result = db.instance_get_all_by_filters(self.ctxt,
                                                {'display_name': u'test♥'})
        self._assertEqualListsOfInstances(result, [i1, i3])

    def test_instance_get_all_by_filters_tags(self):
        # EC2-style tag filters are backed by instance metadata here.
        instance = self.create_instance_with_args(
            metadata={'foo': 'bar'})
        self.create_instance_with_args()
        # For format 'tag-'
        result = db.instance_get_all_by_filters(
            self.ctxt, {'filter': [
                {'name': 'tag-key', 'value': 'foo'},
                {'name': 'tag-value', 'value': 'bar'},
            ]})
        self._assertEqualListsOfInstances([instance], result)
        # For format 'tag:'
        result = db.instance_get_all_by_filters(
            self.ctxt, {'filter': [
                {'name': 'tag:foo', 'value': 'bar'},
            ]})
        self._assertEqualListsOfInstances([instance], result)
        # For non-existent tag
        result = db.instance_get_all_by_filters(
            self.ctxt, {'filter': [
                {'name': 'tag:foo', 'value': 'barred'},
            ]})
        self.assertEqual([], result)

        # Confirm with deleted tags
        db.instance_metadata_delete(self.ctxt, instance['uuid'], 'foo')
        # For format 'tag-'
        result = db.instance_get_all_by_filters(
            self.ctxt, {'filter': [
                {'name': 'tag-key', 'value': 'foo'},
            ]})
        self.assertEqual([], result)
        result = db.instance_get_all_by_filters(
            self.ctxt, {'filter': [
                {'name': 'tag-value', 'value': 'bar'}
            ]})
        self.assertEqual([], result)
        # For format 'tag:'
        result = db.instance_get_all_by_filters(
            self.ctxt, {'filter': [
                {'name': 'tag:foo', 'value': 'bar'},
            ]})
        self.assertEqual([], result)

    def test_instance_get_by_uuid(self):
        inst = self.create_instance_with_args()
        result = db.instance_get_by_uuid(self.ctxt, inst['uuid'])
        self._assertEqualInstances(inst, result)

    def test_instance_get_by_uuid_join_empty(self):
        # columns_to_join=[] suppresses both metadata joins.
        inst = self.create_instance_with_args()
        result = db.instance_get_by_uuid(self.ctxt, inst['uuid'],
                                         columns_to_join=[])
        meta = utils.metadata_to_dict(result['metadata'])
        self.assertEqual(meta, {})
        sys_meta = utils.metadata_to_dict(result['system_metadata'])
        self.assertEqual(sys_meta, {})

    def test_instance_get_by_uuid_join_meta(self):
        # Joining only 'metadata' leaves system_metadata empty.
        inst = self.create_instance_with_args()
        result = db.instance_get_by_uuid(self.ctxt, inst['uuid'],
                                         columns_to_join=['metadata'])
        meta = utils.metadata_to_dict(result['metadata'])
        self.assertEqual(meta, self.sample_data['metadata'])
        sys_meta = utils.metadata_to_dict(result['system_metadata'])
        self.assertEqual(sys_meta, {})

    def test_instance_get_by_uuid_join_sys_meta(self):
        # Joining only 'system_metadata' leaves metadata empty.
        inst = self.create_instance_with_args()
        result = db.instance_get_by_uuid(self.ctxt, inst['uuid'],
                                         columns_to_join=['system_metadata'])
        meta = utils.metadata_to_dict(result['metadata'])
        self.assertEqual(meta, {})
        sys_meta = utils.metadata_to_dict(result['system_metadata'])
        self.assertEqual(sys_meta, self.sample_data['system_metadata'])

    def test_instance_get_all_by_filters_deleted(self):
        # With no 'deleted' filter both live and destroyed rows come back.
        inst1 = self.create_instance_with_args()
        inst2 = self.create_instance_with_args(reservation_id='b')
        db.instance_destroy(self.ctxt, inst1['uuid'])
        result = db.instance_get_all_by_filters(self.ctxt, {})
        self._assertEqualListsOfObjects([inst1, inst2], result,
            ignored_keys=['metadata', 'system_metadata',
                          'deleted', 'deleted_at', 'info_cache',
                          'pci_devices', 'extra'])

    def test_instance_get_all_by_filters_deleted_and_soft_deleted(self):
        inst1 = self.create_instance_with_args()
        inst2 = self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED)
        self.create_instance_with_args()
        db.instance_destroy(self.ctxt, inst1['uuid'])
        result = db.instance_get_all_by_filters(self.ctxt,
                                                {'deleted': True})
        # deleted=True also matches SOFT_DELETED instances by default.
        self._assertEqualListsOfObjects([inst1, inst2], result,
            ignored_keys=['metadata', 'system_metadata',
                          'deleted', 'deleted_at', 'info_cache',
                          'pci_devices', 'extra'])

    def test_instance_get_all_by_filters_deleted_no_soft_deleted(self):
        # soft_deleted=False excludes SOFT_DELETED rows from the result.
        inst1 = self.create_instance_with_args()
        self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED)
        self.create_instance_with_args()
        db.instance_destroy(self.ctxt, inst1['uuid'])
        result = db.instance_get_all_by_filters(self.ctxt,
                                                {'deleted': True,
                                                 'soft_deleted': False})
        self._assertEqualListsOfObjects([inst1], result,
                ignored_keys=['deleted', 'deleted_at', 'metadata',
                              'system_metadata', 'info_cache',
                              'pci_devices', 'extra'])

    def test_instance_get_all_by_filters_alive_and_soft_deleted(self):
        # deleted=False + soft_deleted=True: live and soft-deleted, but
        # not hard-destroyed instances.
        inst1 = self.create_instance_with_args()
        inst2 = self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED)
        inst3 = self.create_instance_with_args()
        db.instance_destroy(self.ctxt, inst1['uuid'])
        result = db.instance_get_all_by_filters(self.ctxt,
                                                {'deleted': False,
                                                 'soft_deleted': True})
        self._assertEqualListsOfInstances([inst2, inst3], result)

    def test_instance_get_all_by_filters_not_deleted(self):
        inst1 = self.create_instance_with_args()
        self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED)
        inst3 = self.create_instance_with_args()
        inst4 = self.create_instance_with_args(vm_state=vm_states.ACTIVE)
        db.instance_destroy(self.ctxt, inst1['uuid'])
        result = db.instance_get_all_by_filters(self.ctxt,
                                                {'deleted': False})
        # inst3 has no vm_state set; it must still count as not-deleted.
        self.assertIsNone(inst3.vm_state)
        self._assertEqualListsOfInstances([inst3, inst4], result)

    def test_instance_get_all_by_filters_cleaned(self):
        inst1 = self.create_instance_with_args()
        inst2 = self.create_instance_with_args(reservation_id='b')
        db.instance_update(self.ctxt, inst1['uuid'], {'cleaned': 1})
        result = db.instance_get_all_by_filters(self.ctxt, {})
        self.assertEqual(2, len(result))
        self.assertIn(inst1['uuid'], [result[0]['uuid'], result[1]['uuid']])
        self.assertIn(inst2['uuid'],
                      [result[0]['uuid'], result[1]['uuid']])
        # Result order is not guaranteed; check cleaned flags either way.
        if inst1['uuid'] == result[0]['uuid']:
            self.assertTrue(result[0]['cleaned'])
            self.assertFalse(result[1]['cleaned'])
        else:
            self.assertTrue(result[1]['cleaned'])
            self.assertFalse(result[0]['cleaned'])

    def test_instance_get_all_by_filters_tag_any(self):
        # 'tags-any' matches instances having at least one of the tags.
        inst1 = self.create_instance_with_args()
        inst2 = self.create_instance_with_args()
        inst3 = self.create_instance_with_args()

        t1 = 'tag1'
        t2 = 'tag2'
        t3 = 'tag3'

        db.instance_tag_set(self.ctxt, inst1.uuid, [t1])
        db.instance_tag_set(self.ctxt, inst2.uuid, [t1, t2, t3])
        db.instance_tag_set(self.ctxt, inst3.uuid, [t3])

        result = db.instance_get_all_by_filters(self.ctxt,
                                                {'tags-any': [t1, t2]})
        self._assertEqualListsOfObjects([inst1, inst2], result,
                ignored_keys=['deleted', 'deleted_at', 'metadata', 'extra',
                              'system_metadata', 'info_cache', 'pci_devices'])

    def test_instance_get_all_by_filters_tag_any_empty(self):
        inst1 = self.create_instance_with_args()
        inst2 = self.create_instance_with_args()

        t1 = 'tag1'
        t2 = 'tag2'
        t3 = 'tag3'
        t4 = 'tag4'

        db.instance_tag_set(self.ctxt, inst1.uuid, [t1])
        db.instance_tag_set(self.ctxt, inst2.uuid, [t1, t2])

        result = db.instance_get_all_by_filters(self.ctxt,
                                                {'tags-any': [t3, t4]})
        self.assertEqual([], result)

    def test_instance_get_all_by_filters_tag(self):
        # 'tags' requires ALL listed tags to be present on the instance.
        inst1 = self.create_instance_with_args()
        inst2 = self.create_instance_with_args()
        inst3 = self.create_instance_with_args()

        t1 = 'tag1'
        t2 = 'tag2'
        t3 = 'tag3'

        db.instance_tag_set(self.ctxt, inst1.uuid, [t1, t3])
        db.instance_tag_set(self.ctxt, inst2.uuid, [t1, t2])
        db.instance_tag_set(self.ctxt, inst3.uuid, [t1, t2, t3])

        result = db.instance_get_all_by_filters(self.ctxt,
                                                {'tags': [t1, t2]})
        self._assertEqualListsOfObjects([inst2, inst3], result,
                ignored_keys=['deleted', 'deleted_at', 'metadata', 'extra',
                              'system_metadata', 'info_cache', 'pci_devices'])

    def test_instance_get_all_by_filters_tag_empty(self):
        inst1 = self.create_instance_with_args()
        inst2 = self.create_instance_with_args()

        t1 = 'tag1'
        t2 = 'tag2'
        t3 = 'tag3'

        db.instance_tag_set(self.ctxt, inst1.uuid, [t1])
        db.instance_tag_set(self.ctxt, inst2.uuid, [t1, t2])

        result = db.instance_get_all_by_filters(self.ctxt,
                                                {'tags': [t3]})
        self.assertEqual([], result)

    def test_instance_get_all_by_filters_tag_any_and_tag(self):
        # Combined: all of 'tags' AND at least one of 'tags-any'.
        inst1 = self.create_instance_with_args()
        inst2 = self.create_instance_with_args()
        inst3 = self.create_instance_with_args()

        t1 = 'tag1'
        t2 = 'tag2'
        t3 = 'tag3'
        t4 = 'tag4'

        db.instance_tag_set(self.ctxt, inst1.uuid, [t1, t2])
        db.instance_tag_set(self.ctxt, inst2.uuid, [t1, t2, t4])
        db.instance_tag_set(self.ctxt, inst3.uuid, [t2, t3])

        result = db.instance_get_all_by_filters(self.ctxt,
                                                {'tags': [t1, t2],
                                                 'tags-any': [t3, t4]})
        self._assertEqualListsOfObjects([inst2], result,
                ignored_keys=['deleted', 'deleted_at', 'metadata', 'extra',
                              'system_metadata', 'info_cache', 'pci_devices'])

    def test_instance_get_all_by_host_and_node_no_join(self):
        # Without a join request, system_metadata stays unloaded ([]).
        instance = self.create_instance_with_args()
        result = db.instance_get_all_by_host_and_node(self.ctxt, 'h1', 'n1')
        self.assertEqual(result[0]['uuid'], instance['uuid'])
        self.assertEqual(result[0]['system_metadata'], [])

    def test_instance_get_all_by_host_and_node(self):
        instance = self.create_instance_with_args(
            system_metadata={'foo': 'bar'})
        result = db.instance_get_all_by_host_and_node(
            self.ctxt, 'h1', 'n1',
            columns_to_join=['system_metadata', 'extra'])
        self.assertEqual(instance['uuid'], result[0]['uuid'])
        self.assertEqual('bar', result[0]['system_metadata'][0]['value'])
        self.assertEqual(instance['uuid'],
                         result[0]['extra']['instance_uuid'])

    @mock.patch('nova.db.sqlalchemy.api._instances_fill_metadata')
    @mock.patch('nova.db.sqlalchemy.api._instance_get_all_query')
    def test_instance_get_all_by_host_and_node_fills_manually(self,
                                                              mock_getall,
                                                              mock_fill):
        # metadata/system_metadata are filled manually; the rest are
        # passed through as SQL joins.
        db.instance_get_all_by_host_and_node(
            self.ctxt, 'h1', 'n1',
            columns_to_join=['metadata', 'system_metadata',
                             'extra', 'foo'])
        self.assertEqual(sorted(['extra', 'foo']),
                         sorted(mock_getall.call_args[1]['joins']))
        self.assertEqual(sorted(['metadata', 'system_metadata']),
                         sorted(mock_fill.call_args[1]['manual_joins']))

    def _get_base_values(self):
        # Default values for a security group fixture.
        return {
            'name': 'fake_sec_group',
            'description': 'fake_sec_group_descr',
            'user_id': 'fake',
            'project_id': 'fake',
            'instances': []
        }

    def _get_base_rule_values(self):
        # Default values for a security group rule fixture.
        return {
            'protocol': "tcp",
            'from_port': 80,
            'to_port': 8080,
            'cidr': None,
            'deleted': 0,
            'deleted_at': None,
            'grantee_group': None,
            'updated_at': None
        }

    def _create_security_group(self, values):
        # Create a security group from base values overridden by `values`.
        v = self._get_base_values()
        v.update(values)
        return db.security_group_create(self.ctxt, v)

    def _create_security_group_rule(self, values):
        # Create a security group rule from base values overridden by
        # `values`.
        v = self._get_base_rule_values()
        v.update(values)
        return db.security_group_rule_create(self.ctxt, v)

    def test_instance_get_all_by_grantee_security_groups(self):
        # Instances returned are the members of groups whose rules grant
        # access to the given (grantee) group ids.
        instance1 = self.create_instance_with_args()
        instance2 = self.create_instance_with_args()
        instance3 = self.create_instance_with_args()
        secgroup1 = self._create_security_group(
            {'name': 'fake-secgroup1', 'instances': [instance1]})
        secgroup2 = self._create_security_group(
            {'name': 'fake-secgroup2', 'instances': [instance1]})
        secgroup3 = self._create_security_group(
            {'name': 'fake-secgroup3', 'instances': [instance2]})
        secgroup4 = self._create_security_group(
            {'name': 'fake-secgroup4',
             'instances': [instance2, instance3]})
        self._create_security_group_rule({'grantee_group': secgroup1,
                                          'parent_group': secgroup3})
        self._create_security_group_rule({'grantee_group': secgroup2,
                                          'parent_group': secgroup4})
        group_ids = [secgroup['id'] for secgroup in [secgroup1, secgroup2]]
        instances = db.instance_get_all_by_grantee_security_groups(
            self.ctxt, group_ids)
        instance_uuids = [instance['uuid'] for instance in instances]
        self.assertEqual(len(instances), 2)
        self.assertIn(instance2['uuid'], instance_uuids)
        self.assertIn(instance3['uuid'], instance_uuids)

    def test_instance_get_all_by_grantee_security_groups_empty_group_ids(self):
        results = db.instance_get_all_by_grantee_security_groups(
            self.ctxt, [])
        self.assertEqual([], results)

    def test_instance_get_all_hung_in_rebooting(self):
        # Ensure no instances are returned.
        results = db.instance_get_all_hung_in_rebooting(self.ctxt, 10)
        self.assertEqual([], results)

        # Ensure one rebooting instance with updated_at older than 10 seconds
        # is returned.
        instance = self.create_instance_with_args(task_state="rebooting",
                updated_at=datetime.datetime(2000, 1, 1, 12, 0, 0))
        results = db.instance_get_all_hung_in_rebooting(self.ctxt, 10)
        self._assertEqualListsOfObjects([instance], results,
            ignored_keys=['task_state', 'info_cache', 'security_groups',
                          'metadata', 'system_metadata', 'pci_devices',
                          'extra'])
        db.instance_update(self.ctxt, instance['uuid'],
                           {"task_state": None})

        # Ensure the newly rebooted instance is not returned.
        self.create_instance_with_args(task_state="rebooting",
                                       updated_at=timeutils.utcnow())
        results = db.instance_get_all_hung_in_rebooting(self.ctxt, 10)
        self.assertEqual([], results)

    def test_instance_update_with_expected_vm_state(self):
        # Update succeeds when the current vm_state is in the expected set.
        instance = self.create_instance_with_args(vm_state='foo')
        db.instance_update(self.ctxt, instance['uuid'],
                           {'host': 'h1',
                            'expected_vm_state': ('foo', 'bar')})

    def test_instance_update_with_unexpected_vm_state(self):
        # Update raises when the current vm_state is not in the expected set.
        instance = self.create_instance_with_args(vm_state='foo')
        self.assertRaises(exception.InstanceUpdateConflict,
                    db.instance_update, self.ctxt, instance['uuid'],
                    {'host': 'h1',
                     'expected_vm_state': ('spam', 'bar')})

    def test_instance_update_with_instance_uuid(self):
        # test instance_update() works when an instance UUID is passed.
ctxt = context.get_admin_context() # Create an instance with some metadata values = {'metadata': {'host': 'foo', 'key1': 'meow'}, 'system_metadata': {'original_image_ref': 'blah'}} instance = db.instance_create(ctxt, values) # Update the metadata values = {'metadata': {'host': 'bar', 'key2': 'wuff'}, 'system_metadata': {'original_image_ref': 'baz'}} db.instance_update(ctxt, instance['uuid'], values) # Retrieve the user-provided metadata to ensure it was successfully # updated instance_meta = db.instance_metadata_get(ctxt, instance['uuid']) self.assertEqual('bar', instance_meta['host']) self.assertEqual('wuff', instance_meta['key2']) self.assertNotIn('key1', instance_meta) # Retrieve the system metadata to ensure it was successfully updated system_meta = db.instance_system_metadata_get(ctxt, instance['uuid']) self.assertEqual('baz', system_meta['original_image_ref']) def test_delete_instance_metadata_on_instance_destroy(self): ctxt = context.get_admin_context() # Create an instance with some metadata values = {'metadata': {'host': 'foo', 'key1': 'meow'}, 'system_metadata': {'original_image_ref': 'blah'}} instance = db.instance_create(ctxt, values) instance_meta = db.instance_metadata_get(ctxt, instance['uuid']) self.assertEqual('foo', instance_meta['host']) self.assertEqual('meow', instance_meta['key1']) db.instance_destroy(ctxt, instance['uuid']) instance_meta = db.instance_metadata_get(ctxt, instance['uuid']) # Make sure instance metadata is deleted as well self.assertEqual({}, instance_meta) def test_delete_instance_faults_on_instance_destroy(self): ctxt = context.get_admin_context() uuid = str(stdlib_uuid.uuid4()) # Create faults db.instance_create(ctxt, {'uuid': uuid}) fault_values = { 'message': 'message', 'details': 'detail', 'instance_uuid': uuid, 'code': 404, 'host': 'localhost' } fault = db.instance_fault_create(ctxt, fault_values) # Retrieve the fault to ensure it was successfully added faults = db.instance_fault_get_by_instance_uuids(ctxt, [uuid]) 
        self.assertEqual(1, len(faults[uuid]))
        self._assertEqualObjects(fault, faults[uuid][0])
        db.instance_destroy(ctxt, uuid)
        faults = db.instance_fault_get_by_instance_uuids(ctxt, [uuid])
        # Make sure instance faults is deleted as well
        self.assertEqual(0, len(faults[uuid]))

    def test_instance_update_and_get_original(self):
        # Returns (pre-update, post-update) refs for the same instance.
        instance = self.create_instance_with_args(vm_state='building')
        (old_ref, new_ref) = db.instance_update_and_get_original(
            self.ctxt, instance['uuid'], {'vm_state': 'needscoffee'})
        self.assertEqual('building', old_ref['vm_state'])
        self.assertEqual('needscoffee', new_ref['vm_state'])

    def test_instance_update_and_get_original_metadata(self):
        instance = self.create_instance_with_args()
        columns_to_join = ['metadata']
        (old_ref, new_ref) = db.instance_update_and_get_original(
            self.ctxt, instance['uuid'], {'vm_state': 'needscoffee'},
            columns_to_join=columns_to_join)
        meta = utils.metadata_to_dict(new_ref['metadata'])
        self.assertEqual(meta, self.sample_data['metadata'])
        sys_meta = utils.metadata_to_dict(new_ref['system_metadata'])
        self.assertEqual(sys_meta, {})

    def test_instance_update_and_get_original_metadata_none_join(self):
        instance = self.create_instance_with_args()
        (old_ref, new_ref) = db.instance_update_and_get_original(
            self.ctxt, instance['uuid'], {'metadata': {'mk1': 'mv3'}})
        meta = utils.metadata_to_dict(new_ref['metadata'])
        self.assertEqual(meta, {'mk1': 'mv3'})

    def test_instance_update_and_get_original_no_conflict_on_session(self):
        session = get_session()

        # patch get_session so that we may inspect it outside of the
        # method; once enginefacade is implemented, this can be simplified
        with mock.patch("nova.db.sqlalchemy.api.get_session",
                        lambda: session):
            instance = self.create_instance_with_args()
            (old_ref, new_ref) = db.instance_update_and_get_original(
                self.ctxt, instance['uuid'], {'metadata': {'mk1': 'mv3'}})

            # test some regular persisted fields
            self.assertEqual(old_ref.uuid, new_ref.uuid)
            self.assertEqual(old_ref.project_id, new_ref.project_id)

            # after a copy operation, we can assert:

            # 1. the two states have their own InstanceState
            old_insp = inspect(old_ref)
            new_insp = inspect(new_ref)
            self.assertNotEqual(old_insp, new_insp)

            # 2. only one of the objects is still in our Session
            self.assertIs(new_insp.session, session)
            self.assertIsNone(old_insp.session)

            # 3. The "new" object remains persistent and ready
            # for updates
            self.assertTrue(new_insp.persistent)

            # 4. the "old" object is detached from this Session.
            self.assertTrue(old_insp.detached)

    def test_instance_update_and_get_original_conflict_race(self):
        # Ensure that we retry if update_on_match fails for no discernable
        # reason
        instance = self.create_instance_with_args()

        orig_update_on_match = update_match.update_on_match

        # Reproduce the conditions of a race between fetching and updating the
        # instance by making update_on_match fail for no discernable reason the
        # first time it is called, but work normally the second time.
        with mock.patch.object(update_match, 'update_on_match',
                        side_effect=[update_match.NoRowsMatched,
                                     orig_update_on_match]):
            db.instance_update_and_get_original(
                self.ctxt, instance['uuid'], {'metadata': {'mk1': 'mv3'}})

        self.assertEqual(update_match.update_on_match.call_count, 2)

    def test_instance_update_and_get_original_conflict_race_fallthrough(self):
        # Ensure that is update_match continuously fails for no discernable
        # reason, we evantually raise UnknownInstanceUpdateConflict
        instance = self.create_instance_with_args()

        # Reproduce the conditions of a race between fetching and updating the
        # instance by making update_on_match fail for no discernable reason.
        with mock.patch.object(update_match, 'update_on_match',
                        side_effect=update_match.NoRowsMatched):
            self.assertRaises(exception.UnknownInstanceUpdateConflict,
                              db.instance_update_and_get_original,
                              self.ctxt,
                              instance['uuid'],
                              {'metadata': {'mk1': 'mv3'}})

    def test_instance_update_and_get_original_expected_host(self):
        # Ensure that we allow update when expecting a host field
        instance = self.create_instance_with_args()

        (orig, new) = db.instance_update_and_get_original(
            self.ctxt, instance['uuid'], {'host': None},
            expected={'host': 'h1'})

        self.assertIsNone(new['host'])

    def test_instance_update_and_get_original_expected_host_fail(self):
        # Ensure that we detect a changed expected host and raise
        # InstanceUpdateConflict
        instance = self.create_instance_with_args()

        try:
            db.instance_update_and_get_original(
                self.ctxt, instance['uuid'], {'host': None},
                expected={'host': 'h2'})
        except exception.InstanceUpdateConflict as ex:
            # The exception carries actual vs expected values for callers.
            self.assertEqual(ex.kwargs['instance_uuid'], instance['uuid'])
            self.assertEqual(ex.kwargs['actual'], {'host': 'h1'})
            self.assertEqual(ex.kwargs['expected'], {'host': ['h2']})
        else:
            self.fail('InstanceUpdateConflict was not raised')

    def test_instance_update_and_get_original_expected_host_none(self):
        # Ensure that we allow update when expecting a host field of None
        instance = self.create_instance_with_args(host=None)

        (old, new) = db.instance_update_and_get_original(
            self.ctxt, instance['uuid'], {'host': 'h1'},
            expected={'host': None})

        self.assertEqual('h1', new['host'])

    def test_instance_update_and_get_original_expected_host_none_fail(self):
        # Ensure that we detect a changed expected host of None and raise
        # InstanceUpdateConflict
        instance = self.create_instance_with_args()

        try:
            db.instance_update_and_get_original(
                self.ctxt, instance['uuid'], {'host': None},
                expected={'host': None})
        except exception.InstanceUpdateConflict as ex:
            self.assertEqual(ex.kwargs['instance_uuid'], instance['uuid'])
            self.assertEqual(ex.kwargs['actual'], {'host': 'h1'})
            self.assertEqual(ex.kwargs['expected'], {'host': [None]})
        else:
            self.fail('InstanceUpdateConflict was not raised')

    def test_instance_update_and_get_original_expected_task_state_single_fail(self):  # noqa
        # Ensure that we detect a changed expected task and raise
        # UnexpectedTaskStateError
        instance = self.create_instance_with_args()

        try:
            db.instance_update_and_get_original(
                self.ctxt, instance['uuid'], {
                    'host': None,
                    'expected_task_state': task_states.SCHEDULING
                })
        except exception.UnexpectedTaskStateError as ex:
            self.assertEqual(ex.kwargs['instance_uuid'], instance['uuid'])
            self.assertEqual(ex.kwargs['actual'], {'task_state': None})
            self.assertEqual(ex.kwargs['expected'],
                             {'task_state': [task_states.SCHEDULING]})
        else:
            self.fail('UnexpectedTaskStateError was not raised')

    def test_instance_update_and_get_original_expected_task_state_single_pass(self):  # noqa
        # Ensure that we allow an update when expected task is correct
        instance = self.create_instance_with_args()

        (orig, new) = db.instance_update_and_get_original(
            self.ctxt, instance['uuid'], {
                'host': None,
                'expected_task_state': None
            })

        self.assertIsNone(new['host'])

    def test_instance_update_and_get_original_expected_task_state_multi_fail(self):  # noqa
        # Ensure that we detect a changed expected task and raise
        # UnexpectedTaskStateError when there are multiple potential expected
        # tasks
        instance = self.create_instance_with_args()

        try:
            db.instance_update_and_get_original(
                self.ctxt, instance['uuid'], {
                    'host': None,
                    'expected_task_state': [task_states.SCHEDULING,
                                            task_states.REBUILDING]
                })
        except exception.UnexpectedTaskStateError as ex:
            self.assertEqual(ex.kwargs['instance_uuid'], instance['uuid'])
            self.assertEqual(ex.kwargs['actual'], {'task_state': None})
            self.assertEqual(ex.kwargs['expected'],
                             {'task_state': [task_states.SCHEDULING,
                                             task_states.REBUILDING]})
        else:
            self.fail('UnexpectedTaskStateError was not raised')

    def test_instance_update_and_get_original_expected_task_state_multi_pass(self):  # noqa
        # Ensure that we allow an update when expected task is in a list of
        # expected tasks
        instance = self.create_instance_with_args()

        (orig, new) = db.instance_update_and_get_original(
            self.ctxt, instance['uuid'], {
                'host': None,
                'expected_task_state': [task_states.SCHEDULING, None]
            })

        self.assertIsNone(new['host'])

    def test_instance_update_and_get_original_expected_task_state_deleting(self):  # noqa
        # Ensure that we raise UnepectedDeletingTaskStateError when task state
        # is not as expected, and it is DELETING
        instance = self.create_instance_with_args(
            task_state=task_states.DELETING)

        try:
            db.instance_update_and_get_original(
                self.ctxt, instance['uuid'], {
                    'host': None,
                    'expected_task_state': task_states.SCHEDULING
                })
        except exception.UnexpectedDeletingTaskStateError as ex:
            self.assertEqual(ex.kwargs['instance_uuid'], instance['uuid'])
            self.assertEqual(ex.kwargs['actual'],
                             {'task_state': task_states.DELETING})
            self.assertEqual(ex.kwargs['expected'],
                             {'task_state': [task_states.SCHEDULING]})
        else:
            self.fail('UnexpectedDeletingTaskStateError was not raised')

    def test_instance_update_unique_name(self):
        context1 = context.RequestContext('user1', 'p1')
        context2 = context.RequestContext('user2', 'p2')

        inst1 = self.create_instance_with_args(context=context1,
                                               project_id='p1',
                                               hostname='fake_name1')
        inst2 = self.create_instance_with_args(context=context1,
                                               project_id='p1',
                                               hostname='fake_name2')
        inst3 = self.create_instance_with_args(context=context2,
                                               project_id='p2',
                                               hostname='fake_name3')

        # osapi_compute_unique_server_name_scope is unset so this should work:
        db.instance_update(context1, inst1['uuid'],
                           {'hostname': 'fake_name2'})
        db.instance_update(context1, inst1['uuid'],
                           {'hostname': 'fake_name1'})

        # With scope 'global' any duplicate should fail.
        self.flags(osapi_compute_unique_server_name_scope='global')
        self.assertRaises(exception.InstanceExists,
                          db.instance_update,
                          context1,
                          inst2['uuid'],
                          {'hostname': 'fake_name1'})
        self.assertRaises(exception.InstanceExists,
                          db.instance_update,
                          context2,
                          inst3['uuid'],
                          {'hostname': 'fake_name1'})
        # But we should definitely be able to update our name if we aren't
        # really changing it.
        db.instance_update(context1, inst1['uuid'],
                           {'hostname': 'fake_NAME'})

        # With scope 'project' a duplicate in the project should fail:
        self.flags(osapi_compute_unique_server_name_scope='project')
        self.assertRaises(exception.InstanceExists,
                          db.instance_update,
                          context1,
                          inst2['uuid'],
                          {'hostname': 'fake_NAME'})

        # With scope 'project' a duplicate in a different project should work:
        self.flags(osapi_compute_unique_server_name_scope='project')
        db.instance_update(context2, inst3['uuid'],
                           {'hostname': 'fake_NAME'})

    def _test_instance_update_updates_metadata(self, metadata_type):
        # Shared body for the metadata / system_metadata update tests;
        # metadata_type selects which column family to exercise.
        instance = self.create_instance_with_args()

        def set_and_check(meta):
            inst = db.instance_update(self.ctxt, instance['uuid'],
                                      {metadata_type: dict(meta)})
            _meta = utils.metadata_to_dict(inst[metadata_type])
            self.assertEqual(meta, _meta)

        meta = {'speed': '88', 'units': 'MPH'}
        set_and_check(meta)
        meta['gigawatts'] = '1.21'
        set_and_check(meta)
        del meta['gigawatts']
        set_and_check(meta)
        # Removed keys must not resurface even when reading deleted rows.
        self.ctxt.read_deleted = 'yes'
        self.assertNotIn('gigawatts',
            db.instance_system_metadata_get(self.ctxt, instance.uuid))

    def test_security_group_in_use(self):
        # NOTE(review): only creates an instance; no assertion follows in
        # this block — looks like a placeholder/fixture-only test.
        db.instance_create(self.ctxt, dict(host='foo'))

    def test_instance_update_updates_system_metadata(self):
        # Ensure that system_metadata is updated during instance_update
        self._test_instance_update_updates_metadata('system_metadata')

    def test_instance_update_updates_metadata(self):
        # Ensure that metadata is updated during instance_update
        self._test_instance_update_updates_metadata('metadata')

    def test_instance_floating_address_get_all(self):
        ctxt = context.get_admin_context()

        instance1 = db.instance_create(ctxt, {'host': 'h1',
                                              'hostname': 'n1'})
        instance2 = db.instance_create(ctxt, {'host': 'h2',
                                              'hostname': 'n2'})

        fixed_addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
        float_addresses = ['2.1.1.1', '2.1.1.2', '2.1.1.3']
        instance_uuids = [instance1['uuid'], instance1['uuid'],
                          instance2['uuid']]

        # Wire each floating address to a fixed ip on an instance.
        for fixed_addr, float_addr, instance_uuid in zip(fixed_addresses,
                                                         float_addresses,
                                                         instance_uuids):
            db.fixed_ip_create(ctxt, {'address': fixed_addr,
                                      'instance_uuid': instance_uuid})
            fixed_id = db.fixed_ip_get_by_address(ctxt, fixed_addr)['id']
            db.floating_ip_create(ctxt,
                                  {'address': float_addr,
                                   'fixed_ip_id': fixed_id})

        real_float_addresses = \
            db.instance_floating_address_get_all(ctxt, instance_uuids[0])
        self.assertEqual(set(float_addresses[:2]),
                         set(real_float_addresses))
        real_float_addresses = \
            db.instance_floating_address_get_all(ctxt, instance_uuids[2])
        self.assertEqual(set([float_addresses[2]]),
                         set(real_float_addresses))

        self.assertRaises(exception.InvalidUUID,
                          db.instance_floating_address_get_all,
                          ctxt, 'invalid_uuid')

    def test_instance_stringified_ips(self):
        # IP-typed values are stored and returned as strings.
        instance = self.create_instance_with_args()
        instance = db.instance_update(
            self.ctxt, instance['uuid'],
            {'access_ip_v4': netaddr.IPAddress('1.2.3.4'),
             'access_ip_v6': netaddr.IPAddress('::1')})
        self.assertIsInstance(instance['access_ip_v4'], six.string_types)
        self.assertIsInstance(instance['access_ip_v6'], six.string_types)
        instance = db.instance_get_by_uuid(self.ctxt, instance['uuid'])
        self.assertIsInstance(instance['access_ip_v4'], six.string_types)
        self.assertIsInstance(instance['access_ip_v6'], six.string_types)

    @mock.patch('nova.db.sqlalchemy.api._check_instance_exists_in_project',
                return_value=None)
    def test_instance_destroy(self, mock_check_inst_exists):
        ctxt = context.get_admin_context()
        values = {
            'metadata': {'key': 'value'},
            'system_metadata': {'key': 'value'}
        }
        inst_uuid = self.create_instance_with_args(**values)['uuid']
        db.instance_tag_set(ctxt, inst_uuid, ['tag1', 'tag2'])
db.instance_destroy(ctxt, inst_uuid) self.assertRaises(exception.InstanceNotFound, db.instance_get, ctxt, inst_uuid) self.assertIsNone(db.instance_info_cache_get(ctxt, inst_uuid)) self.assertEqual({}, db.instance_metadata_get(ctxt, inst_uuid)) self.assertEqual([], db.instance_tag_get_by_instance_uuid( ctxt, inst_uuid)) ctxt.read_deleted = 'yes' self.assertEqual(values['system_metadata'], db.instance_system_metadata_get(ctxt, inst_uuid)) def test_instance_destroy_already_destroyed(self): ctxt = context.get_admin_context() instance = self.create_instance_with_args() db.instance_destroy(ctxt, instance['uuid']) self.assertRaises(exception.InstanceNotFound, db.instance_destroy, ctxt, instance['uuid']) def test_check_instance_exists(self): session = get_session() instance = self.create_instance_with_args() self.assertIsNone(sqlalchemy_api._check_instance_exists_in_project( self.ctxt, session, instance['uuid'])) def test_check_instance_exists_non_existing_instance(self): session = get_session() self.assertRaises(exception.InstanceNotFound, sqlalchemy_api._check_instance_exists_in_project, self.ctxt, session, '123') def test_check_instance_exists_from_different_tenant(self): context1 = context.RequestContext('user1', 'project1') context2 = context.RequestContext('user2', 'project2') session = get_session() instance = self.create_instance_with_args(context=context1) self.assertIsNone(sqlalchemy_api._check_instance_exists_in_project( context1, session, instance['uuid'])) self.assertRaises(exception.InstanceNotFound, sqlalchemy_api._check_instance_exists_in_project, context2, session, instance['uuid']) def test_check_instance_exists_admin_context(self): session = get_session() some_context = context.RequestContext('some_user', 'some_project') instance = self.create_instance_with_args(context=some_context) # Check that method works correctly with admin context self.assertIsNone(sqlalchemy_api._check_instance_exists_in_project( self.ctxt, session, instance['uuid'])) class 
InstanceMetadataTestCase(test.TestCase): """Tests for db.api.instance_metadata_* methods.""" def setUp(self): super(InstanceMetadataTestCase, self).setUp() self.ctxt = context.get_admin_context() def test_instance_metadata_get(self): instance = db.instance_create(self.ctxt, {'metadata': {'key': 'value'}}) self.assertEqual({'key': 'value'}, db.instance_metadata_get( self.ctxt, instance['uuid'])) def test_instance_metadata_delete(self): instance = db.instance_create(self.ctxt, {'metadata': {'key': 'val', 'key1': 'val1'}}) db.instance_metadata_delete(self.ctxt, instance['uuid'], 'key1') self.assertEqual({'key': 'val'}, db.instance_metadata_get( self.ctxt, instance['uuid'])) def test_instance_metadata_update(self): instance = db.instance_create(self.ctxt, {'host': 'h1', 'project_id': 'p1', 'metadata': {'key': 'value'}}) # This should add new key/value pair db.instance_metadata_update(self.ctxt, instance['uuid'], {'new_key': 'new_value'}, False) metadata = db.instance_metadata_get(self.ctxt, instance['uuid']) self.assertEqual(metadata, {'key': 'value', 'new_key': 'new_value'}) # This should leave only one key/value pair db.instance_metadata_update(self.ctxt, instance['uuid'], {'new_key': 'new_value'}, True) metadata = db.instance_metadata_get(self.ctxt, instance['uuid']) self.assertEqual(metadata, {'new_key': 'new_value'}) class InstanceExtraTestCase(test.TestCase): def setUp(self): super(InstanceExtraTestCase, self).setUp() self.ctxt = context.get_admin_context() self.instance = db.instance_create(self.ctxt, {}) def test_instance_extra_get_by_uuid_instance_create(self): inst_extra = db.instance_extra_get_by_instance_uuid( self.ctxt, self.instance['uuid']) self.assertIsNotNone(inst_extra) def test_instance_extra_update_by_uuid(self): db.instance_extra_update_by_uuid(self.ctxt, self.instance['uuid'], {'numa_topology': 'changed'}) inst_extra = db.instance_extra_get_by_instance_uuid( self.ctxt, self.instance['uuid']) self.assertEqual('changed', inst_extra.numa_topology) 
    def test_instance_extra_update_by_uuid_and_create(self):
        """If the instance_extra row is missing, update_by_uuid creates it."""
        # Delete the row that instance_create made so the update has to
        # recreate it.
        sqlalchemy_api.model_query(self.ctxt, models.InstanceExtra).\
            filter_by(instance_uuid=self.instance['uuid']).\
            delete()
        inst_extra = db.instance_extra_get_by_instance_uuid(
            self.ctxt, self.instance['uuid'])
        self.assertIsNone(inst_extra)

        db.instance_extra_update_by_uuid(self.ctxt, self.instance['uuid'],
                                         {'numa_topology': 'changed'})
        inst_extra = db.instance_extra_get_by_instance_uuid(
            self.ctxt, self.instance['uuid'])
        self.assertEqual('changed', inst_extra.numa_topology)

    def test_instance_extra_get_with_columns(self):
        # When an explicit column list is given, only those columns are
        # loaded; touching an unloaded one raises from SQLAlchemy.
        extra = db.instance_extra_get_by_instance_uuid(
            self.ctxt, self.instance['uuid'],
            columns=['numa_topology', 'vcpu_model'])
        self.assertRaises(SQLAlchemyError,
                          extra.__getitem__, 'pci_requests')
        self.assertIn('numa_topology', extra)
        self.assertIn('vcpu_model', extra)


class ServiceTestCase(test.TestCase, ModelsObjectComparatorMixin):
    """Tests for db.api.service_* methods."""

    def setUp(self):
        super(ServiceTestCase, self).setUp()
        self.ctxt = context.get_admin_context()

    def _get_base_values(self):
        # Baseline service row used (and selectively overridden) by every
        # test below.
        return {
            'host': 'fake_host',
            'binary': 'fake_binary',
            'topic': 'fake_topic',
            'report_count': 3,
            'disabled': False,
            'forced_down': False
        }

    def _create_service(self, values):
        v = self._get_base_values()
        v.update(values)
        return db.service_create(self.ctxt, v)

    def test_service_create(self):
        service = self._create_service({})
        self.assertIsNotNone(service['id'])
        for key, value in self._get_base_values().items():
            self.assertEqual(value, service[key])

    def test_service_create_disabled(self):
        # With enable_new_services=False new services start out disabled.
        self.flags(enable_new_services=False)
        service = self._create_service({})
        self.assertTrue(service['disabled'])

    def test_service_destroy(self):
        # Destroying one service must not touch any other.
        service1 = self._create_service({})
        service2 = self._create_service({'host': 'fake_host2'})

        db.service_destroy(self.ctxt, service1['id'])
        self.assertRaises(exception.ServiceNotFound,
                          db.service_get, self.ctxt, service1['id'])
        self._assertEqualObjects(db.service_get(self.ctxt, service2['id']),
                                 service2,
                                 ignored_keys=['compute_node'])

    def test_service_update(self):
        service = self._create_service({})
        new_values = {
            'host': 'fake_host1',
            'binary': 'fake_binary1',
            'topic': 'fake_topic1',
            'report_count': 4,
            'disabled': True
        }
        db.service_update(self.ctxt, service['id'], new_values)
        updated_service = db.service_get(self.ctxt, service['id'])
        for key, value in new_values.items():
            self.assertEqual(value, updated_service[key])

    def test_service_update_not_found_exception(self):
        self.assertRaises(exception.ServiceNotFound,
                          db.service_update, self.ctxt, 100500, {})

    def test_service_update_with_set_forced_down(self):
        service = self._create_service({})
        db.service_update(self.ctxt, service['id'],
                          {'forced_down': True})
        updated_service = db.service_get(self.ctxt, service['id'])
        self.assertTrue(updated_service['forced_down'])

    def test_service_update_with_unset_forced_down(self):
        service = self._create_service({'forced_down': True})
        db.service_update(self.ctxt, service['id'],
                          {'forced_down': False})
        updated_service = db.service_get(self.ctxt, service['id'])
        self.assertFalse(updated_service['forced_down'])

    def test_service_get(self):
        service1 = self._create_service({})
        self._create_service({'host': 'some_other_fake_host'})
        real_service1 = db.service_get(self.ctxt, service1['id'])
        self._assertEqualObjects(service1, real_service1,
                                 ignored_keys=['compute_node'])

    def test_service_get_minimum_version(self):
        # The forced-down host3 (version 1) is excluded, so the minimum
        # among live 'compute' services is 2.
        self._create_service({'version': 1,
                              'host': 'host3',
                              'binary': 'compute',
                              'forced_down': True})
        self._create_service({'version': 2,
                              'host': 'host1',
                              'binary': 'compute'})
        self._create_service({'version': 3,
                              'host': 'host2',
                              'binary': 'compute'})
        self.assertEqual(2, db.service_get_minimum_version(self.ctxt,
                                                           'compute'))

    def test_service_get_not_found_exception(self):
        self.assertRaises(exception.ServiceNotFound,
                          db.service_get, self.ctxt, 100500)

    def test_service_get_by_host_and_topic(self):
        service1 = self._create_service({'host': 'host1', 'topic': 'topic1'})
        self._create_service({'host': 'host2', 'topic': 'topic2'})

        real_service1 = db.service_get_by_host_and_topic(self.ctxt,
                                                         host='host1',
                                                         topic='topic1')
        self._assertEqualObjects(service1, real_service1)

    def test_service_get_by_host_and_binary(self):
        service1 = self._create_service({'host': 'host1', 'binary': 'foo'})
        self._create_service({'host': 'host2', 'binary': 'bar'})

        real_service1 = db.service_get_by_host_and_binary(self.ctxt,
                                                          host='host1',
                                                          binary='foo')
        self._assertEqualObjects(service1, real_service1)

    def test_service_get_by_host_and_binary_raises(self):
        self.assertRaises(exception.HostBinaryNotFound,
                          db.service_get_by_host_and_binary, self.ctxt,
                          host='host1', binary='baz')

    def test_service_get_all(self):
        # service_get_all's second argument filters on the disabled flag;
        # omitted means no filter.
        values = [
            {'host': 'host1', 'topic': 'topic1'},
            {'host': 'host2', 'topic': 'topic2'},
            {'disabled': True}
        ]
        services = [self._create_service(vals) for vals in values]
        disabled_services = [services[-1]]
        non_disabled_services = services[:-1]

        compares = [
            (services, db.service_get_all(self.ctxt)),
            (disabled_services, db.service_get_all(self.ctxt, True)),
            (non_disabled_services, db.service_get_all(self.ctxt, False))
        ]
        for comp in compares:
            self._assertEqualListsOfObjects(*comp)

    def test_service_get_all_by_topic(self):
        # Disabled services are excluded from the by-topic listing.
        values = [
            {'host': 'host1', 'topic': 't1'},
            {'host': 'host2', 'topic': 't1'},
            {'disabled': True, 'topic': 't1'},
            {'host': 'host3', 'topic': 't2'}
        ]
        services = [self._create_service(vals) for vals in values]
        expected = services[:2]
        real = db.service_get_all_by_topic(self.ctxt, 't1')
        self._assertEqualListsOfObjects(expected, real)

    def test_service_get_all_by_binary(self):
        # Disabled services are excluded from the by-binary listing.
        values = [
            {'host': 'host1', 'binary': 'b1'},
            {'host': 'host2', 'binary': 'b1'},
            {'disabled': True, 'binary': 'b1'},
            {'host': 'host3', 'binary': 'b2'}
        ]
        services = [self._create_service(vals) for vals in values]
        expected = services[:2]
        real = db.service_get_all_by_binary(self.ctxt, 'b1')
        self._assertEqualListsOfObjects(expected, real)

    def test_service_get_all_by_host(self):
        values = [
            {'host': 'host1', 'topic': 't11', 'binary': 'b11'},
            {'host': 'host1', 'topic': 't12', 'binary': 'b12'},
            {'host': 'host2', 'topic': 't1'},
            {'host': 'host3', 'topic': 't1'}
        ]
        services = [self._create_service(vals) for vals in values]
        expected = services[:2]
        real = db.service_get_all_by_host(self.ctxt, 'host1')
        self._assertEqualListsOfObjects(expected, real)

    def test_service_get_by_compute_host(self):
        values = [
            {'host': 'host1', 'binary': 'nova-compute'},
            {'host': 'host2', 'binary': 'nova-scheduler'},
            {'host': 'host3', 'binary': 'nova-compute'}
        ]
        services = [self._create_service(vals) for vals in values]

        real_service = db.service_get_by_compute_host(self.ctxt, 'host1')
        self._assertEqualObjects(services[0], real_service)

        self.assertRaises(exception.ComputeHostNotFound,
                          db.service_get_by_compute_host, self.ctxt,
                          'non-exists-host')

    def test_service_get_by_compute_host_not_found(self):
        self.assertRaises(exception.ComputeHostNotFound,
                          db.service_get_by_compute_host, self.ctxt,
                          'non-exists-host')

    def test_service_binary_exists_exception(self):
        # Same host+binary with a different topic violates the uniqueness
        # constraint.
        db.service_create(self.ctxt, self._get_base_values())
        values = self._get_base_values()
        values.update({'topic': 'top1'})
        self.assertRaises(exception.ServiceBinaryExists, db.service_create,
                          self.ctxt, values)

    def test_service_topic_exists_exceptions(self):
        # Same host+topic with a different binary violates the uniqueness
        # constraint.
        db.service_create(self.ctxt, self._get_base_values())
        values = self._get_base_values()
        values.update({'binary': 'bin1'})
        self.assertRaises(exception.ServiceTopicExists, db.service_create,
                          self.ctxt, values)


class BaseInstanceTypeTestCase(test.TestCase, ModelsObjectComparatorMixin):
    """Shared fixture for flavor (instance-type) DB tests.

    NOTE(review): this class is truncated at the chunk boundary below --
    ``_get_base_values`` is cut off mid-dictionary.
    """

    def setUp(self):
        super(BaseInstanceTypeTestCase, self).setUp()
        self.ctxt = context.get_admin_context()
        self.user_ctxt = context.RequestContext('user', 'user')

    def _get_base_values(self):
        return {
            'name': 'fake_name',
            'memory_mb': 512,
            'vcpus': 1,
            'root_gb': 10,
            'ephemeral_gb': 10,
            'flavorid': 'fake_flavor',
            'swap': 0,
            'rxtx
# codeparrot/github-code-clean  -- dataset concatenation marker (residue of
# the corpus join; not part of either surrounding source file)
"""
Classes for interpolating values.
"""
from __future__ import division, print_function, absolute_import

__all__ = ['interp1d', 'interp2d', 'spline', 'spleval', 'splmake', 'spltopp',
           'ppform', 'lagrange', 'PPoly', 'BPoly', 'NdPPoly',
           'RegularGridInterpolator', 'interpn']

import itertools
import warnings
import functools
import operator

import numpy as np
from numpy import (array, transpose, searchsorted, atleast_1d, atleast_2d,
                   dot, ravel, poly1d, asarray, intp)

import scipy.linalg
import scipy.special as spec
from scipy.special import comb
from scipy._lib.six import xrange, integer_types, string_types

from . import fitpack
from . import dfitpack
from . import _fitpack
from .polyint import _Interpolator1D
from . import _ppoly
from .fitpack2 import RectBivariateSpline
from .interpnd import _ndim_coords_from_arrays
from ._bsplines import make_interp_spline, BSpline


def prod(x):
    """Product of the numbers in `x`; ~40x faster vs np.prod for Python tuples.

    Accepts any iterable (not just sized sequences); an empty iterable
    yields 1, matching the previous ``len(x) == 0`` special case.
    """
    # reduce() with an initializer of 1 covers the empty case without
    # needing len(), so unsized iterables (e.g. generators) also work.
    return functools.reduce(operator.mul, x, 1)


def lagrange(x, w):
    """
    Return a Lagrange interpolating polynomial.

    Given two 1-D arrays `x` and `w,` returns the Lagrange interpolating
    polynomial through the points ``(x, w)``.

    Warning: This implementation is numerically unstable. Do not expect to
    be able to use more than about 20 points even if they are chosen optimally.

    Parameters
    ----------
    x : array_like
        `x` represents the x-coordinates of a set of datapoints.
    w : array_like
        `w` represents the y-coordinates of a set of datapoints, i.e. f(`x`).

    Returns
    -------
    lagrange : numpy.poly1d instance
        The Lagrange interpolating polynomial.
    """
    M = len(x)
    p = poly1d(0.0)
    for j in xrange(M):
        # Basis polynomial for node j: product over k != j of
        # (t - x[k]) / (x[j] - x[k]), scaled by w[j].
        pt = poly1d(w[j])
        for k in xrange(M):
            if k == j:
                continue
            fac = x[j]-x[k]
            pt *= poly1d([1.0, -x[k]])/fac
        p += pt
    return p


# !! Need to find argument for keeping initialize.  If it isn't
# !! found, get rid of it!
class interp2d(object):
    """
    interp2d(x, y, z, kind='linear', copy=True, bounds_error=False,
             fill_value=nan)

    Interpolate over a 2-D grid.

    `x`, `y` and `z` are arrays of values used to approximate some function
    f: ``z = f(x, y)``. This class returns a function whose call method uses
    spline interpolation to find the value of new points.

    If `x` and `y` represent a regular grid, consider using
    RectBivariateSpline.

    Note that calling `interp2d` with NaNs present in input values results in
    undefined behaviour.

    Methods
    -------
    __call__

    Parameters
    ----------
    x, y : array_like
        Arrays defining the data point coordinates.

        If the points lie on a regular grid, `x` can specify the column
        coordinates and `y` the row coordinates, for example::

          >>> x = [0,1,2];  y = [0,3]; z = [[1,2,3], [4,5,6]]

        Otherwise, `x` and `y` must specify the full coordinates for each
        point, for example::

          >>> x = [0,1,2,0,1,2];  y = [0,0,0,3,3,3]; z = [1,2,3,4,5,6]

        If `x` and `y` are multi-dimensional, they are flattened before use.
    z : array_like
        The values of the function to interpolate at the data points. If
        `z` is a multi-dimensional array, it is flattened before use.  The
        length of a flattened `z` array is either len(`x`)*len(`y`) if `x`
        and `y` specify the column and row coordinates or
        ``len(z) == len(x) == len(y)`` if `x` and `y` specify coordinates
        for each point.
    kind : {'linear', 'cubic', 'quintic'}, optional
        The kind of spline interpolation to use. Default is 'linear'.
    copy : bool, optional
        If True, the class makes internal copies of x, y and z.
        If False, references may be used. The default is to copy.
    bounds_error : bool, optional
        If True, when interpolated values are requested outside of the
        domain of the input data (x,y), a ValueError is raised.
        If False, then `fill_value` is used.
    fill_value : number, optional
        If provided, the value to use for points outside of the
        interpolation domain. If omitted (None), values outside
        the domain are extrapolated.

    See Also
    --------
    RectBivariateSpline :
        Much faster 2D interpolation if your input data is on a grid
    bisplrep, bisplev : Spline interpolation based on FITPACK
    BivariateSpline : a more recent wrapper of the FITPACK routines
    interp1d : one dimension version of this function

    Notes
    -----
    The minimum number of data points required along the interpolation
    axis is ``(k+1)**2``, with k=1 for linear, k=3 for cubic and k=5 for
    quintic interpolation.

    The interpolator is constructed by `bisplrep`, with a smoothing factor
    of 0. If more control over smoothing is needed, `bisplrep` should be
    used directly.

    Examples
    --------
    Construct a 2-D grid and interpolate on it:

    >>> from scipy import interpolate
    >>> x = np.arange(-5.01, 5.01, 0.25)
    >>> y = np.arange(-5.01, 5.01, 0.25)
    >>> xx, yy = np.meshgrid(x, y)
    >>> z = np.sin(xx**2+yy**2)
    >>> f = interpolate.interp2d(x, y, z, kind='cubic')

    Now use the obtained interpolation function and plot the result:

    >>> import matplotlib.pyplot as plt
    >>> xnew = np.arange(-5.01, 5.01, 1e-2)
    >>> ynew = np.arange(-5.01, 5.01, 1e-2)
    >>> znew = f(xnew, ynew)
    >>> plt.plot(x, z[0, :], 'ro-', xnew, znew[0, :], 'b-')
    >>> plt.show()
    """

    def __init__(self, x, y, z, kind='linear', copy=True, bounds_error=False,
                 fill_value=None):
        x = ravel(x)
        y = ravel(y)
        z = asarray(z)

        # Heuristic: sizes matching m*n means x/y are grid axes rather than
        # per-point coordinates.
        rectangular_grid = (z.size == len(x) * len(y))
        if rectangular_grid:
            if z.ndim == 2:
                if z.shape != (len(y), len(x)):
                    raise ValueError("When on a regular grid with x.size = m "
                                     "and y.size = n, if z.ndim == 2, then z "
                                     "must have shape (n, m)")
            # Sort each axis (and permute z accordingly) so the data is
            # monotonically increasing, as the FITPACK routines require.
            if not np.all(x[1:] >= x[:-1]):
                j = np.argsort(x)
                x = x[j]
                z = z[:, j]
            if not np.all(y[1:] >= y[:-1]):
                j = np.argsort(y)
                y = y[j]
                z = z[j, :]
            z = ravel(z.T)
        else:
            z = ravel(z)
            if len(x) != len(y):
                raise ValueError(
                    "x and y must have equal lengths for non rectangular grid")
            if len(z) != len(x):
                raise ValueError(
                    "Invalid length for input z for non rectangular grid")

        try:
            # Spline degree in both directions, keyed by interpolation kind.
            kx = ky = {'linear': 1,
                       'cubic': 3,
                       'quintic': 5}[kind]
        except KeyError:
            raise ValueError("Unsupported interpolation type.")

        if not rectangular_grid:
            # TODO: surfit is really not meant for interpolation!
            self.tck = fitpack.bisplrep(x, y, z, kx=kx, ky=ky, s=0.0)
        else:
            nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(
                x, y, z, None, None, None, None,
                kx=kx, ky=ky, s=0.0)
            self.tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)],
                        kx, ky)

        self.bounds_error = bounds_error
        self.fill_value = fill_value
        self.x, self.y, self.z = [array(a, copy=copy) for a in (x, y, z)]

        self.x_min, self.x_max = np.amin(x), np.amax(x)
        self.y_min, self.y_max = np.amin(y), np.amax(y)

    def __call__(self, x, y, dx=0, dy=0, assume_sorted=False):
        """Interpolate the function.

        Parameters
        ----------
        x : 1D array
            x-coordinates of the mesh on which to interpolate.
        y : 1D array
            y-coordinates of the mesh on which to interpolate.
        dx : int >= 0, < kx
            Order of partial derivatives in x.
        dy : int >= 0, < ky
            Order of partial derivatives in y.
        assume_sorted : bool, optional
            If False, values of `x` and `y` can be in any order and they are
            sorted first.
            If True, `x` and `y` have to be arrays of monotonically
            increasing values.

        Returns
        -------
        z : 2D array with shape (len(y), len(x))
            The interpolated values.
        """

        x = atleast_1d(x)
        y = atleast_1d(y)

        if x.ndim != 1 or y.ndim != 1:
            raise ValueError("x and y should both be 1-D arrays")

        if not assume_sorted:
            x = np.sort(x)
            y = np.sort(y)

        if self.bounds_error or self.fill_value is not None:
            out_of_bounds_x = (x < self.x_min) | (x > self.x_max)
            out_of_bounds_y = (y < self.y_min) | (y > self.y_max)

            any_out_of_bounds_x = np.any(out_of_bounds_x)
            any_out_of_bounds_y = np.any(out_of_bounds_y)

        if self.bounds_error and (any_out_of_bounds_x or any_out_of_bounds_y):
            raise ValueError("Values out of range; x must be in %r, y in %r"
                             % ((self.x_min, self.x_max),
                                (self.y_min, self.y_max)))

        z = fitpack.bisplev(x, y, self.tck, dx, dy)
        z = atleast_2d(z)
        # bisplev returns (len(x), len(y)); transpose to the documented
        # (len(y), len(x)) layout.
        z = transpose(z)

        if self.fill_value is not None:
            if any_out_of_bounds_x:
                z[:, out_of_bounds_x] = self.fill_value
            if any_out_of_bounds_y:
                z[out_of_bounds_y, :] = self.fill_value

        if len(z) == 1:
            z = z[0]
        return array(z)


def _check_broadcast_up_to(arr_from, shape_to, name):
    """Helper to check that arr_from broadcasts up to shape_to"""
    shape_from = arr_from.shape
    if len(shape_to) >= len(shape_from):
        for t, f in zip(shape_to[::-1], shape_from[::-1]):
            if f != 1 and f != t:
                break
        else:  # all checks pass, do the upcasting that we need later
            if arr_from.size != 1 and arr_from.shape != shape_to:
                arr_from = np.ones(shape_to, arr_from.dtype) * arr_from
            return arr_from.ravel()
    # at least one check failed
    raise ValueError('%s argument must be able to broadcast up '
                     'to shape %s but had shape %s'
                     % (name, shape_to, shape_from))


def _do_extrapolate(fill_value):
    """Helper to check if fill_value == "extrapolate" without warnings"""
    # isinstance guard avoids elementwise comparison warnings when
    # fill_value is an array.
    return (isinstance(fill_value, string_types) and
            fill_value == 'extrapolate')


class interp1d(_Interpolator1D):
    """
    Interpolate a 1-D function.

    `x` and `y` are arrays of values used to approximate some function f:
    ``y = f(x)``.  This class returns a function whose call method uses
    interpolation to find the value of new points.

    Note that calling `interp1d` with NaNs present in input values results in
    undefined behaviour.

    Parameters
    ----------
    x : (N,) array_like
        A 1-D array of real values.
    y : (...,N,...) array_like
        A N-D array of real values. The length of `y` along the interpolation
        axis must be equal to the length of `x`.
    kind : str or int, optional
        Specifies the kind of interpolation as a string
        ('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'
        where 'zero', 'slinear', 'quadratic' and 'cubic' refer to a spline
        interpolation of zeroth, first, second or third order) or as an
        integer specifying the order of the spline interpolator to use.
        Default is 'linear'.
    axis : int, optional
        Specifies the axis of `y` along which to interpolate.
        Interpolation defaults to the last axis of `y`.
    copy : bool, optional
        If True, the class makes internal copies of x and y.
        If False, references to `x` and `y` are used. The default is to copy.
    bounds_error : bool, optional
        If True, a ValueError is raised any time interpolation is attempted on
        a value outside of the range of x (where extrapolation is
        necessary). If False, out of bounds values are assigned `fill_value`.
        By default, an error is raised unless `fill_value="extrapolate"`.
    fill_value : array-like or (array-like, array_like) or "extrapolate", optional
        - if a ndarray (or float), this value will be used to fill in for
          requested points outside of the data range. If not provided, then
          the default is NaN. The array-like must broadcast properly to the
          dimensions of the non-interpolation axes.
        - If a two-element tuple, then the first element is used as a
          fill value for ``x_new < x[0]`` and the second element is used for
          ``x_new > x[-1]``. Anything that is not a 2-element tuple (e.g.,
          list or ndarray, regardless of shape) is taken to be a single
          array-like argument meant to be used for both bounds as
          ``below, above = fill_value, fill_value``.

          .. versionadded:: 0.17.0
        - If "extrapolate", then points outside the data range will be
          extrapolated.

          .. versionadded:: 0.17.0
    assume_sorted : bool, optional
        If False, values of `x` can be in any order and they are sorted first.
        If True, `x` has to be an array of monotonically increasing values.

    Methods
    -------
    __call__

    See Also
    --------
    splrep, splev
        Spline interpolation/smoothing based on FITPACK.
    UnivariateSpline : An object-oriented wrapper of the FITPACK routines.
    interp2d : 2-D interpolation

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> from scipy import interpolate
    >>> x = np.arange(0, 10)
    >>> y = np.exp(-x/3.0)
    >>> f = interpolate.interp1d(x, y)

    >>> xnew = np.arange(0, 9, 0.1)
    >>> ynew = f(xnew)   # use interpolation function returned by `interp1d`
    >>> plt.plot(x, y, 'o', xnew, ynew, '-')
    >>> plt.show()
    """

    def __init__(self, x, y, kind='linear', axis=-1,
                 copy=True, bounds_error=None, fill_value=np.nan,
                 assume_sorted=False):
        """ Initialize a 1D linear interpolation class."""
        _Interpolator1D.__init__(self, x, y, axis=axis)

        self.bounds_error = bounds_error  # used by fill_value setter
        self.copy = copy

        # Map named spline kinds (and raw integer orders) to a spline order;
        # everything else must be 'linear' or 'nearest'.
        if kind in ['zero', 'slinear', 'quadratic', 'cubic']:
            order = {'zero': 0, 'slinear': 1,
                     'quadratic': 2, 'cubic': 3}[kind]
            kind = 'spline'
        elif isinstance(kind, int):
            order = kind
            kind = 'spline'
        elif kind not in ('linear', 'nearest'):
            raise NotImplementedError("%s is unsupported: Use fitpack "
                                      "routines for other types." % kind)
        x = array(x, copy=self.copy)
        y = array(y, copy=self.copy)

        if not assume_sorted:
            ind = np.argsort(x)
            x = x[ind]
            y = np.take(y, ind, axis=axis)

        if x.ndim != 1:
            raise ValueError("the x array must have exactly one dimension.")
        if y.ndim == 0:
            raise ValueError("the y array must have at least one dimension.")

        # Force-cast y to a floating-point type, if it's not yet one
        if not issubclass(y.dtype.type, np.inexact):
            y = y.astype(np.float_)

        # Backward compatibility
        self.axis = axis % y.ndim

        # Interpolation goes internally along the first axis
        self.y = y
        self._y = self._reshape_yi(self.y)
        self.x = x
        del y, x  # clean up namespace to prevent misuse; use attributes
        self._kind = kind
        self.fill_value = fill_value  # calls the setter, can modify bounds_err

        # Adjust to interpolation kind; store reference to *unbound*
        # interpolation methods, in order to avoid circular references to self
        # stored in the bound instance methods, and therefore delayed garbage
        # collection.  See: http://docs.python.org/2/reference/datamodel.html
        if kind in ('linear', 'nearest'):
            # Make a "view" of the y array that is rotated to the
            # interpolation axis.
            minval = 2
            if kind == 'nearest':
                # Do division before addition to prevent possible integer
                # overflow
                self.x_bds = self.x / 2.0
                self.x_bds = self.x_bds[1:] + self.x_bds[:-1]

                self._call = self.__class__._call_nearest
            else:
                # Check if we can delegate to numpy.interp (2x-10x faster).
                cond = self.x.dtype == np.float_ and self.y.dtype == np.float_
                cond = cond and self.y.ndim == 1
                cond = cond and not _do_extrapolate(fill_value)

                if cond:
                    self._call = self.__class__._call_linear_np
                else:
                    self._call = self.__class__._call_linear
        else:
            minval = order + 1

            rewrite_nan = False
            xx, yy = self.x, self._y
            if order > 1:
                # Quadratic or cubic spline. If input contains even a single
                # nan, then the output is all nans. We cannot just feed data
                # with nans to make_interp_spline because it calls LAPACK.
                # So, we make up a bogus x and y with no nans and use it
                # to get the correct shape of the output, which we then fill
                # with nans.
                # For slinear or zero order spline, we just pass nans through.
                if np.isnan(self.x).any():
                    xx = np.linspace(min(self.x), max(self.x), len(self.x))
                    rewrite_nan = True
                if np.isnan(self._y).any():
                    yy = np.ones_like(self._y)
                    rewrite_nan = True

            self._spline = make_interp_spline(xx, yy, k=order,
                                              check_finite=False)
            if rewrite_nan:
                self._call = self.__class__._call_nan_spline
            else:
                self._call = self.__class__._call_spline

        if len(self.x) < minval:
            raise ValueError("x and y arrays must have at "
                             "least %d entries" % minval)

    @property
    def fill_value(self):
        # backwards compat: mimic a public attribute
        return self._fill_value_orig

    @fill_value.setter
    def fill_value(self, fill_value):
        # extrapolation only works for nearest neighbor and linear methods
        if _do_extrapolate(fill_value):
            if self.bounds_error:
                raise ValueError("Cannot extrapolate and raise "
                                 "at the same time.")
            self.bounds_error = False
            self._extrapolate = True
        else:
            # Shape of the non-interpolation axes, which the fill value(s)
            # must broadcast up to.
            broadcast_shape = (self.y.shape[:self.axis] +
                               self.y.shape[self.axis + 1:])
            if len(broadcast_shape) == 0:
                broadcast_shape = (1,)
            # it's either a pair (_below_range, _above_range) or a single value
            # for both above and below range
            if isinstance(fill_value, tuple) and len(fill_value) == 2:
                below_above = [np.asarray(fill_value[0]),
                               np.asarray(fill_value[1])]
                names = ('fill_value (below)', 'fill_value (above)')
                for ii in range(2):
                    below_above[ii] = _check_broadcast_up_to(
                        below_above[ii], broadcast_shape, names[ii])
            else:
                fill_value = np.asarray(fill_value)
                below_above = [_check_broadcast_up_to(
                    fill_value, broadcast_shape, 'fill_value')] * 2
            self._fill_value_below, self._fill_value_above = below_above
            self._extrapolate = False
            if self.bounds_error is None:
                self.bounds_error = True
        # backwards compat: fill_value was a public attr; make it writeable
        self._fill_value_orig = fill_value

    def _call_linear_np(self, x_new):
        # Note that out-of-bounds values are taken care of in self._evaluate
        return np.interp(x_new, self.x, self.y)

    def _call_linear(self, x_new):
        # 2. Find where in the orignal data, the values to interpolate
        #    would be inserted.
        #    Note: If x_new[n] == x[m], then m is returned by searchsorted.
        x_new_indices = searchsorted(self.x, x_new)

        # 3. Clip x_new_indices so that they are within the range of
        #    self.x indices and at least 1.  Removes mis-interpolation
        #    of x_new[n] = x[0]
        x_new_indices = x_new_indices.clip(1, len(self.x)-1).astype(int)

        # 4. Calculate the slope of regions that each x_new value falls in.
        lo = x_new_indices - 1
        hi = x_new_indices

        x_lo = self.x[lo]
        x_hi = self.x[hi]
        y_lo = self._y[lo]
        y_hi = self._y[hi]

        # Note that the following two expressions rely on the specifics of the
        # broadcasting semantics.
        slope = (y_hi - y_lo) / (x_hi - x_lo)[:, None]

        # 5. Calculate the actual value for each entry in x_new.
        y_new = slope*(x_new - x_lo)[:, None] + y_lo

        return y_new

    def _call_nearest(self, x_new):
        """ Find nearest neighbour interpolated y_new = f(x_new)."""

        # 2. Find where in the averaged data the values to interpolate
        #    would be inserted.
        #    Note: use side='left' (right) to searchsorted() to define the
        #    halfway point to be nearest to the left (right) neighbour
        x_new_indices = searchsorted(self.x_bds, x_new, side='left')

        # 3. Clip x_new_indices so that they are within the range of x indices.
        x_new_indices = x_new_indices.clip(0, len(self.x)-1).astype(intp)

        # 4. Calculate the actual value for each entry in x_new.
        y_new = self._y[x_new_indices]

        return y_new

    def _call_spline(self, x_new):
        return self._spline(x_new)

    def _call_nan_spline(self, x_new):
        # Spline was fit on bogus nan-free data purely for output shape;
        # the true result is all-nan (see __init__).
        out = self._spline(x_new)
        out[...] = np.nan
        return out

    def _evaluate(self, x_new):
        # 1. Handle values in x_new that are outside of x.  Throw error,
        #    or return a list of mask array indicating the outofbounds values.
        #    The behavior is set by the bounds_error variable.
        x_new = asarray(x_new)
        y_new = self._call(self, x_new)
        if not self._extrapolate:
            below_bounds, above_bounds = self._check_bounds(x_new)
            if len(y_new) > 0:
                # Note fill_value must be broadcast up to the proper size
                # and flattened to work here
                y_new[below_bounds] = self._fill_value_below
                y_new[above_bounds] = self._fill_value_above
        return y_new

    def _check_bounds(self, x_new):
        """Check the inputs for being in the bounds of the interpolated data.

        Parameters
        ----------
        x_new : array

        Returns
        -------
        out_of_bounds : bool array
            The mask on x_new of values that are out of the bounds.
        """

        # If self.bounds_error is True, we raise an error if any x_new values
        # fall outside the range of x.  Otherwise, we return an array
        # indicating which values are outside the boundary region.
        below_bounds = x_new < self.x[0]
        above_bounds = x_new > self.x[-1]

        # !! Could provide more information about which values are out of
        # !! bounds
        if self.bounds_error and below_bounds.any():
            raise ValueError("A value in x_new is below the interpolation "
                             "range.")
        if self.bounds_error and above_bounds.any():
            raise ValueError("A value in x_new is above the interpolation "
                             "range.")

        # !! Should we emit a warning if some values are out of bounds?
        # !! matlab does not.
        return below_bounds, above_bounds


class _PPolyBase(object):
    """Base class for piecewise polynomials."""
    __slots__ = ('c', 'x', 'extrapolate', 'axis')

    def __init__(self, c, x, extrapolate=None, axis=0):
        self.c = np.asarray(c)
        self.x = np.ascontiguousarray(x, dtype=np.float64)

        if extrapolate is None:
            extrapolate = True
        elif extrapolate != 'periodic':
            extrapolate = bool(extrapolate)
        self.extrapolate = extrapolate

        if self.c.ndim < 2:
            raise ValueError("Coefficients array must be at least "
                             "2-dimensional.")

        if not (0 <= axis < self.c.ndim - 1):
            raise ValueError("axis=%s must be between 0 and %s" %
                             (axis, self.c.ndim-1))

        self.axis = axis
        if axis != 0:
            # roll the interpolation axis to be the first one in self.c
            # More specifically, the target shape for self.c is (k, m, ...),
            # and axis !=0 means that we have c.shape (..., k, m, ...)
            #                                               ^
            #                                              axis
            # So we roll two of them.
            self.c = np.rollaxis(self.c, axis+1)
            self.c = np.rollaxis(self.c, axis+1)

        if self.x.ndim != 1:
            raise ValueError("x must be 1-dimensional")
        if self.x.size < 2:
            raise ValueError("at least 2 breakpoints are needed")
        if self.c.ndim < 2:
            raise ValueError("c must have at least 2 dimensions")
        if self.c.shape[0] == 0:
            raise ValueError("polynomial must be at least of order 0")
        if self.c.shape[1] != self.x.size-1:
            raise ValueError("number of coefficients != len(x)-1")
        dx = np.diff(self.x)
        if not (np.all(dx >= 0) or np.all(dx <= 0)):
            raise ValueError("`x` must be strictly increasing or decreasing.")

        dtype = self._get_dtype(self.c.dtype)
        self.c = np.ascontiguousarray(self.c, dtype=dtype)

    def _get_dtype(self, dtype):
        # Coefficients are stored either as complex or as float, never as
        # an integer type.
        if np.issubdtype(dtype, np.complexfloating) \
               or np.issubdtype(self.c.dtype, np.complexfloating):
            return np.complex_
        else:
            return np.float_

    @classmethod
    def construct_fast(cls, c, x, extrapolate=None, axis=0):
        """
        Construct the piecewise polynomial without making checks.

        Takes the same parameters as the constructor. Input arguments
        `c` and `x` must be arrays of the correct shape and type.  The
        `c` array can only be of dtypes float and complex, and `x`
        array must have dtype float.
        """
        self = object.__new__(cls)
        self.c = c
        self.x = x
        self.axis = axis
        if extrapolate is None:
            extrapolate = True
        self.extrapolate = extrapolate
        return self

    def _ensure_c_contiguous(self):
        """
        c and x may be modified by the user. The Cython code expects
        that they are C contiguous.
        """
        if not self.x.flags.c_contiguous:
            self.x = self.x.copy()
        if not self.c.flags.c_contiguous:
            self.c = self.c.copy()

    def extend(self, c, x, right=None):
        """
        Add additional breakpoints and coefficients to the polynomial.

        Parameters
        ----------
        c : ndarray, size (k, m, ...)
            Additional coefficients for polynomials in intervals. Note that
            the first additional interval will be formed using one of the
            `self.x` end points.
        x : ndarray, size (m,)
            Additional breakpoints. Must be sorted in the same order as
            `self.x` and either to the right or to the left of the current
            breakpoints.
        right
            Deprecated argument. Has no effect.

            .. deprecated:: 0.19
        """
        if right is not None:
            warnings.warn("`right` is deprecated and will be removed.")

        c = np.asarray(c)
        x = np.asarray(x)

        if c.ndim < 2:
            raise ValueError("invalid dimensions for c")
        if x.ndim != 1:
            raise ValueError("invalid dimensions for x")
        if x.shape[0] != c.shape[1]:
            raise ValueError("x and c have incompatible sizes")
        if c.shape[2:] != self.c.shape[2:] or c.ndim != self.c.ndim:
            raise ValueError("c and self.c have incompatible shapes")

        if c.size == 0:
            return

        dx = np.diff(x)
        if not (np.all(dx >= 0) or np.all(dx <= 0)):
            raise ValueError("`x` is not sorted.")

        if self.x[-1] >= self.x[0]:
            # self.x increasing: new points must also increase and sit
            # entirely to one side of the existing breakpoints.
            if not x[-1] >= x[0]:
                raise ValueError("`x` is in the different order "
                                 "than `self.x`.")

            if x[0] >= self.x[-1]:
                action = 'append'
            elif x[-1] <= self.x[0]:
                action = 'prepend'
            else:
                raise ValueError("`x` is neither on the left or on the right "
                                 "from `self.x`.")
        else:
            # self.x decreasing: mirror of the above.
            if not x[-1] <= x[0]:
                raise ValueError("`x` is in the different order "
                                 "than `self.x`.")

            if x[0] <= self.x[-1]:
                action = 
'append' elif x[-1] >= self.x[0]: action = 'prepend' else: raise ValueError("`x` is neither on the left or on the right " "from `self.x`.") dtype = self._get_dtype(c.dtype) k2 = max(c.shape[0], self.c.shape[0]) c2 = np.zeros((k2, self.c.shape[1] + c.shape[1]) + self.c.shape[2:], dtype=dtype) if action == 'append': c2[k2-self.c.shape[0]:, :self.c.shape[1]] = self.c c2[k2-c.shape[0]:, self.c.shape[1]:] = c self.x = np.r_[self.x, x] elif action == 'prepend': c2[k2-self.c.shape[0]:, :c.shape[1]] = c c2[k2-c.shape[0]:, c.shape[1]:] = self.c self.x = np.r_[x, self.x] self.c = c2 def __call__(self, x, nu=0, extrapolate=None): """ Evaluate the piecewise polynomial or its derivative. Parameters ---------- x : array_like Points to evaluate the interpolant at. nu : int, optional Order of derivative to evaluate. Must be non-negative. extrapolate : {bool, 'periodic', None}, optional If bool, determines whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. If 'periodic', periodic extrapolation is used. If None (default), use `self.extrapolate`. Returns ------- y : array_like Interpolated values. Shape is determined by replacing the interpolation axis in the original array with the shape of x. Notes ----- Derivatives are evaluated piecewise for each polynomial segment, even if the polynomial is not differentiable at the breakpoints. The polynomial intervals are considered half-open, ``[a, b)``, except for the last interval which is closed ``[a, b]``. """ if extrapolate is None: extrapolate = self.extrapolate x = np.asarray(x) x_shape, x_ndim = x.shape, x.ndim x = np.ascontiguousarray(x.ravel(), dtype=np.float_) # With periodic extrapolation we map x to the segment # [self.x[0], self.x[-1]]. 
if extrapolate == 'periodic': x = self.x[0] + (x - self.x[0]) % (self.x[-1] - self.x[0]) extrapolate = False out = np.empty((len(x), prod(self.c.shape[2:])), dtype=self.c.dtype) self._ensure_c_contiguous() self._evaluate(x, nu, extrapolate, out) out = out.reshape(x_shape + self.c.shape[2:]) if self.axis != 0: # transpose to move the calculated values to the interpolation axis l = list(range(out.ndim)) l = l[x_ndim:x_ndim+self.axis] + l[:x_ndim] + l[x_ndim+self.axis:] out = out.transpose(l) return out class PPoly(_PPolyBase): """ Piecewise polynomial in terms of coefficients and breakpoints The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the local power basis:: S = sum(c[m, i] * (xp - x[i])**(k-m) for m in range(k+1)) where ``k`` is the degree of the polynomial. Parameters ---------- c : ndarray, shape (k, m, ...) Polynomial coefficients, order `k` and `m` intervals x : ndarray, shape (m+1,) Polynomial breakpoints. Must be sorted in either increasing or decreasing order. extrapolate : bool or 'periodic', optional If bool, determines whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. If 'periodic', periodic extrapolation is used. Default is True. axis : int, optional Interpolation axis. Default is zero. Attributes ---------- x : ndarray Breakpoints. c : ndarray Coefficients of the polynomials. They are reshaped to a 3-dimensional array with the last dimension representing the trailing dimensions of the original coefficient array. axis : int Interpolation axis. Methods ------- __call__ derivative antiderivative integrate solve roots extend from_spline from_bernstein_basis construct_fast See also -------- BPoly : piecewise polynomials in the Bernstein basis Notes ----- High-order polynomials in the power basis can be numerically unstable. Precision problems can start to appear for orders larger than 20-30. 
""" def _evaluate(self, x, nu, extrapolate, out): _ppoly.evaluate(self.c.reshape(self.c.shape[0], self.c.shape[1], -1), self.x, x, nu, bool(extrapolate), out) def derivative(self, nu=1): """ Construct a new piecewise polynomial representing the derivative. Parameters ---------- nu : int, optional Order of derivative to evaluate. Default is 1, i.e. compute the first derivative. If negative, the antiderivative is returned. Returns ------- pp : PPoly Piecewise polynomial of order k2 = k - n representing the derivative of this polynomial. Notes ----- Derivatives are evaluated piecewise for each polynomial segment, even if the polynomial is not differentiable at the breakpoints. The polynomial intervals are considered half-open, ``[a, b)``, except for the last interval which is closed ``[a, b]``. """ if nu < 0: return self.antiderivative(-nu) # reduce order if nu == 0: c2 = self.c.copy() else: c2 = self.c[:-nu, :].copy() if c2.shape[0] == 0: # derivative of order 0 is zero c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype) # multiply by the correct rising factorials factor = spec.poch(np.arange(c2.shape[0], 0, -1), nu) c2 *= factor[(slice(None),) + (None,)*(c2.ndim-1)] # construct a compatible polynomial return self.construct_fast(c2, self.x, self.extrapolate, self.axis) def antiderivative(self, nu=1): """ Construct a new piecewise polynomial representing the antiderivative. Antiderivative is also the indefinite integral of the function, and derivative is its inverse operation. Parameters ---------- nu : int, optional Order of antiderivative to evaluate. Default is 1, i.e. compute the first integral. If negative, the derivative is returned. Returns ------- pp : PPoly Piecewise polynomial of order k2 = k + n representing the antiderivative of this polynomial. Notes ----- The antiderivative returned by this function is continuous and continuously differentiable to order n-1, up to floating point rounding error. 
If antiderivative is computed and ``self.extrapolate='periodic'``, it will be set to False for the returned instance. This is done because the antiderivative is no longer periodic and its correct evaluation outside of the initially given x interval is difficult. """ if nu <= 0: return self.derivative(-nu) c = np.zeros((self.c.shape[0] + nu, self.c.shape[1]) + self.c.shape[2:], dtype=self.c.dtype) c[:-nu] = self.c # divide by the correct rising factorials factor = spec.poch(np.arange(self.c.shape[0], 0, -1), nu) c[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)] # fix continuity of added degrees of freedom self._ensure_c_contiguous() _ppoly.fix_continuity(c.reshape(c.shape[0], c.shape[1], -1), self.x, nu - 1) if self.extrapolate == 'periodic': extrapolate = False else: extrapolate = self.extrapolate # construct a compatible polynomial return self.construct_fast(c, self.x, extrapolate, self.axis) def integrate(self, a, b, extrapolate=None): """ Compute a definite integral over a piecewise polynomial. Parameters ---------- a : float Lower integration bound b : float Upper integration bound extrapolate : {bool, 'periodic', None}, optional If bool, determines whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. If 'periodic', periodic extrapolation is used. If None (default), use `self.extrapolate`. Returns ------- ig : array_like Definite integral of the piecewise polynomial over [a, b] """ if extrapolate is None: extrapolate = self.extrapolate # Swap integration bounds if needed sign = 1 if b < a: a, b = b, a sign = -1 range_int = np.empty((prod(self.c.shape[2:]),), dtype=self.c.dtype) self._ensure_c_contiguous() # Compute the integral. if extrapolate == 'periodic': # Split the integral into the part over period (can be several # of them) and the remaining part. 
xs, xe = self.x[0], self.x[-1] period = xe - xs interval = b - a n_periods, left = divmod(interval, period) if n_periods > 0: _ppoly.integrate( self.c.reshape(self.c.shape[0], self.c.shape[1], -1), self.x, xs, xe, False, out=range_int) range_int *= n_periods else: range_int.fill(0) # Map a to [xs, xe], b is always a + left. a = xs + (a - xs) % period b = a + left # If b <= xe then we need to integrate over [a, b], otherwise # over [a, xe] and from xs to what is remained. remainder_int = np.empty_like(range_int) if b <= xe: _ppoly.integrate( self.c.reshape(self.c.shape[0], self.c.shape[1], -1), self.x, a, b, False, out=remainder_int) range_int += remainder_int else: _ppoly.integrate( self.c.reshape(self.c.shape[0], self.c.shape[1], -1), self.x, a, xe, False, out=remainder_int) range_int += remainder_int _ppoly.integrate( self.c.reshape(self.c.shape[0], self.c.shape[1], -1), self.x, xs, xs + left + a - xe, False, out=remainder_int) range_int += remainder_int else: _ppoly.integrate( self.c.reshape(self.c.shape[0], self.c.shape[1], -1), self.x, a, b, bool(extrapolate), out=range_int) # Return range_int *= sign return range_int.reshape(self.c.shape[2:]) def solve(self, y=0., discontinuity=True, extrapolate=None): """ Find real solutions of the the equation ``pp(x) == y``. Parameters ---------- y : float, optional Right-hand side. Default is zero. discontinuity : bool, optional Whether to report sign changes across discontinuities at breakpoints as roots. extrapolate : {bool, 'periodic', None}, optional If bool, determines whether to return roots from the polynomial extrapolated based on first and last intervals, 'periodic' works the same as False. If None (default), use `self.extrapolate`. Returns ------- roots : ndarray Roots of the polynomial(s). If the PPoly object describes multiple polynomials, the return value is an object array whose each element is an ndarray containing the roots. Notes ----- This routine works only on real-valued polynomials. 
        If the piecewise polynomial contains sections that are
        identically zero, the root list will contain the start point of the
        corresponding interval, followed by a ``nan`` value.

        If the polynomial is discontinuous across a breakpoint, and
        there is a sign change across the breakpoint, this is reported if
        the `discont` parameter is True.

        Examples
        --------

        Finding roots of ``[x**2 - 1, (x - 1)**2]`` defined on intervals
        ``[-2, 1], [1, 2]``:

        >>> from scipy.interpolate import PPoly
        >>> pp = PPoly(np.array([[1, -4, 3], [1, 0, 0]]).T, [-2, 1, 2])
        >>> pp.roots()
        array([-1.,  1.])
        """
        if extrapolate is None:
            extrapolate = self.extrapolate

        # The compiled root finder requires C-contiguous c and x arrays.
        self._ensure_c_contiguous()

        if np.issubdtype(self.c.dtype, np.complexfloating):
            raise ValueError("Root finding is only for "
                             "real-valued polynomials")

        # Solving pp(x) == y is done by the compiled code on the flattened
        # trailing dimensions; it returns one roots array per polynomial.
        y = float(y)
        r = _ppoly.real_roots(self.c.reshape(self.c.shape[0],
                              self.c.shape[1], -1),
                              self.x, y,
                              bool(discontinuity),
                              bool(extrapolate))
        if self.c.ndim == 2:
            # Single polynomial: return its roots array directly.
            return r[0]
        else:
            # Multiple polynomials: pack per-polynomial root arrays into an
            # object array shaped like the trailing dimensions of c.
            r2 = np.empty(prod(self.c.shape[2:]), dtype=object)
            # this for-loop is equivalent to ``r2[...] = r``, but that's broken
            # in numpy 1.6.0
            for ii, root in enumerate(r):
                r2[ii] = root
            return r2.reshape(self.c.shape[2:])

    def roots(self, discontinuity=True, extrapolate=None):
        """
        Find real roots of the piecewise polynomial.

        Parameters
        ----------
        discontinuity : bool, optional
            Whether to report sign changes across discontinuities at
            breakpoints as roots.
        extrapolate : {bool, 'periodic', None}, optional
            If bool, determines whether to return roots from the polynomial
            extrapolated based on first and last intervals, 'periodic' works
            the same as False. If None (default), use `self.extrapolate`.

        Returns
        -------
        roots : ndarray
            Roots of the polynomial(s).

            If the PPoly object describes multiple polynomials, the return
            value is an object array whose each element is an ndarray
            containing the roots.
See Also -------- PPoly.solve """ return self.solve(0, discontinuity, extrapolate) @classmethod def from_spline(cls, tck, extrapolate=None): """ Construct a piecewise polynomial from a spline Parameters ---------- tck A spline, as returned by `splrep` or a BSpline object. extrapolate : bool or 'periodic', optional If bool, determines whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. If 'periodic', periodic extrapolation is used. Default is True. """ if isinstance(tck, BSpline): t, c, k = tck.tck if extrapolate is None: extrapolate = tck.extrapolate else: t, c, k = tck cvals = np.empty((k + 1, len(t)-1), dtype=c.dtype) for m in xrange(k, -1, -1): y = fitpack.splev(t[:-1], tck, der=m) cvals[k - m, :] = y/spec.gamma(m+1) return cls.construct_fast(cvals, t, extrapolate) @classmethod def from_bernstein_basis(cls, bp, extrapolate=None): """ Construct a piecewise polynomial in the power basis from a polynomial in Bernstein basis. Parameters ---------- bp : BPoly A Bernstein basis polynomial, as created by BPoly extrapolate : bool or 'periodic', optional If bool, determines whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. If 'periodic', periodic extrapolation is used. Default is True. """ dx = np.diff(bp.x) k = bp.c.shape[0] - 1 # polynomial order rest = (None,)*(bp.c.ndim-2) c = np.zeros_like(bp.c) for a in range(k+1): factor = (-1)**a * comb(k, a) * bp.c[a] for s in range(a, k+1): val = comb(k-a, s-a) * (-1)**s c[k-s] += factor * val / dx[(slice(None),)+rest]**s if extrapolate is None: extrapolate = bp.extrapolate return cls.construct_fast(c, bp.x, extrapolate, bp.axis) class BPoly(_PPolyBase): """Piecewise polynomial in terms of coefficients and breakpoints. 
The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the Bernstein polynomial basis:: S = sum(c[a, i] * b(a, k; x) for a in range(k+1)), where ``k`` is the degree of the polynomial, and:: b(a, k; x) = binom(k, a) * t**a * (1 - t)**(k - a), with ``t = (x - x[i]) / (x[i+1] - x[i])`` and ``binom`` is the binomial coefficient. Parameters ---------- c : ndarray, shape (k, m, ...) Polynomial coefficients, order `k` and `m` intervals x : ndarray, shape (m+1,) Polynomial breakpoints. Must be sorted in either increasing or decreasing order. extrapolate : bool, optional If bool, determines whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. If 'periodic', periodic extrapolation is used. Default is True. axis : int, optional Interpolation axis. Default is zero. Attributes ---------- x : ndarray Breakpoints. c : ndarray Coefficients of the polynomials. They are reshaped to a 3-dimensional array with the last dimension representing the trailing dimensions of the original coefficient array. axis : int Interpolation axis. Methods ------- __call__ extend derivative antiderivative integrate construct_fast from_power_basis from_derivatives See also -------- PPoly : piecewise polynomials in the power basis Notes ----- Properties of Bernstein polynomials are well documented in the literature. Here's a non-exhaustive list: .. [1] http://en.wikipedia.org/wiki/Bernstein_polynomial .. [2] Kenneth I. Joy, Bernstein polynomials, http://www.idav.ucdavis.edu/education/CAGDNotes/Bernstein-Polynomials.pdf .. [3] E. H. Doha, A. H. Bhrawy, and M. A. Saker, Boundary Value Problems, vol 2011, article ID 829546, :doi:`10.1155/2011/829543`. Examples -------- >>> from scipy.interpolate import BPoly >>> x = [0, 1] >>> c = [[1], [2], [3]] >>> bp = BPoly(c, x) This creates a 2nd order polynomial .. 
math:: B(x) = 1 \\times b_{0, 2}(x) + 2 \\times b_{1, 2}(x) + 3 \\times b_{2, 2}(x) \\\\ = 1 \\times (1-x)^2 + 2 \\times 2 x (1 - x) + 3 \\times x^2 """ def _evaluate(self, x, nu, extrapolate, out): _ppoly.evaluate_bernstein( self.c.reshape(self.c.shape[0], self.c.shape[1], -1), self.x, x, nu, bool(extrapolate), out) def derivative(self, nu=1): """ Construct a new piecewise polynomial representing the derivative. Parameters ---------- nu : int, optional Order of derivative to evaluate. Default is 1, i.e. compute the first derivative. If negative, the antiderivative is returned. Returns ------- bp : BPoly Piecewise polynomial of order k - nu representing the derivative of this polynomial. """ if nu < 0: return self.antiderivative(-nu) if nu > 1: bp = self for k in range(nu): bp = bp.derivative() return bp # reduce order if nu == 0: c2 = self.c.copy() else: # For a polynomial # B(x) = \sum_{a=0}^{k} c_a b_{a, k}(x), # we use the fact that # b'_{a, k} = k ( b_{a-1, k-1} - b_{a, k-1} ), # which leads to # B'(x) = \sum_{a=0}^{k-1} (c_{a+1} - c_a) b_{a, k-1} # # finally, for an interval [y, y + dy] with dy != 1, # we need to correct for an extra power of dy rest = (None,)*(self.c.ndim-2) k = self.c.shape[0] - 1 dx = np.diff(self.x)[(None, slice(None))+rest] c2 = k * np.diff(self.c, axis=0) / dx if c2.shape[0] == 0: # derivative of order 0 is zero c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype) # construct a compatible polynomial return self.construct_fast(c2, self.x, self.extrapolate, self.axis) def antiderivative(self, nu=1): """ Construct a new piecewise polynomial representing the antiderivative. Parameters ---------- nu : int, optional Order of antiderivative to evaluate. Default is 1, i.e. compute the first integral. If negative, the derivative is returned. Returns ------- bp : BPoly Piecewise polynomial of order k + nu representing the antiderivative of this polynomial. 
Notes ----- If antiderivative is computed and ``self.extrapolate='periodic'``, it will be set to False for the returned instance. This is done because the antiderivative is no longer periodic and its correct evaluation outside of the initially given x interval is difficult. """ if nu <= 0: return self.derivative(-nu) if nu > 1: bp = self for k in range(nu): bp = bp.antiderivative() return bp # Construct the indefinite integrals on individual intervals c, x = self.c, self.x k = c.shape[0] c2 = np.zeros((k+1,) + c.shape[1:], dtype=c.dtype) c2[1:, ...] = np.cumsum(c, axis=0) / k delta = x[1:] - x[:-1] c2 *= delta[(None, slice(None)) + (None,)*(c.ndim-2)] # Now fix continuity: on the very first interval, take the integration # constant to be zero; on an interval [x_j, x_{j+1}) with j>0, # the integration constant is then equal to the jump of the `bp` at x_j. # The latter is given by the coefficient of B_{n+1, n+1} # *on the previous interval* (other B. polynomials are zero at the # breakpoint). Finally, use the fact that BPs form a partition of unity. c2[:,1:] += np.cumsum(c2[k, :], axis=0)[:-1] if self.extrapolate == 'periodic': extrapolate = False else: extrapolate = self.extrapolate return self.construct_fast(c2, x, extrapolate, axis=self.axis) def integrate(self, a, b, extrapolate=None): """ Compute a definite integral over a piecewise polynomial. Parameters ---------- a : float Lower integration bound b : float Upper integration bound extrapolate : {bool, 'periodic', None}, optional Whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. If 'periodic', periodic extrapolation is used. If None (default), use `self.extrapolate`. 
        Returns
        -------
        array_like
            Definite integral of the piecewise polynomial over [a, b]

        """
        # XXX: can probably use instead the fact that
        # \int_0^{1} B_{j, n}(x) \dx = 1/(n+1)
        # (i.e. integrate coefficient-wise without building the
        # antiderivative object)
        ib = self.antiderivative()

        if extrapolate is None:
            extrapolate = self.extrapolate

        # ib.extrapolate shouldn't be 'periodic'; it is converted to
        # False for 'periodic' in the antiderivative() call.
        if extrapolate != 'periodic':
            ib.extrapolate = extrapolate

        if extrapolate == 'periodic':
            # Split the integral into the part over period (can be several
            # of them) and the remaining part.

            # For simplicity and clarity convert to a <= b case.
            if a <= b:
                sign = 1
            else:
                a, b = b, a
                sign = -1

            xs, xe = self.x[0], self.x[-1]
            period = xe - xs
            interval = b - a
            # Whole periods contribute n_periods times the integral over
            # one full period; `left` is the leftover sub-period length.
            n_periods, left = divmod(interval, period)
            res = n_periods * (ib(xe) - ib(xs))

            # Map a and b to [xs, xe].
            a = xs + (a - xs) % period
            b = a + left

            # If b <= xe then we need to integrate over [a, b], otherwise
            # over [a, xe] and from xs to what is remained.
            if b <= xe:
                res += ib(b) - ib(a)
            else:
                res += ib(xe) - ib(a) + ib(xs + left + a - xe) - ib(xs)

            return sign * res
        else:
            return ib(b) - ib(a)

    def extend(self, c, x, right=None):
        # Raise both coefficient arrays to the common degree k before
        # delegating to the base-class extend, since Bernstein coefficients
        # of different degrees cannot be stacked directly.
        k = max(self.c.shape[0], c.shape[0])
        self.c = self._raise_degree(self.c, k - self.c.shape[0])
        c = self._raise_degree(c, k - c.shape[0])
        return _PPolyBase.extend(self, c, x, right)

    extend.__doc__ = _PPolyBase.extend.__doc__

    @classmethod
    def from_power_basis(cls, pp, extrapolate=None):
        """
        Construct a piecewise polynomial in Bernstein basis
        from a power basis polynomial.

        Parameters
        ----------
        pp : PPoly
            A piecewise polynomial in the power basis
        extrapolate : bool or 'periodic', optional
            If bool, determines whether to extrapolate to out-of-bounds points
            based on first and last intervals, or to return NaNs.
            If 'periodic', periodic extrapolation is used. Default is True.
""" dx = np.diff(pp.x) k = pp.c.shape[0] - 1 # polynomial order rest = (None,)*(pp.c.ndim-2) c = np.zeros_like(pp.c) for a in range(k+1): factor = pp.c[a] / comb(k, k-a) * dx[(slice(None),)+rest]**(k-a) for j in range(k-a, k+1): c[j] += factor * comb(j, k-a) if extrapolate is None: extrapolate = pp.extrapolate return cls.construct_fast(c, pp.x, extrapolate, pp.axis) @classmethod def from_derivatives(cls, xi, yi, orders=None, extrapolate=None): """Construct a piecewise polynomial in the Bernstein basis, compatible with the specified values and derivatives at breakpoints. Parameters ---------- xi : array_like sorted 1D array of x-coordinates yi : array_like or list of array_likes ``yi[i][j]`` is the ``j``-th derivative known at ``xi[i]`` orders : None or int or array_like of ints. Default: None. Specifies the degree of local polynomials. If not None, some derivatives are ignored. extrapolate : bool or 'periodic', optional If bool, determines whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. If 'periodic', periodic extrapolation is used. Default is True. Notes ----- If ``k`` derivatives are specified at a breakpoint ``x``, the constructed polynomial is exactly ``k`` times continuously differentiable at ``x``, unless the ``order`` is provided explicitly. In the latter case, the smoothness of the polynomial at the breakpoint is controlled by the ``order``. Deduces the number of derivatives to match at each end from ``order`` and the number of derivatives available. If possible it uses the same number of derivatives from each end; if the number is odd it tries to take the extra one from y2. In any case if not enough derivatives are available at one end or another it draws enough to make up the total from the other end. If the order is too high and not enough derivatives are available, an exception is raised. 
Examples -------- >>> from scipy.interpolate import BPoly >>> BPoly.from_derivatives([0, 1], [[1, 2], [3, 4]]) Creates a polynomial `f(x)` of degree 3, defined on `[0, 1]` such that `f(0) = 1, df/dx(0) = 2, f(1) = 3, df/dx(1) = 4` >>> BPoly.from_derivatives([0, 1, 2], [[0, 1], [0], [2]]) Creates a piecewise polynomial `f(x)`, such that `f(0) = f(1) = 0`, `f(2) = 2`, and `df/dx(0) = 1`. Based on the number of derivatives provided, the order of the local polynomials is 2 on `[0, 1]` and 1 on `[1, 2]`. Notice that no restriction is imposed on the derivatives at `x = 1` and `x = 2`. Indeed, the explicit form of the polynomial is:: f(x) = | x * (1 - x), 0 <= x < 1 | 2 * (x - 1), 1 <= x <= 2 So that f'(1-0) = -1 and f'(1+0) = 2 """ xi = np.asarray(xi) if len(xi) != len(yi): raise ValueError("xi and yi need to have the same length") if np.any(xi[1:] - xi[:1] <= 0): raise ValueError("x coordinates are not in increasing order") # number of intervals m = len(xi) - 1 # global poly order is k-1, local orders are <=k and can vary try: k = max(len(yi[i]) + len(yi[i+1]) for i in range(m)) except TypeError: raise ValueError("Using a 1D array for y? 
Please .reshape(-1, 1).") if orders is None: orders = [None] * m else: if isinstance(orders, (integer_types, np.integer)): orders = [orders] * m k = max(k, max(orders)) if any(o <= 0 for o in orders): raise ValueError("Orders must be positive.") c = [] for i in range(m): y1, y2 = yi[i], yi[i+1] if orders[i] is None: n1, n2 = len(y1), len(y2) else: n = orders[i]+1 n1 = min(n//2, len(y1)) n2 = min(n - n1, len(y2)) n1 = min(n - n2, len(y2)) if n1+n2 != n: mesg = ("Point %g has %d derivatives, point %g" " has %d derivatives, but order %d requested" % ( xi[i], len(y1), xi[i+1], len(y2), orders[i])) raise ValueError(mesg) if not (n1 <= len(y1) and n2 <= len(y2)): raise ValueError("`order` input incompatible with" " length y1 or y2.") b = BPoly._construct_from_derivatives(xi[i], xi[i+1], y1[:n1], y2[:n2]) if len(b) < k: b = BPoly._raise_degree(b, k - len(b)) c.append(b) c = np.asarray(c) return cls(c.swapaxes(0, 1), xi, extrapolate) @staticmethod def _construct_from_derivatives(xa, xb, ya, yb): r"""Compute the coefficients of a polynomial in the Bernstein basis given the values and derivatives at the edges. Return the coefficients of a polynomial in the Bernstein basis defined on `[xa, xb]` and having the values and derivatives at the endpoints ``xa`` and ``xb`` as specified by ``ya`` and ``yb``. The polynomial constructed is of the minimal possible degree, i.e., if the lengths of ``ya`` and ``yb`` are ``na`` and ``nb``, the degree of the polynomial is ``na + nb - 1``. Parameters ---------- xa : float Left-hand end point of the interval xb : float Right-hand end point of the interval ya : array_like Derivatives at ``xa``. ``ya[0]`` is the value of the function, and ``ya[i]`` for ``i > 0`` is the value of the ``i``-th derivative. yb : array_like Derivatives at ``xb``. Returns ------- array coefficient array of a polynomial having specified derivatives Notes ----- This uses several facts from life of Bernstein basis functions. First of all, .. 
math:: b'_{a, n} = n (b_{a-1, n-1} - b_{a, n-1}) If B(x) is a linear combination of the form .. math:: B(x) = \sum_{a=0}^{n} c_a b_{a, n}, then :math: B'(x) = n \sum_{a=0}^{n-1} (c_{a+1} - c_{a}) b_{a, n-1}. Iterating the latter one, one finds for the q-th derivative .. math:: B^{q}(x) = n!/(n-q)! \sum_{a=0}^{n-q} Q_a b_{a, n-q}, with .. math:: Q_a = \sum_{j=0}^{q} (-)^{j+q} comb(q, j) c_{j+a} This way, only `a=0` contributes to :math: `B^{q}(x = xa)`, and `c_q` are found one by one by iterating `q = 0, ..., na`. At `x = xb` it's the same with `a = n - q`. """ ya, yb = np.asarray(ya), np.asarray(yb) if ya.shape[1:] != yb.shape[1:]: raise ValueError('ya and yb have incompatible dimensions.') dta, dtb = ya.dtype, yb.dtype if (np.issubdtype(dta, np.complexfloating) or np.issubdtype(dtb, np.complexfloating)): dt = np.complex_ else: dt = np.float_ na, nb = len(ya), len(yb) n = na + nb c = np.empty((na+nb,) + ya.shape[1:], dtype=dt) # compute coefficients of a polynomial degree na+nb-1 # walk left-to-right for q in range(0, na): c[q] = ya[q] / spec.poch(n - q, q) * (xb - xa)**q for j in range(0, q): c[q] -= (-1)**(j+q) * comb(q, j) * c[j] # now walk right-to-left for q in range(0, nb): c[-q-1] = yb[q] / spec.poch(n - q, q) * (-1)**q * (xb - xa)**q for j in range(0, q): c[-q-1] -= (-1)**(j+1) * comb(q, j+1) * c[-q+j] return c @staticmethod def _raise_degree(c, d): r"""Raise a degree of a polynomial in the Bernstein basis. Given the coefficients of a polynomial degree `k`, return (the coefficients of) the equivalent polynomial of degree `k+d`. Parameters ---------- c : array_like coefficient array, 1D d : integer Returns ------- array coefficient array, 1D array of length `c.shape[0] + d` Notes ----- This uses the fact that a Bernstein polynomial `b_{a, k}` can be identically represented as a linear combination of polynomials of a higher degree `k+d`: .. 
math:: b_{a, k} = comb(k, a) \sum_{j=0}^{d} b_{a+j, k+d} \ comb(d, j) / comb(k+d, a+j) """ if d == 0: return c k = c.shape[0] - 1 out = np.zeros((c.shape[0] + d,) + c.shape[1:], dtype=c.dtype) for a in range(c.shape[0]): f = c[a] * comb(k, a) for j in range(d+1): out[a+j] += f * comb(d, j) / comb(k+d, a+j) return out class NdPPoly(object): """ Piecewise tensor product polynomial The value at point `xp = (x', y', z', ...)` is evaluated by first computing the interval indices `i` such that:: x[0][i[0]] <= x' < x[0][i[0]+1] x[1][i[1]] <= y' < x[1][i[1]+1] ... and then computing:: S = sum(c[k0-m0-1,...,kn-mn-1,i[0],...,i[n]] * (xp[0] - x[0][i[0]])**m0 * ... * (xp[n] - x[n][i[n]])**mn for m0 in range(k[0]+1) ... for mn in range(k[n]+1)) where ``k[j]`` is the degree of the polynomial in dimension j. This representation is the piecewise multivariate power basis. Parameters ---------- c : ndarray, shape (k0, ..., kn, m0, ..., mn, ...) Polynomial coefficients, with polynomial order `kj` and `mj+1` intervals for each dimension `j`. x : ndim-tuple of ndarrays, shapes (mj+1,) Polynomial breakpoints for each dimension. These must be sorted in increasing order. extrapolate : bool, optional Whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. Default: True. Attributes ---------- x : tuple of ndarrays Breakpoints. c : ndarray Coefficients of the polynomials. Methods ------- __call__ construct_fast See also -------- PPoly : piecewise polynomials in 1D Notes ----- High-order polynomials in the power basis can be numerically unstable. 
""" def __init__(self, c, x, extrapolate=None): self.x = tuple(np.ascontiguousarray(v, dtype=np.float64) for v in x) self.c = np.asarray(c) if extrapolate is None: extrapolate = True self.extrapolate = bool(extrapolate) ndim = len(self.x) if any(v.ndim != 1 for v in self.x): raise ValueError("x arrays must all be 1-dimensional") if any(v.size < 2 for v in self.x): raise ValueError("x arrays must all contain at least 2 points") if c.ndim < 2*ndim: raise ValueError("c must have at least 2*len(x) dimensions") if any(np.any(v[1:] - v[:-1] < 0) for v in self.x): raise ValueError("x-coordinates are not in increasing order") if any(a != b.size - 1 for a, b in zip(c.shape[ndim:2*ndim], self.x)): raise ValueError("x and c do not agree on the number of intervals") dtype = self._get_dtype(self.c.dtype) self.c = np.ascontiguousarray(self.c, dtype=dtype) @classmethod def construct_fast(cls, c, x, extrapolate=None): """ Construct the piecewise polynomial without making checks. Takes the same parameters as the constructor. Input arguments `c` and `x` must be arrays of the correct shape and type. The `c` array can only be of dtypes float and complex, and `x` array must have dtype float. """ self = object.__new__(cls) self.c = c self.x = x if extrapolate is None: extrapolate = True self.extrapolate = extrapolate return self def _get_dtype(self, dtype): if np.issubdtype(dtype, np.complexfloating) \ or np.issubdtype(self.c.dtype, np.complexfloating): return np.complex_ else: return np.float_ def _ensure_c_contiguous(self): if not self.c.flags.c_contiguous: self.c = self.c.copy() if not isinstance(self.x, tuple): self.x = tuple(self.x) def __call__(self, x, nu=None, extrapolate=None): """ Evaluate the piecewise polynomial or its derivative Parameters ---------- x : array-like Points to evaluate the interpolant at. nu : tuple, optional Orders of derivatives to evaluate. Each must be non-negative. 
extrapolate : bool, optional Whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. Returns ------- y : array-like Interpolated values. Shape is determined by replacing the interpolation axis in the original array with the shape of x. Notes ----- Derivatives are evaluated piecewise for each polynomial segment, even if the polynomial is not differentiable at the breakpoints. The polynomial intervals are considered half-open, ``[a, b)``, except for the last interval which is closed ``[a, b]``. """ if extrapolate is None: extrapolate = self.extrapolate else: extrapolate = bool(extrapolate) ndim = len(self.x) x = _ndim_coords_from_arrays(x) x_shape = x.shape x = np.ascontiguousarray(x.reshape(-1, x.shape[-1]), dtype=np.float_) if nu is None: nu = np.zeros((ndim,), dtype=np.intc) else: nu = np.asarray(nu, dtype=np.intc) if nu.ndim != 1 or nu.shape[0] != ndim: raise ValueError("invalid number of derivative orders nu") dim1 = prod(self.c.shape[:ndim]) dim2 = prod(self.c.shape[ndim:2*ndim]) dim3 = prod(self.c.shape[2*ndim:]) ks = np.array(self.c.shape[:ndim], dtype=np.intc) out = np.empty((x.shape[0], dim3), dtype=self.c.dtype) self._ensure_c_contiguous() _ppoly.evaluate_nd(self.c.reshape(dim1, dim2, dim3), self.x, ks, x, nu, bool(extrapolate), out) return out.reshape(x_shape[:-1] + self.c.shape[2*ndim:]) def _derivative_inplace(self, nu, axis): """ Compute 1D derivative along a selected dimension in-place May result to non-contiguous c array. 
""" if nu < 0: return self._antiderivative_inplace(-nu, axis) ndim = len(self.x) axis = axis % ndim # reduce order if nu == 0: # noop return else: sl = [slice(None)]*ndim sl[axis] = slice(None, -nu, None) c2 = self.c[sl] if c2.shape[axis] == 0: # derivative of order 0 is zero shp = list(c2.shape) shp[axis] = 1 c2 = np.zeros(shp, dtype=c2.dtype) # multiply by the correct rising factorials factor = spec.poch(np.arange(c2.shape[axis], 0, -1), nu) sl = [None]*c2.ndim sl[axis] = slice(None) c2 *= factor[sl] self.c = c2 def _antiderivative_inplace(self, nu, axis): """ Compute 1D antiderivative along a selected dimension May result to non-contiguous c array. """ if nu <= 0: return self._derivative_inplace(-nu, axis) ndim = len(self.x) axis = axis % ndim perm = list(range(ndim)) perm[0], perm[axis] = perm[axis], perm[0] perm = perm + list(range(ndim, self.c.ndim)) c = self.c.transpose(perm) c2 = np.zeros((c.shape[0] + nu,) + c.shape[1:], dtype=c.dtype) c2[:-nu] = c # divide by the correct rising factorials factor = spec.poch(np.arange(c.shape[0], 0, -1), nu) c2[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)] # fix continuity of added degrees of freedom perm2 = list(range(c2.ndim)) perm2[1], perm2[ndim+axis] = perm2[ndim+axis], perm2[1] c2 = c2.transpose(perm2) c2 = c2.copy() _ppoly.fix_continuity(c2.reshape(c2.shape[0], c2.shape[1], -1), self.x[axis], nu-1) c2 = c2.transpose(perm2) c2 = c2.transpose(perm) # Done self.c = c2 def derivative(self, nu): """ Construct a new piecewise polynomial representing the derivative. Parameters ---------- nu : ndim-tuple of int Order of derivatives to evaluate for each dimension. If negative, the antiderivative is returned. Returns ------- pp : NdPPoly Piecewise polynomial of orders (k[0] - nu[0], ..., k[n] - nu[n]) representing the derivative of this polynomial. Notes ----- Derivatives are evaluated piecewise for each polynomial segment, even if the polynomial is not differentiable at the breakpoints. 
        The polynomial intervals in each dimension are considered
        half-open, ``[a, b)``, except for the last interval which is
        closed ``[a, b]``.

        """
        # Work on a copy so the original polynomial is unchanged.
        p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)

        for axis, n in enumerate(nu):
            p._derivative_inplace(n, axis)

        p._ensure_c_contiguous()
        return p

    def antiderivative(self, nu):
        """
        Construct a new piecewise polynomial representing the antiderivative.

        Antiderivative is also the indefinite integral of the function,
        and derivative is its inverse operation.

        Parameters
        ----------
        nu : ndim-tuple of int
            Order of derivatives to evaluate for each dimension.
            If negative, the derivative is returned.

        Returns
        -------
        pp : PPoly
            Piecewise polynomial of order k2 = k + n representing
            the antiderivative of this polynomial.

        Notes
        -----
        The antiderivative returned by this function is continuous and
        continuously differentiable to order n-1, up to floating point
        rounding error.

        """
        # Work on a copy so the original polynomial is unchanged.
        p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)

        for axis, n in enumerate(nu):
            p._antiderivative_inplace(n, axis)

        p._ensure_c_contiguous()
        return p

    def integrate_1d(self, a, b, axis, extrapolate=None):
        r"""
        Compute NdPPoly representation for one dimensional definite integral

        The result is a piecewise polynomial representing the integral:

        .. math::

           p(y, z, ...) = \int_a^b dx\, p(x, y, z, ...)

        where the dimension integrated over is specified with the
        `axis` parameter.

        Parameters
        ----------
        a, b : float
            Lower and upper bound for integration.
        axis : int
            Dimension over which to compute the 1D integrals
        extrapolate : bool, optional
            Whether to extrapolate to out-of-bounds points based on first
            and last intervals, or to return NaNs.

        Returns
        -------
        ig : NdPPoly or array-like
            Definite integral of the piecewise polynomial over [a, b].
            If the polynomial was 1-dimensional, an array is returned,
            otherwise, an NdPPoly object.

        """
        if extrapolate is None:
            extrapolate = self.extrapolate
        else:
            extrapolate = bool(extrapolate)

        ndim = len(self.x)
        axis = int(axis) % ndim

        # reuse 1D integration routines
        c = self.c
        # Build a permutation that brings the order axis and the interval
        # axis of dimension `axis` to positions 0 and 1, so the coefficients
        # can be viewed as a 1D PPoly with trailing dimensions flattened.
        swap = list(range(c.ndim))
        swap.insert(0, swap[axis])
        del swap[axis + 1]
        swap.insert(1, swap[ndim + axis])
        del swap[ndim + axis + 1]

        c = c.transpose(swap)
        p = PPoly.construct_fast(c.reshape(c.shape[0], c.shape[1], -1),
                                 self.x[axis],
                                 extrapolate=extrapolate)
        out = p.integrate(a, b, extrapolate=extrapolate)

        # Construct result
        if ndim == 1:
            return out.reshape(c.shape[2:])
        else:
            # One dimension was integrated out: drop its breakpoints.
            c = out.reshape(c.shape[2:])
            x = self.x[:axis] + self.x[axis+1:]
            return self.construct_fast(c, x, extrapolate=extrapolate)

    def integrate(self, ranges, extrapolate=None):
        """
        Compute a definite integral over a piecewise polynomial.

        Parameters
        ----------
        ranges : ndim-tuple of 2-tuples float
            Sequence of lower and upper bounds for each dimension,
            ``[(a[0], b[0]), ..., (a[ndim-1], b[ndim-1])]``
        extrapolate : bool, optional
            Whether to extrapolate to out-of-bounds points based on first
            and last intervals, or to return NaNs.

        Returns
        -------
        ig : array_like
            Definite integral of the piecewise polynomial over
            [a[0], b[0]] x ... x [a[ndim-1], b[ndim-1]]

        """

        ndim = len(self.x)

        if extrapolate is None:
            extrapolate = self.extrapolate
        else:
            extrapolate = bool(extrapolate)

        if not hasattr(ranges, '__len__') or len(ranges) != ndim:
            raise ValueError("Range not a sequence of correct length")

        self._ensure_c_contiguous()

        # Reuse 1D integration routine
        c = self.c
        # Integrate one dimension at a time; after each pass the integrated
        # dimension's two leading axes are consumed by the reshape below.
        for n, (a, b) in enumerate(ranges):
            swap = list(range(c.ndim))
            swap.insert(1, swap[ndim - n])
            del swap[ndim - n + 1]

            c = c.transpose(swap)

            p = PPoly.construct_fast(c, self.x[n], extrapolate=extrapolate)
            out = p.integrate(a, b, extrapolate=extrapolate)
            c = out.reshape(c.shape[2:])

        return c


class RegularGridInterpolator(object):
    """
    Interpolation on a regular grid in arbitrary dimensions

    The data must be defined on a regular grid; the grid spacing however may be
    uneven.
    Linear and nearest-neighbour interpolation are supported. After setting up
    the interpolator object, the interpolation method (*linear* or *nearest*)
    may be chosen at each evaluation.

    Parameters
    ----------
    points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
        The points defining the regular grid in n dimensions.

    values : array_like, shape (m1, ..., mn, ...)
        The data on the regular grid in n dimensions.

    method : str, optional
        The method of interpolation to perform. Supported are "linear" and
        "nearest". This parameter will become the default for the object's
        ``__call__`` method. Default is "linear".

    bounds_error : bool, optional
        If True, when interpolated values are requested outside of the
        domain of the input data, a ValueError is raised.
        If False, then `fill_value` is used.

    fill_value : number, optional
        If provided, the value to use for points outside of the
        interpolation domain. If None, values outside
        the domain are extrapolated.

    Methods
    -------
    __call__

    Notes
    -----
    Contrary to LinearNDInterpolator and NearestNDInterpolator, this class
    avoids expensive triangulation of the input data by taking advantage of the
    regular grid structure.

    .. versionadded:: 0.14

    Examples
    --------
    Evaluate a simple example function on the points of a 3D grid:

    >>> from scipy.interpolate import RegularGridInterpolator
    >>> def f(x, y, z):
    ...     return 2 * x**3 + 3 * y**2 - z
    >>> x = np.linspace(1, 4, 11)
    >>> y = np.linspace(4, 7, 22)
    >>> z = np.linspace(7, 9, 33)
    >>> data = f(*np.meshgrid(x, y, z, indexing='ij', sparse=True))

    ``data`` is now a 3D array with ``data[i,j,k] = f(x[i], y[j], z[k])``.
    Next, define an interpolating function from this data:

    >>> my_interpolating_function = RegularGridInterpolator((x, y, z), data)

    Evaluate the interpolating function at the two points
    ``(x,y,z) = (2.1, 6.2, 8.3)`` and ``(3.3, 5.2, 7.1)``:

    >>> pts = np.array([[2.1, 6.2, 8.3], [3.3, 5.2, 7.1]])
    >>> my_interpolating_function(pts)
    array([ 125.80469388,  146.30069388])

    which is indeed a close approximation to
    ``[f(2.1, 6.2, 8.3), f(3.3, 5.2, 7.1)]``.

    See also
    --------
    NearestNDInterpolator : Nearest neighbour interpolation on unstructured
                            data in N dimensions

    LinearNDInterpolator : Piecewise linear interpolant on unstructured data
                           in N dimensions

    References
    ----------
    .. [1] Python package *regulargrid* by Johannes Buchner, see
           https://pypi.python.org/pypi/regulargrid/
    .. [2] Trilinear interpolation. (2013, January 17). In Wikipedia, The Free
           Encyclopedia. Retrieved 27 Feb 2013 01:28.
           http://en.wikipedia.org/w/index.php?title=Trilinear_interpolation&oldid=533448871
    .. [3] Weiser, Alan, and Sergio E. Zarantonello. "A note on piecewise
           linear and multilinear table interpolation in many dimensions."
           MATH. COMPUT. 50.181 (1988): 189-196.
           http://www.ams.org/journals/mcom/1988-50-181/S0025-5718-1988-0917826-0/S0025-5718-1988-0917826-0.pdf
    """
    # this class is based on code originally programmed by Johannes Buchner,
    # see https://github.com/JohannesBuchner/regulargrid

    def __init__(self, points, values, method="linear", bounds_error=True,
                 fill_value=np.nan):
        if method not in ["linear", "nearest"]:
            raise ValueError("Method '%s' is not defined" % method)
        self.method = method
        self.bounds_error = bounds_error

        if not hasattr(values, 'ndim'):
            # allow reasonable duck-typed values
            values = np.asarray(values)

        if len(points) > values.ndim:
            raise ValueError("There are %d point arrays, but values has %d "
                             "dimensions" % (len(points), values.ndim))

        if hasattr(values, 'dtype') and hasattr(values, 'astype'):
            # Promote integer data to float so interpolation does not truncate.
            if not np.issubdtype(values.dtype, np.inexact):
                values = values.astype(float)

        self.fill_value = fill_value
        if fill_value is not None:
            # fill_value must be representable in the values dtype so the
            # assignment in __call__ does not silently lose information.
            fill_value_dtype = np.asarray(fill_value).dtype
            if (hasattr(values, 'dtype') and not
                    np.can_cast(fill_value_dtype, values.dtype,
                                casting='same_kind')):
                raise ValueError("fill_value must be either 'None' or "
                                 "of a type compatible with values")

        for i, p in enumerate(points):
            if not np.all(np.diff(p) > 0.):
                raise ValueError("The points in dimension %d must be strictly "
                                 "ascending" % i)
            if not np.asarray(p).ndim == 1:
                raise ValueError("The points in dimension %d must be "
                                 "1-dimensional" % i)
            if not values.shape[i] == len(p):
                raise ValueError("There are %d points and %d values in "
                                 "dimension %d" % (len(p), values.shape[i], i))
        self.grid = tuple([np.asarray(p) for p in points])
        self.values = values

    def __call__(self, xi, method=None):
        """
        Interpolation at coordinates

        Parameters
        ----------
        xi : ndarray of shape (..., ndim)
            The coordinates to sample the gridded data at

        method : str
            The method of interpolation to perform. Supported are "linear" and
            "nearest".
""" method = self.method if method is None else method if method not in ["linear", "nearest"]: raise ValueError("Method '%s' is not defined" % method) ndim = len(self.grid) xi = _ndim_coords_from_arrays(xi, ndim=ndim) if xi.shape[-1] != len(self.grid): raise ValueError("The requested sample points xi have dimension " "%d, but this RegularGridInterpolator has " "dimension %d" % (xi.shape[1], ndim)) xi_shape = xi.shape xi = xi.reshape(-1, xi_shape[-1]) if self.bounds_error: for i, p in enumerate(xi.T): if not np.logical_and(np.all(self.grid[i][0] <= p), np.all(p <= self.grid[i][-1])): raise ValueError("One of the requested xi is out of bounds " "in dimension %d" % i) indices, norm_distances, out_of_bounds = self._find_indices(xi.T) if method == "linear": result = self._evaluate_linear(indices, norm_distances, out_of_bounds) elif method == "nearest": result = self._evaluate_nearest(indices, norm_distances, out_of_bounds) if not self.bounds_error and self.fill_value is not None: result[out_of_bounds] = self.fill_value return result.reshape(xi_shape[:-1] + self.values.shape[ndim:]) def _evaluate_linear(self, indices, norm_distances, out_of_bounds): # slice for broadcasting over trailing dimensions in self.values vslice = (slice(None),) + (None,)*(self.values.ndim - len(indices)) # find relevant values # each i and i+1 represents a edge edges = itertools.product(*[[i, i + 1] for i in indices]) values = 0. for edge_indices in edges: weight = 1. 
for ei, i, yi in zip(edge_indices, indices, norm_distances): weight *= np.where(ei == i, 1 - yi, yi) values += np.asarray(self.values[edge_indices]) * weight[vslice] return values def _evaluate_nearest(self, indices, norm_distances, out_of_bounds): idx_res = [] for i, yi in zip(indices, norm_distances): idx_res.append(np.where(yi <= .5, i, i + 1)) return self.values[idx_res] def _find_indices(self, xi): # find relevant edges between which xi are situated indices = [] # compute distance to lower edge in unity units norm_distances = [] # check for out of bounds xi out_of_bounds = np.zeros((xi.shape[1]), dtype=bool) # iterate through dimensions for x, grid in zip(xi, self.grid): i = np.searchsorted(grid, x) - 1 i[i < 0] = 0 i[i > grid.size - 2] = grid.size - 2 indices.append(i) norm_distances.append((x - grid[i]) / (grid[i + 1] - grid[i])) if not self.bounds_error: out_of_bounds += x < grid[0] out_of_bounds += x > grid[-1] return indices, norm_distances, out_of_bounds def interpn(points, values, xi, method="linear", bounds_error=True, fill_value=np.nan): """ Multidimensional interpolation on regular grids. Parameters ---------- points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, ) The points defining the regular grid in n dimensions. values : array_like, shape (m1, ..., mn, ...) The data on the regular grid in n dimensions. xi : ndarray of shape (..., ndim) The coordinates to sample the gridded data at method : str, optional The method of interpolation to perform. Supported are "linear" and "nearest", and "splinef2d". "splinef2d" is only supported for 2-dimensional data. bounds_error : bool, optional If True, when interpolated values are requested outside of the domain of the input data, a ValueError is raised. If False, then `fill_value` is used. fill_value : number, optional If provided, the value to use for points outside of the interpolation domain. If None, values outside the domain are extrapolated. 
Extrapolation is not supported by method "splinef2d". Returns ------- values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:] Interpolated values at input coordinates. Notes ----- .. versionadded:: 0.14 See also -------- NearestNDInterpolator : Nearest neighbour interpolation on unstructured data in N dimensions LinearNDInterpolator : Piecewise linear interpolant on unstructured data in N dimensions RegularGridInterpolator : Linear and nearest-neighbor Interpolation on a regular grid in arbitrary dimensions RectBivariateSpline : Bivariate spline approximation over a rectangular mesh """ # sanity check 'method' kwarg if method not in ["linear", "nearest", "splinef2d"]: raise ValueError("interpn only understands the methods 'linear', " "'nearest', and 'splinef2d'. You provided %s." % method) if not hasattr(values, 'ndim'): values = np.asarray(values) ndim = values.ndim if ndim > 2 and method == "splinef2d": raise ValueError("The method spline2fd can only be used for " "2-dimensional input data") if not bounds_error and fill_value is None and method == "splinef2d": raise ValueError("The method spline2fd does not support extrapolation.") # sanity check consistency of input dimensions if len(points) > ndim: raise ValueError("There are %d point arrays, but values has %d " "dimensions" % (len(points), ndim)) if len(points) != ndim and method == 'splinef2d': raise ValueError("The method spline2fd can only be used for " "scalar data with one point per coordinate") # sanity check input grid for i, p in enumerate(points): if not np.all(np.diff(p) > 0.): raise ValueError("The points in dimension %d must be strictly " "ascending" % i) if not np.asarray(p).ndim == 1: raise ValueError("The points in dimension %d must be " "1-dimensional" % i) if not values.shape[i] == len(p): raise ValueError("There are %d points and %d values in " "dimension %d" % (len(p), values.shape[i], i)) grid = tuple([np.asarray(p) for p in points]) # sanity check requested xi xi = 
_ndim_coords_from_arrays(xi, ndim=len(grid)) if xi.shape[-1] != len(grid): raise ValueError("The requested sample points xi have dimension " "%d, but this RegularGridInterpolator has " "dimension %d" % (xi.shape[1], len(grid))) for i, p in enumerate(xi.T): if bounds_error and not np.logical_and(np.all(grid[i][0] <= p), np.all(p <= grid[i][-1])): raise ValueError("One of the requested xi is out of bounds " "in dimension %d" % i) # perform interpolation if method == "linear": interp = RegularGridInterpolator(points, values, method="linear", bounds_error=bounds_error, fill_value=fill_value) return interp(xi) elif method == "nearest": interp = RegularGridInterpolator(points, values, method="nearest", bounds_error=bounds_error, fill_value=fill_value) return interp(xi) elif method == "splinef2d": xi_shape = xi.shape xi = xi.reshape(-1, xi.shape[-1]) # RectBivariateSpline doesn't support fill_value; we need to wrap here idx_valid = np.all((grid[0][0] <= xi[:, 0], xi[:, 0] <= grid[0][-1], grid[1][0] <= xi[:, 1], xi[:, 1] <= grid[1][-1]), axis=0) result = np.empty_like(xi[:, 0]) # make a copy of values for RectBivariateSpline interp = RectBivariateSpline(points[0], points[1], values[:]) result[idx_valid] = interp.ev(xi[idx_valid, 0], xi[idx_valid, 1]) result[np.logical_not(idx_valid)] = fill_value return result.reshape(xi_shape[:-1]) # backward compatibility wrapper class ppform(PPoly): """ Deprecated piecewise polynomial class. New code should use the `PPoly` class instead. 
""" def __init__(self, coeffs, breaks, fill=0.0, sort=False): warnings.warn("ppform is deprecated -- use PPoly instead", category=DeprecationWarning) if sort: breaks = np.sort(breaks) else: breaks = np.asarray(breaks) PPoly.__init__(self, coeffs, breaks) self.coeffs = self.c self.breaks = self.x self.K = self.coeffs.shape[0] self.fill = fill self.a = self.breaks[0] self.b = self.breaks[-1] def __call__(self, x): return PPoly.__call__(self, x, 0, False) def _evaluate(self, x, nu, extrapolate, out): PPoly._evaluate(self, x, nu, extrapolate, out) out[~((x >= self.a) & (x <= self.b))] = self.fill return out @classmethod def fromspline(cls, xk, cvals, order, fill=0.0): # Note: this spline representation is incompatible with FITPACK N = len(xk)-1 sivals = np.empty((order+1, N), dtype=float) for m in xrange(order, -1, -1): fact = spec.gamma(m+1) res = _fitpack._bspleval(xk[:-1], xk, cvals, order, m) res /= fact sivals[order-m, :] = res return cls(sivals, xk, fill=fill) # The 3 private functions below can be called by splmake(). 
def _dot0(a, b):
    """Similar to numpy.dot, but sum over last axis of a and 1st axis of b"""
    if b.ndim <= 2:
        return dot(a, b)
    else:
        # Move b's first axis next to its last so np.dot contracts
        # a's last axis with b's (original) first axis.
        axes = list(range(b.ndim))
        axes.insert(-1, 0)
        axes.pop(0)
        return dot(a, b.transpose(axes))


def _find_smoothest(xk, yk, order, conds=None, B=None):
    # construct Bmatrix, and Jmatrix
    # e = J*c
    # minimize norm(e,2) given B*c=yk
    # if desired B can be given
    # conds is ignored
    N = len(xk)-1
    K = order
    if B is None:
        B = _fitpack._bsplmat(order, xk)
    J = _fitpack._bspldismat(order, xk)
    u, s, vh = scipy.linalg.svd(B)
    ind = K-1
    # Split the coefficient space into the constrained part (V1) and the
    # null space of B (V2); the free part is chosen to minimize ||J*c||.
    V2 = vh[-ind:,:].T
    V1 = vh[:-ind,:].T
    A = dot(J.T,J)
    tmp = dot(V2.T,A)
    Q = dot(tmp,V2)
    p = scipy.linalg.solve(Q, tmp)
    tmp = dot(V2,p)
    tmp = np.eye(N+K) - tmp
    tmp = dot(tmp,V1)
    tmp = dot(tmp,np.diag(1.0/s))
    tmp = dot(tmp,u.T)
    return _dot0(tmp, yk)


# conds is a tuple of an array and a vector
#  giving the left-hand and the right-hand side
#  of the additional equations to add to B


def _find_user(xk, yk, order, conds, B):
    # Append the user-supplied equations (lh * c = rh) to the constraint
    # system B * c = yk and solve the (possibly augmented) square system.
    lh = conds[0]
    rh = conds[1]
    B = np.concatenate((B, lh), axis=0)
    w = np.concatenate((yk, rh), axis=0)
    M, N = B.shape
    if (M > N):
        raise ValueError("over-specification of conditions")
    elif (M < N):
        # Under-determined: fall back to the smoothest solution.
        return _find_smoothest(xk, yk, order, None, B)
    else:
        return scipy.linalg.solve(B, w)


# Remove the 3 private functions above as well when removing splmake
@np.deprecate(message="splmake is deprecated in scipy 0.19.0, "
                      "use make_interp_spline instead.")
def splmake(xk, yk, order=3, kind='smoothest', conds=None):
    """
    Return a representation of a spline given data-points at internal knots

    Parameters
    ----------
    xk : array_like
        The input array of x values of rank 1
    yk : array_like
        The input array of y values of rank N. `yk` can be an N-d array to
        represent more than one curve, through the same `xk` points. The first
        dimension is assumed to be the interpolating dimension and is the same
        length of `xk`.
    order : int, optional
        Order of the spline
    kind : str, optional
        Can be 'smoothest', 'not_a_knot', 'fixed', 'clamped', 'natural',
        'periodic', 'symmetric', 'user', 'mixed' and it is ignored if order < 2
    conds : optional
        Conds

    Returns
    -------
    splmake : tuple
        Return a (`xk`, `cvals`, `k`) representation of a spline given
        data-points where the (internal) knots are at the data-points.

    """
    yk = np.asanyarray(yk)

    order = int(order)
    if order < 0:
        raise ValueError("order must not be negative")
    if order == 0:
        return xk, yk[:-1], order
    elif order == 1:
        return xk, yk, order

    # Look up the solver by name; only kinds with a module-level
    # _find_<kind> implementation are supported.  (Replaces the old
    # ``eval('_find_%s' % kind)`` guarded by a bare ``except:``, which
    # executed arbitrary expressions and masked unrelated errors.)
    try:
        func = globals()['_find_%s' % kind]
    except KeyError:
        raise NotImplementedError

    # the constraint matrix
    B = _fitpack._bsplmat(order, xk)
    coefs = func(xk, yk, order, conds, B)
    return xk, coefs, order


@np.deprecate(message="spleval is deprecated in scipy 0.19.0, "
                      "use BSpline instead.")
def spleval(xck, xnew, deriv=0):
    """
    Evaluate a fixed spline represented by the given tuple at the new x-values

    The `xj` values are the interior knot points.  The approximation
    region is `xj[0]` to `xj[-1]`.  If N+1 is the length of `xj`, then `cvals`
    should have length N+k where `k` is the order of the spline.

    Parameters
    ----------
    (xj, cvals, k) : tuple
        Parameters that define the fixed spline
    xj : array_like
        Interior knot points
    cvals : array_like
        Curvature
    k : int
        Order of the spline
    xnew : array_like
        Locations to calculate spline
    deriv : int
        Deriv

    Returns
    -------
    spleval : ndarray
        If `cvals` represents more than one curve (`cvals.ndim` > 1) and/or
        `xnew` is N-d, then the result is `xnew.shape` + `cvals.shape[1:]`
        providing the interpolation of multiple curves.

    Notes
    -----
    Internally, an additional `k`-1 knot points are added on either side of
    the spline.

    """
    (xj, cvals, k) = xck
    oldshape = np.shape(xnew)
    xx = np.ravel(xnew)
    sh = cvals.shape[1:]
    res = np.empty(xx.shape + sh, dtype=cvals.dtype)
    # Evaluate each trailing-dimension curve independently.
    for index in np.ndindex(*sh):
        sl = (slice(None),) + index
        if issubclass(cvals.dtype.type, np.complexfloating):
            # Evaluate real and imaginary parts separately; the C routine
            # works on real data only.
            res[sl].real = _fitpack._bspleval(xx,xj, cvals.real[sl], k, deriv)
            res[sl].imag = _fitpack._bspleval(xx,xj, cvals.imag[sl], k, deriv)
        else:
            res[sl] = _fitpack._bspleval(xx, xj, cvals[sl], k, deriv)
    res.shape = oldshape + sh
    return res


@np.deprecate(message="spltopp is deprecated in scipy 0.19.0, "
                      "use PPoly.from_spline instead.")
def spltopp(xk, cvals, k):
    """Return a piece-wise polynomial object from a fixed-spline tuple."""
    return ppform.fromspline(xk, cvals, k)


@np.deprecate(message="spline is deprecated in scipy 0.19.0, "
                      "use Bspline class instead.")
def spline(xk, yk, xnew, order=3, kind='smoothest', conds=None):
    """
    Interpolate a curve at new points using a spline fit

    Parameters
    ----------
    xk, yk : array_like
        The x and y values that define the curve.
    xnew : array_like
        The x values where spline should estimate the y values.
    order : int
        Default is 3.
    kind : string
        One of {'smoothest'}
    conds : Don't know
        Don't know

    Returns
    -------
    spline : ndarray
        An array of y values; the spline evaluated at the positions `xnew`.

    """
    return spleval(splmake(xk, yk, order=order, kind=kind, conds=conds), xnew)
codeparrot/github-code-clean
# Copyright (c) 2012 - 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_concurrency import lockutils from oslo_log import log as logging import six from cinder import exception from cinder.i18n import _, _LE, _LI, _LW from cinder.volume.drivers.emc import emc_vmax_fast from cinder.volume.drivers.emc import emc_vmax_provision from cinder.volume.drivers.emc import emc_vmax_provision_v3 from cinder.volume.drivers.emc import emc_vmax_utils LOG = logging.getLogger(__name__) STORAGEGROUPTYPE = 4 POSTGROUPTYPE = 3 INITIATORGROUPTYPE = 2 ISCSI = 'iscsi' FC = 'fc' EMC_ROOT = 'root/emc' FASTPOLICY = 'storagetype:fastpolicy' ISV3 = 'isV3' class EMCVMAXMasking(object): """Masking class for SMI-S based EMC volume drivers. Masking code to dynamically create a masking view This masking class is for EMC volume drivers based on SMI-S. It supports VMAX arrays. 
""" def __init__(self, prtcl): self.protocol = prtcl self.utils = emc_vmax_utils.EMCVMAXUtils(prtcl) self.fast = emc_vmax_fast.EMCVMAXFast(prtcl) self.provision = emc_vmax_provision.EMCVMAXProvision(prtcl) self.provisionv3 = emc_vmax_provision_v3.EMCVMAXProvisionV3(prtcl) def setup_masking_view(self, conn, maskingViewDict, extraSpecs): @lockutils.synchronized(maskingViewDict['maskingViewName'], "emc-mv-", True) def do_get_or_create_masking_view_and_map_lun(): return self.get_or_create_masking_view_and_map_lun(conn, maskingViewDict, extraSpecs) do_get_or_create_masking_view_and_map_lun() def get_or_create_masking_view_and_map_lun(self, conn, maskingViewDict, extraSpecs): """Get or Create a masking view and add a volume to the storage group. Given a masking view tuple either get or create a masking view and add the volume to the associated storage group. If it is a live migration operation then we do not need to remove the volume from any storage group (default or otherwise). :param conn: the connection to ecom :param maskingViewDict: the masking view dict :param extraSpecs: additional info :returns: dict -- rollbackDict :raises: VolumeBackendAPIException """ rollbackDict = {} controllerConfigService = maskingViewDict['controllerConfigService'] volumeInstance = maskingViewDict['volumeInstance'] maskingViewName = maskingViewDict['maskingViewName'] volumeName = maskingViewDict['volumeName'] isV3 = maskingViewDict['isV3'] isLiveMigration = maskingViewDict['isLiveMigration'] maskingViewDict['extraSpecs'] = extraSpecs defaultStorageGroupInstanceName = None fastPolicyName = None assocStorageGroupName = None if isLiveMigration is False: if isV3: assocStorageGroupInstanceName = ( self.utils.get_storage_group_from_volume( conn, volumeInstance.path, maskingViewDict['sgGroupName'])) instance = conn.GetInstance( assocStorageGroupInstanceName, LocalOnly=False) assocStorageGroupName = instance['ElementName'] defaultSgGroupName = self.utils.get_v3_storage_group_name( 
maskingViewDict['pool'], maskingViewDict['slo'], maskingViewDict['workload']) if assocStorageGroupName != defaultSgGroupName: LOG.warning(_LW( "Volume: %(volumeName)s Does not belong " "to storage storage group %(defaultSgGroupName)s."), {'volumeName': volumeName, 'defaultSgGroupName': defaultSgGroupName}) defaultStorageGroupInstanceName = assocStorageGroupInstanceName self._get_and_remove_from_storage_group_v3( conn, controllerConfigService, volumeInstance.path, volumeName, maskingViewDict, defaultStorageGroupInstanceName) else: fastPolicyName = maskingViewDict['fastPolicy'] # If FAST is enabled remove the volume from the default SG. if fastPolicyName is not None: defaultStorageGroupInstanceName = ( self._get_and_remove_from_storage_group_v2( conn, controllerConfigService, volumeInstance.path, volumeName, fastPolicyName, extraSpecs)) # If anything has gone wrong with the masking view we rollback try: maskingViewInstanceName, storageGroupInstanceName, errorMessage = ( self._validate_masking_view(conn, maskingViewDict, defaultStorageGroupInstanceName, extraSpecs)) LOG.debug( "The masking view in the attach operation is " "%(maskingViewInstanceName)s. The storage group " "in the masking view is %(storageGroupInstanceName)s.", {'maskingViewInstanceName': maskingViewInstanceName, 'storageGroupInstanceName': storageGroupInstanceName}) except Exception as e: LOG.exception(_LE( "Masking View creation or retrieval was not successful " "for masking view %(maskingViewName)s. " "Attempting rollback."), {'maskingViewName': maskingViewDict['maskingViewName']}) errorMessage = e if not errorMessage: # Only after the masking view has been validated, add the # volume to the storage group and recheck that it has been # successfully added. 
errorMessage = self._check_adding_volume_to_storage_group( conn, maskingViewDict, storageGroupInstanceName) rollbackDict['controllerConfigService'] = controllerConfigService rollbackDict['defaultStorageGroupInstanceName'] = ( defaultStorageGroupInstanceName) rollbackDict['volumeInstance'] = volumeInstance rollbackDict['volumeName'] = volumeName rollbackDict['fastPolicyName'] = fastPolicyName rollbackDict['isV3'] = isV3 rollbackDict['extraSpecs'] = extraSpecs rollbackDict['sgName'] = maskingViewDict['sgGroupName'] if errorMessage: # Rollback code if we cannot complete any of the steps above # successfully then we must roll back by adding the volume back to # the default storage group for that fast policy. if (fastPolicyName is not None): # If the errorMessage was returned before the volume # was removed from the default storage group no action. self._check_if_rollback_action_for_masking_required( conn, rollbackDict) if isV3: rollbackDict['sgGroupName'] = assocStorageGroupName rollbackDict['storageSystemName'] = ( maskingViewDict['storageSystemName']) self._check_if_rollback_action_for_masking_required( conn, rollbackDict) exceptionMessage = (_( "Failed to get, create or add volume %(volumeName)s " "to masking view %(maskingViewName)s. " "The error message received was %(errorMessage)s.") % {'maskingViewName': maskingViewName, 'volumeName': volumeName, 'errorMessage': errorMessage}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException(data=exceptionMessage) return rollbackDict def _validate_masking_view(self, conn, maskingViewDict, defaultStorageGroupInstanceName, extraSpecs): """Validate all the individual pieces of the masking view. 
:param conn: the ecom connection :param maskingViewDict: the masking view dictionary :param defaultStorageGroupInstanceName: the default SG :param extraSpecs: extra specifications :returns: maskingViewInstanceName :returns: storageGroupInstanceName, :returns: string -- errorMessage """ storageSystemName = maskingViewDict['storageSystemName'] maskingViewName = maskingViewDict['maskingViewName'] maskingViewInstanceName = self._find_masking_view( conn, maskingViewName, storageSystemName) if maskingViewInstanceName is None: maskingViewInstanceName, storageGroupInstanceName, errorMessage = ( self._validate_new_masking_view( conn, maskingViewDict, defaultStorageGroupInstanceName, extraSpecs)) else: storageGroupInstanceName, errorMessage = ( self._validate_existing_masking_view( conn, maskingViewDict, maskingViewInstanceName, extraSpecs)) return maskingViewInstanceName, storageGroupInstanceName, errorMessage def _validate_new_masking_view(self, conn, maskingViewDict, defaultStorageGroupInstanceName, extraSpecs): """Validate the creation of a new masking view. 
:param conn: the ecom connection :param maskingViewDict: the masking view dictionary :param defaultStorageGroupInstanceName: the default SG :param extraSpecs: extra specifications :returns: maskingViewInstanceName :returns: storageGroupInstanceName, :returns: string -- errorMessage """ controllerConfigService = maskingViewDict['controllerConfigService'] igGroupName = maskingViewDict['igGroupName'] connector = maskingViewDict['connector'] storageSystemName = maskingViewDict['storageSystemName'] maskingViewName = maskingViewDict['maskingViewName'] pgGroupName = maskingViewDict['pgGroupName'] storageGroupInstanceName, errorMessage = ( self._check_storage_group( conn, maskingViewDict, defaultStorageGroupInstanceName)) if errorMessage: return None, storageGroupInstanceName, errorMessage portGroupInstanceName, errorMessage = ( self._check_port_group(conn, controllerConfigService, pgGroupName)) if errorMessage: return None, storageGroupInstanceName, errorMessage initiatorGroupInstanceName, errorMessage = ( self._check_initiator_group(conn, controllerConfigService, igGroupName, connector, storageSystemName, extraSpecs)) if errorMessage: return None, storageGroupInstanceName, errorMessage # Only after the components of the MV have been validated, # add the volume to the storage group and recheck that it # has been successfully added. This is necessary before # creating a new masking view. 
errorMessage = self._check_adding_volume_to_storage_group( conn, maskingViewDict, storageGroupInstanceName) if errorMessage: return None, storageGroupInstanceName, errorMessage maskingViewInstanceName, errorMessage = ( self._check_masking_view( conn, controllerConfigService, maskingViewName, storageGroupInstanceName, portGroupInstanceName, initiatorGroupInstanceName, extraSpecs)) return maskingViewInstanceName, storageGroupInstanceName, errorMessage def _validate_existing_masking_view(self, conn, maskingViewDict, maskingViewInstanceName, extraSpecs): """Validate the components of an existing masking view. :param conn: the ecom connection :param maskingViewDict: the masking view dictionary :param maskingViewInstanceName: the masking view instance name :param extraSpecs: extra specification :returns: storageGroupInstanceName :returns: string -- errorMessage """ storageGroupInstanceName = None controllerConfigService = maskingViewDict['controllerConfigService'] sgGroupName = maskingViewDict['sgGroupName'] igGroupName = maskingViewDict['igGroupName'] connector = maskingViewDict['connector'] storageSystemName = maskingViewDict['storageSystemName'] maskingViewName = maskingViewDict['maskingViewName'] # First verify that the initiator group matches the initiators. errorMessage = self._check_existing_initiator_group( conn, controllerConfigService, maskingViewName, connector, storageSystemName, igGroupName, extraSpecs) if errorMessage: return storageGroupInstanceName, errorMessage storageGroupInstanceName, errorMessage = ( self._check_existing_storage_group( conn, controllerConfigService, sgGroupName, maskingViewInstanceName)) return storageGroupInstanceName, errorMessage def _check_storage_group(self, conn, maskingViewDict, storageGroupInstanceName): """Get the storage group and return it. 
:param conn: the ecom connection :param maskingViewDict: the masking view dictionary :param storageGroupInstanceName: default storage group instance name :returns: storageGroupInstanceName :returns: string -- msg, the error message """ msg = None storageGroupInstanceName = ( self._get_storage_group_instance_name( conn, maskingViewDict, storageGroupInstanceName)) if storageGroupInstanceName is None: # This may be used in exception hence _ instead of _LE. msg = (_( "Cannot get or create a storage group: %(sgGroupName)s" " for volume %(volumeName)s ") % {'sgGroupName': maskingViewDict['sgGroupName'], 'volumeName': maskingViewDict['volumeName']}) LOG.error(msg) return storageGroupInstanceName, msg def _check_existing_storage_group( self, conn, controllerConfigService, sgGroupName, maskingViewInstanceName): """Check that we can get the existing storage group. :param conn: the ecom connection :param controllerConfigService: controller configuration service :param sgGroupName: the storage group name :param maskingViewInstanceName: the masking view instance name :returns: storageGroupInstanceName :returns: string -- msg, the error message """ msg = None sgFromMvInstanceName = ( self._get_storage_group_from_masking_view_instance( conn, maskingViewInstanceName)) if sgFromMvInstanceName is None: # This may be used in exception hence _ instead of _LE. msg = (_( "Cannot get storage group: %(sgGroupName)s " "from masking view %(maskingViewInstanceName)s. ") % {'sgGroupName': sgGroupName, 'maskingViewInstanceName': maskingViewInstanceName}) LOG.error(msg) return sgFromMvInstanceName, msg def _check_port_group(self, conn, controllerConfigService, pgGroupName): """Check that you can either get or create a port group. 
:param conn: the ecom connection :param controllerConfigService: controller configuration service :param pgGroupName: the port group Name :returns: portGroupInstanceName :returns: string -- msg, the error message """ msg = None portGroupInstanceName = self._get_port_group_instance_name( conn, controllerConfigService, pgGroupName) if portGroupInstanceName is None: # This may be used in exception hence _ instead of _LE. msg = (_( "Cannot get port group: %(pgGroupName)s. ") % {'pgGroupName': pgGroupName}) LOG.error(msg) return portGroupInstanceName, msg def _check_initiator_group( self, conn, controllerConfigService, igGroupName, connector, storageSystemName, extraSpecs): """Check that initiator group can be either retrieved or created. :param conn: the ecom connection :param controllerConfigService: controller configuration service :param igGroupName: the initiator group Name :param connector: the connector object :param storageSystemName: the storage system name :param extraSpecs: extra specifications :returns: initiatorGroupInstanceName :returns: string -- the error message """ msg = None initiatorGroupInstanceName = ( self._get_initiator_group_instance_name( conn, controllerConfigService, igGroupName, connector, storageSystemName, extraSpecs)) if initiatorGroupInstanceName is None: # This may be used in exception hence _ instead of _LE. msg = (_( "Cannot get or create initiator group: " "%(igGroupName)s. ") % {'igGroupName': igGroupName}) LOG.error(msg) return initiatorGroupInstanceName, msg def _check_existing_initiator_group( self, conn, controllerConfigService, maskingViewName, connector, storageSystemName, igGroupName, extraSpecs): """Check that existing initiator group in the masking view. Check if the initiators in the initiator group match those in the system. 
:param conn: the ecom connection :param controllerConfigService: controller configuration service :param maskingViewName: the masking view name :param connector: the connector object :param storageSystemName: the storage system name :param igGroupName: the initiator group name :param extraSpecs: extra specification :returns: string -- msg, the error message """ msg = None if not self._verify_initiator_group_from_masking_view( conn, controllerConfigService, maskingViewName, connector, storageSystemName, igGroupName, extraSpecs): # This may be used in exception hence _ instead of _LE. msg = (_( "Unable to verify initiator group: %(igGroupName)s " "in masking view %(maskingViewName)s. ") % {'igGroupName': igGroupName, 'maskingViewName': maskingViewName}) LOG.error(msg) return msg def _check_masking_view( self, conn, controllerConfigService, maskingViewName, storageGroupInstanceName, portGroupInstanceName, initiatorGroupInstanceName, extraSpecs): """Check that masking view can be either got or created. :param conn: the ecom connection :param controllerConfigService: controller configuration service :param maskingViewName: the masking view name :param storageGroupInstanceName: storage group instance name :param portGroupInstanceName: port group instance name :param initiatorGroupInstanceName: the initiator group instance name :param extraSpecs: extra specifications :returns: maskingViewInstanceName :returns: string -- msg, the error message """ msg = None maskingViewInstanceName = ( self._get_masking_view_instance_name( conn, controllerConfigService, maskingViewName, storageGroupInstanceName, portGroupInstanceName, initiatorGroupInstanceName, extraSpecs)) if maskingViewInstanceName is None: # This may be used in exception hence _ instead of _LE. msg = (_( "Cannot create masking view: %(maskingViewName)s. 
") % {'maskingViewName': maskingViewName}) LOG.error(msg) return maskingViewInstanceName, msg def _check_adding_volume_to_storage_group( self, conn, maskingViewDict, storageGroupInstanceName): """Add the volume to the storage group and double check it is there. :param conn: the ecom connection :param maskingViewDict: the masking view dictionary :param storageGroupInstanceName: storage group instance name :returns: string -- the error message """ controllerConfigService = maskingViewDict['controllerConfigService'] sgGroupName = maskingViewDict['sgGroupName'] volumeInstance = maskingViewDict['volumeInstance'] volumeName = maskingViewDict['volumeName'] msg = None if self._is_volume_in_storage_group( conn, storageGroupInstanceName, volumeInstance, sgGroupName): LOG.warning(_LW( "Volume: %(volumeName)s is already part " "of storage group %(sgGroupName)s."), {'volumeName': volumeName, 'sgGroupName': sgGroupName}) else: self.add_volume_to_storage_group( conn, controllerConfigService, storageGroupInstanceName, volumeInstance, volumeName, sgGroupName, maskingViewDict['extraSpecs']) if not self._is_volume_in_storage_group( conn, storageGroupInstanceName, volumeInstance, sgGroupName): # This may be used in exception hence _ instead of _LE. msg = (_( "Volume: %(volumeName)s was not added " "to storage group %(sgGroupName)s. ") % {'volumeName': volumeName, 'sgGroupName': sgGroupName}) LOG.error(msg) else: LOG.debug("Successfully added %(volumeName)s to " "%(sgGroupName)s.", {'volumeName': volumeName, 'sgGroupName': sgGroupName}) return msg def _get_and_remove_from_storage_group_v2( self, conn, controllerConfigService, volumeInstanceName, volumeName, fastPolicyName, extraSpecs): """Get the storage group and remove volume from it. 
:param conn: the ecom connection :param controllerConfigService: controller configuration service :param volumeInstanceName: volume instance name :param volumeName: volume name :param fastPolicyName: fast name :param extraSpecs: additional info :returns: defaultStorageGroupInstanceName :raises: VolumeBackendAPIException """ defaultStorageGroupInstanceName = ( self.fast.get_and_verify_default_storage_group( conn, controllerConfigService, volumeInstanceName, volumeName, fastPolicyName)) if defaultStorageGroupInstanceName is None: exceptionMessage = (_( "Cannot get the default storage group for FAST policy: " "%(fastPolicyName)s.") % {'fastPolicyName': fastPolicyName}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) retStorageGroupInstanceName = ( self.remove_device_from_default_storage_group( conn, controllerConfigService, volumeInstanceName, volumeName, fastPolicyName, extraSpecs)) if retStorageGroupInstanceName is None: exceptionMessage = (_( "Failed to remove volume %(volumeName)s from default SG.") % {'volumeName': volumeName}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) return defaultStorageGroupInstanceName def _get_and_remove_from_storage_group_v3( self, conn, controllerConfigService, volumeInstanceName, volumeName, maskingViewDict, storageGroupInstanceName): """Get the storage group and remove volume from it. 
:param conn: the ecom connection :param controllerConfigService: controller configuration service :param volumeInstanceName: volume instance name :param volumeName: volume name :param maskingViewDict: the masking view dictionary :param storageGroupInstanceName: storage group instance name :raises: VolumeBackendAPIException """ assocVolumeInstanceNames = self.get_devices_from_storage_group( conn, storageGroupInstanceName) LOG.debug( "There are %(length)lu associated with the default storage group " "before removing volume %(volumeName)s.", {'length': len(assocVolumeInstanceNames), 'volumeName': volumeName}) self.provision.remove_device_from_storage_group( conn, controllerConfigService, storageGroupInstanceName, volumeInstanceName, volumeName, maskingViewDict['extraSpecs']) assocVolumeInstanceNames = self.get_devices_from_storage_group( conn, storageGroupInstanceName) LOG.debug( "There are %(length)lu associated with the default storage group " "after removing volume %(volumeName)s.", {'length': len(assocVolumeInstanceNames), 'volumeName': volumeName}) # Required for unit tests. emptyStorageGroupInstanceName = ( self._wrap_get_storage_group_from_volume( conn, volumeInstanceName, maskingViewDict['sgGroupName'])) if emptyStorageGroupInstanceName is not None: exceptionMessage = (_( "Failed to remove volume %(volumeName)s from default SG: " "%(volumeName)s.") % {'volumeName': volumeName}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) def _is_volume_in_storage_group( self, conn, storageGroupInstanceName, volumeInstance, sgName): """Check if the volume is already part of the storage group. Check if the volume is already part of the storage group, if it is no need to re-add it. 
:param conn: the connection to ecom :param storageGroupInstanceName: the storage group instance name :param volumeInstance: the volume instance :param sgName: the storage group name :returns: boolean """ foundStorageGroupInstanceName = ( self.utils.get_storage_group_from_volume( conn, volumeInstance.path, sgName)) if foundStorageGroupInstanceName is not None: storageGroupInstance = conn.GetInstance( storageGroupInstanceName, LocalOnly=False) LOG.debug( "The existing storage group instance element name is: " "%(existingElement)s.", {'existingElement': storageGroupInstance['ElementName']}) foundStorageGroupInstance = conn.GetInstance( foundStorageGroupInstanceName, LocalOnly=False) LOG.debug( "The found storage group instance element name is: " "%(foundElement)s.", {'foundElement': foundStorageGroupInstance['ElementName']}) if (foundStorageGroupInstance['ElementName'] == ( storageGroupInstance['ElementName'])): return True return False def _find_masking_view(self, conn, maskingViewName, storageSystemName): """Given the masking view name get the masking view instance. :param conn: connection to the ecom server :param maskingViewName: the masking view name :param storageSystemName: the storage system name(String) :returns: dict -- foundMaskingViewInstanceName """ foundMaskingViewInstanceName = None storageSystemInstanceName = self.utils.find_storageSystem( conn, storageSystemName) maskingViewInstances = conn.Associators( storageSystemInstanceName, ResultClass='EMC_LunMaskingSCSIProtocolController') for maskingViewInstance in maskingViewInstances: if maskingViewName == maskingViewInstance['ElementName']: foundMaskingViewInstanceName = maskingViewInstance.path break if foundMaskingViewInstanceName is not None: # Now check that is has not been deleted. 
instance = self.utils.get_existing_instance( conn, foundMaskingViewInstanceName) if instance is None: foundMaskingViewInstanceName = None LOG.error(_LE( "Looks like masking view: %(maskingViewName)s " "has recently been deleted."), {'maskingViewName': maskingViewName}) else: LOG.info(_LI( "Found existing masking view: %(maskingViewName)s."), {'maskingViewName': maskingViewName}) return foundMaskingViewInstanceName def _create_storage_group( self, conn, maskingViewDict, defaultStorageGroupInstanceName): """Create a new storage group that doesn't already exist. If fastPolicyName is not none we attempt to remove it from the default storage group of that policy and associate to the new storage group that will be part of the masking view. Will not handle any exception in this method it will be handled up the stack. :param conn: connection the ecom server :param maskingViewDict: the masking view dictionary :param defaultStorageGroupInstanceName: the default storage group instance name (Can be None) :returns: foundStorageGroupInstanceName the instance Name of the storage group """ failedRet = None controllerConfigService = maskingViewDict['controllerConfigService'] storageGroupName = maskingViewDict['sgGroupName'] isV3 = maskingViewDict['isV3'] if isV3: workload = maskingViewDict['workload'] pool = maskingViewDict['pool'] slo = maskingViewDict['slo'] foundStorageGroupInstanceName = ( self.provisionv3.create_storage_group_v3( conn, controllerConfigService, storageGroupName, pool, slo, workload, maskingViewDict['extraSpecs'])) else: fastPolicyName = maskingViewDict['fastPolicy'] volumeInstance = maskingViewDict['volumeInstance'] foundStorageGroupInstanceName = ( self.provision.create_and_get_storage_group( conn, controllerConfigService, storageGroupName, volumeInstance.path, maskingViewDict['extraSpecs'])) if (fastPolicyName is not None and defaultStorageGroupInstanceName is not None): assocTierPolicyInstanceName = ( self.fast.add_storage_group_and_verify_tier_policy_assoc( 
conn, controllerConfigService, foundStorageGroupInstanceName, storageGroupName, fastPolicyName, maskingViewDict['extraSpecs'])) if assocTierPolicyInstanceName is None: LOG.error(_LE( "Cannot add and verify tier policy association for " "storage group : %(storageGroupName)s to " "FAST policy : %(fastPolicyName)s."), {'storageGroupName': storageGroupName, 'fastPolicyName': fastPolicyName}) return failedRet if foundStorageGroupInstanceName is None: LOG.error(_LE( "Cannot get storage Group from job : %(storageGroupName)s."), {'storageGroupName': storageGroupName}) return failedRet else: LOG.info(_LI( "Created new storage group: %(storageGroupName)s."), {'storageGroupName': storageGroupName}) return foundStorageGroupInstanceName def _find_port_group(self, conn, controllerConfigService, portGroupName): """Given the port Group name get the port group instance name. :param conn: connection to the ecom server :param controllerConfigService: the controller configuration service :param portGroupName: the name of the port group you are getting :returns: foundPortGroupInstanceName """ foundPortGroupInstanceName = None portMaskingGroupInstances = conn.Associators( controllerConfigService, ResultClass='CIM_TargetMaskingGroup') for portMaskingGroupInstance in portMaskingGroupInstances: if portGroupName == portMaskingGroupInstance['ElementName']: # Check to see if it has been recently deleted. instance = self.utils.get_existing_instance( conn, portMaskingGroupInstance.path) if instance is None: foundPortGroupInstanceName = None else: foundPortGroupInstanceName = instance.path break if foundPortGroupInstanceName is None: LOG.error(_LE( "Could not find port group : %(portGroupName)s. 
Check that " "the EMC configuration file has the correct port group name."), {'portGroupName': portGroupName}) return foundPortGroupInstanceName def _create_or_get_initiator_group( self, conn, controllerConfigService, igGroupName, connector, storageSystemName, extraSpecs): """Attempt to create a initiatorGroup. If one already exists with the same Initiator/wwns then get it. Check to see if an initiatorGroup already exists, that matches the connector information. NOTE: An initiator/wwn can only belong to one initiatorGroup. If we were to attempt to create one with an initiator/wwn that is already belong to another initiatorGroup, it would fail. :param conn: connection to the ecom server :param controllerConfigService: the controller config Servicer :param igGroupName: the proposed name of the initiator group :param connector: the connector information to the host :param storageSystemName: the storage system name (String) :param extraSpecs: extra specifications :returns: foundInitiatorGroupInstanceName """ initiatorNames = self._find_initiator_names(conn, connector) LOG.debug("The initiator name(s) are: %(initiatorNames)s.", {'initiatorNames': initiatorNames}) foundInitiatorGroupInstanceName = self._find_initiator_masking_group( conn, controllerConfigService, initiatorNames) # If you cannot find an initiatorGroup that matches the connector # info create a new initiatorGroup. if foundInitiatorGroupInstanceName is None: # Check that our connector information matches the # hardwareId(s) on the vmax. 
storageHardwareIDInstanceNames = ( self._get_storage_hardware_id_instance_names( conn, initiatorNames, storageSystemName)) if not storageHardwareIDInstanceNames: LOG.info(_LI( "Initiator Name(s) %(initiatorNames)s are not on array " "%(storageSystemName)s."), {'initiatorNames': initiatorNames, 'storageSystemName': storageSystemName}) storageHardwareIDInstanceNames = ( self._create_hardware_ids(conn, initiatorNames, storageSystemName)) if not storageHardwareIDInstanceNames: msg = (_("Failed to create hardware id(s) on " "%(storageSystemName)s.") % {'storageSystemName': storageSystemName}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) foundInitiatorGroupInstanceName = self._create_initiator_Group( conn, controllerConfigService, igGroupName, storageHardwareIDInstanceNames, extraSpecs) LOG.info(_LI( "Created new initiator group name: %(igGroupName)s."), {'igGroupName': igGroupName}) else: LOG.info(_LI( "Using existing initiator group name: %(igGroupName)s."), {'igGroupName': igGroupName}) return foundInitiatorGroupInstanceName def _find_initiator_names(self, conn, connector): """Check the connector object for initiators(ISCSI) or wwpns(FC). 
:param conn: the connection to the ecom :param connector: the connector object :returns: list -- list of found initiator names :raises: VolumeBackendAPIException """ foundinitiatornames = [] name = 'initiator name' if (self.protocol.lower() == ISCSI and connector['initiator']): foundinitiatornames.append(connector['initiator']) elif self.protocol.lower() == FC: if ('wwpns' in connector and connector['wwpns']): for wwn in connector['wwpns']: foundinitiatornames.append(wwn) name = 'world wide port names' else: msg = (_("FC is the protocol but wwpns are " "not supplied by Openstack.")) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if (foundinitiatornames is None or len(foundinitiatornames) == 0): msg = (_("Error finding %(name)s.") % {'name': name}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug("Found %(name)s: %(initiator)s.", {'name': name, 'initiator': foundinitiatornames}) return foundinitiatornames def _find_initiator_masking_group( self, conn, controllerConfigService, initiatorNames): """Check to see if an initiatorGroup already exists. NOTE: An initiator/wwn can only belong to one initiatorGroup. If we were to attempt to create one with an initiator/wwn that is already belong to another initiatorGroup, it would fail. :param conn: the connection to the ecom server :param controllerConfigService: the controller configuration service :param initiatorNames: the list of initiator names :returns: foundInitiatorMaskingGroup """ foundInitiatorMaskingGroupInstanceName = None initiatorMaskingGroupInstanceNames = ( conn.AssociatorNames(controllerConfigService, ResultClass='CIM_InitiatorMaskingGroup')) for initiatorMaskingGroupInstanceName in ( initiatorMaskingGroupInstanceNames): # Check that it hasn't been deleted. If it has, break out # of the for loop. instance = self.utils.get_existing_instance( conn, initiatorMaskingGroupInstanceName) if instance is None: # MaskingGroup doesn't exist any more. 
break storageHardwareIdInstances = ( conn.Associators(initiatorMaskingGroupInstanceName, ResultClass='EMC_StorageHardwareID')) for storageHardwareIdInstance in storageHardwareIdInstances: # If EMC_StorageHardwareID matches the initiator, # we found the existing CIM_InitiatorMaskingGroup. hardwareid = storageHardwareIdInstance['StorageID'] for initiator in initiatorNames: if six.text_type(hardwareid).lower() == ( six.text_type(initiator).lower()): foundInitiatorMaskingGroupInstanceName = ( initiatorMaskingGroupInstanceName) break if foundInitiatorMaskingGroupInstanceName is not None: break if foundInitiatorMaskingGroupInstanceName is not None: break return foundInitiatorMaskingGroupInstanceName def _get_storage_hardware_id_instance_names( self, conn, initiatorNames, storageSystemName): """Given a list of initiator names find CIM_StorageHardwareID instance. :param conn: the connection to the ecom server :param initiatorNames: the list of initiator names :param storageSystemName: the storage system name :returns: list -- foundHardwardIDsInstanceNames """ foundHardwardIDsInstanceNames = [] hardwareIdManagementService = ( self.utils.find_storage_hardwareid_service( conn, storageSystemName)) hardwareIdInstances = ( self.utils.get_hardware_id_instances_from_array( conn, hardwareIdManagementService)) for hardwareIdInstance in hardwareIdInstances: storageId = hardwareIdInstance['StorageID'] for initiatorName in initiatorNames: if storageId.lower() == initiatorName.lower(): # Check that the found hardwareId has been deleted. # If it has, we don't want to add it to the list. instance = self.utils.get_existing_instance( conn, hardwareIdInstance.path) if instance is None: # HardwareId doesn't exist. Skip it. 
break foundHardwardIDsInstanceNames.append( hardwareIdInstance.path) break LOG.debug( "The found hardware IDs are : %(foundHardwardIDsInstanceNames)s.", {'foundHardwardIDsInstanceNames': foundHardwardIDsInstanceNames}) return foundHardwardIDsInstanceNames def _get_initiator_group_from_job(self, conn, job): """After creating an new initiator group find it and return it. :param conn: the connection to the ecom server :param job: the create initiator group job :returns: dict -- initiatorDict """ associators = conn.Associators( job['Job'], ResultClass='CIM_InitiatorMaskingGroup') volpath = associators[0].path initiatorDict = {} initiatorDict['classname'] = volpath.classname keys = {} keys['CreationClassName'] = volpath['CreationClassName'] keys['SystemName'] = volpath['SystemName'] keys['DeviceID'] = volpath['DeviceID'] keys['SystemCreationClassName'] = volpath['SystemCreationClassName'] initiatorDict['keybindings'] = keys return initiatorDict def _create_masking_view( self, conn, configService, maskingViewName, deviceMaskingGroup, targetMaskingGroup, initiatorMaskingGroup, extraSpecs): """After creating an new initiator group find it and return it. 
:param conn: the connection to the ecom server :param configService: the create initiator group job :param maskingViewName: the masking view name string :param deviceMaskingGroup: device(storage) masking group (instanceName) :param targetMaskingGroup: target(port) masking group (instanceName) :param initiatorMaskingGroup: initiator masking group (instanceName) :param extraSpecs: extra specifications :returns: int -- return code :returns: dict -- job :raises: VolumeBackendAPIException """ rc, job = conn.InvokeMethod( 'CreateMaskingView', configService, ElementName=maskingViewName, InitiatorMaskingGroup=initiatorMaskingGroup, DeviceMaskingGroup=deviceMaskingGroup, TargetMaskingGroup=targetMaskingGroup) if rc != 0: rc, errordesc = self.utils.wait_for_job_complete(conn, job, extraSpecs) if rc != 0: exceptionMessage = (_( "Error Create Masking View: %(groupName)s. " "Return code: %(rc)lu. Error: %(error)s.") % {'groupName': maskingViewName, 'rc': rc, 'error': errordesc}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) LOG.info(_LI("Created new masking view : %(maskingViewName)s."), {'maskingViewName': maskingViewName}) return rc, job def find_new_masking_view(self, conn, jobDict): """Find the newly created volume. 
:param conn: the connection to the ecom server :param jobDict: the job dictionary :returns: dict -- maskingViewInstance """ associators = conn.Associators( jobDict['Job'], ResultClass='Symm_LunMaskingView') mvpath = associators[0].path maskingViewInstance = {} maskingViewInstance['classname'] = mvpath.classname keys = {} keys['CreationClassName'] = mvpath['CreationClassName'] keys['SystemName'] = mvpath['SystemName'] keys['DeviceID'] = mvpath['DeviceID'] keys['SystemCreationClassName'] = mvpath['SystemCreationClassName'] maskingViewInstance['keybindings'] = keys return maskingViewInstance def _get_storage_group_from_masking_view( self, conn, maskingViewName, storageSystemName): """Gets the Device Masking Group from masking view. :param conn: the connection to the ecom server :param maskingViewName: the masking view name (String) :param storageSystemName: storage system name (String) :returns: instance name foundStorageGroupInstanceName """ foundStorageGroupInstanceName = None foundView = self._find_masking_view( conn, maskingViewName, storageSystemName) if foundView is not None: foundStorageGroupInstanceName = ( self._get_storage_group_from_masking_view_instance( conn, foundView)) LOG.debug( "Masking view: %(view)s DeviceMaskingGroup: %(masking)s.", {'view': maskingViewName, 'masking': foundStorageGroupInstanceName}) else: LOG.warning(_LW("Unable to find Masking view: %(view)s."), {'view': maskingViewName}) return foundStorageGroupInstanceName def _get_storage_group_from_masking_view_instance( self, conn, maskingViewInstance): """Gets the Device Masking Group from masking view instance. 
:param conn: the connection to the ecom server :param maskingViewInstance: the masking view instance :returns: instance name foundStorageGroupInstanceName """ foundStorageGroupInstanceName = None groups = conn.AssociatorNames( maskingViewInstance, ResultClass='CIM_DeviceMaskingGroup') if groups[0] > 0: foundStorageGroupInstanceName = groups[0] return foundStorageGroupInstanceName def _get_storage_group_instance_name( self, conn, maskingViewDict, defaultStorageGroupInstanceName): """Gets the storage group instance name. If fastPolicy name is None then NON FAST is assumed. If it is a valid fastPolicy name then associate the new storage group with the fast policy. If we are using an existing storage group then we must check that it is associated with the correct fast policy. :param conn: the connection to the ecom server :param maskingViewDict: the masking view dictionary :param defaultStorageGroupInstanceName: default storage group instance name (can be None for Non FAST) :returns: instance name storageGroupInstanceName :raises: VolumeBackendAPIException """ storageGroupInstanceName = self.utils.find_storage_masking_group( conn, maskingViewDict['controllerConfigService'], maskingViewDict['sgGroupName']) if storageGroupInstanceName is None: storageGroupInstanceName = self._create_storage_group( conn, maskingViewDict, defaultStorageGroupInstanceName) if storageGroupInstanceName is None: errorMessage = (_( "Cannot create or find an storage group with name " "%(sgGroupName)s.") % {'sgGroupName': maskingViewDict['sgGroupName']}) LOG.error(errorMessage) raise exception.VolumeBackendAPIException(data=errorMessage) return storageGroupInstanceName def _get_port_group_instance_name( self, conn, controllerConfigService, pgGroupName): """Gets the port group instance name. The portGroup name has been defined in the EMC Config file if it does not exist the operation should fail. 
:param conn: the connection to the ecom server :param controllerConfigService: the controller configuration server :param pgGroupName: the port group name :returns: instance name foundPortGroupInstanceName """ foundPortGroupInstanceName = self._find_port_group( conn, controllerConfigService, pgGroupName) if foundPortGroupInstanceName is None: LOG.error(_LE( "Cannot find a portGroup with name %(pgGroupName)s. " "The port group for a masking view must be pre-defined."), {'pgGroupName': pgGroupName}) return foundPortGroupInstanceName LOG.info(_LI( "Port group instance name is %(foundPortGroupInstanceName)s."), {'foundPortGroupInstanceName': foundPortGroupInstanceName}) return foundPortGroupInstanceName def _get_initiator_group_instance_name( self, conn, controllerConfigService, igGroupName, connector, storageSystemName, extraSpecs): """Gets the initiator group instance name. :param conn: the connection to the ecom server :param controllerConfigService: the controller configuration server :param igGroupName: the port group name :param connector: the connector object :param storageSystemName: the storage system name :param extraSpecs: extra specifications :returns: foundInitiatorGroupInstanceName """ foundInitiatorGroupInstanceName = (self._create_or_get_initiator_group( conn, controllerConfigService, igGroupName, connector, storageSystemName, extraSpecs)) if foundInitiatorGroupInstanceName is None: LOG.error(_LE( "Cannot create or find an initiator group with " "name %(igGroupName)s."), {'igGroupName': igGroupName}) return foundInitiatorGroupInstanceName def _get_masking_view_instance_name( self, conn, controllerConfigService, maskingViewName, storageGroupInstanceName, portGroupInstanceName, initiatorGroupInstanceName, extraSpecs): """Gets the masking view instance name. 
        :param conn: the connection to the ecom server
        :param controllerConfigService: the controller configuration server
        :param maskingViewName: the masking view name (String)
        :param storageGroupInstanceName: the storage group instance name
        :param portGroupInstanceName: the port group instance name
        :param initiatorGroupInstanceName: the initiator group instance name
        :param extraSpecs: extra specifications
        :returns: instance name foundMaskingViewInstanceName, or None if the
            newly created view cannot be located afterwards
        """
        # Create the view, then look it up again from the returned job to
        # obtain its instance name.
        _rc, job = (
            self._create_masking_view(
                conn, controllerConfigService, maskingViewName,
                storageGroupInstanceName, portGroupInstanceName,
                initiatorGroupInstanceName, extraSpecs))
        foundMaskingViewInstanceName = self.find_new_masking_view(conn, job)
        if foundMaskingViewInstanceName is None:
            LOG.error(_LE(
                "Cannot find the new masking view just created with name "
                "%(maskingViewName)s."),
                {'maskingViewName': maskingViewName})

        return foundMaskingViewInstanceName

    def _check_if_rollback_action_for_masking_required(
            self, conn, rollbackDict):
        """This is a rollback action for FAST.

        We need to be able to return the volume to the default storage group
        if anything has gone wrong. The volume can also potentially belong to
        a storage group that is not the default depending on where the
        exception occurred.

        :param conn: the connection to the ecom server
        :param rollbackDict: the rollback dictionary
        :returns: message describing which rollback path was taken, or None
        :raises: VolumeBackendAPIException
        """
        message = None
        try:
            if rollbackDict['isV3']:
                # V3: simply try to put the volume back into the default SG.
                errorMessage = self._check_adding_volume_to_storage_group(
                    conn, rollbackDict,
                    rollbackDict['defaultStorageGroupInstanceName'])
                if errorMessage:
                    LOG.error(errorMessage)
                message = (_("V3 rollback"))
            else:
                foundStorageGroupInstanceName = (
                    self.utils.get_storage_group_from_volume(
                        conn, rollbackDict['volumeInstance'].path,
                        rollbackDict['sgName']))
                # Volume is not associated with any storage group so add
                # it back to the default.
                if not foundStorageGroupInstanceName:
                    LOG.warning(_LW(
                        "No storage group found. "
                        "Performing rollback on Volume: %(volumeName)s "
                        "To return it to the default storage group for FAST "
                        "policy %(fastPolicyName)s."),
                        {'volumeName': rollbackDict['volumeName'],
                         'fastPolicyName': rollbackDict['fastPolicyName']})
                    assocDefaultStorageGroupName = (
                        self.fast
                        .add_volume_to_default_storage_group_for_fast_policy(
                            conn,
                            rollbackDict['controllerConfigService'],
                            rollbackDict['volumeInstance'],
                            rollbackDict['volumeName'],
                            rollbackDict['fastPolicyName'],
                            rollbackDict['extraSpecs']))
                    if assocDefaultStorageGroupName is None:
                        # Best effort only: log and carry on so the caller
                        # still gets a rollback message.
                        LOG.error(_LE(
                            "Failed to Roll back to re-add volume "
                            "%(volumeName)s "
                            "to default storage group for fast policy "
                            "%(fastPolicyName)s: Please contact your sys "
                            "admin to get the volume re-added manually."),
                            {'volumeName': rollbackDict['volumeName'],
                             'fastPolicyName':
                             rollbackDict['fastPolicyName']})
                    message = (_("V2 rollback, volume is not in any storage "
                                 "group."))
                else:
                    LOG.info(_LI(
                        "The storage group found is "
                        "%(foundStorageGroupInstanceName)s."),
                        {'foundStorageGroupInstanceName':
                         foundStorageGroupInstanceName})

                    # Check the name, see is it the default storage group
                    # or another.
                    if (foundStorageGroupInstanceName !=
                            rollbackDict['defaultStorageGroupInstanceName']):
                        # Remove it from its current masking view and return it
                        # to its default masking view if fast is enabled.
                        self.remove_and_reset_members(
                            conn,
                            rollbackDict['controllerConfigService'],
                            rollbackDict['volumeInstance'],
                            rollbackDict['volumeName'],
                            rollbackDict['extraSpecs'])
                        message = (_("V2 rollback - Volume in another storage "
                                     "group besides default storage group."))
        except Exception:
            errorMessage = (_(
                "Rollback for Volume: %(volumeName)s has failed. "
                "Please contact your system administrator to manually return "
                "your volume to the default storage group for fast policy "
                "%(fastPolicyName)s failed.")
                % {'volumeName': rollbackDict['volumeName'],
                   'fastPolicyName': rollbackDict['fastPolicyName']})
            LOG.exception(errorMessage)
            raise exception.VolumeBackendAPIException(data=errorMessage)

        return message

    def _find_new_initiator_group(self, conn, maskingGroupDict):
        """After creating an new initiator group find it and return it.

        :param conn: connection the ecom server
        :param maskingGroupDict: the maskingGroupDict dict (CreateGroup job
            output parameters)
        :returns: instance name foundInitiatorGroupInstanceName, or None
        """
        foundInitiatorGroupInstanceName = None

        if 'MaskingGroup' in maskingGroupDict:
            foundInitiatorGroupInstanceName = maskingGroupDict['MaskingGroup']

        return foundInitiatorGroupInstanceName

    def _get_initiator_group_from_masking_view(
            self, conn, maskingViewName, storageSystemName):
        """Given the masking view name get the initiator group from it.

        :param conn: connection the the ecom server
        :param maskingViewName: the name of the masking view
        :param storageSystemName: the storage system name
        :returns: instance name foundInitiatorMaskingGroupInstanceName
        """
        foundInitiatorMaskingGroupInstanceName = None

        foundView = self._find_masking_view(
            conn, maskingViewName, storageSystemName)
        if foundView is not None:
            groups = conn.AssociatorNames(
                foundView,
                ResultClass='CIM_InitiatorMaskingGroup')
            if len(groups):
                foundInitiatorMaskingGroupInstanceName = groups[0]

            LOG.debug(
                "Masking view: %(view)s InitiatorMaskingGroup: %(masking)s.",
                {'view': maskingViewName,
                 'masking': foundInitiatorMaskingGroupInstanceName})
        else:
            LOG.warning(_LW("Unable to find Masking view: %(view)s."),
                        {'view': maskingViewName})

        return foundInitiatorMaskingGroupInstanceName

    def _verify_initiator_group_from_masking_view(
            self, conn, controllerConfigService, maskingViewName, connector,
            storageSystemName, igGroupName, extraSpecs):
        """Check that the initiator group contains the correct initiators.
        If using an existing masking view check that the initiator group
        contains the correct initiators. If it does not contain the correct
        initiators then we delete the initiator group from the masking view,
        re-create it with the correct initiators and add it to the masking view
        NOTE: EMC does not support ModifyMaskingView so we must first
        delete the masking view and recreate it.

        :param conn: connection the ecom server
        :param controllerConfigService: the controller configuration service
        :param maskingViewName: maskingview name (String)
        :param connector: the connector dict
        :param storageSystemName: the storage System Name (string)
        :param igGroupName: the initiator group name (String)
        :param extraSpecs: extra specifications
        :returns: boolean -- False only when recovery is impossible
        """
        initiatorNames = self._find_initiator_names(conn, connector)
        foundInitiatorGroupFromConnector = self._find_initiator_masking_group(
            conn, controllerConfigService, initiatorNames)

        foundInitiatorGroupFromMaskingView = (
            self._get_initiator_group_from_masking_view(
                conn, maskingViewName, storageSystemName))

        # Only act when the IG the connector maps to differs from the IG
        # actually attached to the masking view.
        if (foundInitiatorGroupFromConnector !=
                foundInitiatorGroupFromMaskingView):
            if foundInitiatorGroupFromMaskingView is not None:
                maskingViewInstanceName = self._find_masking_view(
                    conn, maskingViewName, storageSystemName)
                if foundInitiatorGroupFromConnector is None:
                    # No IG exists for these initiators yet: make sure the
                    # hardware ids exist (creating them if needed), then
                    # build a fresh initiator group.
                    storageHardwareIDInstanceNames = (
                        self._get_storage_hardware_id_instance_names(
                            conn, initiatorNames, storageSystemName))
                    if not storageHardwareIDInstanceNames:
                        LOG.info(_LI(
                            "Initiator Name(s) %(initiatorNames)s are not on "
                            "array %(storageSystemName)s. "),
                            {'initiatorNames': initiatorNames,
                             'storageSystemName': storageSystemName})
                        storageHardwareIDInstanceNames = (
                            self._create_hardware_ids(conn, initiatorNames,
                                                      storageSystemName))
                        if not storageHardwareIDInstanceNames:
                            LOG.error(_LE(
                                "Failed to create hardware id(s) on "
                                "%(storageSystemName)s."),
                                {'storageSystemName': storageSystemName})
                            return False
                    foundInitiatorGroupFromConnector = (
                        self._create_initiator_Group(
                            conn, controllerConfigService, igGroupName,
                            storageHardwareIDInstanceNames, extraSpecs))
                storageGroupInstanceName = (
                    self._get_storage_group_from_masking_view(
                        conn, maskingViewName, storageSystemName))
                portGroupInstanceName = self._get_port_group_from_masking_view(
                    conn, maskingViewName, storageSystemName)
                if (foundInitiatorGroupFromConnector is not None and
                        storageGroupInstanceName is not None and
                        portGroupInstanceName is not None):
                    # ModifyMaskingView is unsupported: delete the old view
                    # and recreate it with the corrected initiator group.
                    self._delete_masking_view(
                        conn, controllerConfigService, maskingViewName,
                        maskingViewInstanceName, extraSpecs)
                    newMaskingViewInstanceName = (
                        self._get_masking_view_instance_name(
                            conn, controllerConfigService, maskingViewName,
                            storageGroupInstanceName, portGroupInstanceName,
                            foundInitiatorGroupFromConnector, extraSpecs))
                    if newMaskingViewInstanceName is not None:
                        LOG.debug(
                            "The old masking view has been replaced: "
                            "%(maskingViewName)s.",
                            {'maskingViewName': maskingViewName})
                else:
                    LOG.error(_LE(
                        "One of the components of the original masking view "
                        "%(maskingViewName)s cannot be retrieved so "
                        "please contact your system administrator to check "
                        "that the correct initiator(s) are part of masking."),
                        {'maskingViewName': maskingViewName})
                    return False
        return True

    def _create_initiator_Group(
            self, conn, controllerConfigService, igGroupName,
            hardwareIdinstanceNames, extraSpecs):
        """Create a new initiator group.

        Given a list of hardwareId Instance name create a new
        initiator group.
:param conn: connection the ecom server :param controllerConfigService: the controller configuration service :param igGroupName: the initiator group name (String) :param hardwareIdinstanceNames: one or more hardware id instance names :param extraSpecs: extra specifications :returns: foundInitiatorGroupInstanceName :raises: VolumeBackendAPIException """ rc, job = conn.InvokeMethod( 'CreateGroup', controllerConfigService, GroupName=igGroupName, Type=self.utils.get_num(INITIATORGROUPTYPE, '16'), Members=[hardwareIdinstanceNames[0]]) if rc != 0: rc, errordesc = self.utils.wait_for_job_complete(conn, job, extraSpecs) if rc != 0: exceptionMessage = (_( "Error Create Group: %(groupName)s. " "Return code: %(rc)lu. Error: %(error)s.") % {'groupName': igGroupName, 'rc': rc, 'error': errordesc}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) foundInitiatorGroupInstanceName = self._find_new_initiator_group( conn, job) numHardwareIDInstanceNames = len(hardwareIdinstanceNames) if numHardwareIDInstanceNames > 1: for j in range(1, numHardwareIDInstanceNames): rc, job = conn.InvokeMethod( 'AddMembers', controllerConfigService, MaskingGroup=foundInitiatorGroupInstanceName, Members=[hardwareIdinstanceNames[j]]) if rc != 0: rc, errordesc = ( self.utils.wait_for_job_complete(conn, job, extraSpecs)) if rc != 0: exceptionMessage = (_( "Error adding initiator to group : %(groupName)s. " "Return code: %(rc)lu. Error: %(error)s.") % {'groupName': igGroupName, 'rc': rc, 'error': errordesc}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) j = j + 1 return foundInitiatorGroupInstanceName def _get_port_group_from_masking_view( self, conn, maskingViewName, storageSystemName): """Given the masking view name get the port group from it. 
        :param conn: connection the the ecom server
        :param maskingViewName: the name of the masking view
        :param storageSystemName: the storage system name
        :returns: instance name foundPortMaskingGroupInstanceName, or None
        """
        foundPortMaskingGroupInstanceName = None

        foundView = self._find_masking_view(
            conn, maskingViewName, storageSystemName)
        if foundView:
            groups = conn.AssociatorNames(
                foundView,
                ResultClass='CIM_TargetMaskingGroup')
            if len(groups) > 0:
                foundPortMaskingGroupInstanceName = groups[0]

            # NOTE(review): this debug message says "InitiatorMaskingGroup"
            # but the value logged is the target (port) masking group --
            # looks like a copy/paste from the initiator-group getter.
            LOG.debug(
                "Masking view: %(view)s InitiatorMaskingGroup: %(masking)s.",
                {'view': maskingViewName,
                 'masking': foundPortMaskingGroupInstanceName})

        return foundPortMaskingGroupInstanceName

    def _delete_masking_view(
            self, conn, controllerConfigService, maskingViewName,
            maskingViewInstanceName, extraSpecs):
        """Delete a masking view.

        :param conn: connection the ecom server
        :param controllerConfigService: the controller configuration service
        :param maskingViewName: maskingview name (String)
        :param maskingViewInstanceName: the masking view instance name
        :param extraSpecs: extra specifications
        :raises: VolumeBackendAPIException
        """
        rc, job = conn.InvokeMethod('DeleteMaskingView',
                                    controllerConfigService,
                                    ProtocolController=maskingViewInstanceName)
        if rc != 0:
            # Non-zero rc may only indicate an async job; treat it as a
            # failure only after the job completes with an error.
            rc, errordesc = self.utils.wait_for_job_complete(conn, job,
                                                             extraSpecs)
            if rc != 0:
                exceptionMessage = (_(
                    "Error Modifying masking view : %(groupName)s. "
                    "Return code: %(rc)lu. Error: %(error)s.")
                    % {'groupName': maskingViewName,
                       'rc': rc,
                       'error': errordesc})
                LOG.error(exceptionMessage)
                raise exception.VolumeBackendAPIException(
                    data=exceptionMessage)

    def get_masking_view_from_storage_group(
            self, conn, storageGroupInstanceName):
        """Get the associated maskingview instance name.

        Given storage group instance name, get the associated masking
        view instance name.
        :param conn: connection the ecom server
        :param storageGroupInstanceName: the storage group instance name
        :returns: instance name foundMaskingViewInstanceName, or None when
            the storage group is not part of any masking view
        """
        foundMaskingViewInstanceName = None
        maskingViews = conn.AssociatorNames(
            storageGroupInstanceName,
            ResultClass='Symm_LunMaskingView')
        # A storage group belongs to at most one masking view; take the
        # first association if any exist.
        if len(maskingViews) > 0:
            foundMaskingViewInstanceName = maskingViews[0]

        return foundMaskingViewInstanceName

    def add_volume_to_storage_group(
            self, conn, controllerConfigService, storageGroupInstanceName,
            volumeInstance, volumeName, sgGroupName, extraSpecs):
        """Add a volume to an existing storage group.

        :param conn: connection to ecom server
        :param controllerConfigService: the controller configuration service
        :param storageGroupInstanceName: storage group instance name
        :param volumeInstance: the volume instance
        :param volumeName: the name of the volume (String)
        :param sgGroupName: the name of the storage group (String)
        :param extraSpecs: additional info
        :returns: None -- errors surface as exceptions from the provision
            layer, not as a return code
        """
        self.provision.add_members_to_masking_group(
            conn, controllerConfigService, storageGroupInstanceName,
            volumeInstance.path, volumeName, extraSpecs)
        LOG.info(_LI(
            "Added volume: %(volumeName)s to existing storage group "
            "%(sgGroupName)s."),
            {'volumeName': volumeName,
             'sgGroupName': sgGroupName})

    def remove_device_from_default_storage_group(
            self, conn, controllerConfigService, volumeInstanceName,
            volumeName, fastPolicyName, extraSpecs):
        """Remove the volume from the default storage group.

        Remove the volume from the default storage group for the FAST
        policy and return the default storage group instance name.
        :param conn: the connection to the ecom server
        :param controllerConfigService: the controller config service
        :param volumeInstanceName: the volume instance name
        :param volumeName: the volume name (String)
        :param fastPolicyName: the fast policy name (String)
        :param extraSpecs: additional info
        :returns: instance name defaultStorageGroupInstanceName, or None on
            failure
        """
        failedRet = None
        defaultStorageGroupInstanceName, defaultSgName = (
            self.fast.get_and_verify_default_storage_group(
                conn, controllerConfigService, volumeInstanceName,
                volumeName, fastPolicyName))
        if defaultStorageGroupInstanceName is None:
            LOG.warning(_LW(
                "Volume %(volumeName)s was not first part of the default "
                "storage group for the FAST Policy."),
                {'volumeName': volumeName})
            return failedRet

        assocVolumeInstanceNames = self.get_devices_from_storage_group(
            conn, defaultStorageGroupInstanceName)

        LOG.debug(
            "There are %(length)lu associated with the default storage group "
            "for fast before removing volume %(volumeName)s.",
            {'length': len(assocVolumeInstanceNames),
             'volumeName': volumeName})

        self.provision.remove_device_from_storage_group(
            conn, controllerConfigService, defaultStorageGroupInstanceName,
            volumeInstanceName, volumeName, extraSpecs)

        # Re-query to log the post-removal membership count.
        assocVolumeInstanceNames = self.get_devices_from_storage_group(
            conn, defaultStorageGroupInstanceName)
        LOG.debug(
            "There are %(length)lu associated with the default storage group "
            "for fast after removing volume %(volumeName)s.",
            {'length': len(assocVolumeInstanceNames),
             'volumeName': volumeName})

        # Required for unit tests.
        # Verify the volume really left the group; the wrapper exists so
        # unit tests can override the lookup.
        emptyStorageGroupInstanceName = (
            self._wrap_get_storage_group_from_volume(conn, volumeInstanceName,
                                                     defaultSgName))

        if emptyStorageGroupInstanceName is not None:
            LOG.error(_LE(
                "Failed to remove %(volumeName)s from the default storage "
                "group for the FAST Policy."),
                {'volumeName': volumeName})
            return failedRet

        return defaultStorageGroupInstanceName

    def _wrap_get_storage_group_from_volume(self, conn, volumeInstanceName,
                                            defaultSgName):
        """Wrapper for get_storage_group_from_volume.

        Needed for override in tests.

        :param conn: the connection to the ecom server
        :param volumeInstanceName: the volume instance name
        :param defaultSgName: the default storage group name
        :returns: emptyStorageGroupInstanceName
        """
        return self.utils.get_storage_group_from_volume(
            conn, volumeInstanceName, defaultSgName)

    def get_devices_from_storage_group(
            self, conn, storageGroupInstanceName):
        """Get the associated volume Instance names.

        Given the storage group instance name get the associated volume
        Instance names.

        :param conn: connection the the ecom server
        :param storageGroupInstanceName: the storage group instance name
        :returns: list -- volumeInstanceNames list of volume instance names
        """
        volumeInstanceNames = conn.AssociatorNames(
            storageGroupInstanceName,
            ResultClass='EMC_StorageVolume')

        return volumeInstanceNames

    def get_associated_masking_groups_from_device(
            self, conn, volumeInstanceName):
        """Get the associated storage groups from the volume Instance name.

        Given the volume instance name get the associated storage group
        instance names.
        :param conn: connection the the ecom server
        :param volumeInstanceName: the volume instance name
        :returns: list -- list of storage group instance names, or None when
            the volume is not in any storage group
        """
        maskingGroupInstanceNames = conn.AssociatorNames(
            volumeInstanceName,
            ResultClass='CIM_DeviceMaskingGroup',
            AssocClass='CIM_OrderedMemberOfCollection')
        if len(maskingGroupInstanceNames) > 0:
            return maskingGroupInstanceNames
        else:
            LOG.info(_LI("Volume %(volumeName)s not in any storage group."),
                     {'volumeName': volumeInstanceName})
            return None

    def remove_and_reset_members(
            self, conn, controllerConfigService, volumeInstance,
            volumeName, extraSpecs, connector=None, noReset=None):
        """Part of unmap device or rollback.

        Removes volume from the Device Masking Group that belongs to a
        Masking View. Check if fast policy is in the extra specs, if it isn't
        we do not need to do any thing for FAST. Assume that
        isTieringPolicySupported is False unless the FAST policy is in
        the extra specs and tiering is enabled on the array.

        :param conn: connection the the ecom server
        :param controllerConfigService: the controller configuration service
        :param volumeInstance: the volume Instance
        :param volumeName: the volume name
        :param extraSpecs: additional info
        :param connector: optional
        :param noReset: optional, if none, then reset
            (NOTE(review): noReset is not referenced in the visible body --
            confirm whether it is still honoured anywhere)
        :returns: storageGroupInstanceName
        """
        fastPolicyName = extraSpecs.get(FASTPOLICY, None)
        isV3 = extraSpecs[ISV3]
        storageGroupInstanceName = None
        if connector is not None:
            # With a connector we can disambiguate which SG (of possibly
            # several, in a multi-host attach) belongs to this host.
            storageGroupInstanceName = self._get_sg_associated_with_connector(
                conn, controllerConfigService, volumeInstance.path,
                volumeName, connector)
            if storageGroupInstanceName is None:
                return None
        else:
            # Connector is None in V3 volume deletion case.
            storageGroupInstanceNames = (
                self.get_associated_masking_groups_from_device(
                    conn, volumeInstance.path))
            if storageGroupInstanceNames:
                storageGroupInstanceName = storageGroupInstanceNames[0]
            else:
                return None

        instance = conn.GetInstance(storageGroupInstanceName, LocalOnly=False)
        storageGroupName = instance['ElementName']

        volumeInstanceNames = self.get_devices_from_storage_group(
            conn, storageGroupInstanceName)
        storageSystemInstanceName = self.utils.find_storage_system(
            conn, controllerConfigService)

        numVolInMaskingView = len(volumeInstanceNames)
        LOG.debug(
            "There are %(numVol)d volumes in the storage group "
            "%(maskingGroup)s.",
            {'numVol': numVolInMaskingView,
             'maskingGroup': storageGroupInstanceName})

        if not isV3:
            isTieringPolicySupported, __ = (
                self._get_tiering_info(conn, storageSystemInstanceName,
                                       fastPolicyName))

        if numVolInMaskingView == 1:
            # Last volume in the storage group.
            LOG.warning(_LW("Only one volume remains in storage group "
                            "%(sgname)s. Driver will attempt cleanup."),
                        {'sgname': storageGroupName})
            mvInstanceName = self.get_masking_view_from_storage_group(
                conn, storageGroupInstanceName)
            if mvInstanceName is None:
                LOG.warning(_LW("Unable to get masking view %(maskingView)s "
                                "from storage group."),
                            {'maskingView': mvInstanceName})
            else:
                maskingViewInstance = conn.GetInstance(
                    mvInstanceName, LocalOnly=False)
                maskingViewName = maskingViewInstance['ElementName']

            # NOTE(review): the GetInstance/ElementName pair below duplicates
            # the one in the else branch above, and will fail if
            # mvInstanceName is None (the warning branch) -- looks like a
            # merge artifact; confirm intent before restructuring.
            maskingViewInstance = conn.GetInstance(
                mvInstanceName, LocalOnly=False)
            maskingViewName = maskingViewInstance['ElementName']

            # Serialize masking-view deletion per view name.
            @lockutils.synchronized(maskingViewName, "emc-mv-", True)
            def do_delete_mv_and_sg():
                return self._delete_mv_and_sg(
                    conn, controllerConfigService, mvInstanceName,
                    maskingViewName, storageGroupInstanceName,
                    storageGroupName, volumeInstance, volumeName,
                    extraSpecs)
            do_delete_mv_and_sg()
        else:
            # Not the last volume so remove it from storage group in
            # the masking view.
            LOG.debug("Start: number of volumes in masking storage group: "
                      "%(numVol)d", {'numVol': len(volumeInstanceNames)})
            self.provision.remove_device_from_storage_group(
                conn, controllerConfigService, storageGroupInstanceName,
                volumeInstance.path, volumeName, extraSpecs)

            LOG.debug(
                "RemoveMembers for volume %(volumeName)s completed "
                "successfully.", {'volumeName': volumeName})

            # Add it back to the default storage group.
            if isV3:
                self._return_volume_to_default_storage_group_v3(
                    conn, controllerConfigService, storageGroupName,
                    volumeInstance, volumeName, storageSystemInstanceName,
                    extraSpecs)
            else:
                # V2 if FAST POLICY enabled, move the volume to the default
                # SG.
                if fastPolicyName is not None and isTieringPolicySupported:
                    self._cleanup_tiering(
                        conn, controllerConfigService, fastPolicyName,
                        volumeInstance, volumeName, extraSpecs)

            volumeInstanceNames = self.get_devices_from_storage_group(
                conn, storageGroupInstanceName)
            LOG.debug(
                "End: number of volumes in masking storage group: %(numVol)d.",
                {'numVol': len(volumeInstanceNames)})

        return storageGroupInstanceName

    def _delete_mv_and_sg(self, conn, controllerConfigService, mvInstanceName,
                          maskingViewName, storageGroupInstanceName,
                          storageGroupName, volumeInstance, volumeName,
                          extraSpecs):
        """Delete the Masking view and the Storage Group.

        Also does necessary cleanup like removing the policy from the
        storage group for V2 and returning the volume to the default
        storage group.
        :param conn: connection the the ecom server
        :param controllerConfigService: the controller configuration service
        :param mvInstanceName: masking view instance name
        :param maskingViewName: masking view name
        :param storageGroupInstanceName: storage group instance name
        :param storageGroupName: storage group name
        :param volumeInstance: the volume Instance
        :param volumeName: the volume name
        :param extraSpecs: extra specs
        """
        isV3 = extraSpecs[ISV3]
        fastPolicyName = extraSpecs.get(FASTPOLICY, None)

        storageSystemInstanceName = self.utils.find_storage_system(
            conn, controllerConfigService)
        # The masking view must go first; the storage group cannot be
        # deleted while a view still references it.
        self._last_volume_delete_masking_view(
            conn, controllerConfigService, mvInstanceName,
            maskingViewName, extraSpecs)
        if not isV3:
            # V2 only: detach the storage group from its FAST policy rule
            # before deleting it.
            isTieringPolicySupported, tierPolicyServiceInstanceName = (
                self._get_tiering_info(conn, storageSystemInstanceName,
                                       fastPolicyName))
            self._get_and_remove_rule_association(
                conn, fastPolicyName,
                isTieringPolicySupported,
                tierPolicyServiceInstanceName,
                storageSystemInstanceName['Name'],
                storageGroupInstanceName, extraSpecs)

        # Remove the last volume and delete the storage group.
        self._remove_last_vol_and_delete_sg(
            conn, controllerConfigService, storageGroupInstanceName,
            storageGroupName, volumeInstance.path, volumeName,
            extraSpecs)

        # Add it back to the default storage group.
        if isV3:
            self._return_volume_to_default_storage_group_v3(
                conn, controllerConfigService, storageGroupName,
                volumeInstance, volumeName, storageSystemInstanceName,
                extraSpecs)
        else:
            # V2 if FAST POLICY enabled, move the volume to the default
            # SG.
            if fastPolicyName is not None and isTieringPolicySupported:
                self._cleanup_tiering(
                    conn, controllerConfigService, fastPolicyName,
                    volumeInstance, volumeName, extraSpecs)

    def _get_sg_associated_with_connector(
            self, conn, controllerConfigService, volumeInstanceName,
            volumeName, connector):
        """Get storage group associated with connector.

        If the connector gets passed then extra logic required to
        get storage group.
        :param conn: the ecom connection
        :param controllerConfigService: storage system instance name
        :param volumeInstanceName: the volume instance name
        :param volumeName: the volume name (String)
        :param connector: the connector object
        :returns: storageGroupInstanceName(can be None)
        """
        # Thin wrapper: getSG=True selects the storage-group flavour of the
        # shared lookup helper.
        return self._get_sg_or_mv_associated_with_initiator(
            conn, controllerConfigService, volumeInstanceName, volumeName,
            connector, True)

    def _get_tiering_info(
            self, conn, storageSystemInstanceName, fastPolicyName):
        """Get tiering specifics.

        :param conn: the ecom connection
        :param storageSystemInstanceName: storage system instance name
        :param fastPolicyName: the FAST policy name (may be None, in which
            case tiering is reported as unsupported)
        :returns: boolean -- isTieringPolicySupported
        :returns: tierPolicyServiceInstanceName
        """
        isTieringPolicySupported = False
        tierPolicyServiceInstanceName = None
        if fastPolicyName is not None:
            tierPolicyServiceInstanceName = self.utils.get_tier_policy_service(
                conn, storageSystemInstanceName)

            isTieringPolicySupported = self.fast.is_tiering_policy_enabled(
                conn, tierPolicyServiceInstanceName)
            LOG.debug(
                "FAST policy enabled on %(storageSystem)s: %(isSupported)s",
                {'storageSystem': storageSystemInstanceName,
                 'isSupported': isTieringPolicySupported})

        return isTieringPolicySupported, tierPolicyServiceInstanceName

    def _last_volume_delete_masking_view(
            self, conn, controllerConfigService, mvInstanceName,
            maskingViewName, extraSpecs):
        """Delete the masking view.

        Delete the masking view if the volume is the last one in the
        storage group.
        :param conn: the ecom connection
        :param controllerConfigService: controller config service
        :param mvInstanceName: masking view instance name
        :param maskingViewName: masking view name
        :param extraSpecs: extra specifications
        :raises: VolumeBackendAPIException -- if the view still exists after
            the delete call completed
        """
        LOG.debug(
            "Last volume in the storage group, deleting masking view "
            "%(maskingViewName)s.",
            {'maskingViewName': maskingViewName})
        self._delete_masking_view(
            conn, controllerConfigService, maskingViewName,
            mvInstanceName, extraSpecs)

        # Verify the deletion actually took effect on the array.
        mvInstance = self.utils.get_existing_instance(
            conn, mvInstanceName)
        if mvInstance:
            exceptionMessage = (_(
                "Masking view %(maskingViewName)s "
                "was not deleted successfully") %
                {'maskingViewName': maskingViewName})
            LOG.error(exceptionMessage)
            raise exception.VolumeBackendAPIException(
                data=exceptionMessage)
        else:
            LOG.debug("Masking view %(maskingViewName)s "
                      "successfully deleted.",
                      {'maskingViewName': maskingViewName})

    def _get_and_remove_rule_association(
            self, conn, fastPolicyName, isTieringPolicySupported,
            tierPolicyServiceInstanceName, storageSystemName,
            storageGroupInstanceName, extraSpecs):
        """Remove the storage group from the policy rule.

        :param conn: the ecom connection
        :param fastPolicyName: the fast policy name
        :param isTieringPolicySupported: boolean
        :param tierPolicyServiceInstanceName: the tier policy instance name
        :param storageSystemName: storage system name
        :param storageGroupInstanceName: the storage group instance name
        :param extraSpecs: additional info
        """
        # Disassociate storage group from FAST policy.
        # No-op unless a FAST policy is named and tiering is enabled.
        if fastPolicyName is not None and isTieringPolicySupported is True:
            tierPolicyInstanceName = self.fast.get_tier_policy_by_name(
                conn, storageSystemName, fastPolicyName)

            LOG.info(_LI(
                "Policy: %(policy)s, policy service:%(service)s, "
                "masking group: %(maskingGroup)s."),
                {'policy': tierPolicyInstanceName,
                 'service': tierPolicyServiceInstanceName,
                 'maskingGroup': storageGroupInstanceName})

            self.fast.delete_storage_group_from_tier_policy_rule(
                conn, tierPolicyServiceInstanceName,
                storageGroupInstanceName, tierPolicyInstanceName, extraSpecs)

    def _return_volume_to_default_storage_group_v3(
            self, conn, controllerConfigService, storageGroupName,
            volumeInstance, volumeName, storageSystemInstanceName,
            extraSpecs):
        """Return volume to the default storage group in v3.

        :param conn: the ecom connection
        :param controllerConfigService: controller config service
        :param storageGroupName: storage group name
        :param volumeInstance: volumeInstance
        :param volumeName: the volume name
        :param storageSystemInstanceName: the storage system instance name
        :param extraSpecs: additional info
        :raises: VolumeBackendAPIException
        """
        # First strip the shortHostname from the storage group name.
        defaultStorageGroupName, shorthostName = (
            self.utils.strip_short_host_name(storageGroupName))

        # Check if host name exists which signifies detach operation.
        if shorthostName is not None:
            # Populate maskingViewDict and storageGroupInstanceName.
maskingViewDict = {} maskingViewDict['sgGroupName'] = defaultStorageGroupName maskingViewDict['volumeInstance'] = volumeInstance maskingViewDict['volumeName'] = volumeName maskingViewDict['controllerConfigService'] = ( controllerConfigService) maskingViewDict['storageSystemName'] = ( storageSystemInstanceName) sgInstanceName = self.utils.find_storage_masking_group( conn, controllerConfigService, defaultStorageGroupName) if sgInstanceName is not None: errorMessage = ( self._check_adding_volume_to_storage_group( conn, maskingViewDict, sgInstanceName)) else: errorMessage = (_( "Storage group %(sgGroupName) " "does not exist.") % {'StorageGroup': defaultStorageGroupName}) LOG.error(errorMessage) raise exception.VolumeBackendAPIException( data=errorMessage) def _cleanup_tiering( self, conn, controllerConfigService, fastPolicyName, volumeInstance, volumeName, extraSpecs): """Clean up tiering. :param conn: the ecom connection :param controllerConfigService: the controller configuration service :param fastPolicyName: the fast policy name :param volumeInstance: volume instance :param volumeName: the volume name :param extraSpecs: additional info """ defaultStorageGroupInstanceName = ( self.fast.get_policy_default_storage_group( conn, controllerConfigService, fastPolicyName)) volumeInstanceNames = self.get_devices_from_storage_group( conn, defaultStorageGroupInstanceName) LOG.debug( "Start: number of volumes in default storage group: %(numVol)d.", {'numVol': len(volumeInstanceNames)}) defaultStorageGroupInstanceName = ( self.fast.add_volume_to_default_storage_group_for_fast_policy( conn, controllerConfigService, volumeInstance, volumeName, fastPolicyName, extraSpecs)) # Check default storage group number of volumes. 
        volumeInstanceNames = self.get_devices_from_storage_group(
            conn, defaultStorageGroupInstanceName)
        LOG.debug(
            "End: number of volumes in default storage group: %(numVol)d.",
            {'numVol': len(volumeInstanceNames)})

    def get_target_wwns(self, conn, mvInstanceName):
        """Get the DA ports wwns.

        :param conn: the ecom connection
        :param mvInstanceName: masking view instance name
        :returns: list -- the list of target wwns for the masking view
            (empty when the view exposes no FC endpoints)
        """
        targetWwns = []
        targetPortInstanceNames = conn.AssociatorNames(
            mvInstanceName,
            ResultClass='Symm_FCSCSIProtocolEndpoint')
        numberOfPorts = len(targetPortInstanceNames)
        if numberOfPorts <= 0:
            # The extra 'numPorts' mapping key is unused by the format
            # string; harmless with %-mapping formatting.
            LOG.warning(_LW("No target ports found in "
                            "masking view %(maskingView)s."),
                        {'numPorts': len(targetPortInstanceNames),
                         'maskingView': mvInstanceName})
        for targetPortInstanceName in targetPortInstanceNames:
            targetWwns.append(targetPortInstanceName['Name'])
        return targetWwns

    def get_masking_view_by_volume(self, conn, volumeInstance, connector):
        """Given volume, retrieve the masking view instance name.

        :param conn: the ecom connection
        :param volumeInstance: the volume instance
        :param connector: the connector object
        :returns: masking view instance name
        """
        storageSystemName = volumeInstance['SystemName']
        controllerConfigService = (
            self.utils.find_controller_configuration_service(
                conn, storageSystemName))
        volumeName = volumeInstance['ElementName']
        # getSG=False selects the masking-view flavour of the shared helper.
        mvInstanceName = (
            self._get_sg_or_mv_associated_with_initiator(
                conn, controllerConfigService, volumeInstance.path,
                volumeName, connector, False))
        return mvInstanceName

    def get_masking_views_by_port_group(self, conn, portGroupInstanceName):
        """Given port group, retrieve the masking view instance name.
:param conn: the ecom connection :param portGroupInstanceName: the instance name of the port group :returns: masking view instance names """ mvInstanceNames = conn.AssociatorNames( portGroupInstanceName, ResultClass='Symm_LunMaskingView') return mvInstanceNames def get_port_group_from_masking_view(self, conn, maskingViewInstanceName): """Get the port group in a masking view. :param conn: the ecom connection :param maskingViewInstanceName: masking view instance name :returns: portGroupInstanceName """ portGroupInstanceNames = conn.AssociatorNames( maskingViewInstanceName, ResultClass='SE_TargetMaskingGroup') if len(portGroupInstanceNames) > 0: LOG.debug("Found port group %(pg)s in masking view %(mv)s.", {'pg': portGroupInstanceNames[0], 'mv': maskingViewInstanceName}) return portGroupInstanceNames[0] else: LOG.warning(_LW("No port group found in masking view %(mv)s."), {'mv': maskingViewInstanceName}) def get_initiator_group_from_masking_view( self, conn, maskingViewInstanceName): """Get initiator group in a masking view. :param conn: the ecom connection :param maskingViewInstanceName: masking view instance name :returns: initiatorGroupInstanceName or None if it is not found """ initiatorGroupInstanceNames = conn.AssociatorNames( maskingViewInstanceName, ResultClass='SE_InitiatorMaskingGroup') if len(initiatorGroupInstanceNames) > 0: LOG.debug("Found initiator group %(pg)s in masking view %(mv)s.", {'pg': initiatorGroupInstanceNames[0], 'mv': maskingViewInstanceName}) return initiatorGroupInstanceNames[0] else: LOG.warning(_LW("No port group found in masking view %(mv)s."), {'mv': maskingViewInstanceName}) def _get_sg_or_mv_associated_with_initiator( self, conn, controllerConfigService, volumeInstanceName, volumeName, connector, getSG=True): """Get storage group or masking view associated with connector. If the connector gets passed then extra logic required to get storage group. 
:param conn: the ecom connection :param controllerConfigService: storage system instance name :param volumeInstanceName: volume instance name :param volumeName: volume element name :param connector: the connector object :param getSG: True if to get storage group; otherwise get masking :returns: foundInstanceName(can be None) """ foundInstanceName = None initiatorNames = self._find_initiator_names(conn, connector) igInstanceNameFromConnector = self._find_initiator_masking_group( conn, controllerConfigService, initiatorNames) # Device can be shared by multi-SGs in a multi-host attach case. storageGroupInstanceNames = ( self.get_associated_masking_groups_from_device( conn, volumeInstanceName)) LOG.debug("Found storage groups volume " "%(volumeName)s is in: %(storageGroups)s", {'volumeName': volumeName, 'storageGroups': storageGroupInstanceNames}) if storageGroupInstanceNames: # not empty # Get the SG by IGs. for sgInstanceName in storageGroupInstanceNames: # Get maskingview from storage group. mvInstanceName = self.get_masking_view_from_storage_group( conn, sgInstanceName) LOG.debug("Found masking view associated with SG " "%(storageGroup)s: %(maskingview)s", {'maskingview': mvInstanceName, 'storageGroup': sgInstanceName}) # Get initiator group from masking view. 
igInstanceName = ( self.get_initiator_group_from_masking_view( conn, mvInstanceName)) LOG.debug("Initiator Group in masking view %(ig)s: " "IG associated with connector%(igFromConnector)s", {'ig': igInstanceName, 'igFromConnector': igInstanceNameFromConnector}) if igInstanceName == igInstanceNameFromConnector: if getSG is True: foundInstanceName = sgInstanceName LOG.debug("Found the storage group associated with " "initiator %(initiator)s: %(storageGroup)s", {'initiator': initiatorNames, 'storageGroup': foundInstanceName}) else: foundInstanceName = mvInstanceName LOG.debug("Found the masking view associated with " "initiator %(initiator)s: %(maskingview)s.", {'initiator': initiatorNames, 'maskingview': foundInstanceName}) break return foundInstanceName def _remove_last_vol_and_delete_sg(self, conn, controllerConfigService, storageGroupInstanceName, storageGroupName, volumeInstanceName, volumeName, extraSpecs): """Remove the last volume and delete the storage group :param conn: the ecom connection :param controllerConfigService: controller config service :param storageGroupInstanceName: storage group instance name :param storageGroupName: storage group name :param volumeInstanceName: volume instance name :param volumeName: volume name :param extrSpecs: additional info """ self.provision.remove_device_from_storage_group( conn, controllerConfigService, storageGroupInstanceName, volumeInstanceName, volumeName, extraSpecs) LOG.debug( "Remove the last volume %(volumeName)s completed " "successfully.", {'volumeName': volumeName}) # Delete storage group. 
self._delete_storage_group(conn, controllerConfigService, storageGroupInstanceName, storageGroupName, extraSpecs) storageGroupInstance = self.utils.get_existing_instance( conn, storageGroupInstanceName) if storageGroupInstance: exceptionMessage = (_( "Storage group %(storageGroupName)s " "was not deleted successfully") % {'storageGroupName': storageGroupName}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) else: LOG.debug("Storage Group %(storageGroupName)s " "successfully deleted.", {'storageGroupName': storageGroupName}) def _delete_storage_group(self, conn, controllerConfigService, storageGroupInstanceName, storageGroupName, extraSpecs): """Delete empty storage group :param conn: the ecom connection :param controllerConfigService: controller config service :param storageGroupInstanceName: storage group instance name :param storageGroupName: storage group name :param extraSpecs: extra specifications """ rc, job = conn.InvokeMethod( 'DeleteGroup', controllerConfigService, MaskingGroup=storageGroupInstanceName, Force=True) if rc != 0: rc, errordesc = self.utils.wait_for_job_complete(conn, job, extraSpecs) if rc != 0: exceptionMessage = (_( "Error Deleting Group: %(storageGroupName)s. " "Return code: %(rc)lu. Error: %(error)s") % {'storageGroupName': storageGroupName, 'rc': rc, 'error': errordesc}) LOG.error(exceptionMessage) raise exception.VolumeBackendAPIException( data=exceptionMessage) def _create_hardware_ids( self, conn, initiatorNames, storageSystemName): """Create hardwareIds for initiator(s). 
:param conn: the connection to the ecom server :param initiatorNames: the list of initiator names :param storageSystemName: the storage system name :returns: list -- foundHardwareIDsInstanceNames """ foundHardwareIDsInstanceNames = [] hardwareIdManagementService = ( self.utils.find_storage_hardwareid_service( conn, storageSystemName)) for initiatorName in initiatorNames: hardwareIdInstanceName = ( self.utils.create_storage_hardwareId_instance_name( conn, hardwareIdManagementService, initiatorName)) LOG.debug( "Created hardwareId Instance: %(hardwareIdInstanceName)s.", {'hardwareIdInstanceName': hardwareIdInstanceName}) foundHardwareIDsInstanceNames.append(hardwareIdInstanceName) return foundHardwareIDsInstanceNames
codeparrot/github-code-clean
# pylint: disable=function-redefined
import codecs
import string
from enum import Enum
from itertools import accumulate
from typing import Callable, Iterable, List, Optional, Tuple, TypeVar, Union, cast

from prompt_toolkit.application.current import get_app
from prompt_toolkit.buffer import Buffer, indent, reshape_text, unindent
from prompt_toolkit.clipboard import ClipboardData
from prompt_toolkit.document import Document
from prompt_toolkit.filters import (
    Always,
    Condition,
    Filter,
    has_arg,
    is_read_only,
    is_searching,
)
from prompt_toolkit.filters.app import (
    in_paste_mode,
    is_multiline,
    vi_digraph_mode,
    vi_insert_mode,
    vi_insert_multiple_mode,
    vi_mode,
    vi_navigation_mode,
    vi_recording_macro,
    vi_replace_mode,
    vi_replace_single_mode,
    vi_search_direction_reversed,
    vi_selection_mode,
    vi_waiting_for_text_object_mode,
)
from prompt_toolkit.input.vt100_parser import Vt100Parser
from prompt_toolkit.key_binding.digraphs import DIGRAPHS
from prompt_toolkit.key_binding.key_processor import KeyPress, KeyPressEvent
from prompt_toolkit.key_binding.vi_state import CharacterFind, InputMode
from prompt_toolkit.keys import Keys
from prompt_toolkit.search import SearchDirection
from prompt_toolkit.selection import PasteMode, SelectionState, SelectionType

from ..key_bindings import ConditionalKeyBindings, KeyBindings, KeyBindingsBase
from .named_commands import get_by_name

__all__ = [
    "load_vi_bindings",
    "load_vi_search_bindings",
]

# Shorthand for the event type used by every handler in this module.
E = KeyPressEvent

ascii_lowercase = string.ascii_lowercase

# Valid names for Vi named registers: 'a'-'z' plus the numbered
# registers '0'-'9'.
vi_register_names = ascii_lowercase + "0123456789"


class TextObjectType(Enum):
    # How a motion selects text (mirrors Vi's exclusive/inclusive/
    # linewise/blockwise motion types).
    EXCLUSIVE = "EXCLUSIVE"
    INCLUSIVE = "INCLUSIVE"
    LINEWISE = "LINEWISE"
    BLOCK = "BLOCK"


class TextObject:
    """
    Return struct for functions wrapped in ``text_object``.
    Both `start` and `end` are relative to the current cursor position.
    """

    def __init__(
        self, start: int, end: int = 0, type: TextObjectType = TextObjectType.EXCLUSIVE
    ):
        self.start = start
        self.end = end
        self.type = type

    @property
    def selection_type(self) -> SelectionType:
        """Map this text object's motion type to a `SelectionType`."""
        if self.type == TextObjectType.LINEWISE:
            return SelectionType.LINES
        if self.type == TextObjectType.BLOCK:
            return SelectionType.BLOCK
        else:
            return SelectionType.CHARACTERS

    def sorted(self) -> Tuple[int, int]:
        """
        Return a (start, end) tuple where start <= end.
        """
        if self.start < self.end:
            return self.start, self.end
        else:
            return self.end, self.start

    def operator_range(self, document: Document) -> Tuple[int, int]:
        """
        Return a (start, end) tuple with start <= end that indicates the range
        operators should operate on.
        `buffer` is used to get start and end of line positions.

        This should return something that can be used in a slice, so the `end`
        position is *not* included.
        """
        start, end = self.sorted()
        doc = document

        if (
            self.type == TextObjectType.EXCLUSIVE
            and doc.translate_index_to_position(end + doc.cursor_position)[1] == 0
        ):
            # If the motion is exclusive and the end of motion is on the first
            # column, the end position becomes end of previous line.
            end -= 1
        if self.type == TextObjectType.INCLUSIVE:
            # Inclusive motions take the character under the end position too.
            end += 1
        if self.type == TextObjectType.LINEWISE:
            # Select whole lines: snap start to column 0 and end to the
            # end of its line.
            row, col = doc.translate_index_to_position(start + doc.cursor_position)
            start = doc.translate_row_col_to_index(row, 0) - doc.cursor_position
            row, col = doc.translate_index_to_position(end + doc.cursor_position)
            end = (
                doc.translate_row_col_to_index(row, len(doc.lines[row]))
                - doc.cursor_position
            )
        return start, end

    def get_line_numbers(self, buffer: Buffer) -> Tuple[int, int]:
        """
        Return a (start_line, end_line) pair.
        """
        # Get absolute cursor positions from the text object.
        from_, to = self.operator_range(buffer.document)
        from_ += buffer.cursor_position
        to += buffer.cursor_position

        # Take the start of the lines.
        from_, _ = buffer.document.translate_index_to_position(from_)
        to, _ = buffer.document.translate_index_to_position(to)

        return from_, to

    def cut(self, buffer: Buffer) -> Tuple[Document, ClipboardData]:
        """
        Turn text object into `ClipboardData` instance.
        """
        from_, to = self.operator_range(buffer.document)

        from_ += buffer.cursor_position
        to += buffer.cursor_position

        # For Vi mode, the SelectionState does include the upper position,
        # while `self.operator_range` does not. So, go one to the left, unless
        # we're in the line mode, then we don't want to risk going to the
        # previous line, and missing one line in the selection.
        if self.type != TextObjectType.LINEWISE:
            to -= 1

        document = Document(
            buffer.text,
            to,
            SelectionState(original_cursor_position=from_, type=self.selection_type),
        )

        new_document, clipboard_data = document.cut_selection()
        return new_document, clipboard_data


# Typevar for any text object function:
TextObjectFunction = Callable[[E], TextObject]
_TOF = TypeVar("_TOF", bound=TextObjectFunction)


def create_text_object_decorator(
    key_bindings: KeyBindings,
) -> Callable[..., Callable[[_TOF], _TOF]]:
    """
    Create a decorator that can be used to register Vi text object
    implementations.

    The returned decorator registers up to three bindings per text object:
    one that feeds a pending operator, and (optionally) plain move handlers
    for navigation mode and selection mode.
    """

    def text_object_decorator(
        *keys: Union[Keys, str],
        filter: Filter = Always(),
        no_move_handler: bool = False,
        no_selection_handler: bool = False,
        eager: bool = False,
    ) -> Callable[[_TOF], _TOF]:
        """
        Register a text object function.

        Usage::

            @text_object('w', filter=..., no_move_handler=False)
            def handler(event):
                # Return a text object for this key.
                return TextObject(...)

        :param no_move_handler: Disable the move handler in navigation mode.
            (It's still active in selection mode.)
        :param no_selection_handler: Disable the move handler in selection
            mode.
        """

        def decorator(text_object_func: _TOF) -> _TOF:
            @key_bindings.add(
                *keys, filter=vi_waiting_for_text_object_mode & filter, eager=eager
            )
            def _apply_operator_to_text_object(event: E) -> None:
                # Arguments are multiplied. (e.g. '2d3w' deletes six words.)
                vi_state = event.app.vi_state
                event._arg = str((vi_state.operator_arg or 1) * (event.arg or 1))

                # Call the text object handler.
                text_obj = text_object_func(event)

                # Get the operator function.
                # (Should never be None here, given the
                # `vi_waiting_for_text_object_mode` filter state.)
                operator_func = vi_state.operator_func

                if text_obj is not None and operator_func is not None:
                    # Call the operator function with the text object.
                    operator_func(event, text_obj)

                # Clear operator.
                event.app.vi_state.operator_func = None
                event.app.vi_state.operator_arg = None

            # Register a move operation. (Doesn't need an operator.)
            if not no_move_handler:

                @key_bindings.add(
                    *keys,
                    filter=~vi_waiting_for_text_object_mode
                    & filter
                    & vi_navigation_mode,
                    eager=eager,
                )
                def _move_in_navigation_mode(event: E) -> None:
                    """
                    Move handler for navigation mode.
                    """
                    text_object = text_object_func(event)
                    event.current_buffer.cursor_position += text_object.start

            # Register a move selection operation.
            if not no_selection_handler:

                @key_bindings.add(
                    *keys,
                    filter=~vi_waiting_for_text_object_mode
                    & filter
                    & vi_selection_mode,
                    eager=eager,
                )
                def _move_in_selection_mode(event: E) -> None:
                    """
                    Move handler for selection mode.
                    """
                    text_object = text_object_func(event)
                    buff = event.current_buffer
                    selection_state = buff.selection_state

                    if selection_state is None:
                        # Should not happen, because of the
                        # `vi_selection_mode` filter.
                        return

                    # When the text object has both a start and end position,
                    # like 'i(' or 'iw', turn this into a selection, otherwise
                    # only move the cursor.
                    if text_object.end:
                        # Take selection positions from text object.
                        start, end = text_object.operator_range(buff.document)
                        start += buff.cursor_position
                        end += buff.cursor_position

                        selection_state.original_cursor_position = start
                        buff.cursor_position = end

                        # Take selection type from text object.
                        if text_object.type == TextObjectType.LINEWISE:
                            selection_state.type = SelectionType.LINES
                        else:
                            selection_state.type = SelectionType.CHARACTERS
                    else:
                        event.current_buffer.cursor_position += text_object.start

            # Make it possible to chain @text_object decorators.
            return text_object_func

        return decorator

    return text_object_decorator


# Typevar for any operator function:
OperatorFunction = Callable[[E, TextObject], None]
_OF = TypeVar("_OF", bound=OperatorFunction)


def create_operator_decorator(
    key_bindings: KeyBindings,
) -> Callable[..., Callable[[_OF], _OF]]:
    """
    Create a decorator that can be used for registering Vi operators.

    In navigation mode the operator is stored on the ViState and executed
    later, once a text object arrives; in selection mode it is applied to
    the current selection immediately.
    """

    def operator_decorator(
        *keys: Union[Keys, str], filter: Filter = Always(), eager: bool = False
    ) -> Callable[[_OF], _OF]:
        """
        Register a Vi operator.

        Usage::

            @operator('d', filter=...)
            def handler(event, text_object):
                # Do something with the text object here.
        """

        def decorator(operator_func: _OF) -> _OF:
            @key_bindings.add(
                *keys,
                filter=~vi_waiting_for_text_object_mode & filter & vi_navigation_mode,
                eager=eager,
            )
            def _operator_in_navigation(event: E) -> None:
                """
                Handle operator in navigation mode.
                """
                # When this key binding is matched, only set the operator
                # function in the ViState. We should execute it after a text
                # object has been received.
                event.app.vi_state.operator_func = operator_func
                event.app.vi_state.operator_arg = event.arg

            @key_bindings.add(
                *keys,
                filter=~vi_waiting_for_text_object_mode & filter & vi_selection_mode,
                eager=eager,
            )
            def _operator_in_selection(event: E) -> None:
                """
                Handle operator in selection mode.
                """
                buff = event.current_buffer
                selection_state = buff.selection_state

                if selection_state is not None:
                    # Create text object from selection.
                    if selection_state.type == SelectionType.LINES:
                        text_obj_type = TextObjectType.LINEWISE
                    elif selection_state.type == SelectionType.BLOCK:
                        text_obj_type = TextObjectType.BLOCK
                    else:
                        text_obj_type = TextObjectType.INCLUSIVE

                    text_object = TextObject(
                        selection_state.original_cursor_position - buff.cursor_position,
                        type=text_obj_type,
                    )

                    # Execute operator.
                    operator_func(event, text_object)

                    # Quit selection mode.
                    buff.selection_state = None

            return operator_func

        return decorator

    return operator_decorator


def load_vi_bindings() -> KeyBindingsBase:
    """
    Vi extensions.

    # Overview of Readline Vi commands:
    # http://www.catonmat.net/download/bash-vi-editing-mode-cheat-sheet.pdf
    """
    # Note: Some key bindings have the "~IsReadOnly()" filter added. This
    #       prevents the handler to be executed when the focus is on a
    #       read-only buffer.
    #       This is however only required for those that change the ViState to
    #       INSERT mode. The `Buffer` class itself throws the
    #       `EditReadOnlyBuffer` exception for any text operations which is
    #       handled correctly. There is no need to add "~IsReadOnly" to all key
    #       bindings that do text manipulation.

    key_bindings = KeyBindings()
    handle = key_bindings.add

    # (Note: Always take the navigation bindings in read-only mode, even when
    #  ViState says different.)

    # (key sequence, enable filter, string transformation) triples used by
    # the 'g?'/'gu'/'gU'/'g~'/'~' transform operators below.
    TransformFunction = Tuple[Tuple[str, ...], Filter, Callable[[str], str]]

    vi_transform_functions: List[TransformFunction] = [
        # Rot 13 transformation
        (
            ("g", "?"),
            Always(),
            lambda string: cast(str, codecs.encode(string, "rot_13")),
        ),
        # To lowercase
        (("g", "u"), Always(), lambda string: string.lower()),
        # To uppercase.
        (("g", "U"), Always(), lambda string: string.upper()),
        # Swap case.
        (("g", "~"), Always(), lambda string: string.swapcase()),
        (
            ("~",),
            Condition(lambda: get_app().vi_state.tilde_operator),
            lambda string: string.swapcase(),
        ),
    ]

    # Insert a character literally (quoted insert).
handle("c-v", filter=vi_insert_mode)(get_by_name("quoted-insert")) @handle("escape") def _back_to_navigation(event: E) -> None: """ Escape goes to vi navigation mode. """ buffer = event.current_buffer vi_state = event.app.vi_state if vi_state.input_mode in (InputMode.INSERT, InputMode.REPLACE): buffer.cursor_position += buffer.document.get_cursor_left_position() vi_state.input_mode = InputMode.NAVIGATION if bool(buffer.selection_state): buffer.exit_selection() @handle("k", filter=vi_selection_mode) def _up_in_selection(event: E) -> None: """ Arrow up in selection mode. """ event.current_buffer.cursor_up(count=event.arg) @handle("j", filter=vi_selection_mode) def _down_in_selection(event: E) -> None: """ Arrow down in selection mode. """ event.current_buffer.cursor_down(count=event.arg) @handle("up", filter=vi_navigation_mode) @handle("c-p", filter=vi_navigation_mode) def _up_in_navigation(event: E) -> None: """ Arrow up and ControlP in navigation mode go up. """ event.current_buffer.auto_up(count=event.arg) @handle("k", filter=vi_navigation_mode) def _go_up(event: E) -> None: """ Go up, but if we enter a new history entry, move to the start of the line. """ event.current_buffer.auto_up( count=event.arg, go_to_start_of_line_if_history_changes=True ) @handle("down", filter=vi_navigation_mode) @handle("c-n", filter=vi_navigation_mode) def _go_down(event: E) -> None: """ Arrow down and Control-N in navigation mode. """ event.current_buffer.auto_down(count=event.arg) @handle("j", filter=vi_navigation_mode) def _go_down2(event: E) -> None: """ Go down, but if we enter a new history entry, go to the start of the line. """ event.current_buffer.auto_down( count=event.arg, go_to_start_of_line_if_history_changes=True ) @handle("backspace", filter=vi_navigation_mode) def _go_left(event: E) -> None: """ In navigation-mode, move cursor. 
""" event.current_buffer.cursor_position += event.current_buffer.document.get_cursor_left_position( count=event.arg ) @handle("c-n", filter=vi_insert_mode) def _complete_next(event: E) -> None: b = event.current_buffer if b.complete_state: b.complete_next() else: b.start_completion(select_first=True) @handle("c-p", filter=vi_insert_mode) def _complete_prev(event: E) -> None: """ Control-P: To previous completion. """ b = event.current_buffer if b.complete_state: b.complete_previous() else: b.start_completion(select_last=True) @handle("c-g", filter=vi_insert_mode) @handle("c-y", filter=vi_insert_mode) def _accept_completion(event: E) -> None: """ Accept current completion. """ event.current_buffer.complete_state = None @handle("c-e", filter=vi_insert_mode) def _cancel_completion(event: E) -> None: """ Cancel completion. Go back to originally typed text. """ event.current_buffer.cancel_completion() @Condition def is_returnable() -> bool: return get_app().current_buffer.is_returnable # In navigation mode, pressing enter will always return the input. handle("enter", filter=vi_navigation_mode & is_returnable)( get_by_name("accept-line") ) # In insert mode, also accept input when enter is pressed, and the buffer # has been marked as single line. handle("enter", filter=is_returnable & ~is_multiline)(get_by_name("accept-line")) @handle("enter", filter=~is_returnable & vi_navigation_mode) def _start_of_next_line(event: E) -> None: """ Go to the beginning of next line. """ b = event.current_buffer b.cursor_down(count=event.arg) b.cursor_position += b.document.get_start_of_line_position( after_whitespace=True ) # ** In navigation mode ** # List of navigation commands: http://hea-www.harvard.edu/~fine/Tech/vi.html @handle("insert", filter=vi_navigation_mode) def _insert_mode(event: E) -> None: """ Pressing the Insert key. 
""" event.app.vi_state.input_mode = InputMode.INSERT @handle("insert", filter=vi_insert_mode) def _navigation_mode(event: E) -> None: """ Pressing the Insert key. """ event.app.vi_state.input_mode = InputMode.NAVIGATION @handle("a", filter=vi_navigation_mode & ~is_read_only) # ~IsReadOnly, because we want to stay in navigation mode for # read-only buffers. def _a(event: E) -> None: event.current_buffer.cursor_position += ( event.current_buffer.document.get_cursor_right_position() ) event.app.vi_state.input_mode = InputMode.INSERT @handle("A", filter=vi_navigation_mode & ~is_read_only) def _A(event: E) -> None: event.current_buffer.cursor_position += ( event.current_buffer.document.get_end_of_line_position() ) event.app.vi_state.input_mode = InputMode.INSERT @handle("C", filter=vi_navigation_mode & ~is_read_only) def _change_until_end_of_line(event: E) -> None: """ Change to end of line. Same as 'c$' (which is implemented elsewhere.) """ buffer = event.current_buffer deleted = buffer.delete(count=buffer.document.get_end_of_line_position()) event.app.clipboard.set_text(deleted) event.app.vi_state.input_mode = InputMode.INSERT @handle("c", "c", filter=vi_navigation_mode & ~is_read_only) @handle("S", filter=vi_navigation_mode & ~is_read_only) def _change_current_line(event: E) -> None: # TODO: implement 'arg' """ Change current line """ buffer = event.current_buffer # We copy the whole line. data = ClipboardData(buffer.document.current_line, SelectionType.LINES) event.app.clipboard.set_data(data) # But we delete after the whitespace buffer.cursor_position += buffer.document.get_start_of_line_position( after_whitespace=True ) buffer.delete(count=buffer.document.get_end_of_line_position()) event.app.vi_state.input_mode = InputMode.INSERT @handle("D", filter=vi_navigation_mode) def _delete_until_end_of_line(event: E) -> None: """ Delete from cursor position until the end of the line. 
""" buffer = event.current_buffer deleted = buffer.delete(count=buffer.document.get_end_of_line_position()) event.app.clipboard.set_text(deleted) @handle("d", "d", filter=vi_navigation_mode) def _delete_line(event: E) -> None: """ Delete line. (Or the following 'n' lines.) """ buffer = event.current_buffer # Split string in before/deleted/after text. lines = buffer.document.lines before = "\n".join(lines[: buffer.document.cursor_position_row]) deleted = "\n".join( lines[ buffer.document.cursor_position_row : buffer.document.cursor_position_row + event.arg ] ) after = "\n".join(lines[buffer.document.cursor_position_row + event.arg :]) # Set new text. if before and after: before = before + "\n" # Set text and cursor position. buffer.document = Document( text=before + after, # Cursor At the start of the first 'after' line, after the leading whitespace. cursor_position=len(before) + len(after) - len(after.lstrip(" ")), ) # Set clipboard data event.app.clipboard.set_data(ClipboardData(deleted, SelectionType.LINES)) @handle("x", filter=vi_selection_mode) def _cut(event: E) -> None: """ Cut selection. ('x' is not an operator.) """ clipboard_data = event.current_buffer.cut_selection() event.app.clipboard.set_data(clipboard_data) @handle("i", filter=vi_navigation_mode & ~is_read_only) def _i(event: E) -> None: event.app.vi_state.input_mode = InputMode.INSERT @handle("I", filter=vi_navigation_mode & ~is_read_only) def _I(event: E) -> None: event.app.vi_state.input_mode = InputMode.INSERT event.current_buffer.cursor_position += event.current_buffer.document.get_start_of_line_position( after_whitespace=True ) @Condition def in_block_selection() -> bool: buff = get_app().current_buffer return bool( buff.selection_state and buff.selection_state.type == SelectionType.BLOCK ) @handle("I", filter=in_block_selection & ~is_read_only) def insert_in_block_selection(event: E, after: bool = False) -> None: """ Insert in block selection mode. 
""" buff = event.current_buffer # Store all cursor positions. positions = [] if after: def get_pos(from_to: Tuple[int, int]) -> int: return from_to[1] else: def get_pos(from_to: Tuple[int, int]) -> int: return from_to[0] for i, from_to in enumerate(buff.document.selection_ranges()): positions.append(get_pos(from_to)) if i == 0: buff.cursor_position = get_pos(from_to) buff.multiple_cursor_positions = positions # Go to 'INSERT_MULTIPLE' mode. event.app.vi_state.input_mode = InputMode.INSERT_MULTIPLE buff.exit_selection() @handle("A", filter=in_block_selection & ~is_read_only) def _append_after_block(event: E) -> None: insert_in_block_selection(event, after=True) @handle("J", filter=vi_navigation_mode & ~is_read_only) def _join(event: E) -> None: """ Join lines. """ for i in range(event.arg): event.current_buffer.join_next_line() @handle("g", "J", filter=vi_navigation_mode & ~is_read_only) def _join_nospace(event: E) -> None: """ Join lines without space. """ for i in range(event.arg): event.current_buffer.join_next_line(separator="") @handle("J", filter=vi_selection_mode & ~is_read_only) def _join_selection(event: E) -> None: """ Join selected lines. """ event.current_buffer.join_selected_lines() @handle("g", "J", filter=vi_selection_mode & ~is_read_only) def _join_selection_nospace(event: E) -> None: """ Join selected lines without space. """ event.current_buffer.join_selected_lines(separator="") @handle("p", filter=vi_navigation_mode) def _paste(event: E) -> None: """ Paste after """ event.current_buffer.paste_clipboard_data( event.app.clipboard.get_data(), count=event.arg, paste_mode=PasteMode.VI_AFTER, ) @handle("P", filter=vi_navigation_mode) def _paste_before(event: E) -> None: """ Paste before """ event.current_buffer.paste_clipboard_data( event.app.clipboard.get_data(), count=event.arg, paste_mode=PasteMode.VI_BEFORE, ) @handle('"', Keys.Any, "p", filter=vi_navigation_mode) def _paste_register(event: E) -> None: """ Paste from named register. 
""" c = event.key_sequence[1].data if c in vi_register_names: data = event.app.vi_state.named_registers.get(c) if data: event.current_buffer.paste_clipboard_data( data, count=event.arg, paste_mode=PasteMode.VI_AFTER ) @handle('"', Keys.Any, "P", filter=vi_navigation_mode) def _paste_register_before(event: E) -> None: """ Paste (before) from named register. """ c = event.key_sequence[1].data if c in vi_register_names: data = event.app.vi_state.named_registers.get(c) if data: event.current_buffer.paste_clipboard_data( data, count=event.arg, paste_mode=PasteMode.VI_BEFORE ) @handle("r", filter=vi_navigation_mode) def _replace(event: E) -> None: """ Go to 'replace-single'-mode. """ event.app.vi_state.input_mode = InputMode.REPLACE_SINGLE @handle("R", filter=vi_navigation_mode) def _replace_mode(event: E) -> None: """ Go to 'replace'-mode. """ event.app.vi_state.input_mode = InputMode.REPLACE @handle("s", filter=vi_navigation_mode & ~is_read_only) def _substitute(event: E) -> None: """ Substitute with new text (Delete character(s) and go to insert mode.) """ text = event.current_buffer.delete(count=event.arg) event.app.clipboard.set_text(text) event.app.vi_state.input_mode = InputMode.INSERT @handle("u", filter=vi_navigation_mode, save_before=(lambda e: False)) def _undo(event: E) -> None: for i in range(event.arg): event.current_buffer.undo() @handle("V", filter=vi_navigation_mode) def _visual_line(event: E) -> None: """ Start lines selection. """ event.current_buffer.start_selection(selection_type=SelectionType.LINES) @handle("c-v", filter=vi_navigation_mode) def _visual_block(event: E) -> None: """ Enter block selection mode. """ event.current_buffer.start_selection(selection_type=SelectionType.BLOCK) @handle("V", filter=vi_selection_mode) def _visual_line2(event: E) -> None: """ Exit line selection mode, or go from non line selection mode to line selection mode. 
""" selection_state = event.current_buffer.selection_state if selection_state is not None: if selection_state.type != SelectionType.LINES: selection_state.type = SelectionType.LINES else: event.current_buffer.exit_selection() @handle("v", filter=vi_navigation_mode) def _visual(event: E) -> None: """ Enter character selection mode. """ event.current_buffer.start_selection(selection_type=SelectionType.CHARACTERS) @handle("v", filter=vi_selection_mode) def _visual2(event: E) -> None: """ Exit character selection mode, or go from non-character-selection mode to character selection mode. """ selection_state = event.current_buffer.selection_state if selection_state is not None: if selection_state.type != SelectionType.CHARACTERS: selection_state.type = SelectionType.CHARACTERS else: event.current_buffer.exit_selection() @handle("c-v", filter=vi_selection_mode) def _visual_block2(event: E) -> None: """ Exit block selection mode, or go from non block selection mode to block selection mode. """ selection_state = event.current_buffer.selection_state if selection_state is not None: if selection_state.type != SelectionType.BLOCK: selection_state.type = SelectionType.BLOCK else: event.current_buffer.exit_selection() @handle("a", "w", filter=vi_selection_mode) @handle("a", "W", filter=vi_selection_mode) def _visual_auto_word(event: E) -> None: """ Switch from visual linewise mode to visual characterwise mode. """ buffer = event.current_buffer if ( buffer.selection_state and buffer.selection_state.type == SelectionType.LINES ): buffer.selection_state.type = SelectionType.CHARACTERS @handle("x", filter=vi_navigation_mode) def _delete(event: E) -> None: """ Delete character. 
""" buff = event.current_buffer count = min(event.arg, len(buff.document.current_line_after_cursor)) if count: text = event.current_buffer.delete(count=count) event.app.clipboard.set_text(text) @handle("X", filter=vi_navigation_mode) def _delete_before_cursor(event: E) -> None: buff = event.current_buffer count = min(event.arg, len(buff.document.current_line_before_cursor)) if count: text = event.current_buffer.delete_before_cursor(count=count) event.app.clipboard.set_text(text) @handle("y", "y", filter=vi_navigation_mode) @handle("Y", filter=vi_navigation_mode) def _yank_line(event: E) -> None: """ Yank the whole line. """ text = "\n".join(event.current_buffer.document.lines_from_current[: event.arg]) event.app.clipboard.set_data(ClipboardData(text, SelectionType.LINES)) @handle("+", filter=vi_navigation_mode) def _next_line(event: E) -> None: """ Move to first non whitespace of next line """ buffer = event.current_buffer buffer.cursor_position += buffer.document.get_cursor_down_position( count=event.arg ) buffer.cursor_position += buffer.document.get_start_of_line_position( after_whitespace=True ) @handle("-", filter=vi_navigation_mode) def _prev_line(event: E) -> None: """ Move to first non whitespace of previous line """ buffer = event.current_buffer buffer.cursor_position += buffer.document.get_cursor_up_position( count=event.arg ) buffer.cursor_position += buffer.document.get_start_of_line_position( after_whitespace=True ) @handle(">", ">", filter=vi_navigation_mode) def _indent(event: E) -> None: """ Indent lines. """ buffer = event.current_buffer current_row = buffer.document.cursor_position_row indent(buffer, current_row, current_row + event.arg) @handle("<", "<", filter=vi_navigation_mode) def _unindent(event: E) -> None: """ Unindent lines. 
""" current_row = event.current_buffer.document.cursor_position_row unindent(event.current_buffer, current_row, current_row + event.arg) @handle("O", filter=vi_navigation_mode & ~is_read_only) def _open_above(event: E) -> None: """ Open line above and enter insertion mode """ event.current_buffer.insert_line_above(copy_margin=not in_paste_mode()) event.app.vi_state.input_mode = InputMode.INSERT @handle("o", filter=vi_navigation_mode & ~is_read_only) def _open_below(event: E) -> None: """ Open line below and enter insertion mode """ event.current_buffer.insert_line_below(copy_margin=not in_paste_mode()) event.app.vi_state.input_mode = InputMode.INSERT @handle("~", filter=vi_navigation_mode) def _reverse_case(event: E) -> None: """ Reverse case of current character and move cursor forward. """ buffer = event.current_buffer c = buffer.document.current_char if c is not None and c != "\n": buffer.insert_text(c.swapcase(), overwrite=True) @handle("g", "u", "u", filter=vi_navigation_mode & ~is_read_only) def _lowercase_line(event: E) -> None: """ Lowercase current line. """ buff = event.current_buffer buff.transform_current_line(lambda s: s.lower()) @handle("g", "U", "U", filter=vi_navigation_mode & ~is_read_only) def _uppercase_line(event: E) -> None: """ Uppercase current line. """ buff = event.current_buffer buff.transform_current_line(lambda s: s.upper()) @handle("g", "~", "~", filter=vi_navigation_mode & ~is_read_only) def _swapcase_line(event: E) -> None: """ Swap case of the current line. """ buff = event.current_buffer buff.transform_current_line(lambda s: s.swapcase()) @handle("#", filter=vi_navigation_mode) def _prev_occurence(event: E) -> None: """ Go to previous occurrence of this word. 
""" b = event.current_buffer search_state = event.app.current_search_state search_state.text = b.document.get_word_under_cursor() search_state.direction = SearchDirection.BACKWARD b.apply_search(search_state, count=event.arg, include_current_position=False) @handle("*", filter=vi_navigation_mode) def _next_occurance(event: E) -> None: """ Go to next occurrence of this word. """ b = event.current_buffer search_state = event.app.current_search_state search_state.text = b.document.get_word_under_cursor() search_state.direction = SearchDirection.FORWARD b.apply_search(search_state, count=event.arg, include_current_position=False) @handle("(", filter=vi_navigation_mode) def _begin_of_sentence(event: E) -> None: # TODO: go to begin of sentence. # XXX: should become text_object. pass @handle(")", filter=vi_navigation_mode) def _end_of_sentence(event: E) -> None: # TODO: go to end of sentence. # XXX: should become text_object. pass operator = create_operator_decorator(key_bindings) text_object = create_text_object_decorator(key_bindings) @handle(Keys.Any, filter=vi_waiting_for_text_object_mode) def _unknown_text_object(event: E) -> None: """ Unknown key binding while waiting for a text object. """ event.app.output.bell() # # *** Operators *** # def create_delete_and_change_operators( delete_only: bool, with_register: bool = False ) -> None: """ Delete and change operators. :param delete_only: Create an operator that deletes, but doesn't go to insert mode. :param with_register: Copy the deleted text to this named register instead of the clipboard. 
""" handler_keys: Iterable[str] if with_register: handler_keys = ('"', Keys.Any, "cd"[delete_only]) else: handler_keys = "cd"[delete_only] @operator(*handler_keys, filter=~is_read_only) def delete_or_change_operator(event: E, text_object: TextObject) -> None: clipboard_data = None buff = event.current_buffer if text_object: new_document, clipboard_data = text_object.cut(buff) buff.document = new_document # Set deleted/changed text to clipboard or named register. if clipboard_data and clipboard_data.text: if with_register: reg_name = event.key_sequence[1].data if reg_name in vi_register_names: event.app.vi_state.named_registers[reg_name] = clipboard_data else: event.app.clipboard.set_data(clipboard_data) # Only go back to insert mode in case of 'change'. if not delete_only: event.app.vi_state.input_mode = InputMode.INSERT create_delete_and_change_operators(False, False) create_delete_and_change_operators(False, True) create_delete_and_change_operators(True, False) create_delete_and_change_operators(True, True) def create_transform_handler( filter: Filter, transform_func: Callable[[str], str], *a: str ) -> None: @operator(*a, filter=filter & ~is_read_only) def _(event: E, text_object: TextObject) -> None: """ Apply transformation (uppercase, lowercase, rot13, swap case). """ buff = event.current_buffer start, end = text_object.operator_range(buff.document) if start < end: # Transform. buff.transform_region( buff.cursor_position + start, buff.cursor_position + end, transform_func, ) # Move cursor buff.cursor_position += text_object.end or text_object.start for k, f, func in vi_transform_functions: create_transform_handler(f, func, *k) @operator("y") def _yank(event: E, text_object: TextObject) -> None: """ Yank operator. (Copy text.) 
""" _, clipboard_data = text_object.cut(event.current_buffer) if clipboard_data.text: event.app.clipboard.set_data(clipboard_data) @operator('"', Keys.Any, "y") def _yank_to_register(event: E, text_object: TextObject) -> None: """ Yank selection to named register. """ c = event.key_sequence[1].data if c in vi_register_names: _, clipboard_data = text_object.cut(event.current_buffer) event.app.vi_state.named_registers[c] = clipboard_data @operator(">") def _indent_text_object(event: E, text_object: TextObject) -> None: """ Indent. """ buff = event.current_buffer from_, to = text_object.get_line_numbers(buff) indent(buff, from_, to + 1, count=event.arg) @operator("<") def _unindent_text_object(event: E, text_object: TextObject) -> None: """ Unindent. """ buff = event.current_buffer from_, to = text_object.get_line_numbers(buff) unindent(buff, from_, to + 1, count=event.arg) @operator("g", "q") def _reshape(event: E, text_object: TextObject) -> None: """ Reshape text. """ buff = event.current_buffer from_, to = text_object.get_line_numbers(buff) reshape_text(buff, from_, to) # # *** Text objects *** # @text_object("b") def _b(event: E) -> TextObject: """ Move one word or token left. """ return TextObject( event.current_buffer.document.find_start_of_previous_word(count=event.arg) or 0 ) @text_object("B") def _B(event: E) -> TextObject: """ Move one non-blank word left """ return TextObject( event.current_buffer.document.find_start_of_previous_word( count=event.arg, WORD=True ) or 0 ) @text_object("$") def _dollar(event: E) -> TextObject: """ 'c$', 'd$' and '$': Delete/change/move until end of line. """ return TextObject(event.current_buffer.document.get_end_of_line_position()) @text_object("w") def _word_forward(event: E) -> TextObject: """ 'word' forward. 'cw', 'dw', 'w': Delete/change/move one word. 
""" return TextObject( event.current_buffer.document.find_next_word_beginning(count=event.arg) or event.current_buffer.document.get_end_of_document_position() ) @text_object("W") def _WORD_forward(event: E) -> TextObject: """ 'WORD' forward. 'cW', 'dW', 'W': Delete/change/move one WORD. """ return TextObject( event.current_buffer.document.find_next_word_beginning( count=event.arg, WORD=True ) or event.current_buffer.document.get_end_of_document_position() ) @text_object("e") def _end_of_word(event: E) -> TextObject: """ End of 'word': 'ce', 'de', 'e' """ end = event.current_buffer.document.find_next_word_ending(count=event.arg) return TextObject(end - 1 if end else 0, type=TextObjectType.INCLUSIVE) @text_object("E") def _end_of_WORD(event: E) -> TextObject: """ End of 'WORD': 'cE', 'dE', 'E' """ end = event.current_buffer.document.find_next_word_ending( count=event.arg, WORD=True ) return TextObject(end - 1 if end else 0, type=TextObjectType.INCLUSIVE) @text_object("i", "w", no_move_handler=True) def _inner_word(event: E) -> TextObject: """ Inner 'word': ciw and diw """ start, end = event.current_buffer.document.find_boundaries_of_current_word() return TextObject(start, end) @text_object("a", "w", no_move_handler=True) def _a_word(event: E) -> TextObject: """ A 'word': caw and daw """ start, end = event.current_buffer.document.find_boundaries_of_current_word( include_trailing_whitespace=True ) return TextObject(start, end) @text_object("i", "W", no_move_handler=True) def _inner_WORD(event: E) -> TextObject: """ Inner 'WORD': ciW and diW """ start, end = event.current_buffer.document.find_boundaries_of_current_word( WORD=True ) return TextObject(start, end) @text_object("a", "W", no_move_handler=True) def _a_WORD(event: E) -> TextObject: """ A 'WORD': caw and daw """ start, end = event.current_buffer.document.find_boundaries_of_current_word( WORD=True, include_trailing_whitespace=True ) return TextObject(start, end) @text_object("a", "p", no_move_handler=True) def 
_paragraph(event: E) -> TextObject: """ Auto paragraph. """ start = event.current_buffer.document.start_of_paragraph() end = event.current_buffer.document.end_of_paragraph(count=event.arg) return TextObject(start, end) @text_object("^") def _start_of_line(event: E) -> TextObject: """ 'c^', 'd^' and '^': Soft start of line, after whitespace. """ return TextObject( event.current_buffer.document.get_start_of_line_position( after_whitespace=True ) ) @text_object("0") def _hard_start_of_line(event: E) -> TextObject: """ 'c0', 'd0': Hard start of line, before whitespace. (The move '0' key is implemented elsewhere, because a '0' could also change the `arg`.) """ return TextObject( event.current_buffer.document.get_start_of_line_position( after_whitespace=False ) ) def create_ci_ca_handles( ci_start: str, ci_end: str, inner: bool, key: Optional[str] = None ) -> None: # TODO: 'dat', 'dit', (tags (like xml) """ Delete/Change string between this start and stop character. But keep these characters. This implements all the ci", ci<, ci{, ci(, di", di<, ca", ca<, ... combinations. """ def handler(event: E) -> TextObject: if ci_start == ci_end: # Quotes start = event.current_buffer.document.find_backwards( ci_start, in_current_line=False ) end = event.current_buffer.document.find(ci_end, in_current_line=False) else: # Brackets start = event.current_buffer.document.find_enclosing_bracket_left( ci_start, ci_end ) end = event.current_buffer.document.find_enclosing_bracket_right( ci_start, ci_end ) if start is not None and end is not None: offset = 0 if inner else 1 return TextObject(start + 1 - offset, end + offset) else: # Nothing found. 
return TextObject(0) if key is None: text_object("ai"[inner], ci_start, no_move_handler=True)(handler) text_object("ai"[inner], ci_end, no_move_handler=True)(handler) else: text_object("ai"[inner], key, no_move_handler=True)(handler) for inner in (False, True): for ci_start, ci_end in [ ('"', '"'), ("'", "'"), ("`", "`"), ("[", "]"), ("<", ">"), ("{", "}"), ("(", ")"), ]: create_ci_ca_handles(ci_start, ci_end, inner) create_ci_ca_handles("(", ")", inner, "b") # 'dab', 'dib' create_ci_ca_handles("{", "}", inner, "B") # 'daB', 'diB' @text_object("{") def _previous_section(event: E) -> TextObject: """ Move to previous blank-line separated section. Implements '{', 'c{', 'd{', 'y{' """ index = event.current_buffer.document.start_of_paragraph( count=event.arg, before=True ) return TextObject(index) @text_object("}") def _next_section(event: E) -> TextObject: """ Move to next blank-line separated section. Implements '}', 'c}', 'd}', 'y}' """ index = event.current_buffer.document.end_of_paragraph( count=event.arg, after=True ) return TextObject(index) @text_object("f", Keys.Any) def _next_occurence(event: E) -> TextObject: """ Go to next occurrence of character. Typing 'fx' will move the cursor to the next occurrence of character. 'x'. """ event.app.vi_state.last_character_find = CharacterFind(event.data, False) match = event.current_buffer.document.find( event.data, in_current_line=True, count=event.arg ) if match: return TextObject(match, type=TextObjectType.INCLUSIVE) else: return TextObject(0) @text_object("F", Keys.Any) def _previous_occurance(event: E) -> TextObject: """ Go to previous occurrence of character. Typing 'Fx' will move the cursor to the previous occurrence of character. 'x'. 
""" event.app.vi_state.last_character_find = CharacterFind(event.data, True) return TextObject( event.current_buffer.document.find_backwards( event.data, in_current_line=True, count=event.arg ) or 0 ) @text_object("t", Keys.Any) def _t(event: E) -> TextObject: """ Move right to the next occurrence of c, then one char backward. """ event.app.vi_state.last_character_find = CharacterFind(event.data, False) match = event.current_buffer.document.find( event.data, in_current_line=True, count=event.arg ) if match: return TextObject(match - 1, type=TextObjectType.INCLUSIVE) else: return TextObject(0) @text_object("T", Keys.Any) def _T(event: E) -> TextObject: """ Move left to the previous occurrence of c, then one char forward. """ event.app.vi_state.last_character_find = CharacterFind(event.data, True) match = event.current_buffer.document.find_backwards( event.data, in_current_line=True, count=event.arg ) return TextObject(match + 1 if match else 0) def repeat(reverse: bool) -> None: """ Create ',' and ';' commands. """ @text_object("," if reverse else ";") def _(event: E) -> TextObject: """ Repeat the last 'f'/'F'/'t'/'T' command. """ pos: Optional[int] = 0 vi_state = event.app.vi_state type = TextObjectType.EXCLUSIVE if vi_state.last_character_find: char = vi_state.last_character_find.character backwards = vi_state.last_character_find.backwards if reverse: backwards = not backwards if backwards: pos = event.current_buffer.document.find_backwards( char, in_current_line=True, count=event.arg ) else: pos = event.current_buffer.document.find( char, in_current_line=True, count=event.arg ) type = TextObjectType.INCLUSIVE if pos: return TextObject(pos, type=type) else: return TextObject(0) repeat(True) repeat(False) @text_object("h") @text_object("left") def _left(event: E) -> TextObject: """ Implements 'ch', 'dh', 'h': Cursor left. 
""" return TextObject( event.current_buffer.document.get_cursor_left_position(count=event.arg) ) @text_object("j", no_move_handler=True, no_selection_handler=True) # Note: We also need `no_selection_handler`, because we in # selection mode, we prefer the other 'j' binding that keeps # `buffer.preferred_column`. def _down(event: E) -> TextObject: """ Implements 'cj', 'dj', 'j', ... Cursor up. """ return TextObject( event.current_buffer.document.get_cursor_down_position(count=event.arg), type=TextObjectType.LINEWISE, ) @text_object("k", no_move_handler=True, no_selection_handler=True) def _up(event: E) -> TextObject: """ Implements 'ck', 'dk', 'k', ... Cursor up. """ return TextObject( event.current_buffer.document.get_cursor_up_position(count=event.arg), type=TextObjectType.LINEWISE, ) @text_object("l") @text_object(" ") @text_object("right") def _right(event: E) -> TextObject: """ Implements 'cl', 'dl', 'l', 'c ', 'd ', ' '. Cursor right. """ return TextObject( event.current_buffer.document.get_cursor_right_position(count=event.arg) ) @text_object("H") def _top_of_screen(event: E) -> TextObject: """ Moves to the start of the visible region. (Below the scroll offset.) Implements 'cH', 'dH', 'H'. """ w = event.app.layout.current_window b = event.current_buffer if w and w.render_info: # When we find a Window that has BufferControl showing this window, # move to the start of the visible area. pos = ( b.document.translate_row_col_to_index( w.render_info.first_visible_line(after_scroll_offset=True), 0 ) - b.cursor_position ) else: # Otherwise, move to the start of the input. pos = -len(b.document.text_before_cursor) return TextObject(pos, type=TextObjectType.LINEWISE) @text_object("M") def _middle_of_screen(event: E) -> TextObject: """ Moves cursor to the vertical center of the visible region. Implements 'cM', 'dM', 'M'. 
""" w = event.app.layout.current_window b = event.current_buffer if w and w.render_info: # When we find a Window that has BufferControl showing this window, # move to the center of the visible area. pos = ( b.document.translate_row_col_to_index( w.render_info.center_visible_line(), 0 ) - b.cursor_position ) else: # Otherwise, move to the start of the input. pos = -len(b.document.text_before_cursor) return TextObject(pos, type=TextObjectType.LINEWISE) @text_object("L") def _end_of_screen(event: E) -> TextObject: """ Moves to the end of the visible region. (Above the scroll offset.) """ w = event.app.layout.current_window b = event.current_buffer if w and w.render_info: # When we find a Window that has BufferControl showing this window, # move to the end of the visible area. pos = ( b.document.translate_row_col_to_index( w.render_info.last_visible_line(before_scroll_offset=True), 0 ) - b.cursor_position ) else: # Otherwise, move to the end of the input. pos = len(b.document.text_after_cursor) return TextObject(pos, type=TextObjectType.LINEWISE) @text_object("n", no_move_handler=True) def _search_next(event: E) -> TextObject: """ Search next. """ buff = event.current_buffer search_state = event.app.current_search_state cursor_position = buff.get_search_position( search_state, include_current_position=False, count=event.arg ) return TextObject(cursor_position - buff.cursor_position) @handle("n", filter=vi_navigation_mode) def _search_next2(event: E) -> None: """ Search next in navigation mode. (This goes through the history.) """ search_state = event.app.current_search_state event.current_buffer.apply_search( search_state, include_current_position=False, count=event.arg ) @text_object("N", no_move_handler=True) def _search_previous(event: E) -> TextObject: """ Search previous. 
""" buff = event.current_buffer search_state = event.app.current_search_state cursor_position = buff.get_search_position( ~search_state, include_current_position=False, count=event.arg ) return TextObject(cursor_position - buff.cursor_position) @handle("N", filter=vi_navigation_mode) def _search_previous2(event: E) -> None: """ Search previous in navigation mode. (This goes through the history.) """ search_state = event.app.current_search_state event.current_buffer.apply_search( ~search_state, include_current_position=False, count=event.arg ) @handle("z", "+", filter=vi_navigation_mode | vi_selection_mode) @handle("z", "t", filter=vi_navigation_mode | vi_selection_mode) @handle("z", "enter", filter=vi_navigation_mode | vi_selection_mode) def _scroll_top(event: E) -> None: """ Scrolls the window to makes the current line the first line in the visible region. """ b = event.current_buffer event.app.layout.current_window.vertical_scroll = b.document.cursor_position_row @handle("z", "-", filter=vi_navigation_mode | vi_selection_mode) @handle("z", "b", filter=vi_navigation_mode | vi_selection_mode) def _scroll_bottom(event: E) -> None: """ Scrolls the window to makes the current line the last line in the visible region. """ # We can safely set the scroll offset to zero; the Window will make # sure that it scrolls at least enough to make the cursor visible # again. event.app.layout.current_window.vertical_scroll = 0 @handle("z", "z", filter=vi_navigation_mode | vi_selection_mode) def _scroll_center(event: E) -> None: """ Center Window vertically around cursor. """ w = event.app.layout.current_window b = event.current_buffer if w and w.render_info: info = w.render_info # Calculate the offset that we need in order to position the row # containing the cursor in the center. 
scroll_height = info.window_height // 2 y = max(0, b.document.cursor_position_row - 1) height = 0 while y > 0: line_height = info.get_height_for_line(y) if height + line_height < scroll_height: height += line_height y -= 1 else: break w.vertical_scroll = y @text_object("%") def _goto_corresponding_bracket(event: E) -> TextObject: """ Implements 'c%', 'd%', '%, 'y%' (Move to corresponding bracket.) If an 'arg' has been given, go this this % position in the file. """ buffer = event.current_buffer if event._arg: # If 'arg' has been given, the meaning of % is to go to the 'x%' # row in the file. if 0 < event.arg <= 100: absolute_index = buffer.document.translate_row_col_to_index( int((event.arg * buffer.document.line_count - 1) / 100), 0 ) return TextObject( absolute_index - buffer.document.cursor_position, type=TextObjectType.LINEWISE, ) else: return TextObject(0) # Do nothing. else: # Move to the corresponding opening/closing bracket (()'s, []'s and {}'s). match = buffer.document.find_matching_bracket_position() if match: return TextObject(match, type=TextObjectType.INCLUSIVE) else: return TextObject(0) @text_object("|") def _to_column(event: E) -> TextObject: """ Move to the n-th column (you may specify the argument n by typing it on number keys, for example, 20|). """ return TextObject( event.current_buffer.document.get_column_cursor_position(event.arg - 1) ) @text_object("g", "g") def _goto_first_line(event: E) -> TextObject: """ Go to the start of the very first line. Implements 'gg', 'cgg', 'ygg' """ d = event.current_buffer.document if event._arg: # Move to the given line. return TextObject( d.translate_row_col_to_index(event.arg - 1, 0) - d.cursor_position, type=TextObjectType.LINEWISE, ) else: # Move to the top of the input. return TextObject( d.get_start_of_document_position(), type=TextObjectType.LINEWISE ) @text_object("g", "_") def _goto_last_line(event: E) -> TextObject: """ Go to last non-blank of line. 'g_', 'cg_', 'yg_', etc.. 
""" return TextObject( event.current_buffer.document.last_non_blank_of_current_line_position(), type=TextObjectType.INCLUSIVE, ) @text_object("g", "e") def _ge(event: E) -> TextObject: """ Go to last character of previous word. 'ge', 'cge', 'yge', etc.. """ prev_end = event.current_buffer.document.find_previous_word_ending( count=event.arg ) return TextObject( prev_end - 1 if prev_end is not None else 0, type=TextObjectType.INCLUSIVE ) @text_object("g", "E") def _gE(event: E) -> TextObject: """ Go to last character of previous WORD. 'gE', 'cgE', 'ygE', etc.. """ prev_end = event.current_buffer.document.find_previous_word_ending( count=event.arg, WORD=True ) return TextObject( prev_end - 1 if prev_end is not None else 0, type=TextObjectType.INCLUSIVE ) @text_object("g", "m") def _gm(event: E) -> TextObject: """ Like g0, but half a screenwidth to the right. (Or as much as possible.) """ w = event.app.layout.current_window buff = event.current_buffer if w and w.render_info: width = w.render_info.window_width start = buff.document.get_start_of_line_position(after_whitespace=False) start += int(min(width / 2, len(buff.document.current_line))) return TextObject(start, type=TextObjectType.INCLUSIVE) return TextObject(0) @text_object("G") def _last_line(event: E) -> TextObject: """ Go to the end of the document. (If no arg has been given.) """ buf = event.current_buffer return TextObject( buf.document.translate_row_col_to_index(buf.document.line_count - 1, 0) - buf.cursor_position, type=TextObjectType.LINEWISE, ) # # *** Other *** # @handle("G", filter=has_arg) def _to_nth_history_line(event: E) -> None: """ If an argument is given, move to this line in the history. (for example, 15G) """ event.current_buffer.go_to_history(event.arg - 1) for n in "123456789": @handle( n, filter=vi_navigation_mode | vi_selection_mode | vi_waiting_for_text_object_mode, ) def _arg(event: E) -> None: """ Always handle numberics in navigation mode as arg. 
""" event.append_to_arg_count(event.data) @handle( "0", filter=( vi_navigation_mode | vi_selection_mode | vi_waiting_for_text_object_mode ) & has_arg, ) def _0_arg(event: E) -> None: """ Zero when an argument was already give. """ event.append_to_arg_count(event.data) @handle(Keys.Any, filter=vi_replace_mode) def _insert_text(event: E) -> None: """ Insert data at cursor position. """ event.current_buffer.insert_text(event.data, overwrite=True) @handle(Keys.Any, filter=vi_replace_single_mode) def _replace_single(event: E) -> None: """ Replace single character at cursor position. """ event.current_buffer.insert_text(event.data, overwrite=True) event.current_buffer.cursor_position -= 1 event.app.vi_state.input_mode = InputMode.NAVIGATION @handle( Keys.Any, filter=vi_insert_multiple_mode, save_before=(lambda e: not e.is_repeat), ) def _insert_text_multiple_cursors(event: E) -> None: """ Insert data at multiple cursor positions at once. (Usually a result of pressing 'I' or 'A' in block-selection mode.) """ buff = event.current_buffer original_text = buff.text # Construct new text. text = [] p = 0 for p2 in buff.multiple_cursor_positions: text.append(original_text[p:p2]) text.append(event.data) p = p2 text.append(original_text[p:]) # Shift all cursor positions. new_cursor_positions = [ pos + i + 1 for i, pos in enumerate(buff.multiple_cursor_positions) ] # Set result. buff.text = "".join(text) buff.multiple_cursor_positions = new_cursor_positions buff.cursor_position += 1 @handle("backspace", filter=vi_insert_multiple_mode) def _delete_before_multiple_cursors(event: E) -> None: """ Backspace, using multiple cursors. """ buff = event.current_buffer original_text = buff.text # Construct new text. deleted_something = False text = [] p = 0 for p2 in buff.multiple_cursor_positions: if p2 > 0 and original_text[p2 - 1] != "\n": # Don't delete across lines. 
text.append(original_text[p : p2 - 1]) deleted_something = True else: text.append(original_text[p:p2]) p = p2 text.append(original_text[p:]) if deleted_something: # Shift all cursor positions. lengths = [len(part) for part in text[:-1]] new_cursor_positions = list(accumulate(lengths)) # Set result. buff.text = "".join(text) buff.multiple_cursor_positions = new_cursor_positions buff.cursor_position -= 1 else: event.app.output.bell() @handle("delete", filter=vi_insert_multiple_mode) def _delete_after_multiple_cursors(event): """ Delete, using multiple cursors. """ buff = event.current_buffer original_text = buff.text # Construct new text. deleted_something = False text = [] new_cursor_positions = [] p = 0 for p2 in buff.multiple_cursor_positions: text.append(original_text[p:p2]) if p2 >= len(original_text) or original_text[p2] == "\n": # Don't delete across lines. p = p2 else: p = p2 + 1 deleted_something = True text.append(original_text[p:]) if deleted_something: # Shift all cursor positions. lengths = [len(part) for part in text[:-1]] new_cursor_positions = list(accumulate(lengths)) # Set result. buff.text = "".join(text) buff.multiple_cursor_positions = new_cursor_positions else: event.app.output.bell() @handle("left", filter=vi_insert_multiple_mode) def _left_multiple(event: E) -> None: """ Move all cursors to the left. (But keep all cursors on the same line.) """ buff = event.current_buffer new_positions = [] for p in buff.multiple_cursor_positions: if buff.document.translate_index_to_position(p)[1] > 0: p -= 1 new_positions.append(p) buff.multiple_cursor_positions = new_positions if buff.document.cursor_position_col > 0: buff.cursor_position -= 1 @handle("right", filter=vi_insert_multiple_mode) def _right_multiple(event: E) -> None: """ Move all cursors to the right. (But keep all cursors on the same line.) 
""" buff = event.current_buffer new_positions = [] for p in buff.multiple_cursor_positions: row, column = buff.document.translate_index_to_position(p) if column < len(buff.document.lines[row]): p += 1 new_positions.append(p) buff.multiple_cursor_positions = new_positions if not buff.document.is_cursor_at_the_end_of_line: buff.cursor_position += 1 @handle("up", filter=vi_insert_multiple_mode) @handle("down", filter=vi_insert_multiple_mode) def _updown_multiple(event: E) -> None: """ Ignore all up/down key presses when in multiple cursor mode. """ @handle("c-x", "c-l", filter=vi_insert_mode) def _complete_line(event: E) -> None: """ Pressing the ControlX - ControlL sequence in Vi mode does line completion based on the other lines in the document and the history. """ event.current_buffer.start_history_lines_completion() @handle("c-x", "c-f", filter=vi_insert_mode) def _complete_filename(event: E) -> None: """ Complete file names. """ # TODO pass @handle("c-k", filter=vi_insert_mode | vi_replace_mode) def _digraph(event: E) -> None: """ Go into digraph mode. """ event.app.vi_state.waiting_for_digraph = True @Condition def digraph_symbol_1_given() -> bool: return get_app().vi_state.digraph_symbol1 is not None @handle(Keys.Any, filter=vi_digraph_mode & ~digraph_symbol_1_given) def _digraph1(event: E) -> None: """ First digraph symbol. """ event.app.vi_state.digraph_symbol1 = event.data @handle(Keys.Any, filter=vi_digraph_mode & digraph_symbol_1_given) def _create_digraph(event: E) -> None: """ Insert digraph. """ try: # Lookup. code: Tuple[str, str] = ( event.app.vi_state.digraph_symbol1 or "", event.data, ) if code not in DIGRAPHS: code = code[::-1] # Try reversing. symbol = DIGRAPHS[code] except KeyError: # Unknown digraph. event.app.output.bell() else: # Insert digraph. 
overwrite = event.app.vi_state.input_mode == InputMode.REPLACE event.current_buffer.insert_text(chr(symbol), overwrite=overwrite) event.app.vi_state.waiting_for_digraph = False finally: event.app.vi_state.waiting_for_digraph = False event.app.vi_state.digraph_symbol1 = None @handle("c-o", filter=vi_insert_mode | vi_replace_mode) def _quick_normal_mode(event: E) -> None: """ Go into normal mode for one single action. """ event.app.vi_state.temporary_navigation_mode = True @handle("q", Keys.Any, filter=vi_navigation_mode & ~vi_recording_macro) def _start_macro(event: E) -> None: """ Start recording macro. """ c = event.key_sequence[1].data if c in vi_register_names: vi_state = event.app.vi_state vi_state.recording_register = c vi_state.current_recording = "" @handle("q", filter=vi_navigation_mode & vi_recording_macro) def _stop_macro(event: E) -> None: """ Stop recording macro. """ vi_state = event.app.vi_state # Store and stop recording. if vi_state.recording_register: vi_state.named_registers[vi_state.recording_register] = ClipboardData( vi_state.current_recording ) vi_state.recording_register = None vi_state.current_recording = "" @handle("@", Keys.Any, filter=vi_navigation_mode, record_in_macro=False) def _execute_macro(event: E) -> None: """ Execute macro. Notice that we pass `record_in_macro=False`. This ensures that the `@x` keys don't appear in the recording itself. This function inserts the body of the called macro back into the KeyProcessor, so these keys will be added later on to the macro of their handlers have `record_in_macro=True`. """ # Retrieve macro. c = event.key_sequence[1].data try: macro = event.app.vi_state.named_registers[c] except KeyError: return # Expand macro (which is a string in the register), in individual keys. # Use vt100 parser for this. keys: List[KeyPress] = [] parser = Vt100Parser(keys.append) parser.feed(macro.text) parser.flush() # Now feed keys back to the input processor. 
    # Feed the macro's keys back into the key processor, once per repeat
    # count, so they are handled as if typed.
    for _ in range(event.arg):
        event.app.key_processor.feed_multiple(keys, first=True)

    return ConditionalKeyBindings(key_bindings, vi_mode)


def load_vi_search_bindings() -> KeyBindingsBase:
    """
    Key bindings for Vi-style incremental search ('/' and '?'), active only
    while `vi_mode` is enabled (the whole set is wrapped in a
    `ConditionalKeyBindings`).
    """
    key_bindings = KeyBindings()
    handle = key_bindings.add
    # NOTE(review): local import — presumably to avoid a circular module
    # dependency; confirm against the package layout.
    from . import search

    @Condition
    def search_buffer_is_empty() -> bool:
        """Return True when the search buffer is empty."""
        return get_app().current_buffer.text == ""

    # Vi-style forward search. '/' searches forward normally; when the
    # search direction is reversed ('?' was used to start), the keys swap
    # meaning, hence the second registration with '?'.
    handle(
        "/",
        filter=(vi_navigation_mode | vi_selection_mode)
        & ~vi_search_direction_reversed,
    )(search.start_forward_incremental_search)
    handle(
        "?",
        filter=(vi_navigation_mode | vi_selection_mode)
        & vi_search_direction_reversed,
    )(search.start_forward_incremental_search)
    handle("c-s")(search.start_forward_incremental_search)

    # Vi-style backward search. Mirror image of the forward registrations.
    handle(
        "?",
        filter=(vi_navigation_mode | vi_selection_mode)
        & ~vi_search_direction_reversed,
    )(search.start_reverse_incremental_search)
    handle(
        "/",
        filter=(vi_navigation_mode | vi_selection_mode)
        & vi_search_direction_reversed,
    )(search.start_reverse_incremental_search)
    handle("c-r")(search.start_reverse_incremental_search)

    # Apply the search. (At the / or ? prompt.)
    handle("enter", filter=is_searching)(search.accept_search)

    # While searching, Control-R / Control-S step through further matches.
    handle("c-r", filter=is_searching)(search.reverse_incremental_search)
    handle("c-s", filter=is_searching)(search.forward_incremental_search)

    handle("c-c")(search.abort_search)
    handle("c-g")(search.abort_search)
    # Backspace on an empty search buffer cancels the search entirely.
    handle("backspace", filter=search_buffer_is_empty)(search.abort_search)

    # Handle escape. This should accept the search, just like readline.
    # `abort_search` would be a meaningful alternative.
    handle("escape")(search.accept_search)

    return ConditionalKeyBindings(key_bindings, vi_mode)
codeparrot/github-code-clean
# Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ This module contains classes that help to emulate xcodebuild behavior on top of other build systems, such as make and ninja. """ from __future__ import print_function import copy import gyp.common import os import os.path import re import shlex import subprocess import sys from gyp.common import GypError PY3 = bytes != str # Populated lazily by XcodeVersion, for efficiency, and to fix an issue when # "xcodebuild" is called too quickly (it has been found to return incorrect # version number). XCODE_VERSION_CACHE = None # Populated lazily by GetXcodeArchsDefault, to an |XcodeArchsDefault| instance # corresponding to the installed version of Xcode. XCODE_ARCHS_DEFAULT_CACHE = None def XcodeArchsVariableMapping(archs, archs_including_64_bit=None): """Constructs a dictionary with expansion for $(ARCHS_STANDARD) variable, and optionally for $(ARCHS_STANDARD_INCLUDING_64_BIT).""" mapping = {"$(ARCHS_STANDARD)": archs} if archs_including_64_bit: mapping["$(ARCHS_STANDARD_INCLUDING_64_BIT)"] = archs_including_64_bit return mapping class XcodeArchsDefault(object): """A class to resolve ARCHS variable from xcode_settings, resolving Xcode macros and implementing filtering by VALID_ARCHS. The expansion of macros depends on the SDKROOT used ("macosx", "iphoneos", "iphonesimulator") and on the version of Xcode. """ # Match variable like $(ARCHS_STANDARD). 
    # Matches only a whole-token variable reference like "$(ARCHS_STANDARD)";
    # anchored at the end, so partial matches are not treated as variables.
    variable_pattern = re.compile(r"\$\([a-zA-Z_][a-zA-Z0-9_]*\)$")

    def __init__(self, default, mac, iphonesimulator, iphoneos):
        # `default` is stored as a 1-tuple: it is used as the ARCHS list when
        # the target does not set ARCHS at all (see ActiveArchs).
        self._default = (default,)
        self._archs = {"mac": mac, "ios": iphoneos, "iossim": iphonesimulator}

    def _VariableMapping(self, sdkroot):
        """Returns the dictionary of variable mapping depending on the SDKROOT."""
        sdkroot = sdkroot.lower()
        # NOTE: "iphoneos" is checked first; it is a substring test, so an
        # SDKROOT containing "iphoneos" never reaches the simulator branch.
        if "iphoneos" in sdkroot:
            return self._archs["ios"]
        elif "iphonesimulator" in sdkroot:
            return self._archs["iossim"]
        else:
            return self._archs["mac"]

    def _ExpandArchs(self, archs, sdkroot):
        """Expands variables references in ARCHS, and remove duplicates."""
        variable_mapping = self._VariableMapping(sdkroot)
        expanded_archs = []
        for arch in archs:
            if self.variable_pattern.match(arch):
                variable = arch
                try:
                    variable_expansion = variable_mapping[variable]
                    # NOTE: the inner loop deliberately reuses the name `arch`
                    # while appending the expansion, preserving first-seen order
                    # and skipping duplicates.
                    for arch in variable_expansion:
                        if arch not in expanded_archs:
                            expanded_archs.append(arch)
                except KeyError:
                    # Unknown $(...) variable: warn and drop it.
                    print('Warning: Ignoring unsupported variable "%s".' % variable)
            elif arch not in expanded_archs:
                expanded_archs.append(arch)
        return expanded_archs

    def ActiveArchs(self, archs, valid_archs, sdkroot):
        """Expands variables references in ARCHS, and filter by VALID_ARCHS if
        it is defined (if not set, Xcode accept any value in ARCHS, otherwise,
        only values present in VALID_ARCHS are kept)."""
        # Fall back to the version-dependent default when ARCHS is unset/empty.
        expanded_archs = self._ExpandArchs(archs or self._default, sdkroot or "")
        if valid_archs:
            filtered_archs = []
            for arch in expanded_archs:
                if arch in valid_archs:
                    filtered_archs.append(arch)
            expanded_archs = filtered_archs
        return expanded_archs


def GetXcodeArchsDefault():
    """Returns the |XcodeArchsDefault| object to use to expand ARCHS for the
    installed version of Xcode. The default values used by Xcode for ARCHS
    and the expansion of the variables depends on the version of Xcode used.

    For all version anterior to Xcode 5.0 or posterior to Xcode 5.1 included
    uses $(ARCHS_STANDARD) if ARCHS is unset, while Xcode 5.0 to 5.0.2 uses
    $(ARCHS_STANDARD_INCLUDING_64_BIT).
This variable was added to Xcode 5.0 and deprecated with Xcode 5.1. For "macosx" SDKROOT, all version starting with Xcode 5.0 includes 64-bit architecture as part of $(ARCHS_STANDARD) and default to only building it. For "iphoneos" and "iphonesimulator" SDKROOT, 64-bit architectures are part of $(ARCHS_STANDARD_INCLUDING_64_BIT) from Xcode 5.0. From Xcode 5.1, they are also part of $(ARCHS_STANDARD). All these rules are coded in the construction of the |XcodeArchsDefault| object to use depending on the version of Xcode detected. The object is for performance reason.""" global XCODE_ARCHS_DEFAULT_CACHE if XCODE_ARCHS_DEFAULT_CACHE: return XCODE_ARCHS_DEFAULT_CACHE xcode_version, _ = XcodeVersion() if xcode_version < "0500": XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault( "$(ARCHS_STANDARD)", XcodeArchsVariableMapping(["i386"]), XcodeArchsVariableMapping(["i386"]), XcodeArchsVariableMapping(["armv7"]), ) elif xcode_version < "0510": XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault( "$(ARCHS_STANDARD_INCLUDING_64_BIT)", XcodeArchsVariableMapping(["x86_64"], ["x86_64"]), XcodeArchsVariableMapping(["i386"], ["i386", "x86_64"]), XcodeArchsVariableMapping( ["armv7", "armv7s"], ["armv7", "armv7s", "arm64"] ), ) else: XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault( "$(ARCHS_STANDARD)", XcodeArchsVariableMapping(["x86_64"], ["x86_64"]), XcodeArchsVariableMapping(["i386", "x86_64"], ["i386", "x86_64"]), XcodeArchsVariableMapping( ["armv7", "armv7s", "arm64"], ["armv7", "armv7s", "arm64"] ), ) return XCODE_ARCHS_DEFAULT_CACHE class XcodeSettings(object): """A class that understands the gyp 'xcode_settings' object.""" # Populated lazily by _SdkPath(). Shared by all XcodeSettings, so cached # at class-level for efficiency. _sdk_path_cache = {} _platform_path_cache = {} _sdk_root_cache = {} # Populated lazily by GetExtraPlistItems(). Shared by all XcodeSettings, so # cached at class-level for efficiency. _plist_cache = {} # Populated lazily by GetIOSPostbuilds. 
Shared by all XcodeSettings, so # cached at class-level for efficiency. _codesigning_key_cache = {} def __init__(self, spec): self.spec = spec self.isIOS = False self.mac_toolchain_dir = None self.header_map_path = None # Per-target 'xcode_settings' are pushed down into configs earlier by gyp. # This means self.xcode_settings[config] always contains all settings # for that config -- the per-target settings as well. Settings that are # the same for all configs are implicitly per-target settings. self.xcode_settings = {} configs = spec["configurations"] for configname, config in configs.items(): self.xcode_settings[configname] = config.get("xcode_settings", {}) self._ConvertConditionalKeys(configname) if self.xcode_settings[configname].get("IPHONEOS_DEPLOYMENT_TARGET", None): self.isIOS = True # This is only non-None temporarily during the execution of some methods. self.configname = None # Used by _AdjustLibrary to match .a and .dylib entries in libraries. self.library_re = re.compile(r"^lib([^/]+)\.(a|dylib)$") def _ConvertConditionalKeys(self, configname): """Converts or warns on conditional keys. Xcode supports conditional keys, such as CODE_SIGN_IDENTITY[sdk=iphoneos*]. 
        This is a partial implementation with some keys converted while the rest
        force a warning."""
        settings = self.xcode_settings[configname]
        conditional_keys = [key for key in settings if key.endswith("]")]
        for key in conditional_keys:
            # If you need more, speak up at http://crbug.com/122592
            if key.endswith("[sdk=iphoneos*]"):
                if configname.endswith("iphoneos"):
                    # Promote the conditional value to the unconditional key.
                    new_key = key.split("[")[0]
                    settings[new_key] = settings[key]
            else:
                print(
                    "Warning: Conditional keys not implemented, ignoring:",
                    " ".join(conditional_keys),
                )
            # The conditional key itself is always removed, whether it was
            # converted or merely warned about.
            del settings[key]

    def _Settings(self):
        # Settings for the currently-active configuration; callers must set
        # self.configname before using any of the helpers below.
        assert self.configname
        return self.xcode_settings[self.configname]

    def _Test(self, test_key, cond_key, default):
        # True iff the setting (or its default) equals `cond_key`.
        return self._Settings().get(test_key, default) == cond_key

    def _Appendf(self, lst, test_key, format_str, default=None):
        # Append `format_str % value` when the setting (or a default) exists.
        if test_key in self._Settings():
            lst.append(format_str % str(self._Settings()[test_key]))
        elif default:
            lst.append(format_str % str(default))

    def _WarnUnimplemented(self, test_key):
        # Emit a warning if a key we don't emulate is present in the settings.
        if test_key in self._Settings():
            print('Warning: Ignoring not yet implemented key "%s".' % test_key)

    def IsBinaryOutputFormat(self, configname):
        # Info.plist output format: iOS defaults to binary, macOS to xml.
        default = "binary" if self.isIOS else "xml"
        format = self.xcode_settings[configname].get("INFOPLIST_OUTPUT_FORMAT", default)
        return format == "binary"

    def IsIosFramework(self):
        return self.spec["type"] == "shared_library" and self._IsBundle() and self.isIOS

    def _IsBundle(self):
        # A target is a bundle if it opts in via mac_bundle, or implicitly
        # because it is an XCTest / XCUITest bundle.
        return (
            int(self.spec.get("mac_bundle", 0)) != 0
            or self._IsXCTest()
            or self._IsXCUiTest()
        )

    def _IsXCTest(self):
        return int(self.spec.get("mac_xctest_bundle", 0)) != 0

    def _IsXCUiTest(self):
        return int(self.spec.get("mac_xcuitest_bundle", 0)) != 0

    def _IsIosAppExtension(self):
        return int(self.spec.get("ios_app_extension", 0)) != 0

    def _IsIosWatchKitExtension(self):
        return int(self.spec.get("ios_watchkit_extension", 0)) != 0

    def _IsIosWatchApp(self):
        return int(self.spec.get("ios_watch_app", 0)) != 0

    def GetFrameworkVersion(self):
        """Returns the framework version of the current target.
Only valid for bundles.""" assert self._IsBundle() return self.GetPerTargetSetting("FRAMEWORK_VERSION", default="A") def GetWrapperExtension(self): """Returns the bundle extension (.app, .framework, .plugin, etc). Only valid for bundles.""" assert self._IsBundle() if self.spec["type"] in ("loadable_module", "shared_library"): default_wrapper_extension = { "loadable_module": "bundle", "shared_library": "framework", }[self.spec["type"]] wrapper_extension = self.GetPerTargetSetting( "WRAPPER_EXTENSION", default=default_wrapper_extension ) return "." + self.spec.get("product_extension", wrapper_extension) elif self.spec["type"] == "executable": if self._IsIosAppExtension() or self._IsIosWatchKitExtension(): return "." + self.spec.get("product_extension", "appex") else: return "." + self.spec.get("product_extension", "app") else: assert False, "Don't know extension for '%s', target '%s'" % ( self.spec["type"], self.spec["target_name"], ) def GetProductName(self): """Returns PRODUCT_NAME.""" return self.spec.get("product_name", self.spec["target_name"]) def GetFullProductName(self): """Returns FULL_PRODUCT_NAME.""" if self._IsBundle(): return self.GetWrapperName() else: return self._GetStandaloneBinaryPath() def GetWrapperName(self): """Returns the directory name of the bundle represented by this target. Only valid for bundles.""" assert self._IsBundle() return self.GetProductName() + self.GetWrapperExtension() def GetBundleContentsFolderPath(self): """Returns the qualified path to the bundle's contents folder. E.g. Chromium.app/Contents or Foo.bundle/Versions/A. Only valid for bundles.""" if self.isIOS: return self.GetWrapperName() assert self._IsBundle() if self.spec["type"] == "shared_library": return os.path.join( self.GetWrapperName(), "Versions", self.GetFrameworkVersion() ) else: # loadable_modules have a 'Contents' folder like executables. 
return os.path.join(self.GetWrapperName(), "Contents") def GetBundleResourceFolder(self): """Returns the qualified path to the bundle's resource folder. E.g. Chromium.app/Contents/Resources. Only valid for bundles.""" assert self._IsBundle() if self.isIOS: return self.GetBundleContentsFolderPath() return os.path.join(self.GetBundleContentsFolderPath(), "Resources") def GetBundleExecutableFolderPath(self): """Returns the qualified path to the bundle's executables folder. E.g. Chromium.app/Contents/MacOS. Only valid for bundles.""" assert self._IsBundle() if self.spec["type"] in ("shared_library") or self.isIOS: return self.GetBundleContentsFolderPath() elif self.spec["type"] in ("executable", "loadable_module"): return os.path.join(self.GetBundleContentsFolderPath(), "MacOS") def GetBundleJavaFolderPath(self): """Returns the qualified path to the bundle's Java resource folder. E.g. Chromium.app/Contents/Resources/Java. Only valid for bundles.""" assert self._IsBundle() return os.path.join(self.GetBundleResourceFolder(), "Java") def GetBundleFrameworksFolderPath(self): """Returns the qualified path to the bundle's frameworks folder. E.g, Chromium.app/Contents/Frameworks. Only valid for bundles.""" assert self._IsBundle() return os.path.join(self.GetBundleContentsFolderPath(), "Frameworks") def GetBundleSharedFrameworksFolderPath(self): """Returns the qualified path to the bundle's frameworks folder. E.g, Chromium.app/Contents/SharedFrameworks. Only valid for bundles.""" assert self._IsBundle() return os.path.join(self.GetBundleContentsFolderPath(), "SharedFrameworks") def GetBundleSharedSupportFolderPath(self): """Returns the qualified path to the bundle's shared support folder. E.g, Chromium.app/Contents/SharedSupport. 
Only valid for bundles.""" assert self._IsBundle() if self.spec["type"] == "shared_library": return self.GetBundleResourceFolder() else: return os.path.join(self.GetBundleContentsFolderPath(), "SharedSupport") def GetBundlePlugInsFolderPath(self): """Returns the qualified path to the bundle's plugins folder. E.g, Chromium.app/Contents/PlugIns. Only valid for bundles.""" assert self._IsBundle() return os.path.join(self.GetBundleContentsFolderPath(), "PlugIns") def GetBundleXPCServicesFolderPath(self): """Returns the qualified path to the bundle's XPC services folder. E.g, Chromium.app/Contents/XPCServices. Only valid for bundles.""" assert self._IsBundle() return os.path.join(self.GetBundleContentsFolderPath(), "XPCServices") def GetBundlePlistPath(self): """Returns the qualified path to the bundle's plist file. E.g. Chromium.app/Contents/Info.plist. Only valid for bundles.""" assert self._IsBundle() if ( self.spec["type"] in ("executable", "loadable_module") or self.IsIosFramework() ): return os.path.join(self.GetBundleContentsFolderPath(), "Info.plist") else: return os.path.join( self.GetBundleContentsFolderPath(), "Resources", "Info.plist" ) def GetProductType(self): """Returns the PRODUCT_TYPE of this target.""" if self._IsIosAppExtension(): assert self._IsBundle(), ( "ios_app_extension flag requires mac_bundle " "(target %s)" % self.spec["target_name"] ) return "com.apple.product-type.app-extension" if self._IsIosWatchKitExtension(): assert self._IsBundle(), ( "ios_watchkit_extension flag requires " "mac_bundle (target %s)" % self.spec["target_name"] ) return "com.apple.product-type.watchkit-extension" if self._IsIosWatchApp(): assert self._IsBundle(), ( "ios_watch_app flag requires mac_bundle " "(target %s)" % self.spec["target_name"] ) return "com.apple.product-type.application.watchapp" if self._IsXCUiTest(): assert self._IsBundle(), ( "mac_xcuitest_bundle flag requires mac_bundle " "(target %s)" % self.spec["target_name"] ) return 
"com.apple.product-type.bundle.ui-testing" if self._IsBundle(): return { "executable": "com.apple.product-type.application", "loadable_module": "com.apple.product-type.bundle", "shared_library": "com.apple.product-type.framework", }[self.spec["type"]] else: return { "executable": "com.apple.product-type.tool", "loadable_module": "com.apple.product-type.library.dynamic", "shared_library": "com.apple.product-type.library.dynamic", "static_library": "com.apple.product-type.library.static", }[self.spec["type"]] def GetMachOType(self): """Returns the MACH_O_TYPE of this target.""" # Weird, but matches Xcode. if not self._IsBundle() and self.spec["type"] == "executable": return "" return { "executable": "mh_execute", "static_library": "staticlib", "shared_library": "mh_dylib", "loadable_module": "mh_bundle", }[self.spec["type"]] def _GetBundleBinaryPath(self): """Returns the name of the bundle binary of by this target. E.g. Chromium.app/Contents/MacOS/Chromium. Only valid for bundles.""" assert self._IsBundle() return os.path.join( self.GetBundleExecutableFolderPath(), self.GetExecutableName() ) def _GetStandaloneExecutableSuffix(self): if "product_extension" in self.spec: return "." + self.spec["product_extension"] return { "executable": "", "static_library": ".a", "shared_library": ".dylib", "loadable_module": ".so", }[self.spec["type"]] def _GetStandaloneExecutablePrefix(self): return self.spec.get( "product_prefix", { "executable": "", "static_library": "lib", "shared_library": "lib", # Non-bundled loadable_modules are called foo.so for some reason # (that is, .so and no prefix) with the xcode build -- match that. "loadable_module": "", }[self.spec["type"]], ) def _GetStandaloneBinaryPath(self): """Returns the name of the non-bundle binary represented by this target. E.g. hello_world. 
Only valid for non-bundles.""" assert not self._IsBundle() assert self.spec["type"] in ( "executable", "shared_library", "static_library", "loadable_module", ), ("Unexpected type %s" % self.spec["type"]) target = self.spec["target_name"] if self.spec["type"] == "static_library": if target[:3] == "lib": target = target[3:] elif self.spec["type"] in ("loadable_module", "shared_library"): if target[:3] == "lib": target = target[3:] target_prefix = self._GetStandaloneExecutablePrefix() target = self.spec.get("product_name", target) target_ext = self._GetStandaloneExecutableSuffix() return target_prefix + target + target_ext def GetExecutableName(self): """Returns the executable name of the bundle represented by this target. E.g. Chromium.""" if self._IsBundle(): return self.spec.get("product_name", self.spec["target_name"]) else: return self._GetStandaloneBinaryPath() def GetExecutablePath(self): """Returns the qualified path to the primary executable of the bundle represented by this target. E.g. Chromium.app/Contents/MacOS/Chromium.""" if self._IsBundle(): return self._GetBundleBinaryPath() else: return self._GetStandaloneBinaryPath() def GetActiveArchs(self, configname): """Returns the architectures this target should be built for.""" config_settings = self.xcode_settings[configname] xcode_archs_default = GetXcodeArchsDefault() return xcode_archs_default.ActiveArchs( config_settings.get("ARCHS"), config_settings.get("VALID_ARCHS"), config_settings.get("SDKROOT"), ) def _GetSdkVersionInfoItem(self, sdk, infoitem): # xcodebuild requires Xcode and can't run on Command Line Tools-only # systems from 10.7 onward. # Since the CLT has no SDK paths anyway, returning None is the # most sensible route and should still do the right thing. 
try: return GetStdoutQuiet(["xcrun", "--sdk", sdk, infoitem]) except GypError: pass def _SdkRoot(self, configname): if configname is None: configname = self.configname return self.GetPerConfigSetting("SDKROOT", configname, default="") def _XcodePlatformPath(self, configname=None): sdk_root = self._SdkRoot(configname) if sdk_root not in XcodeSettings._platform_path_cache: platform_path = self._GetSdkVersionInfoItem( sdk_root, "--show-sdk-platform-path" ) XcodeSettings._platform_path_cache[sdk_root] = platform_path return XcodeSettings._platform_path_cache[sdk_root] def _SdkPath(self, configname=None): sdk_root = self._SdkRoot(configname) if sdk_root.startswith("/"): return sdk_root return self._XcodeSdkPath(sdk_root) def _XcodeSdkPath(self, sdk_root): if sdk_root not in XcodeSettings._sdk_path_cache: sdk_path = self._GetSdkVersionInfoItem(sdk_root, "--show-sdk-path") XcodeSettings._sdk_path_cache[sdk_root] = sdk_path if sdk_root: XcodeSettings._sdk_root_cache[sdk_path] = sdk_root return XcodeSettings._sdk_path_cache[sdk_root] def _AppendPlatformVersionMinFlags(self, lst): self._Appendf(lst, "MACOSX_DEPLOYMENT_TARGET", "-mmacosx-version-min=%s") if "IPHONEOS_DEPLOYMENT_TARGET" in self._Settings(): # TODO: Implement this better? sdk_path_basename = os.path.basename(self._SdkPath()) if sdk_path_basename.lower().startswith("iphonesimulator"): self._Appendf( lst, "IPHONEOS_DEPLOYMENT_TARGET", "-mios-simulator-version-min=%s" ) else: self._Appendf( lst, "IPHONEOS_DEPLOYMENT_TARGET", "-miphoneos-version-min=%s" ) def GetCflags(self, configname, arch=None): """Returns flags that need to be added to .c, .cc, .m, and .mm compilations.""" # This functions (and the similar ones below) do not offer complete # emulation of all xcode_settings keys. They're implemented on demand. 
self.configname = configname cflags = [] sdk_root = self._SdkPath() if "SDKROOT" in self._Settings() and sdk_root: cflags.append("-isysroot %s" % sdk_root) if self.header_map_path: cflags.append("-I%s" % self.header_map_path) if self._Test("CLANG_WARN_CONSTANT_CONVERSION", "YES", default="NO"): cflags.append("-Wconstant-conversion") if self._Test("GCC_CHAR_IS_UNSIGNED_CHAR", "YES", default="NO"): cflags.append("-funsigned-char") if self._Test("GCC_CW_ASM_SYNTAX", "YES", default="YES"): cflags.append("-fasm-blocks") if "GCC_DYNAMIC_NO_PIC" in self._Settings(): if self._Settings()["GCC_DYNAMIC_NO_PIC"] == "YES": cflags.append("-mdynamic-no-pic") else: pass # TODO: In this case, it depends on the target. xcode passes # mdynamic-no-pic by default for executable and possibly static lib # according to mento if self._Test("GCC_ENABLE_PASCAL_STRINGS", "YES", default="YES"): cflags.append("-mpascal-strings") self._Appendf(cflags, "GCC_OPTIMIZATION_LEVEL", "-O%s", default="s") if self._Test("GCC_GENERATE_DEBUGGING_SYMBOLS", "YES", default="YES"): dbg_format = self._Settings().get("DEBUG_INFORMATION_FORMAT", "dwarf") if dbg_format == "dwarf": cflags.append("-gdwarf-2") elif dbg_format == "stabs": raise NotImplementedError("stabs debug format is not supported yet.") elif dbg_format == "dwarf-with-dsym": cflags.append("-gdwarf-2") else: raise NotImplementedError("Unknown debug format %s" % dbg_format) if self._Settings().get("GCC_STRICT_ALIASING") == "YES": cflags.append("-fstrict-aliasing") elif self._Settings().get("GCC_STRICT_ALIASING") == "NO": cflags.append("-fno-strict-aliasing") if self._Test("GCC_SYMBOLS_PRIVATE_EXTERN", "YES", default="NO"): cflags.append("-fvisibility=hidden") if self._Test("GCC_TREAT_WARNINGS_AS_ERRORS", "YES", default="NO"): cflags.append("-Werror") if self._Test("GCC_WARN_ABOUT_MISSING_NEWLINE", "YES", default="NO"): cflags.append("-Wnewline-eof") # In Xcode, this is only activated when GCC_COMPILER_VERSION is clang or # llvm-gcc. 
It also requires a fairly recent libtool, and # if the system clang isn't used, DYLD_LIBRARY_PATH needs to contain the # path to the libLTO.dylib that matches the used clang. if self._Test("LLVM_LTO", "YES", default="NO"): cflags.append("-flto") self._AppendPlatformVersionMinFlags(cflags) # TODO: if self._Test("COPY_PHASE_STRIP", "YES", default="NO"): self._WarnUnimplemented("COPY_PHASE_STRIP") self._WarnUnimplemented("GCC_DEBUGGING_SYMBOLS") self._WarnUnimplemented("GCC_ENABLE_OBJC_EXCEPTIONS") # TODO: This is exported correctly, but assigning to it is not supported. self._WarnUnimplemented("MACH_O_TYPE") self._WarnUnimplemented("PRODUCT_TYPE") # If GYP_CROSSCOMPILE (--cross-compiling), disable architecture-specific # additions and assume these will be provided as required via CC_host, # CXX_host, CC_target and CXX_target. if not gyp.common.CrossCompileRequested(): if arch is not None: archs = [arch] else: assert self.configname archs = self.GetActiveArchs(self.configname) if len(archs) != 1: # TODO: Supporting fat binaries will be annoying. self._WarnUnimplemented("ARCHS") archs = ["i386"] cflags.append("-arch " + archs[0]) if archs[0] in ("i386", "x86_64"): if self._Test("GCC_ENABLE_SSE3_EXTENSIONS", "YES", default="NO"): cflags.append("-msse3") if self._Test( "GCC_ENABLE_SUPPLEMENTAL_SSE3_INSTRUCTIONS", "YES", default="NO" ): cflags.append("-mssse3") # Note 3rd 's'. 
if self._Test("GCC_ENABLE_SSE41_EXTENSIONS", "YES", default="NO"): cflags.append("-msse4.1") if self._Test("GCC_ENABLE_SSE42_EXTENSIONS", "YES", default="NO"): cflags.append("-msse4.2") cflags += self._Settings().get("WARNING_CFLAGS", []) if self._IsXCTest(): platform_root = self._XcodePlatformPath(configname) if platform_root: cflags.append("-F" + platform_root + "/Developer/Library/Frameworks/") if sdk_root: framework_root = sdk_root else: framework_root = "" config = self.spec["configurations"][self.configname] framework_dirs = config.get("mac_framework_dirs", []) for directory in framework_dirs: cflags.append("-F" + directory.replace("$(SDKROOT)", framework_root)) self.configname = None return cflags def GetCflagsC(self, configname): """Returns flags that need to be added to .c, and .m compilations.""" self.configname = configname cflags_c = [] if self._Settings().get("GCC_C_LANGUAGE_STANDARD", "") == "ansi": cflags_c.append("-ansi") else: self._Appendf(cflags_c, "GCC_C_LANGUAGE_STANDARD", "-std=%s") cflags_c += self._Settings().get("OTHER_CFLAGS", []) self.configname = None return cflags_c def GetCflagsCC(self, configname): """Returns flags that need to be added to .cc, and .mm compilations.""" self.configname = configname cflags_cc = [] clang_cxx_language_standard = self._Settings().get( "CLANG_CXX_LANGUAGE_STANDARD" ) # Note: Don't make c++0x to c++11 so that c++0x can be used with older # clangs that don't understand c++11 yet (like Xcode 4.2's). 
if clang_cxx_language_standard: cflags_cc.append("-std=%s" % clang_cxx_language_standard) self._Appendf(cflags_cc, "CLANG_CXX_LIBRARY", "-stdlib=%s") if self._Test("GCC_ENABLE_CPP_RTTI", "NO", default="YES"): cflags_cc.append("-fno-rtti") if self._Test("GCC_ENABLE_CPP_EXCEPTIONS", "NO", default="YES"): cflags_cc.append("-fno-exceptions") if self._Test("GCC_INLINES_ARE_PRIVATE_EXTERN", "YES", default="NO"): cflags_cc.append("-fvisibility-inlines-hidden") if self._Test("GCC_THREADSAFE_STATICS", "NO", default="YES"): cflags_cc.append("-fno-threadsafe-statics") # Note: This flag is a no-op for clang, it only has an effect for gcc. if self._Test("GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO", "NO", default="YES"): cflags_cc.append("-Wno-invalid-offsetof") other_ccflags = [] for flag in self._Settings().get("OTHER_CPLUSPLUSFLAGS", ["$(inherited)"]): # TODO: More general variable expansion. Missing in many other places too. if flag in ("$inherited", "$(inherited)", "${inherited}"): flag = "$OTHER_CFLAGS" if flag in ("$OTHER_CFLAGS", "$(OTHER_CFLAGS)", "${OTHER_CFLAGS}"): other_ccflags += self._Settings().get("OTHER_CFLAGS", []) else: other_ccflags.append(flag) cflags_cc += other_ccflags self.configname = None return cflags_cc def _AddObjectiveCGarbageCollectionFlags(self, flags): gc_policy = self._Settings().get("GCC_ENABLE_OBJC_GC", "unsupported") if gc_policy == "supported": flags.append("-fobjc-gc") elif gc_policy == "required": flags.append("-fobjc-gc-only") def _AddObjectiveCARCFlags(self, flags): if self._Test("CLANG_ENABLE_OBJC_ARC", "YES", default="NO"): flags.append("-fobjc-arc") def _AddObjectiveCMissingPropertySynthesisFlags(self, flags): if self._Test( "CLANG_WARN_OBJC_MISSING_PROPERTY_SYNTHESIS", "YES", default="NO" ): flags.append("-Wobjc-missing-property-synthesis") def GetCflagsObjC(self, configname): """Returns flags that need to be added to .m compilations.""" self.configname = configname cflags_objc = [] self._AddObjectiveCGarbageCollectionFlags(cflags_objc) 
        self._AddObjectiveCARCFlags(cflags_objc)
        self._AddObjectiveCMissingPropertySynthesisFlags(cflags_objc)
        self.configname = None
        return cflags_objc

    def GetCflagsObjCC(self, configname):
        """Returns flags that need to be added to .mm compilations."""
        self.configname = configname
        cflags_objcc = []
        self._AddObjectiveCGarbageCollectionFlags(cflags_objcc)
        self._AddObjectiveCARCFlags(cflags_objcc)
        self._AddObjectiveCMissingPropertySynthesisFlags(cflags_objcc)
        if self._Test("GCC_OBJC_CALL_CXX_CDTORS", "YES", default="NO"):
            cflags_objcc.append("-fobjc-call-cxx-cdtors")
        self.configname = None
        return cflags_objcc

    def GetInstallNameBase(self):
        """Return DYLIB_INSTALL_NAME_BASE for this target."""
        # Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
        if self.spec["type"] != "shared_library" and (
            self.spec["type"] != "loadable_module" or self._IsBundle()
        ):
            return None
        install_base = self.GetPerTargetSetting(
            "DYLIB_INSTALL_NAME_BASE",
            default="/Library/Frameworks" if self._IsBundle() else "/usr/local/lib",
        )
        return install_base

    def _StandardizePath(self, path):
        """Do :standardizepath processing for path."""
        # I'm not quite sure what :standardizepath does. Just call normpath(),
        # but don't let @executable_path/../foo collapse to foo.
        if "/" in path:
            prefix, rest = "", path
            if path.startswith("@"):
                # Keep the leading "@..." segment intact; only normalize the rest.
                prefix, rest = path.split("/", 1)
            rest = os.path.normpath(rest)  # :standardizepath
            path = os.path.join(prefix, rest)
        return path

    def GetInstallName(self):
        """Return LD_DYLIB_INSTALL_NAME for this target."""
        # Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
if self.spec["type"] != "shared_library" and ( self.spec["type"] != "loadable_module" or self._IsBundle() ): return None default_install_name = ( "$(DYLIB_INSTALL_NAME_BASE:standardizepath)/$(EXECUTABLE_PATH)" ) install_name = self.GetPerTargetSetting( "LD_DYLIB_INSTALL_NAME", default=default_install_name ) # Hardcode support for the variables used in chromium for now, to # unblock people using the make build. if "$" in install_name: assert install_name in ( "$(DYLIB_INSTALL_NAME_BASE:standardizepath)/" "$(WRAPPER_NAME)/$(PRODUCT_NAME)", default_install_name, ), ( "Variables in LD_DYLIB_INSTALL_NAME are not generally supported " "yet in target '%s' (got '%s')" % (self.spec["target_name"], install_name) ) install_name = install_name.replace( "$(DYLIB_INSTALL_NAME_BASE:standardizepath)", self._StandardizePath(self.GetInstallNameBase()), ) if self._IsBundle(): # These are only valid for bundles, hence the |if|. install_name = install_name.replace( "$(WRAPPER_NAME)", self.GetWrapperName() ) install_name = install_name.replace( "$(PRODUCT_NAME)", self.GetProductName() ) else: assert "$(WRAPPER_NAME)" not in install_name assert "$(PRODUCT_NAME)" not in install_name install_name = install_name.replace( "$(EXECUTABLE_PATH)", self.GetExecutablePath() ) return install_name def _MapLinkerFlagFilename(self, ldflag, gyp_to_build_path): """Checks if ldflag contains a filename and if so remaps it from gyp-directory-relative to build-directory-relative.""" # This list is expanded on demand. # They get matched as: # -exported_symbols_list file # -Wl,exported_symbols_list file # -Wl,exported_symbols_list,file LINKER_FILE = r"(\S+)" WORD = r"\S+" linker_flags = [ ["-exported_symbols_list", LINKER_FILE], # Needed for NaCl. ["-unexported_symbols_list", LINKER_FILE], ["-reexported_symbols_list", LINKER_FILE], ["-sectcreate", WORD, WORD, LINKER_FILE], # Needed for remoting. ] for flag_pattern in linker_flags: regex = re.compile("(?:-Wl,)?" 
+ "[ ,]".join(flag_pattern)) m = regex.match(ldflag) if m: ldflag = ( ldflag[: m.start(1)] + gyp_to_build_path(m.group(1)) + ldflag[m.end(1) :] ) # Required for ffmpeg (no idea why they don't use LIBRARY_SEARCH_PATHS, # TODO(thakis): Update ffmpeg.gyp): if ldflag.startswith("-L"): ldflag = "-L" + gyp_to_build_path(ldflag[len("-L") :]) return ldflag def GetLdflags(self, configname, product_dir, gyp_to_build_path, arch=None): """Returns flags that need to be passed to the linker. Args: configname: The name of the configuration to get ld flags for. product_dir: The directory where products such static and dynamic libraries are placed. This is added to the library search path. gyp_to_build_path: A function that converts paths relative to the current gyp file to paths relative to the build directory. """ self.configname = configname ldflags = [] # The xcode build is relative to a gyp file's directory, and OTHER_LDFLAGS # can contain entries that depend on this. Explicitly absolutify these. for ldflag in self._Settings().get("OTHER_LDFLAGS", []): ldflags.append(self._MapLinkerFlagFilename(ldflag, gyp_to_build_path)) if self._Test("DEAD_CODE_STRIPPING", "YES", default="NO"): ldflags.append("-Wl,-dead_strip") if self._Test("PREBINDING", "YES", default="NO"): ldflags.append("-Wl,-prebind") self._Appendf( ldflags, "DYLIB_COMPATIBILITY_VERSION", "-compatibility_version %s" ) self._Appendf(ldflags, "DYLIB_CURRENT_VERSION", "-current_version %s") self._AppendPlatformVersionMinFlags(ldflags) if "SDKROOT" in self._Settings() and self._SdkPath(): ldflags.append("-isysroot " + self._SdkPath()) for library_path in self._Settings().get("LIBRARY_SEARCH_PATHS", []): ldflags.append("-L" + gyp_to_build_path(library_path)) if "ORDER_FILE" in self._Settings(): ldflags.append( "-Wl,-order_file " + "-Wl," + gyp_to_build_path(self._Settings()["ORDER_FILE"]) ) if not gyp.common.CrossCompileRequested(): if arch is not None: archs = [arch] else: assert self.configname archs = 
self.GetActiveArchs(self.configname) if len(archs) != 1: # TODO: Supporting fat binaries will be annoying. self._WarnUnimplemented("ARCHS") archs = ["i386"] ldflags.append("-arch " + archs[0]) # Xcode adds the product directory by default. # Rewrite -L. to -L./ to work around http://www.openradar.me/25313838 ldflags.append("-L" + (product_dir if product_dir != "." else "./")) install_name = self.GetInstallName() if install_name and self.spec["type"] != "loadable_module": ldflags.append("-install_name " + install_name.replace(" ", r"\ ")) for rpath in self._Settings().get("LD_RUNPATH_SEARCH_PATHS", []): ldflags.append("-Wl,-rpath," + rpath) sdk_root = self._SdkPath() if not sdk_root: sdk_root = "" config = self.spec["configurations"][self.configname] framework_dirs = config.get("mac_framework_dirs", []) for directory in framework_dirs: ldflags.append("-F" + directory.replace("$(SDKROOT)", sdk_root)) if self._IsXCTest(): platform_root = self._XcodePlatformPath(configname) if sdk_root and platform_root: ldflags.append("-F" + platform_root + "/Developer/Library/Frameworks/") ldflags.append("-framework XCTest") is_extension = self._IsIosAppExtension() or self._IsIosWatchKitExtension() if sdk_root and is_extension: # Adds the link flags for extensions. These flags are common for all # extensions and provide loader and main function. # These flags reflect the compilation options used by xcode to compile # extensions. xcode_version, _ = XcodeVersion() if xcode_version < "0900": ldflags.append("-lpkstart") ldflags.append( sdk_root + "/System/Library/PrivateFrameworks/PlugInKit.framework/PlugInKit" ) else: ldflags.append("-e _NSExtensionMain") ldflags.append("-fapplication-extension") self._Appendf(ldflags, "CLANG_CXX_LIBRARY", "-stdlib=%s") self.configname = None return ldflags def GetLibtoolflags(self, configname): """Returns flags that need to be passed to the static linker. Args: configname: The name of the configuration to get ld flags for. 
""" self.configname = configname libtoolflags = [] for libtoolflag in self._Settings().get("OTHER_LDFLAGS", []): libtoolflags.append(libtoolflag) # TODO(thakis): ARCHS? self.configname = None return libtoolflags def GetPerTargetSettings(self): """Gets a list of all the per-target settings. This will only fetch keys whose values are the same across all configurations.""" first_pass = True result = {} for configname in sorted(self.xcode_settings.keys()): if first_pass: result = dict(self.xcode_settings[configname]) first_pass = False else: for key, value in self.xcode_settings[configname].items(): if key not in result: continue elif result[key] != value: del result[key] return result def GetPerConfigSetting(self, setting, configname, default=None): if configname in self.xcode_settings: return self.xcode_settings[configname].get(setting, default) else: return self.GetPerTargetSetting(setting, default) def GetPerTargetSetting(self, setting, default=None): """Tries to get xcode_settings.setting from spec. Assumes that the setting has the same value in all configurations and throws otherwise.""" is_first_pass = True result = None for configname in sorted(self.xcode_settings.keys()): if is_first_pass: result = self.xcode_settings[configname].get(setting, None) is_first_pass = False else: assert result == self.xcode_settings[configname].get(setting, None), ( "Expected per-target setting for '%s', got per-config setting " "(target %s)" % (setting, self.spec["target_name"]) ) if result is None: return default return result def _GetStripPostbuilds(self, configname, output_binary, quiet): """Returns a list of shell commands that contain the shell commands necessary to strip this target's binary. 
These should be run as postbuilds before the actual postbuilds run.""" self.configname = configname result = [] if self._Test("DEPLOYMENT_POSTPROCESSING", "YES", default="NO") and self._Test( "STRIP_INSTALLED_PRODUCT", "YES", default="NO" ): default_strip_style = "debugging" if ( self.spec["type"] == "loadable_module" or self._IsIosAppExtension() ) and self._IsBundle(): default_strip_style = "non-global" elif self.spec["type"] == "executable": default_strip_style = "all" strip_style = self._Settings().get("STRIP_STYLE", default_strip_style) strip_flags = {"all": "", "non-global": "-x", "debugging": "-S"}[ strip_style ] explicit_strip_flags = self._Settings().get("STRIPFLAGS", "") if explicit_strip_flags: strip_flags += " " + _NormalizeEnvVarReferences(explicit_strip_flags) if not quiet: result.append("echo STRIP\\(%s\\)" % self.spec["target_name"]) result.append("strip %s %s" % (strip_flags, output_binary)) self.configname = None return result def _GetDebugInfoPostbuilds(self, configname, output, output_binary, quiet): """Returns a list of shell commands that contain the shell commands necessary to massage this target's debug information. These should be run as postbuilds before the actual postbuilds run.""" self.configname = configname # For static libraries, no dSYMs are created. result = [] if ( self._Test("GCC_GENERATE_DEBUGGING_SYMBOLS", "YES", default="YES") and self._Test( "DEBUG_INFORMATION_FORMAT", "dwarf-with-dsym", default="dwarf" ) and self.spec["type"] != "static_library" ): if not quiet: result.append("echo DSYMUTIL\\(%s\\)" % self.spec["target_name"]) result.append("dsymutil %s -o %s" % (output_binary, output + ".dSYM")) self.configname = None return result def _GetTargetPostbuilds(self, configname, output, output_binary, quiet=False): """Returns a list of shell commands that contain the shell commands to run as postbuilds for this target, before the actual postbuilds.""" # dSYMs need to build before stripping happens. 
return self._GetDebugInfoPostbuilds( configname, output, output_binary, quiet ) + self._GetStripPostbuilds(configname, output_binary, quiet) def _GetIOSPostbuilds(self, configname, output_binary): """Return a shell command to codesign the iOS output binary so it can be deployed to a device. This should be run as the very last step of the build.""" if not ( self.isIOS and (self.spec["type"] == "executable" or self._IsXCTest()) or self.IsIosFramework() ): return [] postbuilds = [] product_name = self.GetFullProductName() settings = self.xcode_settings[configname] # Xcode expects XCTests to be copied into the TEST_HOST dir. if self._IsXCTest(): source = os.path.join("${BUILT_PRODUCTS_DIR}", product_name) test_host = os.path.dirname(settings.get("TEST_HOST")) xctest_destination = os.path.join(test_host, "PlugIns", product_name) postbuilds.extend(["ditto %s %s" % (source, xctest_destination)]) key = self._GetIOSCodeSignIdentityKey(settings) if not key: return postbuilds # Warn for any unimplemented signing xcode keys. unimpl = ["OTHER_CODE_SIGN_FLAGS"] unimpl = set(unimpl) & set(self.xcode_settings[configname].keys()) if unimpl: print( "Warning: Some codesign keys not implemented, ignoring: %s" % ", ".join(sorted(unimpl)) ) if self._IsXCTest(): # For device xctests, Xcode copies two extra frameworks into $TEST_HOST. 
test_host = os.path.dirname(settings.get("TEST_HOST")) frameworks_dir = os.path.join(test_host, "Frameworks") platform_root = self._XcodePlatformPath(configname) frameworks = [ "Developer/Library/PrivateFrameworks/IDEBundleInjection.framework", "Developer/Library/Frameworks/XCTest.framework", ] for framework in frameworks: source = os.path.join(platform_root, framework) destination = os.path.join(frameworks_dir, os.path.basename(framework)) postbuilds.extend(["ditto %s %s" % (source, destination)]) # Then re-sign everything with 'preserve=True' postbuilds.extend( [ '%s code-sign-bundle "%s" "%s" "%s" "%s" %s' % ( os.path.join("${TARGET_BUILD_DIR}", "gyp-mac-tool"), key, settings.get("CODE_SIGN_ENTITLEMENTS", ""), settings.get("PROVISIONING_PROFILE", ""), destination, True, ) ] ) plugin_dir = os.path.join(test_host, "PlugIns") targets = [os.path.join(plugin_dir, product_name), test_host] for target in targets: postbuilds.extend( [ '%s code-sign-bundle "%s" "%s" "%s" "%s" %s' % ( os.path.join("${TARGET_BUILD_DIR}", "gyp-mac-tool"), key, settings.get("CODE_SIGN_ENTITLEMENTS", ""), settings.get("PROVISIONING_PROFILE", ""), target, True, ) ] ) postbuilds.extend( [ '%s code-sign-bundle "%s" "%s" "%s" "%s" %s' % ( os.path.join("${TARGET_BUILD_DIR}", "gyp-mac-tool"), key, settings.get("CODE_SIGN_ENTITLEMENTS", ""), settings.get("PROVISIONING_PROFILE", ""), os.path.join("${BUILT_PRODUCTS_DIR}", product_name), False, ) ] ) return postbuilds def _GetIOSCodeSignIdentityKey(self, settings): identity = settings.get("CODE_SIGN_IDENTITY") if not identity: return None if identity not in XcodeSettings._codesigning_key_cache: output = subprocess.check_output( ["security", "find-identity", "-p", "codesigning", "-v"] ) for line in output.splitlines(): if identity in line: fingerprint = line.split()[1] cache = XcodeSettings._codesigning_key_cache assert identity not in cache or fingerprint == cache[identity], ( "Multiple codesigning fingerprints for identity: %s" % identity ) 
                    XcodeSettings._codesigning_key_cache[identity] = fingerprint
        return XcodeSettings._codesigning_key_cache.get(identity, "")

    def AddImplicitPostbuilds(
        self, configname, output, output_binary, postbuilds=[], quiet=False
    ):
        """Returns a list of shell commands that should run before and after
        |postbuilds|."""
        # NOTE(review): |postbuilds=[]| is a mutable default argument; it is
        # only read (never mutated) here, so the shared-default pitfall does
        # not bite, but callers should still prefer passing their own list.
        assert output_binary is not None
        # Target postbuilds (dSYM generation, stripping) must run before the
        # caller's postbuilds; iOS codesigning must run last.
        pre = self._GetTargetPostbuilds(configname, output, output_binary, quiet)
        post = self._GetIOSPostbuilds(configname, output_binary)
        return pre + postbuilds + post

    def _AdjustLibrary(self, library, config_name=None):
        # Translate a gyp 'libraries' entry into a linker flag:
        # 'Foo.framework' -> '-framework Foo', 'libfoo.dylib' -> '-lfoo'
        # (via self.library_re), anything else is passed through unchanged.
        if library.endswith(".framework"):
            l_flag = "-framework " + os.path.splitext(os.path.basename(library))[0]
        else:
            m = self.library_re.match(library)
            if m:
                l_flag = "-l" + m.group(1)
            else:
                l_flag = library

        sdk_root = self._SdkPath(config_name)
        if not sdk_root:
            sdk_root = ""

        # Xcode 7 started shipping with ".tbd" (text based stubs) files instead of
        # ".dylib" without providing a real support for them. What it does, for
        # "/usr/lib" libraries, is do "-L/usr/lib -lname" which is dependent on the
        # library order and cause collision when building Chrome.
        #
        # Instead substitute ".tbd" to ".dylib" in the generated project when the
        # following conditions are both true:
        # - library is referenced in the gyp file as "$(SDKROOT)/**/*.dylib",
        # - the ".dylib" file does not exists but a ".tbd" file do.
        library = l_flag.replace("$(SDKROOT)", sdk_root)
        if l_flag.startswith("$(SDKROOT)"):
            basename, ext = os.path.splitext(library)
            if ext == ".dylib" and not os.path.exists(library):
                tbd_library = basename + ".tbd"
                if os.path.exists(tbd_library):
                    library = tbd_library
        return library

    def AdjustLibraries(self, libraries, config_name=None):
        """Transforms entries like 'Cocoa.framework' in libraries into entries like
        '-framework Cocoa', 'libcrypto.dylib' into '-lcrypto', etc.
""" libraries = [self._AdjustLibrary(library, config_name) for library in libraries] return libraries def _BuildMachineOSBuild(self): return GetStdout(["sw_vers", "-buildVersion"]) def _XcodeIOSDeviceFamily(self, configname): family = self.xcode_settings[configname].get("TARGETED_DEVICE_FAMILY", "1") return [int(x) for x in family.split(",")] def GetExtraPlistItems(self, configname=None): """Returns a dictionary with extra items to insert into Info.plist.""" if configname not in XcodeSettings._plist_cache: cache = {} cache["BuildMachineOSBuild"] = self._BuildMachineOSBuild() xcode_version, xcode_build = XcodeVersion() cache["DTXcode"] = xcode_version cache["DTXcodeBuild"] = xcode_build compiler = self.xcode_settings[configname].get("GCC_VERSION") if compiler is not None: cache["DTCompiler"] = compiler sdk_root = self._SdkRoot(configname) if not sdk_root: sdk_root = self._DefaultSdkRoot() sdk_version = self._GetSdkVersionInfoItem(sdk_root, "--show-sdk-version") cache["DTSDKName"] = sdk_root + (sdk_version or "") if xcode_version >= "0720": cache["DTSDKBuild"] = self._GetSdkVersionInfoItem( sdk_root, "--show-sdk-build-version" ) elif xcode_version >= "0430": cache["DTSDKBuild"] = sdk_version else: cache["DTSDKBuild"] = cache["BuildMachineOSBuild"] if self.isIOS: cache["MinimumOSVersion"] = self.xcode_settings[configname].get( "IPHONEOS_DEPLOYMENT_TARGET" ) cache["DTPlatformName"] = sdk_root cache["DTPlatformVersion"] = sdk_version if configname.endswith("iphoneos"): cache["CFBundleSupportedPlatforms"] = ["iPhoneOS"] cache["DTPlatformBuild"] = cache["DTSDKBuild"] else: cache["CFBundleSupportedPlatforms"] = ["iPhoneSimulator"] # This is weird, but Xcode sets DTPlatformBuild to an empty field # for simulator builds. cache["DTPlatformBuild"] = "" XcodeSettings._plist_cache[configname] = cache # Include extra plist items that are per-target, not per global # XcodeSettings. 
items = dict(XcodeSettings._plist_cache[configname]) if self.isIOS: items["UIDeviceFamily"] = self._XcodeIOSDeviceFamily(configname) return items def _DefaultSdkRoot(self): """Returns the default SDKROOT to use. Prior to version 5.0.0, if SDKROOT was not explicitly set in the Xcode project, then the environment variable was empty. Starting with this version, Xcode uses the name of the newest SDK installed. """ xcode_version, _ = XcodeVersion() if xcode_version < "0500": return "" default_sdk_path = self._XcodeSdkPath("") default_sdk_root = XcodeSettings._sdk_root_cache.get(default_sdk_path) if default_sdk_root: return default_sdk_root try: all_sdks = GetStdout(["xcodebuild", "-showsdks"]) except GypError: # If xcodebuild fails, there will be no valid SDKs return "" for line in all_sdks.splitlines(): items = line.split() if len(items) >= 3 and items[-2] == "-sdk": sdk_root = items[-1] sdk_path = self._XcodeSdkPath(sdk_root) if sdk_path == default_sdk_path: return sdk_root return "" class MacPrefixHeader(object): """A class that helps with emulating Xcode's GCC_PREFIX_HEADER feature. This feature consists of several pieces: * If GCC_PREFIX_HEADER is present, all compilations in that project get an additional |-include path_to_prefix_header| cflag. * If GCC_PRECOMPILE_PREFIX_HEADER is present too, then the prefix header is instead compiled, and all other compilations in the project get an additional |-include path_to_compiled_header| instead. + Compiled prefix headers have the extension gch. There is one gch file for every language used in the project (c, cc, m, mm), since gch files for different languages aren't compatible. + gch files themselves are built with the target's normal cflags, but they obviously don't get the |-include| flag. Instead, they need a -x flag that describes their language. + All o files in the target need to depend on the gch file, to make sure it's built before any o file is built. 
This class helps with some of these tasks, but it needs help from the build system for writing dependencies to the gch files, for writing build commands for the gch files, and for figuring out the location of the gch files. """ def __init__( self, xcode_settings, gyp_path_to_build_path, gyp_path_to_build_output ): """If xcode_settings is None, all methods on this class are no-ops. Args: gyp_path_to_build_path: A function that takes a gyp-relative path, and returns a path relative to the build directory. gyp_path_to_build_output: A function that takes a gyp-relative path and a language code ('c', 'cc', 'm', or 'mm'), and that returns a path to where the output of precompiling that path for that language should be placed (without the trailing '.gch'). """ # This doesn't support per-configuration prefix headers. Good enough # for now. self.header = None self.compile_headers = False if xcode_settings: self.header = xcode_settings.GetPerTargetSetting("GCC_PREFIX_HEADER") self.compile_headers = ( xcode_settings.GetPerTargetSetting( "GCC_PRECOMPILE_PREFIX_HEADER", default="NO" ) != "NO" ) self.compiled_headers = {} if self.header: if self.compile_headers: for lang in ["c", "cc", "m", "mm"]: self.compiled_headers[lang] = gyp_path_to_build_output( self.header, lang ) self.header = gyp_path_to_build_path(self.header) def _CompiledHeader(self, lang, arch): assert self.compile_headers h = self.compiled_headers[lang] if arch: h += "." 
        + arch
        return h

    def GetInclude(self, lang, arch=None):
        """Gets the cflags to include the prefix header for language |lang|."""
        # Prefer the precompiled (.gch) header when precompilation is enabled
        # for this language; fall back to the raw header, then to nothing.
        if self.compile_headers and lang in self.compiled_headers:
            return "-include %s" % self._CompiledHeader(lang, arch)
        elif self.header:
            return "-include %s" % self.header
        else:
            return ""

    def _Gch(self, lang, arch):
        """Returns the actual file name of the prefix header for language |lang|."""
        assert self.compile_headers
        return self._CompiledHeader(lang, arch) + ".gch"

    def GetObjDependencies(self, sources, objs, arch=None):
        """Given a list of source files and the corresponding object files, returns
        a list of (source, object, gch) tuples, where |gch| is the build-directory
        relative path to the gch file each object file depends on.  |compilable[i]|
        has to be the source file belonging to |objs[i]|."""
        if not self.header or not self.compile_headers:
            return []

        result = []
        for source, obj in zip(sources, objs):
            ext = os.path.splitext(source)[1]
            # Map source extension to the pch language bucket; unknown
            # extensions (headers, resources, ...) get no gch dependency.
            lang = {
                ".c": "c",
                ".cpp": "cc",
                ".cc": "cc",
                ".cxx": "cc",
                ".m": "m",
                ".mm": "mm",
            }.get(ext, None)
            if lang:
                result.append((source, obj, self._Gch(lang, arch)))
        return result

    def GetPchBuildCommands(self, arch=None):
        """Returns [(path_to_gch, language_flag, language, header)].
        |path_to_gch| and |header| are relative to the build directory.
        """
        if not self.header or not self.compile_headers:
            return []
        # One precompiled header per language, since gch files for different
        # languages are not compatible with each other.
        return [
            (self._Gch("c", arch), "-x c-header", "c", self.header),
            (self._Gch("cc", arch), "-x c++-header", "cc", self.header),
            (self._Gch("m", arch), "-x objective-c-header", "m", self.header),
            (self._Gch("mm", arch), "-x objective-c++-header", "mm", self.header),
        ]


def XcodeVersion():
    """Returns a tuple of version and build version of installed Xcode."""
    # `xcodebuild -version` output looks like
    #    Xcode 4.6.3
    #    Build version 4H1503
    # or like
    #    Xcode 3.2.6
    #    Component versions: DevToolsCore-1809.0; DevToolsSupport-1806.0
    #    BuildVersion: 10M2518
    # Convert that to ('0463', '4H1503') or ('0326', '10M2518').
global XCODE_VERSION_CACHE if XCODE_VERSION_CACHE: return XCODE_VERSION_CACHE version = "" build = "" try: version_list = GetStdoutQuiet(["xcodebuild", "-version"]).splitlines() # In some circumstances xcodebuild exits 0 but doesn't return # the right results; for example, a user on 10.7 or 10.8 with # a bogus path set via xcode-select # In that case this may be a CLT-only install so fall back to # checking that version. if len(version_list) < 2: raise GypError("xcodebuild returned unexpected results") version = version_list[0].split()[-1] # Last word on first line build = version_list[-1].split()[-1] # Last word on last line except GypError: # Xcode not installed so look for XCode Command Line Tools version = CLTVersion() # macOS Catalina returns 11.0.0.0.1.1567737322 if not version: raise GypError("No Xcode or CLT version detected!") # Be careful to convert "4.2.3" to "0423" and "11.0.0" to "1100": version = version.split(".")[:3] # Just major, minor, micro version[0] = version[0].zfill(2) # Add a leading zero if major is one digit version = ("".join(version) + "00")[:4] # Limit to exactly four characters XCODE_VERSION_CACHE = (version, build) return XCODE_VERSION_CACHE # This function ported from the logic in Homebrew's CLT version check def CLTVersion(): """Returns the version of command-line tools from pkgutil.""" # pkgutil output looks like # package-id: com.apple.pkg.CLTools_Executables # version: 5.0.1.0.1.1382131676 # volume: / # location: / # install-time: 1382544035 # groups: com.apple.FindSystemFiles.pkg-group # com.apple.DevToolsBoth.pkg-group # com.apple.DevToolsNonRelocatableShared.pkg-group STANDALONE_PKG_ID = "com.apple.pkg.DeveloperToolsCLILeo" FROM_XCODE_PKG_ID = "com.apple.pkg.DeveloperToolsCLI" MAVERICKS_PKG_ID = "com.apple.pkg.CLTools_Executables" regex = re.compile("version: (?P<version>.+)") for key in [MAVERICKS_PKG_ID, STANDALONE_PKG_ID, FROM_XCODE_PKG_ID]: try: output = GetStdout(["/usr/sbin/pkgutil", "--pkg-info", key]) return 
            re.search(regex, output).groupdict()["version"]
        except GypError:
            continue

    # Fallback: newer CLT installs appear in the softwareupdate history
    # rather than in pkgutil's receipt database.
    regex = re.compile(r'Command Line Tools for Xcode\s+(?P<version>\S+)')
    try:
        output = GetStdout(["/usr/sbin/softwareupdate", "--history"])
        # NOTE(review): if the regex does not match, re.search returns None and
        # this raises AttributeError, which the except clause below (GypError
        # only) does not catch — confirm whether that is intended.
        return re.search(regex, output).groupdict()["version"]
    except GypError:
        return None


def GetStdoutQuiet(cmdlist):
    """Returns the content of standard output returned by invoking |cmdlist|.
    Ignores the stderr.
    Raises |GypError| if the command return with a non-zero return code."""
    job = subprocess.Popen(cmdlist, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out = job.communicate()[0]
    # communicate() returns bytes; decode on Python 3 so callers get str.
    if PY3:
        out = out.decode("utf-8")
    if job.returncode != 0:
        raise GypError("Error %d running %s" % (job.returncode, cmdlist[0]))
    return out.rstrip("\n")


def GetStdout(cmdlist):
    """Returns the content of standard output returned by invoking |cmdlist|.
    Raises |GypError| if the command return with a non-zero return code."""
    # Unlike GetStdoutQuiet, stderr is not captured (it reaches the console)
    # and the captured stdout is echoed to stderr on failure.
    job = subprocess.Popen(cmdlist, stdout=subprocess.PIPE)
    out = job.communicate()[0]
    if PY3:
        out = out.decode("utf-8")
    if job.returncode != 0:
        sys.stderr.write(out + "\n")
        raise GypError("Error %d running %s" % (job.returncode, cmdlist[0]))
    return out.rstrip("\n")


def MergeGlobalXcodeSettingsToSpec(global_dict, spec):
    """Merges the global xcode_settings dictionary into each configuration of the
    target represented by spec. For keys that are both in the global and the local
    xcode_settings dict, the local key gets precedence.
    """
    # The xcode generator special-cases global xcode_settings and does something
    # that amounts to merging in the global xcode_settings into each local
    # xcode_settings dict.
    global_xcode_settings = global_dict.get("xcode_settings", {})
    for config in spec["configurations"].values():
        if "xcode_settings" in config:
            # Copy-then-update so local keys override global ones without
            # mutating the shared global dict.
            new_settings = global_xcode_settings.copy()
            new_settings.update(config["xcode_settings"])
            config["xcode_settings"] = new_settings


def IsMacBundle(flavor, spec):
    """Returns if |spec| should be treated as a bundle.
Bundles are directories with a certain subdirectory structure, instead of just a single file. Bundle rules do not produce a binary but also package resources into that directory.""" is_mac_bundle = ( int(spec.get("mac_xctest_bundle", 0)) != 0 or int(spec.get("mac_xcuitest_bundle", 0)) != 0 or (int(spec.get("mac_bundle", 0)) != 0 and flavor == "mac") ) if is_mac_bundle: assert spec["type"] != "none", ( 'mac_bundle targets cannot have type none (target "%s")' % spec["target_name"] ) return is_mac_bundle def GetMacBundleResources(product_dir, xcode_settings, resources): """Yields (output, resource) pairs for every resource in |resources|. Only call this for mac bundle targets. Args: product_dir: Path to the directory containing the output bundle, relative to the build directory. xcode_settings: The XcodeSettings of the current target. resources: A list of bundle resources, relative to the build directory. """ dest = os.path.join(product_dir, xcode_settings.GetBundleResourceFolder()) for res in resources: output = dest # The make generator doesn't support it, so forbid it everywhere # to keep the generators more interchangeable. assert " " not in res, "Spaces in resource filenames not supported (%s)" % res # Split into (path,file). res_parts = os.path.split(res) # Now split the path into (prefix,maybe.lproj). lproj_parts = os.path.split(res_parts[0]) # If the resource lives in a .lproj bundle, add that to the destination. if lproj_parts[1].endswith(".lproj"): output = os.path.join(output, lproj_parts[1]) output = os.path.join(output, res_parts[1]) # Compiled XIB files are referred to by .nib. if output.endswith(".xib"): output = os.path.splitext(output)[0] + ".nib" # Compiled storyboard files are referred to by .storyboardc. 
if output.endswith(".storyboard"): output = os.path.splitext(output)[0] + ".storyboardc" yield output, res def GetMacInfoPlist(product_dir, xcode_settings, gyp_path_to_build_path): """Returns (info_plist, dest_plist, defines, extra_env), where: * |info_plist| is the source plist path, relative to the build directory, * |dest_plist| is the destination plist path, relative to the build directory, * |defines| is a list of preprocessor defines (empty if the plist shouldn't be preprocessed, * |extra_env| is a dict of env variables that should be exported when invoking |mac_tool copy-info-plist|. Only call this for mac bundle targets. Args: product_dir: Path to the directory containing the output bundle, relative to the build directory. xcode_settings: The XcodeSettings of the current target. gyp_to_build_path: A function that converts paths relative to the current gyp file to paths relative to the build directory. """ info_plist = xcode_settings.GetPerTargetSetting("INFOPLIST_FILE") if not info_plist: return None, None, [], {} # The make generator doesn't support it, so forbid it everywhere # to keep the generators more interchangeable. assert " " not in info_plist, ( "Spaces in Info.plist filenames not supported (%s)" % info_plist ) info_plist = gyp_path_to_build_path(info_plist) # If explicitly set to preprocess the plist, invoke the C preprocessor and # specify any defines as -D flags. if ( xcode_settings.GetPerTargetSetting("INFOPLIST_PREPROCESS", default="NO") == "YES" ): # Create an intermediate file based on the path. 
defines = shlex.split( xcode_settings.GetPerTargetSetting( "INFOPLIST_PREPROCESSOR_DEFINITIONS", default="" ) ) else: defines = [] dest_plist = os.path.join(product_dir, xcode_settings.GetBundlePlistPath()) extra_env = xcode_settings.GetPerTargetSettings() return info_plist, dest_plist, defines, extra_env def _GetXcodeEnv( xcode_settings, built_products_dir, srcroot, configuration, additional_settings=None ): """Return the environment variables that Xcode would set. See http://developer.apple.com/library/mac/#documentation/DeveloperTools/Reference/XcodeBuildSettingRef/1-Build_Setting_Reference/build_setting_ref.html#//apple_ref/doc/uid/TP40003931-CH3-SW153 for a full list. Args: xcode_settings: An XcodeSettings object. If this is None, this function returns an empty dict. built_products_dir: Absolute path to the built products dir. srcroot: Absolute path to the source root. configuration: The build configuration name. additional_settings: An optional dict with more values to add to the result. """ if not xcode_settings: return {} # This function is considered a friend of XcodeSettings, so let it reach into # its implementation details. spec = xcode_settings.spec # These are filled in on an as-needed basis. 
env = { "BUILT_FRAMEWORKS_DIR": built_products_dir, "BUILT_PRODUCTS_DIR": built_products_dir, "CONFIGURATION": configuration, "PRODUCT_NAME": xcode_settings.GetProductName(), # For FULL_PRODUCT_NAME see: # /Developer/Platforms/MacOSX.platform/Developer/Library/Xcode/Specifications/MacOSX\ Product\ Types.xcspec # noqa: E501 "SRCROOT": srcroot, "SOURCE_ROOT": "${SRCROOT}", # This is not true for static libraries, but currently the env is only # written for bundles: "TARGET_BUILD_DIR": built_products_dir, "TEMP_DIR": "${TMPDIR}", "XCODE_VERSION_ACTUAL": XcodeVersion()[0], } if xcode_settings.GetPerConfigSetting("SDKROOT", configuration): env["SDKROOT"] = xcode_settings._SdkPath(configuration) else: env["SDKROOT"] = "" if xcode_settings.mac_toolchain_dir: env["DEVELOPER_DIR"] = xcode_settings.mac_toolchain_dir if spec["type"] in ( "executable", "static_library", "shared_library", "loadable_module", ): env["EXECUTABLE_NAME"] = xcode_settings.GetExecutableName() env["EXECUTABLE_PATH"] = xcode_settings.GetExecutablePath() env["FULL_PRODUCT_NAME"] = xcode_settings.GetFullProductName() mach_o_type = xcode_settings.GetMachOType() if mach_o_type: env["MACH_O_TYPE"] = mach_o_type env["PRODUCT_TYPE"] = xcode_settings.GetProductType() if xcode_settings._IsBundle(): # xcodeproj_file.py sets the same Xcode subfolder value for this as for # FRAMEWORKS_FOLDER_PATH so Xcode builds will actually use FFP's value. 
env["BUILT_FRAMEWORKS_DIR"] = os.path.join( built_products_dir + os.sep + xcode_settings.GetBundleFrameworksFolderPath() ) env["CONTENTS_FOLDER_PATH"] = xcode_settings.GetBundleContentsFolderPath() env["EXECUTABLE_FOLDER_PATH"] = xcode_settings.GetBundleExecutableFolderPath() env[ "UNLOCALIZED_RESOURCES_FOLDER_PATH" ] = xcode_settings.GetBundleResourceFolder() env["JAVA_FOLDER_PATH"] = xcode_settings.GetBundleJavaFolderPath() env["FRAMEWORKS_FOLDER_PATH"] = xcode_settings.GetBundleFrameworksFolderPath() env[ "SHARED_FRAMEWORKS_FOLDER_PATH" ] = xcode_settings.GetBundleSharedFrameworksFolderPath() env[ "SHARED_SUPPORT_FOLDER_PATH" ] = xcode_settings.GetBundleSharedSupportFolderPath() env["PLUGINS_FOLDER_PATH"] = xcode_settings.GetBundlePlugInsFolderPath() env["XPCSERVICES_FOLDER_PATH"] = xcode_settings.GetBundleXPCServicesFolderPath() env["INFOPLIST_PATH"] = xcode_settings.GetBundlePlistPath() env["WRAPPER_NAME"] = xcode_settings.GetWrapperName() install_name = xcode_settings.GetInstallName() if install_name: env["LD_DYLIB_INSTALL_NAME"] = install_name install_name_base = xcode_settings.GetInstallNameBase() if install_name_base: env["DYLIB_INSTALL_NAME_BASE"] = install_name_base xcode_version, _ = XcodeVersion() if xcode_version >= "0500" and not env.get("SDKROOT"): sdk_root = xcode_settings._SdkRoot(configuration) if not sdk_root: sdk_root = xcode_settings._XcodeSdkPath("") if sdk_root is None: sdk_root = "" env["SDKROOT"] = sdk_root if not additional_settings: additional_settings = {} else: # Flatten lists to strings. 
        for k in additional_settings:
            if not isinstance(additional_settings[k], str):
                additional_settings[k] = " ".join(additional_settings[k])
    additional_settings.update(env)

    for k in additional_settings:
        additional_settings[k] = _NormalizeEnvVarReferences(additional_settings[k])

    return additional_settings


def _NormalizeEnvVarReferences(str):
    """Takes a string containing variable references in the form ${FOO}, $(FOO),
    or $FOO, and returns a string with all variable references in the form ${FOO}.
    """
    # NOTE(review): the parameter name shadows the builtin |str|; harmless
    # inside this function but worth renaming in a future cleanup.
    # $FOO -> ${FOO}
    str = re.sub(r"\$([a-zA-Z_][a-zA-Z0-9_]*)", r"${\1}", str)

    # $(FOO) -> ${FOO}
    matches = re.findall(r"(\$\(([a-zA-Z0-9\-_]+)\))", str)
    for match in matches:
        to_replace, variable = match
        # NOTE(review): |match| is a (to_replace, variable) tuple, so
        # '"$(" not in match' tests tuple *membership*, not substring
        # containment — this assert likely never fires as intended; confirm.
        assert "$(" not in match, "$($(FOO)) variables not supported: " + match
        str = str.replace(to_replace, "${" + variable + "}")

    return str


def ExpandEnvVars(string, expansions):
    """Expands ${VARIABLES}, $(VARIABLES), and $VARIABLES in string per the
    expansions list. If the variable expands to something that references
    another variable, this variable is expanded as well if it's in env --
    until no variables present in env are left."""
    # |expansions| is traversed in reverse; combined with the topological
    # ordering produced below, this expands dependent variables correctly.
    for k, v in reversed(expansions):
        string = string.replace("${" + k + "}", v)
        string = string.replace("$(" + k + ")", v)
        string = string.replace("$" + k, v)
    return string


def _TopologicallySortedEnvVarKeys(env):
    """Takes a dict |env| whose values are strings that can refer to other keys,
    for example env['foo'] = '$(bar) and $(baz)'. Returns a list L of all keys of
    env such that key2 is after key1 in L if env[key2] refers to env[key1].

    Throws an Exception in case of dependency cycles.
    """
    # Since environment variables can refer to other variables, the evaluation
    # order is important. Below is the logic to compute the dependency graph
    # and sort it.
    regex = re.compile(r"\$\{([a-zA-Z0-9\-_]+)\}")

    def GetEdges(node):
        # Use a definition of edges such that user_of_variable -> used_variable.
# This happens to be easier in this case, since a variable's # definition contains all variables it references in a single string. # We can then reverse the result of the topological sort at the end. # Since: reverse(topsort(DAG)) = topsort(reverse_edges(DAG)) matches = set([v for v in regex.findall(env[node]) if v in env]) for dependee in matches: assert "${" not in dependee, "Nested variables not supported: " + dependee return matches try: # Topologically sort, and then reverse, because we used an edge definition # that's inverted from the expected result of this function (see comment # above). order = gyp.common.TopologicallySorted(env.keys(), GetEdges) order.reverse() return order except gyp.common.CycleError as e: raise GypError( "Xcode environment variables are cyclically dependent: " + str(e.nodes) ) def GetSortedXcodeEnv( xcode_settings, built_products_dir, srcroot, configuration, additional_settings=None ): env = _GetXcodeEnv( xcode_settings, built_products_dir, srcroot, configuration, additional_settings ) return [(key, env[key]) for key in _TopologicallySortedEnvVarKeys(env)] def GetSpecPostbuildCommands(spec, quiet=False): """Returns the list of postbuilds explicitly defined on |spec|, in a form executable by a shell.""" postbuilds = [] for postbuild in spec.get("postbuilds", []): if not quiet: postbuilds.append( "echo POSTBUILD\\(%s\\) %s" % (spec["target_name"], postbuild["postbuild_name"]) ) postbuilds.append(gyp.common.EncodePOSIXShellList(postbuild["action"])) return postbuilds def _HasIOSTarget(targets): """Returns true if any target contains the iOS specific key IPHONEOS_DEPLOYMENT_TARGET.""" for target_dict in targets.values(): for config in target_dict["configurations"].values(): if config.get("xcode_settings", {}).get("IPHONEOS_DEPLOYMENT_TARGET"): return True return False def _AddIOSDeviceConfigurations(targets): """Clone all targets and append -iphoneos to the name. 
Configure these targets to build for iOS devices and use correct architectures for those builds.""" for target_dict in targets.values(): toolset = target_dict["toolset"] configs = target_dict["configurations"] for config_name, simulator_config_dict in dict(configs).items(): iphoneos_config_dict = copy.deepcopy(simulator_config_dict) configs[config_name + "-iphoneos"] = iphoneos_config_dict configs[config_name + "-iphonesimulator"] = simulator_config_dict if toolset == "target": simulator_config_dict["xcode_settings"]["SDKROOT"] = "iphonesimulator" iphoneos_config_dict["xcode_settings"]["SDKROOT"] = "iphoneos" return targets def CloneConfigurationForDeviceAndEmulator(target_dicts): """If |target_dicts| contains any iOS targets, automatically create -iphoneos targets for iOS device builds.""" if _HasIOSTarget(target_dicts): return _AddIOSDeviceConfigurations(target_dicts) return target_dicts
codeparrot/github-code-clean
# pylint: disable=E1101,E1103,W0232 import datetime import warnings from functools import partial from sys import getsizeof import numpy as np from pandas._libs import index as libindex, lib, Timestamp from pandas.compat import range, zip, lrange, lzip, map from pandas.compat.numpy import function as nv from pandas import compat from pandas.core.dtypes.common import ( _ensure_int64, _ensure_platform_int, is_object_dtype, is_iterator, is_list_like, is_scalar) from pandas.core.dtypes.missing import isna, array_equivalent from pandas.errors import PerformanceWarning, UnsortedIndexError from pandas.core.common import (_values_from_object, is_bool_indexer, is_null_slice, is_true_slices) import pandas.core.base as base from pandas.util._decorators import (Appender, cache_readonly, deprecate, deprecate_kwarg) import pandas.core.common as com import pandas.core.missing as missing import pandas.core.algorithms as algos from pandas.io.formats.printing import pprint_thing from pandas.core.config import get_option from pandas.core.indexes.base import ( Index, _ensure_index, _get_na_value, InvalidIndexError, _index_shared_docs) from pandas.core.indexes.frozen import ( FrozenNDArray, FrozenList, _ensure_frozen) import pandas.core.indexes.base as ibase _index_doc_kwargs = dict(ibase._index_doc_kwargs) _index_doc_kwargs.update( dict(klass='MultiIndex', target_klass='MultiIndex or list of tuples')) class MultiIndex(Index): """ A multi-level, or hierarchical, index object for pandas objects Parameters ---------- levels : sequence of arrays The unique labels for each level labels : sequence of arrays Integers for each level designating which label at each location sortorder : optional int Level of sortedness (must be lexicographically sorted by that level) names : optional sequence of objects Names for each of the index levels. 
(name is accepted for compat) copy : boolean, default False Copy the meta-data verify_integrity : boolean, default True Check that the levels/labels are consistent and valid """ # initialize to zero-length tuples to make everything work _typ = 'multiindex' _names = FrozenList() _levels = FrozenList() _labels = FrozenList() _comparables = ['names'] rename = Index.set_names def __new__(cls, levels=None, labels=None, sortorder=None, names=None, copy=False, verify_integrity=True, _set_identity=True, name=None, **kwargs): # compat with Index if name is not None: names = name if levels is None or labels is None: raise TypeError("Must pass both levels and labels") if len(levels) != len(labels): raise ValueError('Length of levels and labels must be the same.') if len(levels) == 0: raise ValueError('Must pass non-zero number of levels/labels') if len(levels) == 1: if names: name = names[0] else: name = None return Index(levels[0], name=name, copy=True).take(labels[0]) result = object.__new__(MultiIndex) # we've already validated levels and labels, so shortcut here result._set_levels(levels, copy=copy, validate=False) result._set_labels(labels, copy=copy, validate=False) if names is not None: # handles name validation result._set_names(names) if sortorder is not None: result.sortorder = int(sortorder) else: result.sortorder = sortorder if verify_integrity: result._verify_integrity() if _set_identity: result._reset_identity() return result def _verify_integrity(self, labels=None, levels=None): """ Parameters ---------- labels : optional list Labels to check for validity. Defaults to current labels. levels : optional list Levels to check for validity. Defaults to current levels. Raises ------ ValueError * if length of levels and labels don't match or any label would exceed level bounds """ # NOTE: Currently does not check, among other things, that cached # nlevels matches nor that sortorder matches actually sortorder. 
labels = labels or self.labels levels = levels or self.levels if len(levels) != len(labels): raise ValueError("Length of levels and labels must match. NOTE:" " this index is in an inconsistent state.") label_length = len(self.labels[0]) for i, (level, label) in enumerate(zip(levels, labels)): if len(label) != label_length: raise ValueError("Unequal label lengths: %s" % ([len(lab) for lab in labels])) if len(label) and label.max() >= len(level): raise ValueError("On level %d, label max (%d) >= length of" " level (%d). NOTE: this index is in an" " inconsistent state" % (i, label.max(), len(level))) def _get_levels(self): return self._levels def _set_levels(self, levels, level=None, copy=False, validate=True, verify_integrity=False): # This is NOT part of the levels property because it should be # externally not allowed to set levels. User beware if you change # _levels directly if validate and len(levels) == 0: raise ValueError('Must set non-zero number of levels.') if validate and level is None and len(levels) != self.nlevels: raise ValueError('Length of levels must match number of levels.') if validate and level is not None and len(levels) != len(level): raise ValueError('Length of levels must match length of level.') if level is None: new_levels = FrozenList( _ensure_index(lev, copy=copy)._shallow_copy() for lev in levels) else: level = [self._get_level_number(l) for l in level] new_levels = list(self._levels) for l, v in zip(level, levels): new_levels[l] = _ensure_index(v, copy=copy)._shallow_copy() new_levels = FrozenList(new_levels) if verify_integrity: self._verify_integrity(levels=new_levels) names = self.names self._levels = new_levels if any(names): self._set_names(names) self._tuples = None self._reset_cache() def set_levels(self, levels, level=None, inplace=False, verify_integrity=True): """ Set new levels on MultiIndex. Defaults to returning new index. 
Parameters ---------- levels : sequence or list of sequence new level(s) to apply level : int, level name, or sequence of int/level names (default None) level(s) to set (None for all levels) inplace : bool if True, mutates in place verify_integrity : bool (default True) if True, checks that levels and labels are compatible Returns ------- new index (of same type and class...etc) Examples -------- >>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'), (2, u'one'), (2, u'two')], names=['foo', 'bar']) >>> idx.set_levels([['a','b'], [1,2]]) MultiIndex(levels=[[u'a', u'b'], [1, 2]], labels=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[u'foo', u'bar']) >>> idx.set_levels(['a','b'], level=0) MultiIndex(levels=[[u'a', u'b'], [u'one', u'two']], labels=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[u'foo', u'bar']) >>> idx.set_levels(['a','b'], level='bar') MultiIndex(levels=[[1, 2], [u'a', u'b']], labels=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[u'foo', u'bar']) >>> idx.set_levels([['a','b'], [1,2]], level=[0,1]) MultiIndex(levels=[[u'a', u'b'], [1, 2]], labels=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[u'foo', u'bar']) """ if level is not None and not is_list_like(level): if not is_list_like(levels): raise TypeError("Levels must be list-like") if is_list_like(levels[0]): raise TypeError("Levels must be list-like") level = [level] levels = [levels] elif level is None or is_list_like(level): if not is_list_like(levels) or not is_list_like(levels[0]): raise TypeError("Levels must be list of lists-like") if inplace: idx = self else: idx = self._shallow_copy() idx._reset_identity() idx._set_levels(levels, level=level, validate=True, verify_integrity=verify_integrity) if not inplace: return idx # remove me in 0.14 and change to read only property __set_levels = deprecate("setting `levels` directly", partial(set_levels, inplace=True, verify_integrity=True), alt_name="set_levels") levels = property(fget=_get_levels, fset=__set_levels) def _get_labels(self): return self._labels def _set_labels(self, 
labels, level=None, copy=False, validate=True, verify_integrity=False): if validate and level is None and len(labels) != self.nlevels: raise ValueError("Length of labels must match number of levels") if validate and level is not None and len(labels) != len(level): raise ValueError('Length of labels must match length of levels.') if level is None: new_labels = FrozenList( _ensure_frozen(lab, lev, copy=copy)._shallow_copy() for lev, lab in zip(self.levels, labels)) else: level = [self._get_level_number(l) for l in level] new_labels = list(self._labels) for l, lev, lab in zip(level, self.levels, labels): new_labels[l] = _ensure_frozen( lab, lev, copy=copy)._shallow_copy() new_labels = FrozenList(new_labels) if verify_integrity: self._verify_integrity(labels=new_labels) self._labels = new_labels self._tuples = None self._reset_cache() def set_labels(self, labels, level=None, inplace=False, verify_integrity=True): """ Set new labels on MultiIndex. Defaults to returning new index. Parameters ---------- labels : sequence or list of sequence new labels to apply level : int, level name, or sequence of int/level names (default None) level(s) to set (None for all levels) inplace : bool if True, mutates in place verify_integrity : bool (default True) if True, checks that levels and labels are compatible Returns ------- new index (of same type and class...etc) Examples -------- >>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'), (2, u'one'), (2, u'two')], names=['foo', 'bar']) >>> idx.set_labels([[1,0,1,0], [0,0,1,1]]) MultiIndex(levels=[[1, 2], [u'one', u'two']], labels=[[1, 0, 1, 0], [0, 0, 1, 1]], names=[u'foo', u'bar']) >>> idx.set_labels([1,0,1,0], level=0) MultiIndex(levels=[[1, 2], [u'one', u'two']], labels=[[1, 0, 1, 0], [0, 1, 0, 1]], names=[u'foo', u'bar']) >>> idx.set_labels([0,0,1,1], level='bar') MultiIndex(levels=[[1, 2], [u'one', u'two']], labels=[[0, 0, 1, 1], [0, 0, 1, 1]], names=[u'foo', u'bar']) >>> idx.set_labels([[1,0,1,0], [0,0,1,1]], level=[0,1]) 
MultiIndex(levels=[[1, 2], [u'one', u'two']], labels=[[1, 0, 1, 0], [0, 0, 1, 1]], names=[u'foo', u'bar']) """ if level is not None and not is_list_like(level): if not is_list_like(labels): raise TypeError("Labels must be list-like") if is_list_like(labels[0]): raise TypeError("Labels must be list-like") level = [level] labels = [labels] elif level is None or is_list_like(level): if not is_list_like(labels) or not is_list_like(labels[0]): raise TypeError("Labels must be list of lists-like") if inplace: idx = self else: idx = self._shallow_copy() idx._reset_identity() idx._set_labels(labels, level=level, verify_integrity=verify_integrity) if not inplace: return idx # remove me in 0.14 and change to readonly property __set_labels = deprecate("setting labels directly", partial(set_labels, inplace=True, verify_integrity=True), alt_name="set_labels") labels = property(fget=_get_labels, fset=__set_labels) def copy(self, names=None, dtype=None, levels=None, labels=None, deep=False, _set_identity=False, **kwargs): """ Make a copy of this object. Names, dtype, levels and labels can be passed and will be set on new copy. Parameters ---------- names : sequence, optional dtype : numpy dtype or pandas type, optional levels : sequence, optional labels : sequence, optional Returns ------- copy : MultiIndex Notes ----- In most cases, there should be no functional difference from using ``deep``, but if ``deep`` is passed it will attempt to deepcopy. This could be potentially expensive on large MultiIndex objects. 
""" name = kwargs.get('name') names = self._validate_names(name=name, names=names, deep=deep) if deep: from copy import deepcopy if levels is None: levels = deepcopy(self.levels) if labels is None: labels = deepcopy(self.labels) else: if levels is None: levels = self.levels if labels is None: labels = self.labels return MultiIndex(levels=levels, labels=labels, names=names, sortorder=self.sortorder, verify_integrity=False, _set_identity=_set_identity) def __array__(self, dtype=None): """ the array interface, return my values """ return self.values def view(self, cls=None): """ this is defined as a copy with the same identity """ result = self.copy() result._id = self._id return result def _shallow_copy_with_infer(self, values=None, **kwargs): # On equal MultiIndexes the difference is empty. # Therefore, an empty MultiIndex is returned GH13490 if len(values) == 0: return MultiIndex(levels=[[] for _ in range(self.nlevels)], labels=[[] for _ in range(self.nlevels)], **kwargs) return self._shallow_copy(values, **kwargs) @Appender(_index_shared_docs['_shallow_copy']) def _shallow_copy(self, values=None, **kwargs): if values is not None: if 'name' in kwargs: kwargs['names'] = kwargs.pop('name', None) # discards freq kwargs.pop('freq', None) return MultiIndex.from_tuples(values, **kwargs) return self.view() @cache_readonly def dtype(self): return np.dtype('O') def _is_memory_usage_qualified(self): """ return a boolean if we need a qualified .info display """ def f(l): return 'mixed' in l or 'string' in l or 'unicode' in l return any([f(l) for l in self._inferred_type_levels]) @Appender(Index.memory_usage.__doc__) def memory_usage(self, deep=False): # we are overwriting our base class to avoid # computing .values here which could materialize # a tuple representation uncessarily return self._nbytes(deep) @cache_readonly def nbytes(self): """ return the number of bytes in the underlying data """ return self._nbytes(False) def _nbytes(self, deep=False): """ return the number 
of bytes in the underlying data deeply introspect the level data if deep=True include the engine hashtable *this is in internal routine* """ level_nbytes = sum((i.memory_usage(deep=deep) for i in self.levels)) label_nbytes = sum((i.nbytes for i in self.labels)) names_nbytes = sum((getsizeof(i) for i in self.names)) result = level_nbytes + label_nbytes + names_nbytes # include our engine hashtable result += self._engine.sizeof(deep=deep) return result def _format_attrs(self): """ Return a list of tuples of the (attr,formatted_value) """ attrs = [ ('levels', ibase.default_pprint(self._levels, max_seq_items=False)), ('labels', ibase.default_pprint(self._labels, max_seq_items=False))] if not all(name is None for name in self.names): attrs.append(('names', ibase.default_pprint(self.names))) if self.sortorder is not None: attrs.append(('sortorder', ibase.default_pprint(self.sortorder))) return attrs def _format_space(self): return "\n%s" % (' ' * (len(self.__class__.__name__) + 1)) def _format_data(self): # we are formatting thru the attributes return None def __len__(self): return len(self.labels[0]) def _get_names(self): return FrozenList(level.name for level in self.levels) def _set_names(self, names, level=None, validate=True): """ sets names on levels. WARNING: mutates! 
Note that you generally want to set this *after* changing levels, so that it only acts on copies """ # GH 15110 # Don't allow a single string for names in a MultiIndex if names is not None and not is_list_like(names): raise ValueError('Names should be list-like for a MultiIndex') names = list(names) if validate and level is not None and len(names) != len(level): raise ValueError('Length of names must match length of level.') if validate and level is None and len(names) != self.nlevels: raise ValueError('Length of names must match number of levels in ' 'MultiIndex.') if level is None: level = range(self.nlevels) else: level = [self._get_level_number(l) for l in level] # set the name for l, name in zip(level, names): self.levels[l].rename(name, inplace=True) names = property(fset=_set_names, fget=_get_names, doc="Names of levels in MultiIndex") def _reference_duplicate_name(self, name): """ Returns True if the name refered to in self.names is duplicated. """ # count the times name equals an element in self.names. 
return sum(name == n for n in self.names) > 1 def _format_native_types(self, na_rep='nan', **kwargs): new_levels = [] new_labels = [] # go through the levels and format them for level, label in zip(self.levels, self.labels): level = level._format_native_types(na_rep=na_rep, **kwargs) # add nan values, if there are any mask = (label == -1) if mask.any(): nan_index = len(level) level = np.append(level, na_rep) label = label.values() label[mask] = nan_index new_levels.append(level) new_labels.append(label) # reconstruct the multi-index mi = MultiIndex(levels=new_levels, labels=new_labels, names=self.names, sortorder=self.sortorder, verify_integrity=False) return mi.values @Appender(_index_shared_docs['_get_grouper_for_level']) def _get_grouper_for_level(self, mapper, level): indexer = self.labels[level] level_index = self.levels[level] if mapper is not None: # Handle group mapping function and return level_values = self.levels[level].take(indexer) grouper = level_values.map(mapper) return grouper, None, None labels, uniques = algos.factorize(indexer, sort=True) if len(uniques) > 0 and uniques[0] == -1: # Handle NAs mask = indexer != -1 ok_labels, uniques = algos.factorize(indexer[mask], sort=True) labels = np.empty(len(indexer), dtype=indexer.dtype) labels[mask] = ok_labels labels[~mask] = -1 if len(uniques) < len(level_index): # Remove unobserved levels from level_index level_index = level_index.take(uniques) grouper = level_index.take(labels) return grouper, labels, level_index @property def _constructor(self): return MultiIndex.from_tuples @cache_readonly def inferred_type(self): return 'mixed' @staticmethod def _from_elements(values, labels=None, levels=None, names=None, sortorder=None): return MultiIndex(levels, labels, names, sortorder=sortorder) def _get_level_number(self, level): try: count = self.names.count(level) if count > 1: raise ValueError('The name %s occurs multiple times, use a ' 'level number' % level) level = self.names.index(level) except 
ValueError: if not isinstance(level, int): raise KeyError('Level %s not found' % str(level)) elif level < 0: level += self.nlevels if level < 0: orig_level = level - self.nlevels raise IndexError('Too many levels: Index has only %d ' 'levels, %d is not a valid level number' % (self.nlevels, orig_level)) # Note: levels are zero-based elif level >= self.nlevels: raise IndexError('Too many levels: Index has only %d levels, ' 'not %d' % (self.nlevels, level + 1)) return level _tuples = None @cache_readonly def _engine(self): # choose our engine based on our size # the hashing based MultiIndex for larger # sizes, and the MultiIndexOjbect for smaller # xref: https://github.com/pandas-dev/pandas/pull/16324 l = len(self) if l > 10000: return libindex.MultiIndexHashEngine(lambda: self, l) return libindex.MultiIndexObjectEngine(lambda: self.values, l) @property def values(self): if self._tuples is not None: return self._tuples values = [] for lev, lab in zip(self.levels, self.labels): # Need to box timestamps, etc. box = hasattr(lev, '_box_values') # Try to minimize boxing. if box and len(lev) > len(lab): taken = lev._box_values(algos.take_1d(lev._values, lab)) elif box: taken = algos.take_1d(lev._box_values(lev._values), lab, fill_value=_get_na_value(lev.dtype.type)) else: taken = algos.take_1d(np.asarray(lev._values), lab) values.append(taken) self._tuples = lib.fast_zip(values) return self._tuples # fml @property def _is_v1(self): return False @property def _is_v2(self): return False @property def _has_complex_internals(self): # to disable groupby tricks return True @cache_readonly def is_monotonic(self): """ return if the index is monotonic increasing (only equal or increasing) values. """ return self.is_monotonic_increasing @cache_readonly def is_monotonic_increasing(self): """ return if the index is monotonic increasing (only equal or increasing) values. """ # reversed() because lexsort() wants the most significant key last. 
values = [self._get_level_values(i).values for i in reversed(range(len(self.levels)))] try: sort_order = np.lexsort(values) return Index(sort_order).is_monotonic except TypeError: # we have mixed types and np.lexsort is not happy return Index(self.values).is_monotonic @property def is_monotonic_decreasing(self): """ return if the index is monotonic decreasing (only equal or decreasing) values. """ return False @cache_readonly def is_unique(self): return not self.duplicated().any() @cache_readonly def _have_mixed_levels(self): """ return a boolean list indicated if we have mixed levels """ return ['mixed' in l for l in self._inferred_type_levels] @cache_readonly def _inferred_type_levels(self): """ return a list of the inferred types, one for each level """ return [i.inferred_type for i in self.levels] @cache_readonly def _hashed_values(self): """ return a uint64 ndarray of my hashed values """ from pandas.core.util.hashing import hash_tuples return hash_tuples(self) def _hashed_indexing_key(self, key): """ validate and return the hash for the provided key *this is internal for use for the cython routines* Paramters --------- key : string or tuple Returns ------- np.uint64 Notes ----- we need to stringify if we have mixed levels """ from pandas.core.util.hashing import hash_tuples, hash_tuple if not isinstance(key, tuple): return hash_tuples(key) if not len(key) == self.nlevels: raise KeyError def f(k, stringify): if stringify and not isinstance(k, compat.string_types): k = str(k) return k key = tuple([f(k, stringify) for k, stringify in zip(key, self._have_mixed_levels)]) return hash_tuple(key) @Appender(base._shared_docs['duplicated'] % _index_doc_kwargs) def duplicated(self, keep='first'): from pandas.core.sorting import get_group_index from pandas._libs.hashtable import duplicated_int64 shape = map(len, self.levels) ids = get_group_index(self.labels, shape, sort=False, xnull=False) return duplicated_int64(ids, keep) @Appender(ibase._index_shared_docs['fillna']) 
def fillna(self, value=None, downcast=None): # isna is not implemented for MultiIndex raise NotImplementedError('isna is not defined for MultiIndex') @Appender(_index_shared_docs['dropna']) def dropna(self, how='any'): nans = [label == -1 for label in self.labels] if how == 'any': indexer = np.any(nans, axis=0) elif how == 'all': indexer = np.all(nans, axis=0) else: raise ValueError("invalid how option: {0}".format(how)) new_labels = [label[~indexer] for label in self.labels] return self.copy(labels=new_labels, deep=True) def get_value(self, series, key): # somewhat broken encapsulation from pandas.core.indexing import maybe_droplevels # Label-based s = _values_from_object(series) k = _values_from_object(key) def _try_mi(k): # TODO: what if a level contains tuples?? loc = self.get_loc(k) new_values = series._values[loc] new_index = self[loc] new_index = maybe_droplevels(new_index, k) return series._constructor(new_values, index=new_index, name=series.name).__finalize__(self) try: return self._engine.get_value(s, k) except KeyError as e1: try: return _try_mi(key) except KeyError: pass try: return libindex.get_value_at(s, k) except IndexError: raise except TypeError: # generator/iterator-like if is_iterator(key): raise InvalidIndexError(key) else: raise e1 except Exception: # pragma: no cover raise e1 except TypeError: # a Timestamp will raise a TypeError in a multi-index # rather than a KeyError, try it here # note that a string that 'looks' like a Timestamp will raise # a KeyError! 
(GH5725) if (isinstance(key, (datetime.datetime, np.datetime64)) or (compat.PY3 and isinstance(key, compat.string_types))): try: return _try_mi(key) except (KeyError): raise except: pass try: return _try_mi(Timestamp(key)) except: pass raise InvalidIndexError(key) def _get_level_values(self, level): """ Return vector of label values for requested level, equal to the length of the index **this is an internal method** Parameters ---------- level : int level Returns ------- values : ndarray """ unique = self.levels[level] labels = self.labels[level] filled = algos.take_1d(unique._values, labels, fill_value=unique._na_value) values = unique._shallow_copy(filled) return values def get_level_values(self, level): """ Return vector of label values for requested level, equal to the length of the index Parameters ---------- level : int or level name Returns ------- values : Index """ level = self._get_level_number(level) values = self._get_level_values(level) return values def format(self, space=2, sparsify=None, adjoin=True, names=False, na_rep=None, formatter=None): if len(self) == 0: return [] stringified_levels = [] for lev, lab in zip(self.levels, self.labels): na = na_rep if na_rep is not None else _get_na_rep(lev.dtype.type) if len(lev) > 0: formatted = lev.take(lab).format(formatter=formatter) # we have some NA mask = lab == -1 if mask.any(): formatted = np.array(formatted, dtype=object) formatted[mask] = na formatted = formatted.tolist() else: # weird all NA case formatted = [pprint_thing(na if isna(x) else x, escape_chars=('\t', '\r', '\n')) for x in algos.take_1d(lev._values, lab)] stringified_levels.append(formatted) result_levels = [] for lev, name in zip(stringified_levels, self.names): level = [] if names: level.append(pprint_thing(name, escape_chars=('\t', '\r', '\n')) if name is not None else '') level.extend(np.array(lev, dtype=object)) result_levels.append(level) if sparsify is None: sparsify = get_option("display.multi_sparse") if sparsify: sentinel = '' 
# GH3547 # use value of sparsify as sentinel, unless it's an obvious # "Truthey" value if sparsify not in [True, 1]: sentinel = sparsify # little bit of a kludge job for #1217 result_levels = _sparsify(result_levels, start=int(names), sentinel=sentinel) if adjoin: from pandas.io.formats.format import _get_adjustment adj = _get_adjustment() return adj.adjoin(space, *result_levels).split('\n') else: return result_levels def _to_safe_for_reshape(self): """ convert to object if we are a categorical """ return self.set_levels([i._to_safe_for_reshape() for i in self.levels]) def to_frame(self, index=True): """ Create a DataFrame with the columns the levels of the MultiIndex .. versionadded:: 0.20.0 Parameters ---------- index : boolean, default True return this MultiIndex as the index Returns ------- DataFrame """ from pandas import DataFrame result = DataFrame({(name or level): self._get_level_values(level) for name, level in zip(self.names, range(len(self.levels)))}, copy=False) if index: result.index = self return result def to_hierarchical(self, n_repeat, n_shuffle=1): """ Return a MultiIndex reshaped to conform to the shapes given by n_repeat and n_shuffle. Useful to replicate and rearrange a MultiIndex for combination with another Index with n_repeat items. Parameters ---------- n_repeat : int Number of times to repeat the labels on self n_shuffle : int Controls the reordering of the labels. If the result is going to be an inner level in a MultiIndex, n_shuffle will need to be greater than one. The size of each label must divisible by n_shuffle. 
Returns ------- MultiIndex Examples -------- >>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'), (2, u'one'), (2, u'two')]) >>> idx.to_hierarchical(3) MultiIndex(levels=[[1, 2], [u'one', u'two']], labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1], [0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]]) """ levels = self.levels labels = [np.repeat(x, n_repeat) for x in self.labels] # Assumes that each label is divisible by n_shuffle labels = [x.reshape(n_shuffle, -1).ravel(order='F') for x in labels] names = self.names return MultiIndex(levels=levels, labels=labels, names=names) @property def is_all_dates(self): return False def is_lexsorted(self): """ Return True if the labels are lexicographically sorted """ return self.lexsort_depth == self.nlevels @cache_readonly def lexsort_depth(self): if self.sortorder is not None: if self.sortorder == 0: return self.nlevels else: return 0 int64_labels = [_ensure_int64(lab) for lab in self.labels] for k in range(self.nlevels, 0, -1): if lib.is_lexsorted(int64_labels[:k]): return k return 0 @classmethod def from_arrays(cls, arrays, sortorder=None, names=None): """ Convert arrays to MultiIndex Parameters ---------- arrays : list / sequence of array-likes Each array-like gives one level's value for each data point. len(arrays) is the number of levels. 
sortorder : int or None Level of sortedness (must be lexicographically sorted by that level) Returns ------- index : MultiIndex Examples -------- >>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']] >>> MultiIndex.from_arrays(arrays, names=('number', 'color')) See Also -------- MultiIndex.from_tuples : Convert list of tuples to MultiIndex MultiIndex.from_product : Make a MultiIndex from cartesian product of iterables """ if len(arrays) == 1: name = None if names is None else names[0] return Index(arrays[0], name=name) # Check if lengths of all arrays are equal or not, # raise ValueError, if not for i in range(1, len(arrays)): if len(arrays[i]) != len(arrays[i - 1]): raise ValueError('all arrays must be same length') from pandas.core.categorical import _factorize_from_iterables labels, levels = _factorize_from_iterables(arrays) if names is None: names = [getattr(arr, "name", None) for arr in arrays] return MultiIndex(levels=levels, labels=labels, sortorder=sortorder, names=names, verify_integrity=False) @classmethod def from_tuples(cls, tuples, sortorder=None, names=None): """ Convert list of tuples to MultiIndex Parameters ---------- tuples : list / sequence of tuple-likes Each tuple is the index of one row/column. 
sortorder : int or None Level of sortedness (must be lexicographically sorted by that level) Returns ------- index : MultiIndex Examples -------- >>> tuples = [(1, u'red'), (1, u'blue'), (2, u'red'), (2, u'blue')] >>> MultiIndex.from_tuples(tuples, names=('number', 'color')) See Also -------- MultiIndex.from_arrays : Convert list of arrays to MultiIndex MultiIndex.from_product : Make a MultiIndex from cartesian product of iterables """ if len(tuples) == 0: if names is None: msg = 'Cannot infer number of levels from empty list' raise TypeError(msg) arrays = [[]] * len(names) elif isinstance(tuples, (np.ndarray, Index)): if isinstance(tuples, Index): tuples = tuples._values arrays = list(lib.tuples_to_object_array(tuples).T) elif isinstance(tuples, list): arrays = list(lib.to_object_array_tuples(tuples).T) else: arrays = lzip(*tuples) return MultiIndex.from_arrays(arrays, sortorder=sortorder, names=names) @classmethod def from_product(cls, iterables, sortorder=None, names=None): """ Make a MultiIndex from the cartesian product of multiple iterables Parameters ---------- iterables : list / sequence of iterables Each iterable has unique labels for each level of the index. sortorder : int or None Level of sortedness (must be lexicographically sorted by that level). names : list / sequence of strings or None Names for the levels in the index. 
Returns ------- index : MultiIndex Examples -------- >>> numbers = [0, 1, 2] >>> colors = [u'green', u'purple'] >>> MultiIndex.from_product([numbers, colors], names=['number', 'color']) MultiIndex(levels=[[0, 1, 2], [u'green', u'purple']], labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]], names=[u'number', u'color']) See Also -------- MultiIndex.from_arrays : Convert list of arrays to MultiIndex MultiIndex.from_tuples : Convert list of tuples to MultiIndex """ from pandas.core.categorical import _factorize_from_iterables from pandas.core.reshape.util import cartesian_product labels, levels = _factorize_from_iterables(iterables) labels = cartesian_product(labels) return MultiIndex(levels, labels, sortorder=sortorder, names=names) def _sort_levels_monotonic(self): """ .. versionadded:: 0.20.0 This is an *internal* function. create a new MultiIndex from the current to monotonically sorted items IN the levels. This does not actually make the entire MultiIndex monotonic, JUST the levels. The resulting MultiIndex will have the same outward appearance, meaning the same .values and ordering. It will also be .equals() to the original. 
Returns ------- MultiIndex Examples -------- >>> i = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']], labels=[[0, 0, 1, 1], [0, 1, 0, 1]]) >>> i MultiIndex(levels=[['a', 'b'], ['bb', 'aa']], labels=[[0, 0, 1, 1], [0, 1, 0, 1]]) >>> i.sort_monotonic() MultiIndex(levels=[['a', 'b'], ['aa', 'bb']], labels=[[0, 0, 1, 1], [1, 0, 1, 0]]) """ if self.is_lexsorted() and self.is_monotonic: return self new_levels = [] new_labels = [] for lev, lab in zip(self.levels, self.labels): if lev.is_monotonic: new_levels.append(lev) new_labels.append(lab) continue # indexer to reorder the levels indexer = lev.argsort() lev = lev.take(indexer) # indexer to reorder the labels indexer = _ensure_int64(indexer) ri = lib.get_reverse_indexer(indexer, len(indexer)) lab = algos.take_1d(ri, lab) new_levels.append(lev) new_labels.append(lab) return MultiIndex(new_levels, new_labels, names=self.names, sortorder=self.sortorder, verify_integrity=False) def remove_unused_levels(self): """ create a new MultiIndex from the current that removing unused levels, meaning that they are not expressed in the labels The resulting MultiIndex will have the same outward appearance, meaning the same .values and ordering. It will also be .equals() to the original. .. 
versionadded:: 0.20.0 Returns ------- MultiIndex Examples -------- >>> i = pd.MultiIndex.from_product([range(2), list('ab')]) MultiIndex(levels=[[0, 1], ['a', 'b']], labels=[[0, 0, 1, 1], [0, 1, 0, 1]]) >>> i[2:] MultiIndex(levels=[[0, 1], ['a', 'b']], labels=[[1, 1], [0, 1]]) The 0 from the first level is not represented and can be removed >>> i[2:].remove_unused_levels() MultiIndex(levels=[[1], ['a', 'b']], labels=[[0, 0], [0, 1]]) """ new_levels = [] new_labels = [] changed = False for lev, lab in zip(self.levels, self.labels): uniques = algos.unique(lab) # nothing unused if len(uniques) == len(lev): new_levels.append(lev) new_labels.append(lab) continue changed = True # labels get mapped from uniques to 0:len(uniques) label_mapping = np.zeros(len(lev)) label_mapping[uniques] = np.arange(len(uniques)) lab = label_mapping[lab] # new levels are simple lev = lev.take(uniques) new_levels.append(lev) new_labels.append(lab) result = self._shallow_copy() if changed: result._reset_identity() result._set_levels(new_levels, validate=False) result._set_labels(new_labels, validate=False) return result @property def nlevels(self): return len(self.levels) @property def levshape(self): return tuple(len(x) for x in self.levels) @Appender(_index_shared_docs['__contains__'] % _index_doc_kwargs) def __contains__(self, key): hash(key) try: self.get_loc(key) return True except LookupError: return False contains = __contains__ def __reduce__(self): """Necessary for making this object picklable""" d = dict(levels=[lev for lev in self.levels], labels=[label for label in self.labels], sortorder=self.sortorder, names=list(self.names)) return ibase._new_Index, (self.__class__, d), None def __setstate__(self, state): """Necessary for making this object picklable""" if isinstance(state, dict): levels = state.get('levels') labels = state.get('labels') sortorder = state.get('sortorder') names = state.get('names') elif isinstance(state, tuple): nd_state, own_state = state levels, labels, 
sortorder, names = own_state self._set_levels([Index(x) for x in levels], validate=False) self._set_labels(labels) self._set_names(names) self.sortorder = sortorder self._verify_integrity() self._reset_identity() def __getitem__(self, key): if is_scalar(key): retval = [] for lev, lab in zip(self.levels, self.labels): if lab[key] == -1: retval.append(np.nan) else: retval.append(lev[lab[key]]) return tuple(retval) else: if is_bool_indexer(key): key = np.asarray(key) sortorder = self.sortorder else: # cannot be sure whether the result will be sorted sortorder = None if isinstance(key, Index): key = np.asarray(key) new_labels = [lab[key] for lab in self.labels] return MultiIndex(levels=self.levels, labels=new_labels, names=self.names, sortorder=sortorder, verify_integrity=False) @Appender(_index_shared_docs['take'] % _index_doc_kwargs) def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs): nv.validate_take(tuple(), kwargs) indices = _ensure_platform_int(indices) taken = self._assert_take_fillable(self.labels, indices, allow_fill=allow_fill, fill_value=fill_value, na_value=-1) return MultiIndex(levels=self.levels, labels=taken, names=self.names, verify_integrity=False) def _assert_take_fillable(self, values, indices, allow_fill=True, fill_value=None, na_value=None): """ Internal method to handle NA filling of take """ # only fill if we are passing a non-None fill_value if allow_fill and fill_value is not None: if (indices < -1).any(): msg = ('When allow_fill=True and fill_value is not None, ' 'all indices must be >= -1') raise ValueError(msg) taken = [lab.take(indices) for lab in self.labels] mask = indices == -1 if mask.any(): masked = [] for new_label in taken: label_values = new_label.values() label_values[mask] = na_value masked.append(FrozenNDArray(label_values)) taken = masked else: taken = [lab.take(indices) for lab in self.labels] return taken def append(self, other): """ Append a collection of Index options together Parameters ---------- 
other : Index or list/tuple of indices Returns ------- appended : Index """ if not isinstance(other, (list, tuple)): other = [other] if all((isinstance(o, MultiIndex) and o.nlevels >= self.nlevels) for o in other): arrays = [] for i in range(self.nlevels): label = self._get_level_values(i) appended = [o._get_level_values(i) for o in other] arrays.append(label.append(appended)) return MultiIndex.from_arrays(arrays, names=self.names) to_concat = (self.values, ) + tuple(k._values for k in other) new_tuples = np.concatenate(to_concat) # if all(isinstance(x, MultiIndex) for x in other): try: return MultiIndex.from_tuples(new_tuples, names=self.names) except: return Index(new_tuples) def argsort(self, *args, **kwargs): return self.values.argsort(*args, **kwargs) @deprecate_kwarg(old_arg_name='n', new_arg_name='repeats') def repeat(self, repeats, *args, **kwargs): nv.validate_repeat(args, kwargs) return MultiIndex(levels=self.levels, labels=[label.view(np.ndarray).repeat(repeats) for label in self.labels], names=self.names, sortorder=self.sortorder, verify_integrity=False) def where(self, cond, other=None): raise NotImplementedError(".where is not supported for " "MultiIndex operations") def drop(self, labels, level=None, errors='raise'): """ Make new MultiIndex with passed list of labels deleted Parameters ---------- labels : array-like Must be a list of tuples level : int or level name, default None Returns ------- dropped : MultiIndex """ if level is not None: return self._drop_from_level(labels, level) try: if not isinstance(labels, (np.ndarray, Index)): labels = com._index_labels_to_array(labels) indexer = self.get_indexer(labels) mask = indexer == -1 if mask.any(): if errors != 'ignore': raise ValueError('labels %s not contained in axis' % labels[mask]) indexer = indexer[~mask] except Exception: pass inds = [] for label in labels: try: loc = self.get_loc(label) # get_loc returns either an integer, a slice, or a boolean # mask if isinstance(loc, int): 
inds.append(loc) elif isinstance(loc, slice): inds.extend(lrange(loc.start, loc.stop)) elif is_bool_indexer(loc): if self.lexsort_depth == 0: warnings.warn('dropping on a non-lexsorted multi-index' ' without a level parameter may impact ' 'performance.', PerformanceWarning, stacklevel=3) loc = loc.nonzero()[0] inds.extend(loc) else: msg = 'unsupported indexer of type {}'.format(type(loc)) raise AssertionError(msg) except KeyError: if errors != 'ignore': raise return self.delete(inds) def _drop_from_level(self, labels, level): labels = com._index_labels_to_array(labels) i = self._get_level_number(level) index = self.levels[i] values = index.get_indexer(labels) mask = ~algos.isin(self.labels[i], values) return self[mask] def droplevel(self, level=0): """ Return Index with requested level removed. If MultiIndex has only 2 levels, the result will be of Index type not MultiIndex. Parameters ---------- level : int/level name or list thereof Notes ----- Does not check if result index is unique or not Returns ------- index : Index or MultiIndex """ levels = level if not isinstance(levels, (tuple, list)): levels = [level] new_levels = list(self.levels) new_labels = list(self.labels) new_names = list(self.names) levnums = sorted(self._get_level_number(lev) for lev in levels)[::-1] for i in levnums: new_levels.pop(i) new_labels.pop(i) new_names.pop(i) if len(new_levels) == 1: # set nan if needed mask = new_labels[0] == -1 result = new_levels[0].take(new_labels[0]) if mask.any(): result = result.putmask(mask, np.nan) result.name = new_names[0] return result else: return MultiIndex(levels=new_levels, labels=new_labels, names=new_names, verify_integrity=False) def swaplevel(self, i=-2, j=-1): """ Swap level i with level j. Do not change the ordering of anything Parameters ---------- i, j : int, string (can be mixed) Level of index to be swapped. Can pass level name as string. Returns ------- swapped : MultiIndex .. 
        versionchanged:: 0.18.1

           The indexes ``i`` and ``j`` are now optional, and default to
           the two innermost levels of the index.
        """
        new_levels = list(self.levels)
        new_labels = list(self.labels)
        new_names = list(self.names)

        i = self._get_level_number(i)
        j = self._get_level_number(j)

        # swap the level/label/name triple in place
        new_levels[i], new_levels[j] = new_levels[j], new_levels[i]
        new_labels[i], new_labels[j] = new_labels[j], new_labels[i]
        new_names[i], new_names[j] = new_names[j], new_names[i]

        return MultiIndex(levels=new_levels, labels=new_labels,
                          names=new_names, verify_integrity=False)

    def reorder_levels(self, order):
        """
        Rearrange levels using input order. May not drop or duplicate levels

        Parameters
        ----------
        order : list of int or list of str
            Reference levels by number (position) or by key (label); must
            contain every level of the index exactly once.
        """
        order = [self._get_level_number(i) for i in order]
        if len(order) != self.nlevels:
            raise AssertionError('Length of order must be same as '
                                 'number of levels (%d), got %d' %
                                 (self.nlevels, len(order)))
        new_levels = [self.levels[i] for i in order]
        new_labels = [self.labels[i] for i in order]
        new_names = [self.names[i] for i in order]

        return MultiIndex(levels=new_levels, labels=new_labels,
                          names=new_names, verify_integrity=False)

    def __getslice__(self, i, j):
        # Python 2 slicing protocol; delegates to __getitem__
        return self.__getitem__(slice(i, j))

    def _get_labels_for_sorting(self):
        """
        Categorize our labels using the full set of available categories
        (all, not just the observed ones), excluding any missing ones (-1);
        this is in preparation for sorting, where we need to disambiguate
        that -1 is not a valid category.
        """
        from pandas.core.categorical import Categorical

        def cats(label):
            # categories 0..max(label); empty labels get an empty range
            return np.arange(np.array(label).max() + 1 if len(label) else 0,
                             dtype=label.dtype)

        return [Categorical.from_codes(label, cats(label), ordered=True)
                for label in self.labels]

    def sortlevel(self, level=0, ascending=True, sort_remaining=True):
        """
        Sort MultiIndex at the requested level. The result will respect the
        original ordering of the associated factor at that level.
Parameters ---------- level : list-like, int or str, default 0 If a string is given, must be a name of the level If list-like must be names or ints of levels. ascending : boolean, default True False to sort in descending order Can also be a list to specify a directed ordering sort_remaining : sort by the remaining levels after level. Returns ------- sorted_index : pd.MultiIndex Resulting index indexer : np.ndarray Indices of output values in original index """ from pandas.core.sorting import indexer_from_factorized if isinstance(level, (compat.string_types, int)): level = [level] level = [self._get_level_number(lev) for lev in level] sortorder = None # we have a directed ordering via ascending if isinstance(ascending, list): if not len(level) == len(ascending): raise ValueError("level must have same length as ascending") from pandas.core.sorting import lexsort_indexer indexer = lexsort_indexer([self.labels[lev] for lev in level], orders=ascending) # level ordering else: labels = list(self.labels) shape = list(self.levshape) # partition labels and shape primary = tuple(labels.pop(lev - i) for i, lev in enumerate(level)) primshp = tuple(shape.pop(lev - i) for i, lev in enumerate(level)) if sort_remaining: primary += primary + tuple(labels) primshp += primshp + tuple(shape) else: sortorder = level[0] indexer = indexer_from_factorized(primary, primshp, compress=False) if not ascending: indexer = indexer[::-1] indexer = _ensure_platform_int(indexer) new_labels = [lab.take(indexer) for lab in self.labels] new_index = MultiIndex(labels=new_labels, levels=self.levels, names=self.names, sortorder=sortorder, verify_integrity=False) return new_index, indexer def _convert_listlike_indexer(self, keyarr, kind=None): """ Parameters ---------- keyarr : list-like Indexer to convert. 
Returns ------- tuple (indexer, keyarr) indexer is an ndarray or None if cannot convert keyarr are tuple-safe keys """ indexer, keyarr = super(MultiIndex, self)._convert_listlike_indexer( keyarr, kind=kind) # are we indexing a specific level if indexer is None and len(keyarr) and not isinstance(keyarr[0], tuple): level = 0 _, indexer = self.reindex(keyarr, level=level) # take all if indexer is None: indexer = np.arange(len(self)) check = self.levels[0].get_indexer(keyarr) mask = check == -1 if mask.any(): raise KeyError('%s not in index' % keyarr[mask]) return indexer, keyarr @Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs) def get_indexer(self, target, method=None, limit=None, tolerance=None): method = missing.clean_reindex_fill_method(method) target = _ensure_index(target) # empty indexer if is_list_like(target) and not len(target): return _ensure_platform_int(np.array([])) if not isinstance(target, MultiIndex): try: target = MultiIndex.from_tuples(target) except (TypeError, ValueError): # let's instead try with a straight Index if method is None: return Index(self.values).get_indexer(target, method=method, limit=limit, tolerance=tolerance) if not self.is_unique: raise Exception('Reindexing only valid with uniquely valued Index ' 'objects') if method == 'pad' or method == 'backfill': if tolerance is not None: raise NotImplementedError("tolerance not implemented yet " 'for MultiIndex') indexer = self._get_fill_indexer(target, method, limit) elif method == 'nearest': raise NotImplementedError("method='nearest' not implemented yet " 'for MultiIndex; see GitHub issue 9365') else: # we may not compare equally because of hashing if we # don't have the same dtypes if self._inferred_type_levels != target._inferred_type_levels: return Index(self.values).get_indexer(target.values) indexer = self._engine.get_indexer(target) return _ensure_platform_int(indexer) @Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs) def 
get_indexer_non_unique(self, target): return super(MultiIndex, self).get_indexer_non_unique(target) def reindex(self, target, method=None, level=None, limit=None, tolerance=None): """ Create index with target's values (move/add/delete values as necessary) Returns ------- new_index : pd.MultiIndex Resulting index indexer : np.ndarray or None Indices of output values in original index """ # GH6552: preserve names when reindexing to non-named target # (i.e. neither Index nor Series). preserve_names = not hasattr(target, 'names') if level is not None: if method is not None: raise TypeError('Fill method not supported if level passed') # GH7774: preserve dtype/tz if target is empty and not an Index. # target may be an iterator target = ibase._ensure_has_len(target) if len(target) == 0 and not isinstance(target, Index): idx = self.levels[level] attrs = idx._get_attributes_dict() attrs.pop('freq', None) # don't preserve freq target = type(idx)._simple_new(np.empty(0, dtype=idx.dtype), **attrs) else: target = _ensure_index(target) target, indexer, _ = self._join_level(target, level, how='right', return_indexers=True, keep_order=False) else: target = _ensure_index(target) if self.equals(target): indexer = None else: if self.is_unique: indexer = self.get_indexer(target, method=method, limit=limit, tolerance=tolerance) else: raise Exception("cannot handle a non-unique multi-index!") if not isinstance(target, MultiIndex): if indexer is None: target = self elif (indexer >= 0).all(): target = self.take(indexer) else: # hopefully? 
target = MultiIndex.from_tuples(target) if (preserve_names and target.nlevels == self.nlevels and target.names != self.names): target = target.copy(deep=False) target.names = self.names return target, indexer def get_slice_bound(self, label, side, kind): if not isinstance(label, tuple): label = label, return self._partial_tup_index(label, side=side) def slice_locs(self, start=None, end=None, step=None, kind=None): """ For an ordered MultiIndex, compute the slice locations for input labels. They can be tuples representing partial levels, e.g. for a MultiIndex with 3 levels, you can pass a single value (corresponding to the first level), or a 1-, 2-, or 3-tuple. Parameters ---------- start : label or tuple, default None If None, defaults to the beginning end : label or tuple If None, defaults to the end step : int or None Slice step kind : string, optional, defaults None Returns ------- (start, end) : (int, int) Notes ----- This function assumes that the data is sorted by the first level """ # This function adds nothing to its parent implementation (the magic # happens in get_slice_bound method), but it adds meaningful doc. 
return super(MultiIndex, self).slice_locs(start, end, step, kind=kind) def _partial_tup_index(self, tup, side='left'): if len(tup) > self.lexsort_depth: raise UnsortedIndexError( 'Key length (%d) was greater than MultiIndex' ' lexsort depth (%d)' % (len(tup), self.lexsort_depth)) n = len(tup) start, end = 0, len(self) zipped = zip(tup, self.levels, self.labels) for k, (lab, lev, labs) in enumerate(zipped): section = labs[start:end] if lab not in lev: if not lev.is_type_compatible(lib.infer_dtype([lab])): raise TypeError('Level type mismatch: %s' % lab) # short circuit loc = lev.searchsorted(lab, side=side) if side == 'right' and loc >= 0: loc -= 1 return start + section.searchsorted(loc, side=side) idx = lev.get_loc(lab) if k < n - 1: end = start + section.searchsorted(idx, side='right') start = start + section.searchsorted(idx, side='left') else: return start + section.searchsorted(idx, side=side) def get_loc(self, key, method=None): """ Get integer location, slice or boolean mask for requested label or tuple. If the key is past the lexsort depth, the return may be a boolean mask array, otherwise it is always a slice or int. 
Parameters ---------- key : label or tuple method : None Returns ------- loc : int, slice object or boolean mask """ if method is not None: raise NotImplementedError('only the default get_loc method is ' 'currently supported for MultiIndex') def _maybe_to_slice(loc): """convert integer indexer to boolean mask or slice if possible""" if not isinstance(loc, np.ndarray) or loc.dtype != 'int64': return loc loc = lib.maybe_indices_to_slice(loc, len(self)) if isinstance(loc, slice): return loc mask = np.empty(len(self), dtype='bool') mask.fill(False) mask[loc] = True return mask if not isinstance(key, tuple): loc = self._get_level_indexer(key, level=0) return _maybe_to_slice(loc) keylen = len(key) if self.nlevels < keylen: raise KeyError('Key length ({0}) exceeds index depth ({1})' ''.format(keylen, self.nlevels)) if keylen == self.nlevels and self.is_unique: def _maybe_str_to_time_stamp(key, lev): if lev.is_all_dates and not isinstance(key, Timestamp): try: return Timestamp(key, tz=getattr(lev, 'tz', None)) except Exception: pass return key key = _values_from_object(key) key = tuple(map(_maybe_str_to_time_stamp, key, self.levels)) return self._engine.get_loc(key) # -- partial selection or non-unique index # break the key into 2 parts based on the lexsort_depth of the index; # the first part returns a continuous slice of the index; the 2nd part # needs linear search within the slice i = self.lexsort_depth lead_key, follow_key = key[:i], key[i:] start, stop = (self.slice_locs(lead_key, lead_key) if lead_key else (0, len(self))) if start == stop: raise KeyError(key) if not follow_key: return slice(start, stop) warnings.warn('indexing past lexsort depth may impact performance.', PerformanceWarning, stacklevel=10) loc = np.arange(start, stop, dtype='int64') for i, k in enumerate(follow_key, len(lead_key)): mask = self.labels[i][loc] == self.levels[i].get_loc(k) if not mask.all(): loc = loc[mask] if not len(loc): raise KeyError(key) return (_maybe_to_slice(loc) if len(loc) != 
stop - start else slice(start, stop)) def get_loc_level(self, key, level=0, drop_level=True): """ Get integer location slice for requested label or tuple Parameters ---------- key : label or tuple level : int/level name or list thereof Returns ------- loc : int or slice object """ def maybe_droplevels(indexer, levels, drop_level): if not drop_level: return self[indexer] # kludgearound orig_index = new_index = self[indexer] levels = [self._get_level_number(i) for i in levels] for i in sorted(levels, reverse=True): try: new_index = new_index.droplevel(i) except: # no dropping here return orig_index return new_index if isinstance(level, (tuple, list)): if len(key) != len(level): raise AssertionError('Key for location must have same ' 'length as number of levels') result = None for lev, k in zip(level, key): loc, new_index = self.get_loc_level(k, level=lev) if isinstance(loc, slice): mask = np.zeros(len(self), dtype=bool) mask[loc] = True loc = mask result = loc if result is None else result & loc return result, maybe_droplevels(result, level, drop_level) level = self._get_level_number(level) # kludge for #1796 if isinstance(key, list): key = tuple(key) if isinstance(key, tuple) and level == 0: try: if key in self.levels[0]: indexer = self._get_level_indexer(key, level=level) new_index = maybe_droplevels(indexer, [0], drop_level) return indexer, new_index except TypeError: pass if not any(isinstance(k, slice) for k in key): # partial selection # optionally get indexer to avoid re-calculation def partial_selection(key, indexer=None): if indexer is None: indexer = self.get_loc(key) ilevels = [i for i in range(len(key)) if key[i] != slice(None, None)] return indexer, maybe_droplevels(indexer, ilevels, drop_level) if len(key) == self.nlevels: if self.is_unique: # here we have a completely specified key, but are # using some partial string matching here # GH4758 all_dates = [(l.is_all_dates and not isinstance(k, compat.string_types)) for k, l in zip(key, self.levels)] 
can_index_exactly = any(all_dates) if (any([l.is_all_dates for k, l in zip(key, self.levels)]) and not can_index_exactly): indexer = self.get_loc(key) # we have a multiple selection here if (not isinstance(indexer, slice) or indexer.stop - indexer.start != 1): return partial_selection(key, indexer) key = tuple(self[indexer].tolist()[0]) return (self._engine.get_loc( _values_from_object(key)), None) else: return partial_selection(key) else: return partial_selection(key) else: indexer = None for i, k in enumerate(key): if not isinstance(k, slice): k = self._get_level_indexer(k, level=i) if isinstance(k, slice): # everything if k.start == 0 and k.stop == len(self): k = slice(None, None) else: k_index = k if isinstance(k, slice): if k == slice(None, None): continue else: raise TypeError(key) if indexer is None: indexer = k_index else: # pragma: no cover indexer &= k_index if indexer is None: indexer = slice(None, None) ilevels = [i for i in range(len(key)) if key[i] != slice(None, None)] return indexer, maybe_droplevels(indexer, ilevels, drop_level) else: indexer = self._get_level_indexer(key, level=level) return indexer, maybe_droplevels(indexer, [level], drop_level) def _get_level_indexer(self, key, level=0, indexer=None): # return an indexer, boolean array or a slice showing where the key is # in the totality of values # if the indexer is provided, then use this level_index = self.levels[level] labels = self.labels[level] def convert_indexer(start, stop, step, indexer=indexer, labels=labels): # given the inputs and the labels/indexer, compute an indexer set # if we have a provided indexer, then this need not consider # the entire labels set r = np.arange(start, stop, step) if indexer is not None and len(indexer) != len(labels): # we have an indexer which maps the locations in the labels # that we have already selected (and is not an indexer for the # entire set) otherwise this is wasteful so we only need to # examine locations that are in this set the only magic 
here is # that the result are the mappings to the set that we have # selected from pandas import Series mapper = Series(indexer) indexer = labels.take(_ensure_platform_int(indexer)) result = Series(Index(indexer).isin(r).nonzero()[0]) m = result.map(mapper)._values else: m = np.zeros(len(labels), dtype=bool) m[np.in1d(labels, r, assume_unique=Index(labels).is_unique)] = True return m if isinstance(key, slice): # handle a slice, returnig a slice if we can # otherwise a boolean indexer try: if key.start is not None: start = level_index.get_loc(key.start) else: start = 0 if key.stop is not None: stop = level_index.get_loc(key.stop) else: stop = len(level_index) - 1 step = key.step except KeyError: # we have a partial slice (like looking up a partial date # string) start = stop = level_index.slice_indexer(key.start, key.stop, key.step, kind='loc') step = start.step if isinstance(start, slice) or isinstance(stop, slice): # we have a slice for start and/or stop # a partial date slicer on a DatetimeIndex generates a slice # note that the stop ALREADY includes the stopped point (if # it was a string sliced) return convert_indexer(start.start, stop.stop, step) elif level > 0 or self.lexsort_depth == 0 or step is not None: # need to have like semantics here to right # searching as when we are using a slice # so include the stop+1 (so we include stop) return convert_indexer(start, stop + 1, step) else: # sorted, so can return slice object -> view i = labels.searchsorted(start, side='left') j = labels.searchsorted(stop, side='right') return slice(i, j, step) else: loc = level_index.get_loc(key) if isinstance(loc, slice): return loc elif level > 0 or self.lexsort_depth == 0: return np.array(labels == loc, dtype=bool) i = labels.searchsorted(loc, side='left') j = labels.searchsorted(loc, side='right') return slice(i, j) def get_locs(self, tup): """ Given a tuple of slices/lists/labels/boolean indexer to a level-wise spec produce an indexer to extract those locations Parameters 
---------- key : tuple of (slices/list/labels) Returns ------- locs : integer list of locations or boolean indexer suitable for passing to iloc """ # must be lexsorted to at least as many levels true_slices = [i for (i, s) in enumerate(is_true_slices(tup)) if s] if true_slices and true_slices[-1] >= self.lexsort_depth: raise UnsortedIndexError('MultiIndex slicing requires the index ' 'to be lexsorted: slicing on levels {0}, ' 'lexsort depth {1}' .format(true_slices, self.lexsort_depth)) # indexer # this is the list of all values that we want to select n = len(self) indexer = None def _convert_to_indexer(r): # return an indexer if isinstance(r, slice): m = np.zeros(n, dtype=bool) m[r] = True r = m.nonzero()[0] elif is_bool_indexer(r): if len(r) != n: raise ValueError("cannot index with a boolean indexer " "that is not the same length as the " "index") r = r.nonzero()[0] from .numeric import Int64Index return Int64Index(r) def _update_indexer(idxr, indexer=indexer): if indexer is None: indexer = Index(np.arange(n)) if idxr is None: return indexer return indexer & idxr for i, k in enumerate(tup): if is_bool_indexer(k): # a boolean indexer, must be the same length! 
k = np.asarray(k) indexer = _update_indexer(_convert_to_indexer(k), indexer=indexer) elif is_list_like(k): # a collection of labels to include from this level (these # are or'd) indexers = None for x in k: try: idxrs = _convert_to_indexer( self._get_level_indexer(x, level=i, indexer=indexer)) indexers = (idxrs if indexers is None else indexers | idxrs) except KeyError: # ignore not founds continue if indexers is not None: indexer = _update_indexer(indexers, indexer=indexer) else: from .numeric import Int64Index # no matches we are done return Int64Index([])._values elif is_null_slice(k): # empty slice indexer = _update_indexer(None, indexer=indexer) elif isinstance(k, slice): # a slice, include BOTH of the labels indexer = _update_indexer(_convert_to_indexer( self._get_level_indexer(k, level=i, indexer=indexer)), indexer=indexer) else: # a single label indexer = _update_indexer(_convert_to_indexer( self.get_loc_level(k, level=i, drop_level=False)[0]), indexer=indexer) # empty indexer if indexer is None: return Int64Index([])._values return indexer._values def truncate(self, before=None, after=None): """ Slice index between two labels / tuples, return new MultiIndex Parameters ---------- before : label or tuple, can be partial. Default None None defaults to start after : label or tuple, can be partial. 
            Default None
            None defaults to end

        Returns
        -------
        truncated : MultiIndex
        """
        if after and before and after < before:
            raise ValueError('after < before')

        # slice level 0's values and shift its label codes accordingly;
        # other levels keep their values and only have labels sliced
        i, j = self.levels[0].slice_locs(before, after)
        left, right = self.slice_locs(before, after)

        new_levels = list(self.levels)
        new_levels[0] = new_levels[0][i:j]

        new_labels = [lab[left:right] for lab in self.labels]
        new_labels[0] = new_labels[0] - i  # re-base codes after level slice

        return MultiIndex(levels=new_levels, labels=new_labels,
                          verify_integrity=False)

    def equals(self, other):
        """
        Determines if two MultiIndex objects have the same labeling
        information (the levels themselves do not necessarily have to be the
        same)

        See also
        --------
        equal_levels
        """
        if self.is_(other):
            return True

        if not isinstance(other, Index):
            return False

        if not isinstance(other, MultiIndex):
            # compare against a non-MultiIndex via materialized tuple values
            return array_equivalent(self._values,
                                    _values_from_object(_ensure_index(other)))

        if self.nlevels != other.nlevels:
            return False

        if len(self) != len(other):
            return False

        for i in range(self.nlevels):
            # -1 codes mark missing values; drop them before materializing
            # the level values for comparison
            slabels = self.labels[i]
            slabels = slabels[slabels != -1]
            svalues = algos.take_nd(np.asarray(self.levels[i]._values),
                                    slabels, allow_fill=False)

            olabels = other.labels[i]
            olabels = olabels[olabels != -1]
            ovalues = algos.take_nd(np.asarray(other.levels[i]._values),
                                    olabels, allow_fill=False)

            # since we use NaT both datetime64 and timedelta64
            # we can have a situation where a level is typed say
            # timedelta64 in self (IOW it has other values than NaT)
            # but types datetime64 in other (where its all NaT)
            # but these are equivalent
            if len(svalues) == 0 and len(ovalues) == 0:
                continue

            if not array_equivalent(svalues, ovalues):
                return False

        return True

    def equal_levels(self, other):
        """
        Return True if the levels of both MultiIndex objects are the same

        """
        if self.nlevels != other.nlevels:
            return False

        for i in range(self.nlevels):
            if not self.levels[i].equals(other.levels[i]):
                return False
        return True

    def union(self, other):
        """
        Form the union of two MultiIndex objects, sorting if possible
Parameters ---------- other : MultiIndex or array / Index of tuples Returns ------- Index >>> index.union(index2) """ self._assert_can_do_setop(other) other, result_names = self._convert_can_do_setop(other) if len(other) == 0 or self.equals(other): return self uniq_tuples = lib.fast_unique_multiple([self._values, other._values]) return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0, names=result_names) def intersection(self, other): """ Form the intersection of two MultiIndex objects, sorting if possible Parameters ---------- other : MultiIndex or array / Index of tuples Returns ------- Index """ self._assert_can_do_setop(other) other, result_names = self._convert_can_do_setop(other) if self.equals(other): return self self_tuples = self._values other_tuples = other._values uniq_tuples = sorted(set(self_tuples) & set(other_tuples)) if len(uniq_tuples) == 0: return MultiIndex(levels=[[]] * self.nlevels, labels=[[]] * self.nlevels, names=result_names, verify_integrity=False) else: return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0, names=result_names) def difference(self, other): """ Compute sorted set difference of two MultiIndex objects Returns ------- diff : MultiIndex """ self._assert_can_do_setop(other) other, result_names = self._convert_can_do_setop(other) if len(other) == 0: return self if self.equals(other): return MultiIndex(levels=[[]] * self.nlevels, labels=[[]] * self.nlevels, names=result_names, verify_integrity=False) difference = sorted(set(self._values) - set(other._values)) if len(difference) == 0: return MultiIndex(levels=[[]] * self.nlevels, labels=[[]] * self.nlevels, names=result_names, verify_integrity=False) else: return MultiIndex.from_tuples(difference, sortorder=0, names=result_names) @Appender(_index_shared_docs['astype']) def astype(self, dtype, copy=True): if not is_object_dtype(np.dtype(dtype)): raise TypeError('Setting %s dtype to anything other than object ' 'is not supported' % self.__class__) elif copy is True: 
return self._shallow_copy() return self def _convert_can_do_setop(self, other): result_names = self.names if not hasattr(other, 'names'): if len(other) == 0: other = MultiIndex(levels=[[]] * self.nlevels, labels=[[]] * self.nlevels, verify_integrity=False) else: msg = 'other must be a MultiIndex or a list of tuples' try: other = MultiIndex.from_tuples(other) except: raise TypeError(msg) else: result_names = self.names if self.names == other.names else None return other, result_names def insert(self, loc, item): """ Make new MultiIndex inserting new item at location Parameters ---------- loc : int item : tuple Must be same length as number of levels in the MultiIndex Returns ------- new_index : Index """ # Pad the key with empty strings if lower levels of the key # aren't specified: if not isinstance(item, tuple): item = (item, ) + ('', ) * (self.nlevels - 1) elif len(item) != self.nlevels: raise ValueError('Item must have length equal to number of ' 'levels.') new_levels = [] new_labels = [] for k, level, labels in zip(item, self.levels, self.labels): if k not in level: # have to insert into level # must insert at end otherwise you have to recompute all the # other labels lev_loc = len(level) level = level.insert(lev_loc, k) else: lev_loc = level.get_loc(k) new_levels.append(level) new_labels.append(np.insert(_ensure_int64(labels), loc, lev_loc)) return MultiIndex(levels=new_levels, labels=new_labels, names=self.names, verify_integrity=False) def delete(self, loc): """ Make new index with passed location deleted Returns ------- new_index : MultiIndex """ new_labels = [np.delete(lab, loc) for lab in self.labels] return MultiIndex(levels=self.levels, labels=new_labels, names=self.names, verify_integrity=False) get_major_bounds = slice_locs __bounds = None @property def _bounds(self): """ Return or compute and return slice points for level 0, assuming sortedness """ if self.__bounds is None: inds = np.arange(len(self.levels[0])) self.__bounds = 
self.labels[0].searchsorted(inds) return self.__bounds def _wrap_joined_index(self, joined, other): names = self.names if self.names == other.names else None return MultiIndex.from_tuples(joined, names=names) @Appender(Index.isin.__doc__) def isin(self, values, level=None): if level is None: values = MultiIndex.from_tuples(values, names=self.names).values return algos.isin(self.values, values) else: num = self._get_level_number(level) levs = self.levels[num] labs = self.labels[num] sought_labels = levs.isin(values).nonzero()[0] if levs.size == 0: return np.zeros(len(labs), dtype=np.bool_) else: return np.lib.arraysetops.in1d(labs, sought_labels) MultiIndex._add_numeric_methods_disabled() MultiIndex._add_numeric_methods_add_sub_disabled() MultiIndex._add_logical_methods_disabled() def _sparsify(label_list, start=0, sentinel=''): pivoted = lzip(*label_list) k = len(label_list) result = pivoted[:start + 1] prev = pivoted[start] for cur in pivoted[start + 1:]: sparse_cur = [] for i, (p, t) in enumerate(zip(prev, cur)): if i == k - 1: sparse_cur.append(t) result.append(sparse_cur) break if p == t: sparse_cur.append(sentinel) else: sparse_cur.extend(cur[i:]) result.append(sparse_cur) break prev = cur return lzip(*result) def _get_na_rep(dtype): return {np.datetime64: 'NaT', np.timedelta64: 'NaT'}.get(dtype, 'NaN')
codeparrot/github-code-clean
""" libguestfs tools test utility functions. """ import logging import signal import os import re import aexpect from avocado.utils import path from avocado.utils import process from . import propcan class LibguestfsCmdError(Exception): """ Error of libguestfs-tool command. """ def __init__(self, details=''): self.details = details Exception.__init__(self) def __str__(self): return str(self.details) def lgf_cmd_check(cmd): """ To check whether the cmd is supported on this host. :param cmd: the cmd to use a libguest tool. :return: None if the cmd is not exist, otherwise return its path. """ libguestfs_cmds = ['libguestfs-test-tool', 'guestfish', 'guestmount', 'virt-alignment-scan', 'virt-cat', 'virt-copy-in', 'virt-copy-out', 'virt-df', 'virt-edit', 'virt-filesystems', 'virt-format', 'virt-inspector', 'virt-list-filesystems', 'virt-list-partitions', 'virt-ls', 'virt-make-fs', 'virt-rescue', 'virt-resize', 'virt-sparsify', 'virt-sysprep', 'virt-tar', 'virt-tar-in', 'virt-tar-out', 'virt-win-reg', 'virt-inspector2'] if cmd not in libguestfs_cmds: raise LibguestfsCmdError( "Command %s is not supported by libguestfs yet." % cmd) try: return path.find_command(cmd) except path.CmdNotFoundError: logging.warning("You have not installed %s on this host.", cmd) return None def lgf_command(cmd, ignore_status=True, debug=False, timeout=60): """ Interface of libguestfs tools' commands. :param cmd: Command line to execute. :return: CmdResult object. 
:raise: LibguestfsCmdError if non-zero exit status and ignore_status=False """ if debug: logging.debug("Running command %s in debug mode.", cmd) # Raise exception if ignore_status is False try: ret = process.run(cmd, ignore_status=ignore_status, verbose=debug, timeout=timeout) except process.CmdError as detail: raise LibguestfsCmdError(detail) if debug: logging.debug("status: %s", ret.exit_status) logging.debug("stdout: %s", ret.stdout_text.strip()) logging.debug("stderr: %s", ret.stderr_text.strip()) # Return CmdResult instance when ignore_status is True return ret class LibguestfsBase(propcan.PropCanBase): """ Base class of libguestfs tools. """ __slots__ = ['ignore_status', 'debug', 'timeout', 'uri', 'lgf_exec'] def __init__(self, lgf_exec="/bin/true", ignore_status=True, debug=False, timeout=60, uri=None): init_dict = {} init_dict['ignore_status'] = ignore_status init_dict['debug'] = debug init_dict['timeout'] = timeout init_dict['uri'] = uri init_dict['lgf_exec'] = lgf_exec super(LibguestfsBase, self).__init__(init_dict) def set_ignore_status(self, ignore_status): """ Enforce setting ignore_status as a boolean. 
""" if bool(ignore_status): self.__dict_set__('ignore_status', True) else: self.__dict_set__('ignore_status', False) def set_debug(self, debug): """ Accessor method for 'debug' property that logs message on change """ if not self.INITIALIZED: self.__dict_set__('debug', debug) else: current_setting = self.__dict_get__('debug') desired_setting = bool(debug) if not current_setting and desired_setting: self.__dict_set__('debug', True) logging.debug("Libguestfs debugging enabled") # current and desired could both be True if current_setting and not desired_setting: self.__dict_set__('debug', False) logging.debug("Libguestfs debugging disabled") def set_timeout(self, timeout): """ Accessor method for 'timeout' property, timeout should be digit """ if type(timeout) is int: self.__dict_set__('timeout', timeout) else: try: timeout = int(str(timeout)) self.__dict_set__('timeout', timeout) except ValueError: logging.debug("Set timeout failed.") def get_uri(self): """ Accessor method for 'uri' property that must exist """ # self.get() would call get_uri() recursivly try: return self.__dict_get__('uri') except KeyError: return None # There are two ways to call guestfish: # 1.Guestfish classies provided below(shell session) # 2.guestfs module provided in system libguestfs package class Guestfish(LibguestfsBase): """ Execute guestfish, using a new guestfish shell each time. """ __slots__ = [] def __init__(self, disk_img=None, ro_mode=False, libvirt_domain=None, inspector=False, uri=None, mount_options=None, run_mode="interactive"): """ Initialize guestfish command with options. :param disk_img: if it is not None, use option '-a disk'. :param ro_mode: only for disk_img. add option '--ro' if it is True. :param libvirt_domain: if it is not None, use option '-d domain'. :param inspector: guestfish mounts vm's disks automatically :param uri: guestfish's connect uri :param mount_options: Mount the named partition or logical volume on the given mountpoint. 
""" guestfs_exec = "guestfish" if lgf_cmd_check(guestfs_exec) is None: raise LibguestfsCmdError if run_mode not in ['remote', 'interactive']: raise AssertionError("run_mode should be remote or interactive") # unset GUESTFISH_XXX environment parameters # to avoid color of guestfish shell session for testing color_envs = ["GUESTFISH_PS1", "GUESTFISH_OUTPUT", "GUESTFISH_RESTORE", "GUESTFISH_INIT"] unset_cmd = "" for env in color_envs: unset_cmd += "unset %s;" % env if run_mode == "interactive" and unset_cmd: guestfs_exec = unset_cmd + " " + guestfs_exec if run_mode == "remote": guestfs_exec += " --listen" else: if uri: guestfs_exec += " -c '%s'" % uri if disk_img: guestfs_exec += " -a '%s'" % disk_img if libvirt_domain: guestfs_exec += " -d '%s'" % libvirt_domain if ro_mode: guestfs_exec += " --ro" if inspector: guestfs_exec += " -i" if mount_options is not None: guestfs_exec += " --mount %s" % mount_options super(Guestfish, self).__init__(guestfs_exec) def complete_cmd(self, command): """ Execute built-in command in a complete guestfish command (Not a guestfish session). command: guestfish [--options] [commands] """ guestfs_exec = self.__dict_get__('lgf_exec') ignore_status = self.__dict_get__('ignore_status') debug = self.__dict_get__('debug') timeout = self.__dict_get__('timeout') if command: guestfs_exec += " %s" % command return lgf_command(guestfs_exec, ignore_status, debug, timeout) else: raise LibguestfsCmdError("No built-in command was passed.") class GuestfishSession(aexpect.ShellSession): """ A shell session of guestfish. """ # Check output against list of known error-status strings ERROR_REGEX_LIST = ['libguestfs: error:\s*'] def __init__(self, guestfs_exec=None, a_id=None, prompt=r"><fs>\s*"): """ Initialize guestfish session server, or client if id set. :param guestfs_cmd: path to guestfish executable :param id: ID of an already running server, if accessing a running server, or None if starting a new one. 
:param prompt: Regular expression describing the shell's prompt line. """ # aexpect tries to auto close session because no clients connected yet super(GuestfishSession, self).__init__(guestfs_exec, a_id, prompt=prompt, auto_close=False) def cmd_status_output(self, cmd, timeout=60, internal_timeout=None, print_func=None): """ Send a guestfish command and return its exit status and output. :param cmd: guestfish command to send (must not contain newline characters) :param timeout: The duration (in seconds) to wait for the prompt to return :param internal_timeout: The timeout to pass to read_nonblocking :param print_func: A function to be used to print the data being read (should take a string parameter) :return: A tuple (status, output) where status is the exit status and output is the output of cmd :raise ShellTimeoutError: Raised if timeout expires :raise ShellProcessTerminatedError: Raised if the shell process terminates while waiting for output :raise ShellStatusError: Raised if the exit status cannot be obtained :raise ShellError: Raised if an unknown error occurs """ out = self.cmd_output(cmd, timeout, internal_timeout, print_func) for line in out.splitlines(): if self.match_patterns(line, self.ERROR_REGEX_LIST) is not None: return 1, out return 0, out def cmd_result(self, cmd, ignore_status=False): """Mimic process.run()""" exit_status, stdout = self.cmd_status_output(cmd) stderr = '' # no way to retrieve this separately result = process.CmdResult(cmd, stdout, stderr, exit_status) if not ignore_status and exit_status: raise process.CmdError(cmd, result, "Guestfish Command returned non-zero exit status") return result class GuestfishRemote(object): """ Remote control of guestfish. """ # Check output against list of known error-status strings ERROR_REGEX_LIST = ['libguestfs: error:\s*'] def __init__(self, guestfs_exec=None, a_id=None): """ Initialize guestfish session server, or client if id set. 
:param guestfs_cmd: path to guestfish executable :param a_id: guestfish remote id """ if a_id is None: try: ret = process.run(guestfs_exec, ignore_status=False, verbose=True, timeout=60) except process.CmdError as detail: raise LibguestfsCmdError(detail) self.a_id = re.search(b"\d+", ret.stdout.strip()).group() else: self.a_id = a_id def get_id(self): return self.a_id def cmd_status_output(self, cmd, ignore_status=None, verbose=None, timeout=60): """ Send a guestfish command and return its exit status and output. :param cmd: guestfish command to send(must not contain newline characters) :param timeout: The duration (in seconds) to wait for the prompt to return :return: A tuple (status, output) where status is the exit status and output is the output of cmd :raise LibguestfsCmdError: Raised if commands execute failed """ guestfs_exec = "guestfish --remote=%s " % self.a_id cmd = guestfs_exec + cmd try: ret = process.run(cmd, ignore_status=ignore_status, verbose=verbose, timeout=timeout) except process.CmdError as detail: raise LibguestfsCmdError(detail) for line in self.ERROR_REGEX_LIST: if re.search(line, ret.stdout_text.strip()): e_msg = ('Error pattern %s found on output of %s: %s' % (line, cmd, ret.stdout_text.strip())) raise LibguestfsCmdError(e_msg) logging.debug("command: %s", cmd) logging.debug("stdout: %s", ret.stdout_text.strip()) return 0, ret.stdout_text.strip() def cmd(self, cmd, ignore_status=False): """Mimic process.run()""" exit_status, stdout = self.cmd_status_output(cmd) stderr = '' # no way to retrieve this separately result = process.CmdResult(cmd, stdout, stderr, exit_status) if not ignore_status and exit_status: raise process.CmdError(cmd, result, "Guestfish Command returned non-zero exit status") return result def cmd_result(self, cmd, ignore_status=False): """Mimic process.run()""" exit_status, stdout = self.cmd_status_output(cmd) stderr = '' # no way to retrieve this separately result = process.CmdResult(cmd, stdout, stderr, exit_status) if 
not ignore_status and exit_status: raise process.CmdError(cmd, result, "Guestfish Command returned non-zero exit status") return result class GuestfishPersistent(Guestfish): """ Execute operations using persistent guestfish session. """ __slots__ = ['session_id', 'run_mode'] # Help detect leftover sessions SESSION_COUNTER = 0 def __init__(self, disk_img=None, ro_mode=False, libvirt_domain=None, inspector=False, uri=None, mount_options=None, run_mode="interactive"): super(GuestfishPersistent, self).__init__(disk_img, ro_mode, libvirt_domain, inspector, uri, mount_options, run_mode) self.__dict_set__('run_mode', run_mode) if self.get('session_id') is None: # set_uri does not call when INITIALIZED = False # and no session_id passed to super __init__ self.new_session() # Check whether guestfish session is prepared. guestfs_session = self.open_session() if run_mode != "remote": status, output = guestfs_session.cmd_status_output( 'is-config', timeout=60) if status != 0: logging.debug( "Persistent guestfish session is not responding.") raise aexpect.ShellStatusError(self.lgf_exec, 'is-config') def close_session(self): """ If a persistent session exists, close it down. 
""" try: run_mode = self.get('run_mode') existing = self.open_session() # except clause exits function # Try to end session with inner command 'quit' try: existing.cmd("quit") # It should jump to exception followed normally except aexpect.ShellProcessTerminatedError: self.__class__.SESSION_COUNTER -= 1 self.__dict_del__('session_id') return # guestfish session was closed normally # Close with 'quit' did not respond # So close with aexpect functions if run_mode != "remote": if existing.is_alive(): # try nicely first existing.close() if existing.is_alive(): # Be mean, incase it's hung existing.close(sig=signal.SIGTERM) # Keep count: self.__class__.SESSION_COUNTER -= 1 self.__dict_del__('session_id') except LibguestfsCmdError: # Allow other exceptions to be raised pass # session was closed already def new_session(self): """ Open new session, closing any existing """ # Accessors may call this method, avoid recursion # Must exist, can't be None guestfs_exec = self.__dict_get__('lgf_exec') self.close_session() # Always create new session run_mode = self.get('run_mode') if run_mode == "remote": new_session = GuestfishRemote(guestfs_exec) else: new_session = GuestfishSession(guestfs_exec) # Keep count self.__class__.SESSION_COUNTER += 1 session_id = new_session.get_id() self.__dict_set__('session_id', session_id) def open_session(self): """ Return session with session_id in this class. """ try: session_id = self.__dict_get__('session_id') run_mode = self.get('run_mode') if session_id: try: if run_mode == "remote": return GuestfishRemote(a_id=session_id) else: return GuestfishSession(a_id=session_id) except aexpect.ShellStatusError: # session was already closed self.__dict_del__('session_id') raise LibguestfsCmdError( "Open session '%s' failed." 
% session_id) except KeyError: raise LibguestfsCmdError("No session id.") # Inner command for guestfish should be executed in a guestfish session def inner_cmd(self, command): """ Execute inner command of guestfish in a pesistent session. :param command: inner command to be executed. """ session = self.open_session() # Allow to raise error by default. ignore_status = self.__dict_get__('ignore_status') return session.cmd_result(command, ignore_status=ignore_status) def add_drive(self, filename): """ add-drive - add an image to examine or modify This function is the equivalent of calling "add_drive_opts" with no optional parameters, so the disk is added writable, with the format being detected automatically. """ return self.inner_cmd("add-drive %s" % filename) def add_drive_opts(self, filename, readonly=False, format=None, iface=None, name=None, label=None, protocol=None, server=None, username=None, secret=None, cachemode=None, discard=None, copyonread=False): """ add-drive-opts - add an image to examine or modify. This function adds a disk image called "filename" to the handle. "filename" may be a regular host file or a host device. """ cmd = "add-drive-opts %s" % filename if readonly: cmd += " readonly:true" else: cmd += " readonly:false" if format: cmd += " format:%s" % format if iface: cmd += " iface:%s" % iface if name: cmd += " name:%s" % name if label: cmd += " label:%s" % label if protocol: cmd += " protocol:%s" % protocol if server: cmd += " server:%s" % server if username: cmd += " username:%s" % username if secret: cmd += " secret:%s" % secret if cachemode: cmd += " cachemode:%s" % cachemode if discard: cmd += " discard:%s" % discard if copyonread: cmd += " copyonread:true" else: # The default is false for copyonread. # If copyonread param is false, # It's no need to set " copyonread:false" explicitly. 
pass return self.inner_cmd(cmd) def add_drive_ro(self, filename): """ add-ro/add-drive-ro - add a drive in snapshot mode (read-only) This function is the equivalent of calling "add_drive_opts" with the optional parameter "GUESTFS_ADD_DRIVE_OPTS_READONLY" set to 1, so the disk is added read-only, with the format being detected automatically. """ return self.inner_cmd("add-drive-ro %s" % filename) def add_domain(self, domain, libvirturi=None, readonly=False, iface=None, live=False, allowuuid=False, readonlydisk=None): """ domain/add-domain - add the disk(s) from a named libvirt domain This function adds the disk(s) attached to the named libvirt domain "dom". It works by connecting to libvirt, requesting the domain and domain XML from libvirt, parsing it for disks, and calling "add_drive_opts" on each one. """ cmd = "add-domain %s" % domain if libvirturi: cmd += " libvirturi:%s" % libvirturi if readonly: cmd += " readonly:true" else: cmd += " readonly:false" if iface: cmd += " iface:%s" % iface if live: cmd += " live:true" if allowuuid: cmd += " allowuuid:true" if readonlydisk: cmd += " readonlydisk:%s" % readonlydisk return self.inner_cmd(cmd) def run(self): """ run/launch - launch the qemu subprocess Internally libguestfs is implemented by running a virtual machine using qemu. """ return self.inner_cmd("launch") def df(self): """ df - report file system disk space usage This command runs the "df" command to report disk space used. """ return self.inner_cmd("df") def df_h(self): """ df-h - report file system disk space usage (human readable) This command runs the "df -h" command to report disk space used in human-readable format. 
""" return self.inner_cmd("df-h") def dd(self, src, dest): """ dd - copy from source to destination using dd This command copies from one source device or file "src" to another destination device or file "dest".Normally you would use this to copy to or from a device or partition,for example to duplicate a filesystem """ return self.inner_cmd("dd %s %s" % (src, dest)) def copy_size(self, src, dest, size): """ copy-size - copy size bytes from source to destination using dd This command copies exactly "size" bytes from one source device or file "src" to another destination device or file "dest". """ return self.inner_cmd("copy-size %s %s %s" % (src, dest, size)) def list_partitions(self): """ list-partitions - list the partitions List all the partitions detected on all block devices. """ return self.inner_cmd("list-partitions") def mount(self, device, mountpoint): """ mount - mount a guest disk at a position in the filesystem Mount a guest disk at a position in the filesystem. """ return self.inner_cmd("mount %s %s" % (device, mountpoint)) def mount_ro(self, device, mountpoint): """ mount-ro - mount a guest disk, read-only This is the same as the "mount" command, but it mounts the filesystem with the read-only (*-o ro*) flag. """ return self.inner_cmd("mount-ro %s %s" % (device, mountpoint)) def mount_options(self, options, device, mountpoint): """ mount - mount a guest disk at a position in the filesystem Mount a guest disk at a position in the filesystem. """ return self.inner_cmd("mount-options %s %s %s" % (options, device, mountpoint)) def mounts(self): """ mounts - show mounted filesystems This returns the list of currently mounted filesystems. """ return self.inner_cmd("mounts") def mountpoints(self): """ mountpoints - show mountpoints This call is similar to "mounts". That call returns a list of devices. 
""" return self.inner_cmd("mountpoints") def do_mount(self, mountpoint): """ do_mount - Automaticly mount Mount a lvm or physical partation to '/' """ partition_type = self.params.get("partition_type") if partition_type == "lvm": vg_name = self.params.get("vg_name", "vol_test") lv_name = self.params.get("lv_name", "vol_file") device = "/dev/%s/%s" % (vg_name, lv_name) logging.info("mount lvm partition...%s" % device) elif partition_type == "physical": pv_name = self.params.get("pv_name", "/dev/sdb") device = pv_name + "1" logging.info("mount physical partition...%s" % device) self.mount(device, mountpoint) def read_file(self, path): """ read-file - read a file This calls returns the contents of the file "path" as a buffer. """ return self.inner_cmd("read-file %s" % path) def cat(self, path): """ cat - list the contents of a file Return the contents of the file named "path". """ return self.inner_cmd("cat %s" % path) def write(self, path, content): """ write - create a new file This call creates a file called "path". The content of the file is the string "content" (which can contain any 8 bit data). """ return self.inner_cmd("write '%s' \"%s\"" % (path, content)) def write_append(self, path, content): """ write-append - append content to end of file This call appends "content" to the end of file "path". If "path" does not exist, then a new file is created. """ return self.inner_cmd("write-append '%s' \"%s\"" % (path, content)) def inspect_os(self): """ inspect-os - inspect disk and return list of operating systems found This function uses other libguestfs functions and certain heuristics to inspect the disk(s) (usually disks belonging to a virtual machine), looking for operating systems. 
""" return self.inner_cmd("inspect-os") def inspect_get_roots(self): """ inspect-get-roots - return list of operating systems found by last inspection This function is a convenient way to get the list of root devices """ return self.inner_cmd("inspect-get-roots") def inspect_get_arch(self, root): """ inspect-get-arch - get architecture of inspected operating system This returns the architecture of the inspected operating system. """ return self.inner_cmd("inspect-get-arch %s" % root) def inspect_get_distro(self, root): """ inspect-get-distro - get distro of inspected operating system This returns the distro (distribution) of the inspected operating system. """ return self.inner_cmd("inspect-get-distro %s" % root) def inspect_get_filesystems(self, root): """ inspect-get-filesystems - get filesystems associated with inspected operating system This returns a list of all the filesystems that we think are associated with this operating system. """ return self.inner_cmd("inspect-get-filesystems %s" % root) def inspect_get_hostname(self, root): """ inspect-get-hostname - get hostname of the operating system This function returns the hostname of the operating system as found by inspection of the guest's configuration files. """ return self.inner_cmd("inspect-get-hostname %s" % root) def inspect_get_major_version(self, root): """ inspect-get-major-version - get major version of inspected operating system This returns the major version number of the inspected operating system. 
""" return self.inner_cmd("inspect-get-major-version %s" % root) def inspect_get_minor_version(self, root): """ inspect-get-minor-version - get minor version of inspected operating system This returns the minor version number of the inspected operating system """ return self.inner_cmd("inspect-get-minor-version %s" % root) def inspect_get_mountpoints(self, root): """ inspect-get-mountpoints - get mountpoints of inspected operating system This returns a hash of where we think the filesystems associated with this operating system should be mounted. """ return self.inner_cmd("inspect-get-mountpoints %s" % root) def list_filesystems(self): """ list-filesystems - list filesystems This inspection command looks for filesystems on partitions, block devices and logical volumes, returning a list of devices containing filesystems and their type. """ return self.inner_cmd("list-filesystems") def list_devices(self): """ list-devices - list the block devices List all the block devices. """ return self.inner_cmd("list-devices") def tar_out(self, directory, tarfile): """ tar-out - pack directory into tarfile This command packs the contents of "directory" and downloads it to local file "tarfile". """ return self.inner_cmd("tar-out %s %s" % (directory, tarfile)) def tar_in(self, tarfile, directory): """ tar-in - unpack tarfile to directory This command uploads and unpacks local file "tarfile" (an *uncompressed* tar file) into "directory". """ return self.inner_cmd("tar-in %s %s" % (tarfile, directory)) def tar_in_opts(self, tarfile, directory, compress=None): """ tar-in-opts - unpack tarfile to directory This command uploads and unpacks local file "tarfile" (an *compressed* tar file) into "directory". 
""" if compress: return self.inner_cmd("tar-in-opts %s %s compress:%s" % (tarfile, directory, compress)) else: return self.inner_cmd("tar-in-opts %s %s" % (tarfile, directory)) def file_architecture(self, filename): """ file-architecture - detect the architecture of a binary file This detects the architecture of the binary "filename", and returns it if known. """ return self.inner_cmd("file-architecture %s" % filename) def filesize(self, file): """ filesize - return the size of the file in bytes This command returns the size of "file" in bytes. """ return self.inner_cmd("filesize %s" % file) def stat(self, path): """ stat - get file information Returns file information for the given "path". """ return self.inner_cmd("stat %s" % path) def lstat(self, path): """ lstat - get file information for a symbolic link Returns file information for the given "path". """ return self.inner_cmd("lstat %s" % path) def lstatlist(self, path, names): """ lstatlist - lstat on multiple files This call allows you to perform the "lstat" operation on multiple files, where all files are in the directory "path". "names" is the list of files from this directory. """ return self.inner_cmd("lstatlist %s %s" % (path, names)) def umask(self, mask): """ umask - set file mode creation mask (umask) This function sets the mask used for creating new files and device nodes to "mask & 0777". """ return self.inner_cmd("umask %s" % mask) def get_umask(self): """ get-umask - get the current umask Return the current umask. By default the umask is 022 unless it has been set by calling "umask". """ return self.inner_cmd("get-umask") def mkdir(self, path): """ mkdir - create a directory Create a directory named "path". """ return self.inner_cmd("mkdir %s" % path) def mkdir_p(self, path): """ mkdir-p - create a directory and parents Create a directory named "path", creating any parent directories as necessary. This is like the "mkdir -p" shell command. 
""" return self.inner_cmd("mkdir-p %s" % path) def mkdir_mode(self, path, mode): """ mkdir-mode - create a directory with a particular mode This command creates a directory, setting the initial permissions of the directory to "mode". """ return self.inner_cmd("mkdir-mode %s %s" % (path, mode)) def mknod(self, mode, devmajor, devminor, path): """ mknod - make block, character or FIFO devices This call creates block or character special devices, or named pipes (FIFOs). """ return self.inner_cmd("mknod %s %s %s %s" % (mode, devmajor, devminor, path)) def rm_rf(self, path): """ rm-rf - remove a file or directory recursively Remove the file or directory "path", recursively removing the contents if its a directory. This is like the "rm -rf" shell command. """ return self.inner_cmd("rm-rf %s" % path) def copy_out(self, remote, localdir): """ copy-out - copy remote files or directories out of an image "copy-out" copies remote files or directories recursively out of the disk image, placing them on the host disk in a local directory called "localdir" (which must exist). """ return self.inner_cmd("copy-out %s %s" % (remote, localdir)) def copy_in(self, local, remotedir): """ copy-in - copy local files or directories into an image "copy-in" copies local files or directories recursively into the disk image, placing them in the directory called "/remotedir" (which must exist). """ return self.inner_cmd("copy-in %s /%s" % (local, remotedir)) def chmod(self, mode, path): """ chmod - change file mode Change the mode (permissions) of "path" to "mode". Only numeric modes are supported. """ return self.inner_cmd("chmod %s %s" % (mode, path)) def chown(self, owner, group, path): """ chown - change file owner and group Change the file owner to "owner" and group to "group". """ return self.inner_cmd("chown %s %s %s" % (owner, group, path)) def lchown(self, owner, group, path): """ lchown - change file owner and group Change the file owner to "owner" and group to "group". 
This is like "chown" but if "path" is a symlink then the link itself is changed, not the target. """ return self.inner_cmd("lchown %s %s %s" % (owner, group, path)) def du(self, path): """ du - estimate file space usage This command runs the "du -s" command to estimate file space usage for "path". """ return self.inner_cmd("du %s" % path) def file(self, path): """ file - determine file type This call uses the standard file(1) command to determine the type or contents of the file. """ return self.inner_cmd("file %s" % path) def rm(self, path): """ rm - remove a file Remove the single file "path". """ return self.inner_cmd("rm %s" % path) def is_file(self, path, followsymlinks=None): """ is-file - test if a regular file This returns "true" if and only if there is a regular file with the given "path" name. """ cmd = "is-file %s" % path if followsymlinks: cmd += " followsymlinks:%s" % followsymlinks return self.inner_cmd(cmd) def is_file_opts(self, path, followsymlinks=None): """ is-file_opts - test if a regular file This returns "true" if and only if there is a regular file with the given "path" name. 
        An alias of command is-file
        """
        cmd = "is-file-opts %s" % path
        if followsymlinks:
            cmd += " followsymlinks:%s" % followsymlinks
        return self.inner_cmd(cmd)

    def is_blockdev(self, path, followsymlinks=None):
        """
        is-blockdev - test if block device

        This returns "true" if and only if there is a block device with the
        given "path" name
        """
        cmd = "is-blockdev %s" % path
        if followsymlinks:
            cmd += " followsymlinks:%s" % followsymlinks
        return self.inner_cmd(cmd)

    def is_blockdev_opts(self, path, followsymlinks=None):
        """
        is-blockdev_opts - test if block device

        This returns "true" if and only if there is a block device with the
        given "path" name

        An alias of command is-blockdev
        """
        cmd = "is-blockdev-opts %s" % path
        if followsymlinks:
            cmd += " followsymlinks:%s" % followsymlinks
        return self.inner_cmd(cmd)

    def is_chardev(self, path, followsymlinks=None):
        """
        is-chardev - test if character device

        This returns "true" if and only if there is a character device with
        the given "path" name.
        """
        cmd = "is-chardev %s" % path
        if followsymlinks:
            cmd += " followsymlinks:%s" % followsymlinks
        return self.inner_cmd(cmd)

    def is_chardev_opts(self, path, followsymlinks=None):
        """
        is-chardev_opts - test if character device

        This returns "true" if and only if there is a character device with
        the given "path" name.

        An alias of command is-chardev
        """
        cmd = "is-chardev-opts %s" % path
        if followsymlinks:
            cmd += " followsymlinks:%s" % followsymlinks
        return self.inner_cmd(cmd)

    def is_dir(self, path, followsymlinks=None):
        """
        is-dir - test if a directory

        This returns "true" if and only if there is a directory with the
        given "path" name. Note that it returns false for other objects like
        files.
        """
        cmd = "is-dir %s" % path
        if followsymlinks:
            cmd += " followsymlinks:%s" % followsymlinks
        return self.inner_cmd(cmd)

    def is_dir_opts(self, path, followsymlinks=None):
        """
        is-dir-opts - test if a directory

        This returns "true" if and only if there is a directory with the
        given "path" name.
        An alias of command is-dir
        """
        cmd = "is-dir-opts %s" % path
        if followsymlinks:
            cmd += " followsymlinks:%s" % followsymlinks
        return self.inner_cmd(cmd)

    def is_fifo(self, path, followsymlinks=None):
        """
        is-fifo - test if FIFO (named pipe)

        This returns "true" if and only if there is a FIFO (named pipe) with
        the given "path" name.
        """
        cmd = "is-fifo %s" % path
        if followsymlinks:
            cmd += " followsymlinks:%s" % followsymlinks
        return self.inner_cmd(cmd)

    def is_fifo_opts(self, path, followsymlinks=None):
        """
        is-fifo-opts - test if FIFO (named pipe)

        This returns "true" if and only if there is a FIFO (named pipe) with
        the given "path" name.

        An alias of command is-fifo
        """
        cmd = "is-fifo-opts %s" % path
        if followsymlinks:
            cmd += " followsymlinks:%s" % followsymlinks
        return self.inner_cmd(cmd)

    def is_lv(self, device):
        """
        is-lv - test if device is a logical volume

        This command tests whether "device" is a logical volume, and returns
        true iff this is the case.
        """
        return self.inner_cmd("is-lv %s" % device)

    def is_socket(self, path, followsymlinks=None):
        """
        is-socket - test if socket

        This returns "true" if and only if there is a Unix domain socket with
        the given "path" name.
        """
        cmd = "is-socket %s" % path
        if followsymlinks:
            cmd += " followsymlinks:%s" % followsymlinks
        return self.inner_cmd(cmd)

    def is_socket_opts(self, path, followsymlinks=None):
        """
        is-socket-opts - test if socket

        This returns "true" if and only if there is a Unix domain socket with
        the given "path" name.

        An alias of command is-socket
        """
        cmd = "is-socket-opts %s" % path
        if followsymlinks:
            cmd += " followsymlinks:%s" % followsymlinks
        return self.inner_cmd(cmd)

    def is_symlink(self, path):
        """
        is-symlink - test if symbolic link

        This returns "true" if and only if there is a symbolic link with the
        given "path" name.
""" return self.inner_cmd("is-symlink %s" % path) def is_whole_device(self, device): """ is_whole_device - test if a device is a whole device This returns "true" if and only if "device" refers to a whole block device. That is, not a partition or a logical device. """ return self.inner_cmd("is-whole-device %s" % device) def is_zero(self, path): """ is-zero - test if a file contains all zero bytes This returns true iff the file exists and the file is empty or it contains all zero bytes. """ return self.inner_cmd("is-zero %s" % path) def is_zero_device(self, device): """ is-zero-device - test if a device contains all zero bytes This returns true iff the device exists and contains all zero bytes. Note that for large devices this can take a long time to run. """ return self.inner_cmd("is-zero-device %s" % device) def cp(self, src, dest): """ cp - copy a file This copies a file from "src" to "dest" where "dest" is either a destination filename or destination directory. """ return self.inner_cmd("cp %s %s" % (src, dest)) def exists(self, path): """ exists - test if file or directory exists This returns "true" if and only if there is a file, directory (or anything) with the given "path" name """ return self.inner_cmd("exists %s" % path) def cp_a(self, src, dest): """ cp-a - copy a file or directory recursively This copies a file or directory from "src" to "dest" recursively using the "cp -a" command. """ return self.inner_cmd("cp-a %s %s" % (src, dest)) def equal(self, file1, file2): """ equal - test if two files have equal contents This compares the two files "file1" and "file2" and returns true if their content is exactly equal, or false otherwise. """ return self.inner_cmd("equal %s %s" % (file1, file2)) def fill(self, c, len, path): """ fill - fill a file with octets This command creates a new file called "path". The initial content of the file is "len" octets of "c", where "c" must be a number in the range "[0..255]". 
""" return self.inner_cmd("fill %s %s %s" % (c, len, path)) def fill_dir(self, dir, nr): """ fill-dir - fill a directory with empty files This function, useful for testing filesystems, creates "nr" empty files in the directory "dir" with names 00000000 through "nr-1" (ie. each file name is 8 digits long padded with zeroes). """ return self.inner_cmd("fill-dir %s %s" % (dir, nr)) def fill_pattern(self, pattern, len, path): """ fill-pattern - fill a file with a repeating pattern of bytes This function is like "fill" except that it creates a new file of length "len" containing the repeating pattern of bytes in "pattern". The pattern is truncated if necessary to ensure the length of the file is exactly "len" bytes. """ return self.inner_cmd("fill-pattern %s %s %s" % (pattern, len, path)) def strings(self, path): """ strings - print the printable strings in a file This runs the strings(1) command on a file and returns the list of printable strings found. """ return self.inner_cmd("strings %s" % path) def head(self, path): """ head - return first 10 lines of a file This command returns up to the first 10 lines of a file as a list of strings. """ return self.inner_cmd("head %s" % path) def head_n(self, nrlines, path): """ head-n - return first N lines of a file If the parameter "nrlines" is a positive number, this returns the first "nrlines" lines of the file "path". """ return self.inner_cmd("head-n %s %s" % (nrlines, path)) def tail(self, path): """ tail - return last 10 lines of a file This command returns up to the last 10 lines of a file as a list of strings. """ return self.inner_cmd("tail %s" % path) def pread(self, path, count, offset): """ pread - read part of a file This command lets you read part of a file. It reads "count" bytes of the file, starting at "offset", from file "path". """ return self.inner_cmd("pread %s %s %s" % (path, count, offset)) def hexdump(self, path): """ hexdump - dump a file in hexadecimal This runs "hexdump -C" on the given "path". 
        The result is the human-readable, canonical hex dump of the file.
        """
        return self.inner_cmd("hexdump %s" % path)

    def more(self, filename):
        """
        more - view a file

        This is used to view a file.
        """
        return self.inner_cmd("more %s" % filename)

    def download(self, remotefilename, filename):
        """
        download - download a file to the local machine

        Download file "remotefilename" and save it as "filename" on the local
        machine.
        """
        return self.inner_cmd("download %s %s" % (remotefilename, filename))

    def download_offset(self, remotefilename, filename, offset, size):
        """
        download-offset - download a file to the local machine with offset
        and size

        Download up to "size" bytes of file "remotefilename", starting at
        "offset", and save it as "filename" on the local machine.
        """
        return self.inner_cmd("download-offset %s %s %s %s"
                              % (remotefilename, filename, offset, size))

    def upload(self, filename, remotefilename):
        """
        upload - upload a file from the local machine

        Upload local file "filename" to "remotefilename" on the filesystem.
        """
        return self.inner_cmd("upload %s %s" % (filename, remotefilename))

    def upload_offset(self, filename, remotefilename, offset):
        """
        upload - upload a file from the local machine with offset

        Upload local file "filename" to "remotefilename" on the filesystem,
        starting at "offset".
        """
        return self.inner_cmd("upload-offset %s %s %s"
                              % (filename, remotefilename, offset))

    def fallocate(self, path, len):
        """
        fallocate - preallocate a file in the guest filesystem

        This command preallocates a file (containing zero bytes) named "path"
        of size "len" bytes. If the file exists already, it is overwritten.
        """
        return self.inner_cmd("fallocate %s %s" % (path, len))

    def fallocate64(self, path, len):
        """
        fallocate - preallocate a file in the guest filesystem

        This command preallocates a file (containing zero bytes) named "path"
        of size "len" bytes. If the file exists already, it is overwritten.
""" return self.inner_cmd("fallocate64 %s %s" % (path, len)) def part_init(self, device, parttype): """ part-init - create an empty partition table This creates an empty partition table on "device" of one of the partition types listed below. Usually "parttype" should be either "msdos" or "gpt" (for large disks). """ return self.inner_cmd("part-init %s %s" % (device, parttype)) def part_add(self, device, prlogex, startsect, endsect): """ part-add - add a partition to the device This command adds a partition to "device". If there is no partition table on the device, call "part_init" first. """ cmd = "part-add %s %s %s %s" % (device, prlogex, startsect, endsect) return self.inner_cmd(cmd) def part_del(self, device, partnum): """ part-del device partnum This command deletes the partition numbered "partnum" on "device". Note that in the case of MBR partitioning, deleting an extended partition also deletes any logical partitions it contains. """ return self.inner_cmd("part_del %s %s" % (device, partnum)) def part_set_bootable(self, device, partnum, bootable): """ part-set-bootable device partnum bootable This sets the bootable flag on partition numbered "partnum" on device "device". Note that partitions are numbered from 1. """ return self.inner_cmd("part-set-bootable %s %s %s" % (device, partnum, bootable)) def part_set_mbr_id(self, device, partnum, idbyte): """ part-set-mbr-id - set the MBR type byte (ID byte) of a partition Sets the MBR type byte (also known as the ID byte) of the numbered partition "partnum" to "idbyte". Note that the type bytes quoted in most documentation are in fact hexadecimal numbers, but usually documented without any leading "0x" which might be confusing. """ return self.inner_cmd("part-set-mbr-id %s %s %s" % (device, partnum, idbyte)) def part_set_name(self, device, partnum, name): """ part-set-name - set partition name This sets the partition name on partition numbered "partnum" on device "device". Note that partitions are numbered from 1. 
""" return self.inner_cmd("part-set-name %s %s %s" % (device, partnum, name)) def part_to_dev(self, partition): """ part-to-dev - convert partition name to device name This function takes a partition name (eg. "/dev/sdb1") and removes the partition number, returning the device name (eg. "/dev/sdb"). The named partition must exist, for example as a string returned from "list_partitions". """ return self.inner_cmd("part-to-dev %s" % partition) def part_to_partnum(self, partition): """ part-to-partnum - convert partition name to partition number This function takes a partition name (eg. "/dev/sdb1") and returns the partition number (eg. 1). The named partition must exist, for example as a string returned from "list_partitions". """ return self.inner_cmd("part_to_partnum %s" % partition) def checksum(self, csumtype, path): """ checksum - compute MD5, SHAx or CRC checksum of file This call computes the MD5, SHAx or CRC checksum of the file named "path". """ return self.inner_cmd("checksum %s %s" % (csumtype, path)) def checksum_device(self, csumtype, device): """ checksum-device - compute MD5, SHAx or CRC checksum of the contents of a device This call computes the MD5, SHAx or CRC checksum of the contents of the device named "device". For the types of checksums supported see the "checksum" command. """ return self.inner_cmd("checksum-device %s %s" % (csumtype, device)) def checksums_out(self, csumtype, directory, sumsfile): """ checksums-out - compute MD5, SHAx or CRC checksum of files in a directory This command computes the checksums of all regular files in "directory" and then emits a list of those checksums to the local output file "sumsfile". 
""" return self.inner_cmd("checksums-out %s %s %s" % (csumtype, directory, sumsfile)) def is_config(self): """ is-config - is ready to accept commands This returns true if this handle is in the "CONFIG" state """ return self.inner_cmd("is-config") def is_ready(self): """ is-ready - is ready to accept commands This returns true if this handle is ready to accept commands (in the "READY" state). """ return self.inner_cmd("is-ready") def part_list(self, device): """ part-list - list partitions on a device This command parses the partition table on "device" and returns the list of partitions found. """ return self.inner_cmd("part-list %s" % device) def mkfs(self, fstype, device, blocksize=None, features=None, inode=None, sectorsize=None): """ mkfs - make a filesystem This function creates a filesystem on "device". The filesystem type is "fstype", for example "ext3". """ cmd = 'mkfs %s %s' % (fstype, device) if blocksize: cmd += ' blocksize:%s ' % blocksize if features: cmd += ' features:%s ' % features if inode: cmd += ' inode:%s ' % inode if sectorsize: cmd += ' sectorsize:%s ' % sectorsize return self.inner_cmd(cmd) def mkfs_opts(self, fstype, device, blocksize=None, features=None, inode=None, sectorsize=None): """ same with mkfs """ return self.mkfs(fstype, device, blocksize, features, inode, sectorsize) def part_disk(self, device, parttype): """ part-disk - partition whole disk with a single primary partition This command is simply a combination of "part_init" followed by "part_add" to create a single primary partition covering the whole disk. """ return self.inner_cmd("part-disk %s %s" % (device, parttype)) def part_get_bootable(self, device, partnum): """ part-get-bootable - return true if a partition is bootable This command returns true if the partition "partnum" on "device" has the bootable flag set. 
""" return self.inner_cmd("part-get-bootable %s %s" % (device, partnum)) def part_get_mbr_id(self, device, partnum): """ part-get-mbr-id - get the MBR type byte (ID byte) from a partition Returns the MBR type byte (also known as the ID byte) from the numbered partition "partnum". """ return self.inner_cmd("part-get-mbr-id %s %s" % (device, partnum)) def part_get_parttype(self, device): """ part-get-parttype - get the partition table type This command examines the partition table on "device" and returns the partition table type (format) being used. """ return self.inner_cmd("part-get-parttype %s" % device) def fsck(self, fstype, device): """ fsck - run the filesystem checker This runs the filesystem checker (fsck) on "device" which should have filesystem type "fstype". """ return self.inner_cmd("fsck %s %s" % (fstype, device)) def blockdev_getss(self, device): """ blockdev-getss - get sectorsize of block device This returns the size of sectors on a block device. Usually 512, but can be larger for modern devices. """ return self.inner_cmd("blockdev-getss %s" % device) def blockdev_getsz(self, device): """ blockdev-getsz - get total size of device in 512-byte sectors This returns the size of the device in units of 512-byte sectors (even if the sectorsize isn't 512 bytes ... weird). """ return self.inner_cmd("blockdev-getsz %s" % device) def blockdev_getbsz(self, device): """ blockdev-getbsz - get blocksize of block device This returns the block size of a device. """ return self.inner_cmd("blockdev-getbsz %s" % device) def blockdev_getsize64(self, device): """ blockdev-getsize64 - get total size of device in bytes This returns the size of the device in bytes """ return self.inner_cmd("blockdev-getsize64 %s" % device) def blockdev_setbsz(self, device, blocksize): """ blockdev-setbsz - set blocksize of block device This sets the block size of a device. 
""" return self.inner_cmd("blockdev-setbsz %s %s" % (device, blocksize)) def blockdev_getro(self, device): """ blockdev-getro - is block device set to read-only Returns a boolean indicating if the block device is read-only (true if read-only, false if not). """ return self.inner_cmd("blockdev-getro %s" % device) def blockdev_setro(self, device): """ blockdev-setro - set block device to read-only Sets the block device named "device" to read-only. """ return self.inner_cmd("blockdev-setro %s" % device) def blockdev_setrw(self, device): """ blockdev-setrw - set block device to read-write Sets the block device named "device" to read-write. """ return self.inner_cmd("blockdev-setrw %s" % device) def blockdev_flushbufs(self, device): """ blockdev-flushbufs - flush device buffers This tells the kernel to flush internal buffers associated with "device". """ return self.inner_cmd("blockdev-flushbufs %s" % device) def blockdev_rereadpt(self, device): """ blockdev-rereadpt - reread partition table Reread the partition table on "device". """ return self.inner_cmd("blockdev-rereadpt %s" % device) def canonical_device_name(self, device): """ canonical-device-name - return canonical device name This utility function is useful when displaying device names to the user. """ return self.inner_cmd("canonical-device-name %s" % device) def device_index(self, device): """ device-index - convert device to index This function takes a device name (eg. 
"/dev/sdb") and returns the index of the device in the list of devices """ return self.inner_cmd("device-index %s" % device) def disk_format(self, filename): """ disk-format - detect the disk format of a disk image Detect and return the format of the disk image called "filename", "filename" can also be a host device, etc """ return self.inner_cmd("disk-format %s" % filename) def disk_has_backing_file(self, filename): """ disk-has-backing-file - return whether disk has a backing file Detect and return whether the disk image "filename" has a backing file """ return self.inner_cmd("disk-has-backing-file %s" % filename) def disk_virtual_size(self, filename): """ disk-virtual-size - return virtual size of a disk Detect and return the virtual size in bytes of the disk image" """ return self.inner_cmd("disk-virtual-size %s" % filename) def max_disks(self): """ max-disks - maximum number of disks that may be added Return the maximum number of disks that may be added to a handle """ return self.inner_cmd("max-disks") def nr_devices(self): """ nr-devices - return number of whole block devices (disks) added This returns the number of whole block devices that were added """ return self.inner_cmd("nr-devices") def scrub_device(self, device): """ scrub-device - scrub (securely wipe) a device This command writes patterns over "device" to make data retrieval more difficult """ return self.inner_cmd("scrub-device %s" % device) def scrub_file(self, file): """ scrub-file - scrub (securely wipe) a file This command writes patterns over a file to make data retrieval more difficult """ return self.inner_cmd("scrub-file %s" % file) def scrub_freespace(self, dir): """ scrub-freespace - scrub (securely wipe) free space This command creates the directory "dir" and then fills it with files until the filesystem is full,and scrubs the files as for "scrub_file", and deletes them. 
        The intention is to scrub any free space on the partition containing
        "dir"
        """
        return self.inner_cmd("scrub-freespace %s" % dir)

    def md_create(self, name, device, missingbitmap=None, nrdevices=None,
                  spare=None, chunk=None, level=None):
        """
        md-create - create a Linux md (RAID) device

        Create a Linux md (RAID) device named "name" on the devices in the
        list "devices".
        """
        # Optional guestfish arguments are only appended when given.
        cmd = "md-create %s %s" % (name, device)
        if missingbitmap:
            cmd += " missingbitmap:%s" % missingbitmap
        if nrdevices:
            cmd += " nrdevices:%s" % nrdevices
        if spare:
            cmd += " spare:%s" % spare
        if chunk:
            cmd += " chunk:%s" % chunk
        if level:
            cmd += " level:%s" % level

        return self.inner_cmd(cmd)

    def list_md_devices(self):
        """
        list-md-devices - list Linux md (RAID) devices

        List all Linux md devices.
        """
        return self.inner_cmd("list-md-devices")

    def md_stop(self, md):
        """
        md-stop - stop a Linux md (RAID) device

        This command deactivates the MD array named "md". The device is
        stopped, but it is not destroyed or zeroed.
        """
        return self.inner_cmd("md-stop %s" % md)

    def md_stat(self, md):
        """
        md-stat - get underlying devices from an MD device

        This call returns a list of the underlying devices which make up the
        single software RAID array device "md".
        """
        return self.inner_cmd("md-stat %s" % md)

    def md_detail(self, md):
        """
        md-detail - obtain metadata for an MD device

        This command exposes the output of 'mdadm -DY <md>'. The following
        fields are usually present in the returned hash. Other fields may
        also be present.
        """
        return self.inner_cmd("md-detail %s" % md)

    def sfdisk(self, device, cyls, heads, sectors, lines):
        """
        sfdisk - create partitions on a block device

        This is a direct interface to the sfdisk(8) program for creating
        partitions on block devices.

        *This function is deprecated.* In new code, use the "part-add" call
        instead.

        Deprecated functions will not be removed from the API, but the fact
        that they are deprecated indicates that there are problems with
        correct use of these functions.
""" return self.inner_cmd("sfdisk %s %s %s %s %s" % (device, cyls, heads, sectors, lines)) def sfdisk_l(self, device): """ sfdisk-l - display the partition table This displays the partition table on "device", in the human-readable output of the sfdisk(8) command. It is not intended to be parsed. *This function is deprecated.* In new code, use the "part-list" call instead. """ return self.inner_cmd("sfdisk-l %s" % device) def sfdiskM(self, device, lines): """ sfdiskM - create partitions on a block device This is a simplified interface to the "sfdisk" command, where partition sizes are specified in megabytes only (rounded to the nearest cylinder) and you don't need to specify the cyls, heads and sectors parameters which were rarely if ever used anyway. *This function is deprecated.* In new code, use the "part-add" call instead. """ return self.inner_cmd("sfdiskM %s %s" % (device, lines)) def sfdisk_N(self, device, partnum, cyls, heads, sectors, line): """ sfdisk-N - modify a single partition on a block device This runs sfdisk(8) option to modify just the single partition "n" (note: "n" counts from 1). For other parameters, see "sfdisk". You should usually pass 0 for the cyls/heads/sectors parameters. *This function is deprecated.* In new code, use the "part-add" call instead. """ return self.inner_cmd("sfdisk-N %s %s %s %s %s %s" % (device, partnum, cyls, heads, sectors, line)) def sfdisk_disk_geometry(self, device): """ sfdisk-disk-geometry - display the disk geometry from the partition table This displays the disk geometry of "device" read from the partition table. Especially in the case where the underlying block device has been resized, this can be different from the kernel's idea of the geometry """ return self.inner_cmd("sfdisk-disk-geometry %s" % device) def sfdisk_kernel_geometry(self, device): """ sfdisk-kernel-geometry - display the kernel geometry This displays the kernel's idea of the geometry of "device". 
""" return self.inner_cmd("sfdisk-kernel-geometry %s" % device) def pvcreate(self, physvols): """ pvcreate - create an LVM physical volume This creates an LVM physical volume called "physvols". """ return self.inner_cmd("pvcreate %s" % (physvols)) def pvs(self): """ pvs - list the LVM physical volumes (PVs) List all the physical volumes detected. This is the equivalent of the pvs(8) command. """ return self.inner_cmd("pvs") def pvs_full(self): """ pvs-full - list the LVM physical volumes (PVs) List all the physical volumes detected. This is the equivalent of the pvs(8) command. The "full" version includes all fields. """ return self.inner_cmd("pvs-full") def pvresize(self, device): """ pvresize - resize an LVM physical volume This resizes (expands or shrinks) an existing LVM physical volume to match the new size of the underlying device """ return self.inner_cmd("pvresize %s" % device) def pvresize_size(self, device, size): """ pvresize-size - resize an LVM physical volume (with size) This command is the same as "pvresize" except that it allows you to specify the new size (in bytes) explicitly. """ return self.inner_cmd("pvresize-size %s %s" % (device, size)) def pvremove(self, device): """ pvremove - remove an LVM physical volume This wipes a physical volume "device" so that LVM will no longer recognise it. The implementation uses the "pvremove" command which refuses to wipe physical volumes that contain any volume groups, so you have to remove those first. """ return self.inner_cmd("pvremove %s" % device) def pvuuid(self, device): """ pvuuid - get the UUID of a physical volume This command returns the UUID of the LVM PV "device". """ return self.inner_cmd("pvuuid %s" % device) def vgcreate(self, volgroup, physvols): """ vgcreate - create an LVM volume group This creates an LVM volume group called "volgroup" from the non-empty list of physical volumes "physvols". 
""" return self.inner_cmd("vgcreate %s %s" % (volgroup, physvols)) def vgs(self): """ vgs - list the LVM volume groups (VGs) List all the volumes groups detected. """ return self.inner_cmd("vgs") def vgs_full(self): """ vgs-full - list the LVM volume groups (VGs) List all the volumes groups detected. This is the equivalent of the vgs(8) command. The "full" version includes all fields. """ return self.inner_cmd("vgs-full") def vgrename(self, volgroup, newvolgroup): """ vgrename - rename an LVM volume group Rename a volume group "volgroup" with the new name "newvolgroup". """ return self.inner_cmd("vgrename %s %s" % (volgroup, newvolgroup)) def vgremove(self, vgname): """ vgremove - remove an LVM volume group Remove an LVM volume group "vgname", (for example "VG"). """ return self.inner_cmd("vgremove %s" % vgname) def vgscan(self): """ vgscan - rescan for LVM physical volumes, volume groups and logical volumes This rescans all block devices and rebuilds the list of LVM physical volumes, volume groups and logical volumes. """ return self.inner_cmd("vgscan") def vguuid(self, vgname): """ vguuid - get the UUID of a volume group This command returns the UUID of the LVM VG named "vgname" """ return self.inner_cmd("vguuid %s" % vgname) def vg_activate(self, activate, volgroups): """ vg-activate - activate or deactivate some volume groups This command activates or (if "activate" is false) deactivates all logical volumes in the listed volume groups "volgroups" """ return self.inner_cmd("vg-activate %s %s" % (activate, volgroups)) def vg_activate_all(self, activate): """ vg-activate-all - activate or deactivate all volume groups This command activates or (if "activate" is false) deactivates all logical volumes in all volume groups. 
""" return self.inner_cmd("vg-activate-all %s" % activate) def vglvuuids(self, vgname): """ vglvuuids - get the LV UUIDs of all LVs in the volume group Given a VG called "vgname", this returns the UUIDs of all the logical volumes created in this volume group. """ return self.inner_cmd("vglvuuids %s" % vgname) def vgpvuuids(self, vgname): """ vgpvuuids - get the PV UUIDs containing the volume group Given a VG called "vgname", this returns the UUIDs of all the physical volumes that this volume group resides on. """ return self.inner_cmd("vgpvuuids %s" % vgname) def lvcreate(self, logvol, volgroup, mbytes): """ lvcreate - create an LVM logical volume This creates an LVM logical volume called "logvol" on the volume group "volgroup", with "size" megabytes. """ return self.inner_cmd("lvcreate %s %s %s" % (logvol, volgroup, mbytes)) def lvuuid(self, device): """ lvuuid - get the UUID of a logical volume This command returns the UUID of the LVM LV "device". """ return self.inner_cmd("lvuuid %s" % device) def lvm_canonical_lv_name(self, lvname): """ lvm-canonical-lv-name - get canonical name of an LV This converts alternative naming schemes for LVs that you might find to the canonical name. """ return self.inner_cmd("lvm-canonical-lv-name %s" % lvname) def lvremove(self, device): """ lvremove - remove an LVM logical volume Remove an LVM logical volume "device", where "device" is the path to the LV, such as "/dev/VG/LV". """ return self.inner_cmd("lvremove %s" % device) def lvresize(self, device, mbytes): """ lvresize - resize an LVM logical volume This resizes (expands or shrinks) an existing LVM logical volume to "mbytes". """ return self.inner_cmd("lvresize %s %s" % (device, mbytes)) def lvs(self): """ lvs - list the LVM logical volumes (LVs) List all the logical volumes detected. """ return self.inner_cmd("lvs") def lvs_full(self): """ lvs-full - list the LVM logical volumes (LVs) List all the logical volumes detected. This is the equivalent of the lvs(8) command. 
The "full" version includes all fields. """ return self.inner_cmd("lvs-full") def lvm_clear_filter(self): """ lvm-clear-filter - clear LVM device filter This undoes the effect of "lvm_set_filter". LVM will be able to see every block device. This command also clears the LVM cache and performs a volume group scan. """ return self.inner_cmd("lvm-clear-filter") def lvm_remove_all(self): """ lvm-remove-all - remove all LVM LVs, VGs and PVs This command removes all LVM logical volumes, volume groups and physical volumes. """ return self.inner_cmd("lvm-remove-all") def lvm_set_filter(self, device): """ lvm-set-filter - set LVM device filter This sets the LVM device filter so that LVM will only be able to "see" the block devices in the list "devices", and will ignore all other attached block devices. """ return self.inner_cmd("lvm-set-filter %s" % device) def lvresize_free(self, lv, percent): """ lvresize-free - expand an LV to fill free space This expands an existing logical volume "lv" so that it fills "pc"% of the remaining free space in the volume group. Commonly you would call this with pc = 100 which expands the logical volume as much as possible, using all remaining free space in the volume group. """ return self.inner_cmd("lvresize-free %s %s" % (lv, percent)) def lvrename(self, logvol, newlogvol): """ lvrename - rename an LVM logical volume Rename a logical volume "logvol" with the new name "newlogvol" """ return self.inner_cmd("lvrename %s %s" % (logvol, newlogvol)) def vfs_type(self, mountable): """ vfs-type - get the Linux VFS type corresponding to a mounted device Gets the filesystem type corresponding to the filesystem on "mountable" """ return self.inner_cmd("vfs-type %s" % (mountable)) def touch(self, path): """ touch - update file timestamps or create a new file Touch acts like the touch(1) command. It can be used to update the timestamps on a file, or, if the file does not exist, to create a new zero-length file. 
""" return self.inner_cmd("touch %s" % (path)) def umount_all(self): """ umount-all - unmount all filesystems This unmounts all mounted filesystems. Some internal mounts are not unmounted by this call. """ return self.inner_cmd("umount-all") def ls(self, directory): """ ls - list the files in a directory List the files in "directory" (relative to the root directory, there is no cwd). The '.' and '..' entries are not returned, but hidden files are shown. """ return self.inner_cmd("ls %s" % (directory)) def ll(self, directory): """ ll - list the files in a directory (long format) List the files in "directory" (relative to the root directory, there is no cwd) in the format of 'ls -la'. """ return self.inner_cmd("ll %s" % (directory)) def sync(self): """ lsync - sync disks, writes are flushed through to the disk image This syncs the disk, so that any writes are flushed through to the underlying disk image. """ return self.inner_cmd("sync") def debug(self, subcmd, extraargs): """ debug - debugging and internals The "debug" command exposes some internals of "guestfsd" (the guestfs daemon) that runs inside the hypervisor. """ return self.inner_cmd("debug %s %s" % (subcmd, extraargs)) def set_e2uuid(self, device, uuid): """ set-e2uuid - set the ext2/3/4 filesystem UUID This sets the ext2/3/4 filesystem UUID of the filesystem on "device" to "uuid". The format of the UUID and alternatives such as "clear", "random" and "time" are described in the tune2fs(8) manpage. """ return self.inner_cmd("set_e2uuid %s %s" % (device, uuid)) def get_e2uuid(self, device): """ get-e2uuid - get the ext2/3/4 filesystem UUID This returns the ext2/3/4 filesystem UUID of the filesystem on "device". """ return self.inner_cmd("get_e2uuid %s" % (device)) def vfs_uuid(self, mountable): """ vfs-uuid - get the filesystem UUID This returns the filesystem UUID of the filesystem on "mountable". 
""" return self.inner_cmd("vfs_uuid %s" % (mountable)) def findfs_uuid(self, uuid): """ findfs-uuid - find a filesystem by UUID This command searches the filesystems and returns the one which has the given UUID. An error is returned if no such filesystem can be found. """ return self.inner_cmd("findfs_uuid %s" % (uuid)) def set_uuid(self, device, uuid): """ set-uuid - set the filesystem UUID Set the filesystem UUID on "device" to "uuid". """ return self.inner_cmd("set_uuid %s %s" % (device, uuid)) def set_e2label(self, device, label): """ set-e2label - set the ext2/3/4 filesystem label This sets the ext2/3/4 filesystem label of the filesystem on "device" to "label". Filesystem labels are limited to 16 characters. """ return self.inner_cmd("set_e2label %s %s" % (device, label)) def get_e2label(self, device): """ get-e2label - get the ext2/3/4 filesystem label This returns the ext2/3/4 filesystem label of the filesystem on "device". """ return self.inner_cmd("get_e2label %s" % (device)) def vfs_label(self, mountable): """ vfs-label - get the filesystem label This returns the label of the filesystem on "mountable". """ return self.inner_cmd("vfs_label %s" % (mountable)) def findfs_label(self, label): """ findfs-label - find a filesystem by label This command searches the filesystems and returns the one which has the given label. An error is returned if no such filesystem can be found. """ return self.inner_cmd("findfs_label %s" % (label)) def set_label(self, mountable, label): """ set-label - set filesystem label Set the filesystem label on "mountable" to "label". """ return self.inner_cmd("set_label %s %s" % (mountable, label)) def set_e2attrs(self, file, attrs, clear=None): """ set-e2attrs - set ext2 file attributes of a file This sets or clears the file attributes "attrs" associated with the inode "file". 
""" cmd = "set_e2attrs %s %s" % (file, attrs) if clear: cmd += " clear:%s" % clear return self.inner_cmd(cmd) def get_e2attrs(self, file): """ get-e2attrs - get ext2 file attributes of a file This returns the file attributes associated with "file". """ return self.inner_cmd("get_e2attrs %s" % (file)) def set_e2generation(self, file, generation): """ set-e2generation - set ext2 file generation of a file This sets the ext2 file generation of a file. """ return self.inner_cmd("set_e2generation %s %s" % (file, generation)) def get_e2generation(self, file): """ get-e2generation - get ext2 file generation of a file This returns the ext2 file generation of a file. The generation (which used to be called the "version") is a number associated with an inode. This is most commonly used by NFS servers. """ return self.inner_cmd("get_e2generation %s" % (file)) def statvfs(self, path): """ statvfs - get file system statistics Returns file system statistics for any mounted file system. "path" should be a file or directory in the mounted file system (typically it is the mount point itself, but it doesn't need to be). """ return self.inner_cmd("statvfs %s" % (path)) def tune2fs_l(self, device): """ tune2fs-l - get ext2/ext3/ext4 superblock details This returns the contents of the ext2, ext3 or ext4 filesystem superblock on "device". """ return self.inner_cmd("tune2fs_l %s" % (device)) def tune2fs(self, device, force=None, maxmountcount=None, mountcount=None, errorbehavior=None, group=None, intervalbetweenchecks=None, reservedblockspercentage=None, lastmounteddirectory=None, reservedblockscount=None, user=None): """ tune2fs - adjust ext2/ext3/ext4 filesystem parameters This call allows you to adjust various filesystem parameters of an ext2/ext3/ext4 filesystem called "device". 
""" cmd = "tune2fs %s" % device if force: cmd += ' force:%s' % force if maxmountcount: cmd += ' maxmountcount:%s' % maxmountcount if mountcount: cmd += ' mountcount:%s' % mountcount if errorbehavior: cmd += ' errorbehavior:%s' % errorbehavior if group: cmd += ' group:%s' % group if intervalbetweenchecks: cmd += ' intervalbetweenchecks:%s' % intervalbetweenchecks if reservedblockspercentage: cmd += ' reservedblockspercentage:%s' % reservedblockspercentage if lastmounteddirectory: cmd += ' lastmounteddirectory:%s' % lastmounteddirectory if reservedblockscount: cmd += ' reservedblockscount:%s' % reservedblockscount if user: cmd += ' user:%s' % user return self.inner_cmd(cmd) def umount(self, pathordevice, force=None, lazyunmount=None): """ umount - unmount a filesystem This unmounts the given filesystem. The filesystem may be specified either by its mountpoint (path) or the device which contains the filesystem. """ cmd = 'umount %s' % pathordevice if force: cmd += ' force:%s ' % force if lazyunmount: cmd += ' lazyunmount:%s ' % lazyunmount return self.inner_cmd(cmd) def blkid(self, device): """ blkid - print block device attributes This command returns block device attributes for "device". The following fields are usually present in the returned hash. Other fields may also be present. """ return self.inner_cmd("blkid %s" % device) def filesystem_available(self, filesystem): """ filesystem-available - check if filesystem is available Check whether libguestfs supports the named filesystem. The argument "filesystem" is a filesystem name, such as "ext3". """ return self.inner_cmd("filesystem_available %s" % filesystem) def e2fsck(self, device, correct=None, forceall=None): """ e2fsck - check an ext2/ext3 filesystem This runs the ext2/ext3 filesystem checker on "device". 
It can take the following optional arguments: """ cmd = 'e2fsck %s' % device if correct: cmd += ' correct:%s ' % correct if forceall: cmd += ' forceall:%s ' % forceall return self.inner_cmd(cmd) def mkfifo(self, mode, path): """ mkfifo - make FIFO (named pipe) This call creates a FIFO (named pipe) called "path" with mode "mode". It is just a convenient wrapper around "mknod". """ return self.inner_cmd('mkfifo %s %s' % (mode, path)) def mklost_and_found(self, mountpoint): """ mklost-and-found - make lost+found directory on an ext2/3/4 filesystem Make the "lost+found" directory, normally in the root directory of an ext2/3/4 filesystem. "mountpoint" is the directory under which we try to create the "lost+found" directory. """ return self.inner_cmd('mklost_and_found %s' % mountpoint) def mknod_b(self, mode, devmajor, devminor, path): """ mknod-b - make block device node This call creates a block device node called "path" with mode "mode" and device major/minor "devmajor" and "devminor". It is just a convenient wrapper around "mknod". """ return self.inner_cmd('mknod_b %s %s %s %s' % (mode, devmajor, devminor, path)) def mknod_c(self, mode, devmajor, devminor, path): """ mknod-c - make char device node This call creates a char device node called "path" with mode "mode" and device major/minor "devmajor" and "devminor". It is just a convenient wrapper around "mknod". """ return self.inner_cmd('mknod_c %s %s %s %s' % (mode, devmajor, devminor, path)) def ntfsresize_opts(self, device, size=None, force=None): """ ntfsresize - resize an NTFS filesystem This command resizes an NTFS filesystem, expanding or shrinking it to the size of the underlying device. """ cmd = 'ntfsresize-opts %s' % device if size: cmd += ' size:%s ' % size if force: cmd += ' force:%s ' % force return self.inner_cmd(cmd) def resize2fs(self, device): """ resize2fs - resize an ext2, ext3 or ext4 filesystem This resizes an ext2, ext3 or ext4 filesystem to match the size of the underlying device. 
""" return self.inner_cmd('resize2fs %s' % device) def resize2fs_M(self, device): """ resize2fs-M - resize an ext2, ext3 or ext4 filesystem to the minimum size This command is the same as "resize2fs", but the filesystem is resized to its minimum size. This works like the *-M* option to the "resize2fs" command. """ return self.inner_cmd('resize2fs_M %s' % device) def resize2fs_size(self, device, size): """ resize2fs-size - resize an ext2, ext3 or ext4 filesystem (with size) This command is the same as "resize2fs" except that it allows you to specify the new size (in bytes) explicitly. """ return self.inner_cmd('resize2fs_size %s %s' % (device, size)) def e2fsck_f(self, device): """ e2fsck-f - check an ext2/ext3 filesystem This runs "e2fsck -p -f device", ie. runs the ext2/ext3 filesystem checker on "device", noninteractively (*-p*), even if the filesystem appears to be clean (*-f*). """ return self.inner_cmd('e2fsck_f %s' % (device)) def readdir(self, dir): """ readdir - read directories entries This returns the list of directory entries in directory "dir" """ return self.inner_cmd('readdir %s' % (dir)) def mount_loop(self, file, mountpoint): """ mount-loop - mount a file using the loop device This command lets you mount "file" (a filesystem image in a file) on a mount point. It is entirely equivalent to the command "mount -o loop file mountpoint". """ return self.inner_cmd('mount_loop %s %s' % (file, mountpoint)) def mount_vfs(self, options, vfstype, mountable, mountpoint): """ mount-vfs - mount a guest disk with mount options and vfstype This is the same as the "mount" command, but it allows you to set both the mount options and the vfstype as for the mount(8) *-o* and *-t* flags. 
""" return self.inner_cmd('mount_vfs %s %s %s %s' % (options, vfstype, mountable, mountpoint)) def mkswap(self, device, label=None, uuid=None): """ mkswap - create a swap partition Create a Linux swap partition on "device" """ cmd = 'mkswap %s ' % device if label: cmd += ' label:%s ' % label if uuid: cmd += ' uuid:%s ' % uuid return self.inner_cmd(cmd) def swapon_device(self, device): """ swapon-device - enable swap on device This command enables the libguestfs appliance to use the swap device or partition named "device". The increased memory is made available for all commands, for example those run using "command" or "sh". """ return self.inner_cmd('swapon_device %s' % device) def swapoff_device(self, device): """ swapoff-device - disable swap on device This command disables the libguestfs appliance swap device or partition named "device". See "swapon_device". """ return self.inner_cmd('swapoff_device %s' % device) def mkswap_L(self, label, device): """ mkswap-L - create a swap partition with a label Create a swap partition on "device" with label "label". """ return self.inner_cmd('mkswap_L %s %s' % (label, device)) def swapon_label(self, label): """ swapon-label - enable swap on labeled swap partition This command enables swap to a labeled swap partition. See "swapon_device" for other notes. """ return self.inner_cmd("swapon_label %s" % label) def swapoff_label(self, label): """ swapoff-label - disable swap on labeled swap partition This command disables the libguestfs appliance swap on labeled swap partition. """ return self.inner_cmd("swapoff_label %s" % label) def mkswap_U(self, uuid, device): """ mkswap-U - create a swap partition with an explicit UUID Create a swap partition on "device" with UUID "uuid". """ return self.inner_cmd('mkswap_U %s %s' % (uuid, device)) def swapon_uuid(self, uuid): """ swapon-uuid - enable swap on swap partition by UUID This command enables swap to a swap partition with the given UUID. See "swapon_device" for other notes. 
""" return self.inner_cmd('swapon_uuid %s' % uuid) def swapoff_uuid(self, uuid): """ swapoff-uuid - disable swap on swap partition by UUID This command disables the libguestfs appliance swap partition with the given UUID. """ return self.inner_cmd('swapoff_uuid %s' % uuid) def mkswap_file(self, file): """ mkswap-file - create a swap file Create a swap file. """ return self.inner_cmd("mkswap_file %s" % file) def swapon_file(self, file): """ swapon-file - enable swap on file This command enables swap to a file. See "swapon_device" for other notes. """ return self.inner_cmd('swapon_file %s' % file) def swapoff_file(self, file): """ swapoff-file - disable swap on file This command disables the libguestfs appliance swap on file. """ return self.inner_cmd('swapoff_file %s' % file) def alloc(self, filename, size): """ alloc - allocate and add a disk file This creates an empty (zeroed) file of the given size, and then adds so it can be further examined. """ return self.inner_cmd('alloc %s %s' % (filename, size)) def list_disk_labels(self): """ list-disk-labels - mapping of disk labels to devices If you add drives using the optional "label" parameter of "add_drive_opts", you can use this call to map between disk labels, and raw block device and partition names (like "/dev/sda" and "/dev/sda1"). """ return self.inner_cmd('list_disk_labels') def add_drive_ro_with_if(self, filename, iface): """ add-drive-ro-with-if - add a drive read-only specifying the QEMU block emulation to use This is the same as "add_drive_ro" but it allows you to specify the QEMU interface emulation to use at run time. """ return self.inner_cmd('add_drive_ro_with_if %s %s' % (filename, iface)) def add_drive_with_if(self, filename, iface): """ add-drive-with-if - add a drive specifying the QEMU block emulation to use This is the same as "add_drive" but it allows you to specify the QEMU interface emulation to use at run time. 
""" return self.inner_cmd('add_drive_with_if %s %s' % (filename, iface)) def available(self, groups): """ available - test availability of some parts of the API This command is used to check the availability of some groups of functionality in the appliance, which not all builds of the libguestfs appliance will be able to provide. """ return self.inner_cmd('available %s' % groups) def available_all_groups(self): """ available-all-groups - return a list of all optional groups This command returns a list of all optional groups that this daemon knows about. Note this returns both supported and unsupported groups. To find out which ones the daemon can actually support you have to call "available" / "feature_available" on each member of the returned list. """ return self.inner_cmd('available_all_groups') def help(self, orcmd=None): """ help - display a list of commands or help on a command """ cmd = 'help' if orcmd: cmd += ' %s' % orcmd return self.inner_cmd(cmd) def quit(self): """ quit - quit guestfish """ return self.inner_cmd('quit') def echo(self, params=None): """ echo - display a line of text This echos the parameters to the terminal. """ cmd = 'echo' if params: cmd += ' %s' % params return self.inner_cmd(cmd) def echo_daemon(self, words): """ echo-daemon - echo arguments back to the client This command concatenates the list of "words" passed with single spaces between them and returns the resulting string. """ return self.inner_cmd('echo_daemon %s' % words) def launch(self): """ launch - launch the backend You should call this after configuring the handle (eg. adding drives) but before performing any actions. """ return self.inner_cmd('launch') def dmesg(self): """ dmesg - return kernel messages This returns the kernel messages ("dmesg" output) from the guest kernel. This is sometimes useful for extended debugging of problems. 
""" return self.inner_cmd('dmesg') def version(self): """ version - get the library version number Return the libguestfs version number that the program is linked against. """ return self.inner_cmd('version') def sparse(self, filename, size): """ sparse - create a sparse disk image and add This creates an empty sparse file of the given size, and then adds so it can be further examined. """ return self.inner_cmd('sparse %s %s' % (filename, size)) def modprobe(self, modulename): """ modprobe - load a kernel module This loads a kernel module in the appliance. """ return self.inner_cmd('modprobe %s' % modulename) def ping_daemon(self): """ ping-daemon - ping the guest daemon This is a test probe into the guestfs daemon running inside the hypervisor. Calling this function checks that the daemon responds to the ping message, without affecting the daemon or attached block device(s) in any other way. """ return self.inner_cmd('ping_daemon') def sleep(self, secs): """ sleep - sleep for some seconds Sleep for "secs" seconds. """ return self.inner_cmd('sleep %s' % secs) def reopen(self): """ reopen - close and reopen libguestfs handle Close and reopen the libguestfs handle. It is not necessary to use this normally, because the handle is closed properly when guestfish exits. However this is occasionally useful for testing. """ return self.inner_cmd('reopen') def time(self, command, args=None): """ time - print elapsed time taken to run a command Run the command as usual, but print the elapsed time afterwards. This can be useful for benchmarking operations. """ cmd = 'time %s' % command if args: cmd += args return self.inner_cmd(cmd) def config(self, hvparam, hvvalue): """ config - add hypervisor parameters This can be used to add arbitrary hypervisor parameters of the form *-param value*. Actually it's not quite arbitrary - we prevent you from setting some parameters which would interfere with parameters that we use. 
""" return self.inner_cmd('config %s %s' % (hvparam, hvvalue)) def kill_subprocess(self): """ kill-subprocess - kill the hypervisor This kills the hypervisor. """ return self.inner_cmd('kill_subprocess') def set_backend(self, backend): """ set-backend - set the backend Set the method that libguestfs uses to connect to the backend guestfsd daemon. """ return self.inner_cmd('set_backend %s' % backend) def get_backend(self): """ get-backend - get the backend Return the current backend. """ return self.inner_cmd('get_backend') def shutdown(self): """ shutdown - shutdown the hypervisor This is the opposite of "launch". It performs an orderly shutdown of the backend process(es). If the autosync flag is set (which is the default) then the disk image is synchronized. """ return self.inner_cmd('shutdown') def ntfs_3g_probe(self, rw, device): """ ntfs-3g-probe - probe NTFS volume This command runs the ntfs-3g.probe(8) command which probes an NTFS "device" for mountability. (Not all NTFS volumes can be mounted read-write, and some cannot be mounted at all). """ return self.inner_cmd('ntfs_3g_probe %s %s' % (rw, device)) def event(self, name, eventset, script): """ event - register a handler for an event or events Register a shell script fragment which is executed when an event is raised. See "guestfs_set_event_callback" in guestfs(3) for a discussion of the event API in libguestfs. """ return self.inner_cmd('event %s %s %s' % (name, eventset, script)) def list_events(self): """ list-events - list event handlers List the event handlers registered using the guestfish "event" command. """ return self.inner_cmd('list_events') def delete_event(self, name): """ delete-event - delete a previously registered event handler Delete the event handler which was previously registered as "name". If multiple event handlers were registered with the same name, they are all deleted. 
""" return self.inner_cmd('delete_event %s' % name) def set_append(self, append): """ set-append - add options to kernel command line This function is used to add additional options to the libguestfs appliance kernel command line. """ return self.inner_cmd('set_append %s' % append) def get_append(self): """ get-append - get the additional kernel options Return the additional kernel options which are added to the libguestfs appliance kernel command line. """ return self.inner_cmd('get_append') def set_smp(self, smp): """ set-smp - set number of virtual CPUs in appliance Change the number of virtual CPUs assigned to the appliance. The default is 1. Increasing this may improve performance, though often it has no effect. """ return self.inner_cmd('set_smp %s' % smp) def get_smp(self): """ get-smp - get number of virtual CPUs in appliance This returns the number of virtual CPUs assigned to the appliance. """ return self.inner_cmd('get_smp') def set_pgroup(self, pgroup): """ set-pgroup - set process group flag If "pgroup" is true, child processes are placed into their own process group. """ return self.inner_cmd('set_pgroup %s' % pgroup) def get_pgroup(self): """ get-pgroup - get process group flag This returns the process group flag. """ return self.inner_cmd('get_pgroup') def set_attach_method(self, backend): """ set-attach-method - set the backend Set the method that libguestfs uses to connect to the backend guestfsd daemon. """ return self.inner_cmd('set_attach_method %s' % backend) def get_attach_method(self): """ get-attach-method - get the backend Return the current backend. """ return self.inner_cmd('get_attach_method') def set_autosync(self, autosync): """ set-autosync autosync If "autosync" is true, this enables autosync. Libguestfs will make a best effort attempt to make filesystems consistent and synchronized when the handle is closed (also if the program exits without closing handles). 
""" return self.inner_cmd('set_autosync %s' % autosync) def get_autosync(self): """ get-autosync - get autosync mode Get the autosync flag. """ return self.inner_cmd('get_autosync') def set_direct(self, direct): """ set-direct - enable or disable direct appliance mode If the direct appliance mode flag is enabled, then stdin and stdout are passed directly through to the appliance once it is launched. """ return self.inner_cmd('set_direct %s' % direct) def get_direct(self): """ get-direct - get direct appliance mode flag Return the direct appliance mode flag. """ return self.inner_cmd('get_direct') def set_memsize(self, memsize): """ set-memsize - set memory allocated to the hypervisor This sets the memory size in megabytes allocated to the hypervisor. This only has any effect if called before "launch". """ return self.inner_cmd('set_memsize %s' % memsize) def get_memsize(self): """ get-memsize - get memory allocated to the hypervisor This gets the memory size in megabytes allocated to the hypervisor. """ return self.inner_cmd('get_memsize') def set_path(self, searchpath): """ set-path - set the search path Set the path that libguestfs searches for kernel and initrd.img. """ return self.inner_cmd('set_path %s' % searchpath) def get_path(self): """ get-path - get the search path Return the current search path. """ return self.inner_cmd('get_path') def set_qemu(self, hv): """ set-qemu - set the hypervisor binary (usually qemu) Set the hypervisor binary (usually qemu) that we will use. """ return self.inner_cmd('set_qemu %s' % hv) def get_qemu(self): """ get-qemu - get the hypervisor binary (usually qemu) Return the current hypervisor binary (usually qemu). """ return self.inner_cmd('get_qemu') def set_recovery_proc(self, recoveryproc): """ set-recovery-proc - enable or disable the recovery process If this is called with the parameter "false" then "launch" does not create a recovery process. 
The purpose of the recovery process is to stop runaway hypervisor processes in the case where the main program aborts abruptly. """ return self.inner_cmd('set_recovery_proc %s' % recoveryproc) def get_recovery_proc(self): """ get-recovery-proc - get recovery process enabled flag Return the recovery process enabled flag. """ return self.inner_cmd('get_recovery_proc') def set_trace(self, trace): """ set-trace - enable or disable command traces If the command trace flag is set to 1, then libguestfs calls, parameters and return values are traced. """ return self.inner_cmd('set_trace %s' % trace) def get_trace(self): """ get-trace - get command trace enabled flag Return the command trace flag. """ return self.inner_cmd('get_trace') def set_verbose(self, verbose): """ set-verbose - set verbose mode If "verbose" is true, this turns on verbose messages. """ return self.inner_cmd('set_verbose %s' % verbose) def get_verbose(self): """ get-verbose - get verbose mode This returns the verbose messages flag. """ return self.inner_cmd('get_verbose') def get_pid(self): """ get-pid - get PID of hypervisor Return the process ID of the hypervisor. If there is no hypervisor running, then this will return an error. """ return self.inner_cmd('get_pid') def set_network(self, network): """ set-network - set enable network flag If "network" is true, then the network is enabled in the libguestfs appliance. The default is false. """ return self.inner_cmd('set_network %s' % network) def get_network(self): """ get-network - get enable network flag This returns the enable network flag. """ return self.inner_cmd('get_network') def setenv(self, VAR, value): """ setenv - set an environment variable Set the environment variable "VAR" to the string "value". """ return self.inner_cmd('setenv %s %s' % (VAR, value)) def unsetenv(self, VAR): """ unsetenv - unset an environment variable Remove "VAR" from the environment. 
""" return self.inner_cmd('unsetenv %s' % VAR) def lcd(self, directory): """ lcd - change working directory Change the local directory, ie. the current directory of guestfish itself. """ return self.inner_cmd('lcd %s' % directory) def man(self): """ man - open the manual Opens the manual page for guestfish. """ return self.inner_cmd('man') def supported(self): """ supported - list supported groups of commands This command returns a list of the optional groups known to the daemon, and indicates which ones are supported by this build of the libguestfs appliance. """ return self.inner_cmd('supported') def extlinux(self, directory): """ extlinux - install the SYSLINUX bootloader on an ext2/3/4 or btrfs filesystem Install the SYSLINUX bootloader on the device mounted at "directory". Unlike "syslinux" which requires a FAT filesystem, this can be used on an ext2/3/4 or btrfs filesystem. """ return self.inner_cmd('extlinux %s' % directory) def syslinux(self, device, directory=None): """ syslinux - install the SYSLINUX bootloader Install the SYSLINUX bootloader on "device". """ cmd = 'syslinux %s' % device if directory: cmd += ' directory:%s' % directory return self.inner_cmd(cmd) def feature_available(self, groups): """ feature-available - test availability of some parts of the API This is the same as "available", but unlike that call it returns a simple true/false boolean result, instead of throwing an exception if a feature is not found. For other documentation see "available". """ return self.inner_cmd('feature_available %s' % groups) def get_program(self): """ get-program - get the program name Get the program name. See "set_program". """ return self.inner_cmd('get_program') def set_program(self, program): """ set-program - set the program name Set the program name. This is an informative string which the main program may optionally set in the handle. 
""" return self.inner_cmd('set_program %s' % program) def add_drive_scratch(self, size, name=None, label=None): """ add-drive-scratch - add a temporary scratch drive This command adds a temporary scratch drive to the handle. The "size" parameter is the virtual size (in bytes). The scratch drive is blank initially (all reads return zeroes until you start writing to it). The drive is deleted when the handle is closed. """ cmd = 'add_drive_scratch %s' % size if name: cmd += ' name:%s' % name if label: cmd += ' label:%s' % label return self.inner_cmd(cmd) def drop_caches(self, whattodrop): """ drop-caches - drop kernel page cache, dentries and inodes The "drop-caches" command instructs the guest kernel to drop its page cache, and/or dentries and inode caches. The parameter "whattodrop" tells the kernel what precisely to drop. """ return self.inner_cmd("drop-caches %s" % whattodrop) def case_sensitive_path(self, path): """ case-sensitive-path - return true path on case-insensitive filesystem The "drop-caches" command can be used to resolve case insensitive paths on a filesystem which is case sensitive. The use case is to resolve paths which you have read from Windows configuration files or the Windows Registry, to the true path. """ return self.inner_cmd("case-sensitive-path '%s'" % path) def command(self, cmd): """ command - run a command from the guest filesystem This call runs a command from the guest filesystem. The filesystem must be mounted, and must contain a compatible operating system (ie. something Linux, with the same or compatible processor architecture). """ return self.inner_cmd("command '%s'" % cmd) def command_lines(self, cmd): """ command-lines - run a command, returning lines This is the same as "command", but splits the result into a list of lines. """ return self.inner_cmd("command-lines '%s'" % cmd) def sh(self, cmd): """ sh - run a command via the shell This call runs a command from the guest filesystem via the guest's "/bin/sh". 
""" return self.inner_cmd("sh '%s'" % cmd) def sh_lines(self, cmd): """ sh-lines - run a command via the shell returning lines This is the same as "sh", but splits the result into a list of lines. """ return self.inner_cmd("sh-lines '%s'" % cmd) def zero(self, device): """ zero - write zeroes to the device This command writes zeroes over the first few blocks of "device". """ return self.inner_cmd("zero '%s'" % device) def zero_device(self, device): """ zero-device - write zeroes to an entire device This command writes zeroes over the entire "device". Compare with "zero" which just zeroes the first few blocks of a device. """ return self.inner_cmd("zero-device '%s'" % device) def grep(self, regex, path): """ grep - return lines matching a pattern This calls the external "grep" program and returns the matching lines. """ return self.inner_cmd("grep '%s' '%s'" % (regex, path)) def grepi(self, regex, path): """ grepi - return lines matching a pattern This calls the external "grep -i" program and returns the matching lines. """ return self.inner_cmd("grepi '%s' '%s'" % (regex, path)) def fgrep(self, pattern, path): """ fgrep - return lines matching a pattern This calls the external "fgrep" program and returns the matching lines. """ return self.inner_cmd("fgrep '%s' '%s'" % (pattern, path)) def fgrepi(self, pattern, path): """ fgrepi - return lines matching a pattern This calls the external "fgrep -i" program and returns the matching lines. """ return self.inner_cmd("fgrepi '%s' '%s'" % (pattern, path)) def egrep(self, regex, path): """ egrep - return lines matching a pattern This calls the external "egrep" program and returns the matching lines. """ return self.inner_cmd("egrep '%s' '%s'" % (regex, path)) def egrepi(self, regex, path): """ egrepi - return lines matching a pattern This calls the external "egrep -i" program and returns the matching lines. 
""" return self.inner_cmd("egrepi '%s' '%s'" % (regex, path)) def zgrep(self, regex, path): """ zgrep - return lines matching a pattern This calls the external "zgrep" program and returns the matching lines. """ return self.inner_cmd("zgrep '%s' '%s'" % (regex, path)) def zgrepi(self, regex, path): """ zgrepi - return lines matching a pattern This calls the external "zgrep -i" program and returns the matching lines. """ return self.inner_cmd("zgrepi '%s' '%s'" % (regex, path)) def zfgrep(self, pattern, path): """ zfgrep - return lines matching a pattern This calls the external "zfgrep" program and returns the matching lines. """ return self.inner_cmd("zfgrep '%s' '%s'" % (pattern, path)) def zfgrepi(self, pattern, path): """ zfgrepi - return lines matching a pattern This calls the external "zfgrep -i" program and returns the matching lines. """ return self.inner_cmd("zfgrepi '%s' '%s'" % (pattern, path)) def zegrep(self, regex, path): """ zegrep - return lines matching a pattern This calls the external "zegrep" program and returns the matching lines. """ return self.inner_cmd("zegrep '%s' '%s'" % (regex, path)) def zegrepi(self, regex, path): """ zegrepi - return lines matching a pattern This calls the external "zegrep -i" program and returns the matching lines. """ return self.inner_cmd("zegrepi '%s' '%s'" % (regex, path)) def compress_out(self, ctype, file, zfile): """ compress-out - output compressed file This command compresses "file" and writes it out to the local file "zfile". The compression program used is controlled by the "ctype" parameter. Currently this includes: "compress", "gzip", "bzip2", "xz" or "lzop". Some compression types may not be supported by particular builds of libguestfs, in which case you will get an error containing the substring "not supported". The optional "level" parameter controls compression level. The meaning and default for this parameter depends on the compression program being used. 
""" return self.inner_cmd("compress-out '%s' '%s' '%s'" % (ctype, file, zfile)) def compress_device_out(self, ctype, device, zdevice): """ compress-device-out - output compressed device This command compresses "device" and writes it out to the local file "zdevice". The "ctype" and optional "level" parameters have the same meaning as in "compress_out". """ return self.inner_cmd("compress-device-out '%s' '%s' '%s'" % (ctype, device, zdevice)) def glob(self, command, args): """ glob - expand wildcards in command Expand wildcards in any paths in the args list, and run "command" repeatedly on each matching path. """ return self.inner_cmd("glob '%s' '%s'" % (command, args)) def glob_expand(self, path): """ glob-expand - expand a wildcard path This command searches for all the pathnames matching "pattern" according to the wildcard expansion rules used by the shell. """ return self.inner_cmd("glob-expand '%s'" % path) def mkmountpoint(self, exemptpath): """ mkmountpoint - create a mountpoint "mkmountpoint" and "rmmountpoint" are specialized calls that can be used to create extra mountpoints before mounting the first filesystem. """ return self.inner_cmd("mkmountpoint '%s'" % exemptpath) def rmmountpoint(self, exemptpath): """ rmmountpoint - remove a mountpoint This calls removes a mountpoint that was previously created with "mkmountpoint". See "mkmountpoint" for full details. """ return self.inner_cmd("rmmountpoint '%s'" % exemptpath) def parse_environment(self): """ parse-environment - parse the environment and set handle flags accordingly Parse the program's environment and set flags in the handle accordingly. For example if "LIBGUESTFS_DEBUG=1" then the 'verbose' flag is set in the handle. """ return self.inner_cmd("parse_environment") def parse_environment_list(self, environment): """ parse-environment-list - parse the environment and set handle flags accordingly Parse the list of strings in the argument "environment" and set flags in the handle accordingly. 
For example if "LIBGUESTFS_DEBUG=1" is a string in the list, then the 'verbose' flag is set in the handle. """ return self.inner_cmd("parse_environment_list '%s'" % environment) def rsync(self, src, dest, args): """ rsync - synchronize the contents of two directories This call may be used to copy or synchronize two directories under the same libguestfs handle. This uses the rsync(1) program which uses a fast algorithm that avoids copying files unnecessarily. """ return self.inner_cmd("rsync %s %s %s" % (src, dest, args)) def rsync_in(self, src, dest, args): """ rsync-in - synchronize host or remote filesystem with filesystem This call may be used to copy or synchronize the filesystem on the host or on a remote computer with the filesystem within libguestfs. This uses the rsync(1) program which uses a fast algorithm that avoids copying files unnecessarily. """ return self.inner_cmd("rsync-in %s %s %s" % (src, dest, args)) def rsync_out(self, src, dest, args): """ rsync-out - synchronize filesystem with host or remote filesystem This call may be used to copy or synchronize the filesystem within libguestfs with a filesystem on the host or on a remote computer. This uses the rsync(1) program which uses a fast algorithm that avoids copying files unnecessarily. """ return self.inner_cmd("rsync-out %s %s %s" % (src, dest, args)) def utimens(self, path, atsecs, atnsecs, mtsecs, mtnsecs): """ utimens - set timestamp of a file with nanosecond precision This command sets the timestamps of a file with nanosecond precision. """ return self.inner_cmd("utimens '%s' '%s' '%s' '%s' '%s'" % (path, atsecs, atnsecs, mtsecs, mtnsecs)) def utsname(self): """ utsname - appliance kernel version This returns the kernel version of the appliance, where this is available. This information is only useful for debugging. Nothing in the returned structure is defined by the API. 
""" return self.inner_cmd("utsname") def grub_install(self, root, device): """ grub-install root device This command installs GRUB 1 (the Grand Unified Bootloader) on "device", with the root directory being "root". """ return self.inner_cmd("grub-install %s %s" % (root, device)) def initrd_cat(self, initrdpath, filename): """ initrd-cat - list the contents of a single file in an initrd This command unpacks the file "filename" from the initrd file called "initrdpath". The filename must be given *without* the initial "/" character. """ return self.inner_cmd("initrd-cat %s %s" % (initrdpath, filename)) def initrd_list(self, path): """ initrd-list - list files in an initrd This command lists out files contained in an initrd. """ return self.inner_cmd("initrd-list %s" % path) def aug_init(self, root, flags): """ aug-init - create a new Augeas handle Create a new Augeas handle for editing configuration files. If there was any previous Augeas handle associated with this guestfs session, then it is closed. """ return self.inner_cmd("aug-init %s %s" % (root, flags)) def aug_clear(self, augpath): """ aug-clear - clear Augeas path Set the value associated with "path" to "NULL". This is the same as the augtool(1) "clear" command. """ return self.inner_cmd("aug-clear %s" % augpath) def aug_set(self, augpath, val): """ aug-set - set Augeas path to value Set the value associated with "path" to "val". In the Augeas API, it is possible to clear a node by setting the value to NULL. Due to an oversight in the libguestfs API you cannot do that with this call. Instead you must use the "aug_clear" call. """ return self.inner_cmd("aug-set %s %s" % (augpath, val)) def aug_get(self, augpath): """ aug-get - look up the value of an Augeas path Look up the value associated with "path". If "path" matches exactly one node, the "value" is returned. 
""" return self.inner_cmd("aug-get %s" % augpath) def aug_close(self): """ aug-close - close the current Augeas handle and free up any resources used by it. After calling this, you have to call "aug_init" again before you can use any other Augeas functions. """ return self.inner_cmd("aug-close") def aug_defnode(self, node, expr, value): """ aug-defnode - defines a variable "name" whose value is the result of evaluating "expr". If "expr" evaluates to an empty nodeset, a node is created, equivalent to calling "aug_set" "expr", "value". "name" will be the nodeset containing that single node. On success this returns a pair containing the number of nodes in the nodeset, and a boolean flag if a node was created. """ return self.inner_cmd("aug-defnode %s %s %s" % (node, expr, value)) def aug_defvar(self, name, expr): """ aug-defvar - define an Augeas variable Defines an Augeas variable "name" whose value is the result of evaluating "expr". If "expr" is NULL, then "name" is undefined. On success this returns the number of nodes in "expr", or 0 if "expr" evaluates to something which is not a nodeset. """ return self.inner_cmd("aug-defvar %s %s" % (name, expr)) def aug_ls(self, augpath): """ aug-ls - list Augeas nodes under augpath This is just a shortcut for listing "aug_match" "path/\*" and sorting the resulting nodes into alphabetical order. """ return self.inner_cmd("aug-ls %s" % augpath) def aug_insert(self, augpath, label, before): """ aug-insert - insert a sibling Augeas node Create a new sibling "label" for "path", inserting it into the tree before or after "path" (depending on the boolean flag "before"). "path" must match exactly one existing node in the tree, and "label" must be a label, ie. not contain "/", "*" or end with a bracketed index "[N]". 
""" return self.inner_cmd("aug-insert %s %s %s" % (augpath, label, before)) def aug_match(self, augpath): """ aug-match - return Augeas nodes which match augpath Returns a list of paths which match the path expression "path". The returned paths are sufficiently qualified so that they match exactly one node in the current tree. """ return self.inner_cmd("aug-match %s" % augpath) def aug_mv(self, src, dest): """ aug-mv - move Augeas node Move the node "src" to "dest". "src" must match exactly one node. "dest" is overwritten if it exists. """ return self.inner_cmd("aug-mv %s %s" % (src, dest)) def aug_rm(self, augpath): """ aug-rm - remove an Augeas path Remove "path" and all of its children. On success this returns the number of entries which were removed. """ return self.inner_cmd("aug-rm %s" % augpath) def aug_label(self, augpath): """ aug-label - return the label from an Augeas path expression The label (name of the last element) of the Augeas path expression "augpath" is returned. "augpath" must match exactly one node, else this function returns an error. """ return self.inner_cmd("aug-label %s" % augpath) def aug_setm(self, base, sub, val): """ aug-setm - set multiple Augeas nodes """ return self.inner_cmd("aug-setm %s %s %s" % (base, sub, val)) def aug_load(self): """ aug-load - load files into the tree Load files into the tree. See "aug_load" in the Augeas documentation for the full gory details. """ return self.inner_cmd("aug-load") def aug_save(self): """ aug-save - write all pending Augeas changes to disk This writes all pending changes to disk. The flags which were passed to "aug_init" affect exactly how files are saved. """ return self.inner_cmd("aug-save") def libguest_test_tool_cmd(qemuarg=None, qemudirarg=None, timeoutarg=None, ignore_status=True, debug=False, timeout=60): """ Execute libguest-test-tool command. 
:param qemuarg: the qemu option :param qemudirarg: the qemudir option :param timeoutarg: the timeout option :return: a CmdResult object :raise: raise LibguestfsCmdError """ cmd = "libguestfs-test-tool" if qemuarg is not None: cmd += " --qemu '%s'" % qemuarg if qemudirarg is not None: cmd += " --qemudir '%s'" % qemudirarg if timeoutarg is not None: cmd += " --timeout %s" % timeoutarg # Allow to raise LibguestfsCmdError if ignore_status is False. return lgf_command(cmd, ignore_status, debug, timeout) def virt_edit_cmd(disk_or_domain, file_path, is_disk=False, disk_format=None, options=None, extra=None, expr=None, connect_uri=None, ignore_status=True, debug=False, timeout=60): """ Execute virt-edit command to check whether it is ok. Since virt-edit will need uses' interact, maintain and return a session if there is no raise after command has been executed. :param disk_or_domain: a img path or a domain name. :param file_path: the file need to be edited in img file. :param is_disk: whether disk_or_domain is disk or domain :param disk_format: when is_disk is true, add a format if it is set. :param options: the options of virt-edit. :param extra: additional suffix of command. :return: a session of executing virt-edit command. """ # disk_or_domain and file_path are necessary parameters. cmd = "virt-edit" if connect_uri is not None: cmd += " -c %s" % connect_uri if is_disk: # For latest version, --format must exist before -a if disk_format is not None: cmd += " --format=%s" % disk_format cmd += " -a %s" % disk_or_domain else: cmd += " -d %s" % disk_or_domain cmd += " %s" % file_path if options is not None: cmd += " %s" % options if extra is not None: cmd += " %s" % extra if expr is not None: cmd += " -e '%s'" % expr return lgf_command(cmd, ignore_status, debug, timeout) def virt_clone_cmd(original, newname=None, autoclone=False, **dargs): """ Clone existing virtual machine images. :param original: Name of the original guest to be cloned. 
:param newname: Name of the new guest virtual machine instance. :param autoclone: Generate a new guest name, and paths for new storage. :param dargs: Standardized function API keywords. There are many options not listed, they can be passed in dargs. """ def storage_config(cmd, options): """Configure options for storage""" # files should be a list files = options.get("files", []) if len(files): for file in files: cmd += " --file '%s'" % file if options.get("nonsparse") is not None: cmd += " --nonsparse" return cmd def network_config(cmd, options): """Configure options for network""" mac = options.get("mac") if mac is not None: cmd += " --mac '%s'" % mac return cmd cmd = "virt-clone --original '%s'" % original if newname is not None: cmd += " --name '%s'" % newname if autoclone is True: cmd += " --auto-clone" # Many more options can be added if necessary. cmd = storage_config(cmd, dargs) cmd = network_config(cmd, dargs) ignore_status = dargs.get("ignore_status", True) debug = dargs.get("debug", False) timeout = dargs.get("timeout", 180) return lgf_command(cmd, ignore_status, debug, float(timeout)) def virt_sparsify_cmd(indisk, outdisk, compress=False, convert=None, format=None, ignore_status=True, debug=False, timeout=60): """ Make a virtual machine disk sparse. :param indisk: The source disk to be sparsified. :param outdisk: The destination disk. """ cmd = "virt-sparsify" if compress is True: cmd += " --compress" if format is not None: cmd += " --format '%s'" % format cmd += " '%s'" % indisk if convert is not None: cmd += " --convert '%s'" % convert cmd += " '%s'" % outdisk # More options can be added if necessary. return lgf_command(cmd, ignore_status, debug, timeout) def virt_resize_cmd(indisk, outdisk, **dargs): """ Resize a virtual machine disk. :param indisk: The source disk to be resized :param outdisk: The destination disk. 
""" cmd = "virt-resize" ignore_status = dargs.get("ignore_status", True) debug = dargs.get("debug", False) timeout = dargs.get("timeout", 60) resize = dargs.get("resize") resized_size = dargs.get("resized_size", "0") expand = dargs.get("expand") shrink = dargs.get("shrink") ignore = dargs.get("ignore") delete = dargs.get("delete") if resize is not None: cmd += " --resize %s=%s" % (resize, resized_size) if expand is not None: cmd += " --expand %s" % expand if shrink is not None: cmd += " --shrink %s" % shrink if ignore is not None: cmd += " --ignore %s" % ignore if delete is not None: cmd += " --delete %s" % delete cmd += " %s %s" % (indisk, outdisk) return lgf_command(cmd, ignore_status, debug, timeout) def virt_list_partitions_cmd(disk_or_domain, long=False, total=False, human_readable=False, ignore_status=True, debug=False, timeout=60): """ "virt-list-partitions" is a command line tool to list the partitions that are contained in a virtual machine or disk image. :param disk_or_domain: a disk or a domain to be mounted """ cmd = "virt-list-partitions %s" % disk_or_domain if long is True: cmd += " --long" if total is True: cmd += " --total" if human_readable is True: cmd += " --human-readable" return lgf_command(cmd, ignore_status, debug, timeout) def guestmount(disk_or_domain, mountpoint, inspector=False, readonly=False, **dargs): """ guestmount - Mount a guest filesystem on the host using FUSE and libguestfs. 
:param disk_or_domain: a disk or a domain to be mounted If you need to mount a disk, set is_disk to True in dargs :param mountpoint: the mountpoint of filesystems :param inspector: mount all filesystems automatically :param readonly: if mount filesystem with readonly option """ def get_special_mountpoint(cmd, options): special_mountpoints = options.get("special_mountpoints", []) for mountpoint in special_mountpoints: cmd += " -m %s" % mountpoint return cmd cmd = "guestmount" ignore_status = dargs.get("ignore_status", True) debug = dargs.get("debug", False) timeout = dargs.get("timeout", 60) # If you need to mount a disk, set is_disk to True is_disk = dargs.get("is_disk", False) if is_disk is True: cmd += " -a %s" % disk_or_domain else: cmd += " -d %s" % disk_or_domain if inspector is True: cmd += " -i" if readonly is True: cmd += " --ro" cmd = get_special_mountpoint(cmd, dargs) cmd += " %s" % mountpoint return lgf_command(cmd, ignore_status, debug, timeout) def virt_filesystems(disk_or_domain, **dargs): """ virt-filesystems - List filesystems, partitions, block devices, LVM in a virtual machine or disk image :param disk_or_domain: a disk or a domain to be mounted If you need to mount a disk, set is_disk to True in dargs """ def get_display_type(cmd, options): all = options.get("all", False) filesystems = options.get("filesystems", False) extra = options.get("extra", False) partitions = options.get("partitions", False) block_devices = options.get("block_devices", False) logical_volumes = options.get("logical_volumes", False) volume_groups = options.get("volume_groups", False) physical_volumes = options.get("physical_volumes", False) long_format = options.get("long_format", False) human_readable = options.get("human_readable", False) if all is True: cmd += " --all" if filesystems is True: cmd += " --filesystems" if extra is True: cmd += " --extra" if partitions is True: cmd += " --partitions" if block_devices is True: cmd += " --block_devices" if logical_volumes is 
True: cmd += " --logical_volumes" if volume_groups is True: cmd += " --volume_groups" if physical_volumes is True: cmd += " --physical_volumes" if long_format is True: cmd += " --long" if human_readable is True: cmd += " -h" return cmd cmd = "virt-filesystems" # If you need to mount a disk, set is_disk to True is_disk = dargs.get("is_disk", False) ignore_status = dargs.get("ignore_status", True) debug = dargs.get("debug", False) timeout = dargs.get("timeout", 60) if is_disk is True: cmd += " -a %s" % disk_or_domain else: cmd += " -d %s" % disk_or_domain cmd = get_display_type(cmd, dargs) return lgf_command(cmd, ignore_status, debug, timeout) def virt_list_partitions(disk_or_domain, long=False, total=False, human_readable=False, ignore_status=True, debug=False, timeout=60): """ "virt-list-partitions" is a command line tool to list the partitions that are contained in a virtual machine or disk image. :param disk_or_domain: a disk or a domain to be mounted """ cmd = "virt-list-partitions %s" % disk_or_domain if long is True: cmd += " --long" if total is True: cmd += " --total" if human_readable is True: cmd += " --human-readable" return lgf_command(cmd, ignore_status, debug, timeout) def virt_list_filesystems(disk_or_domain, format=None, long=False, all=False, ignore_status=True, debug=False, timeout=60): """ "virt-list-filesystems" is a command line tool to list the filesystem
codeparrot/github-code-clean
''' Copyright 2015 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ''' import tflearn from tflearn.layers.conv import conv_2d, max_pool_2d,avg_pool_2d, conv_3d, max_pool_3d, avg_pool_3d from tflearn.layers.core import input_data, dropout, fully_connected from tflearn.layers.estimator import regression from tflearn.layers.normalization import local_response_normalization from tflearn.layers.merge_ops import merge #used in v0.03-v0.06+ def otherception3(width, height, frame_count, lr, output=9, model_name = 'otherception.model', device = 'gpu', num = '0'): with tf.device('/{}:{}'.format(device,num)): network = input_data(shape=[None, width, height,3], name='input') conv1_7_7 = conv_2d(network, 64, 28, strides=4, activation='relu', name = 'conv1_7_7_s2') pool1_3_3 = max_pool_2d(conv1_7_7, 9,strides=4) pool1_3_3 = local_response_normalization(pool1_3_3) conv2_3_3_reduce = conv_2d(pool1_3_3, 64,1, activation='relu',name = 'conv2_3_3_reduce') conv2_3_3 = conv_2d(conv2_3_3_reduce, 192,12, activation='relu', name='conv2_3_3') conv2_3_3 = local_response_normalization(conv2_3_3) pool2_3_3 = max_pool_2d(conv2_3_3, kernel_size=12, strides=2, name='pool2_3_3_s2') inception_3a_1_1 = conv_2d(pool2_3_3, 64, 1, activation='relu', name='inception_3a_1_1') inception_3a_3_3_reduce = conv_2d(pool2_3_3, 96,1, activation='relu', name='inception_3a_3_3_reduce') inception_3a_3_3 = conv_2d(inception_3a_3_3_reduce, 128,filter_size=12, activation='relu', name = 'inception_3a_3_3') 
inception_3a_5_5_reduce = conv_2d(pool2_3_3,16, filter_size=1,activation='relu', name ='inception_3a_5_5_reduce' ) inception_3a_5_5 = conv_2d(inception_3a_5_5_reduce, 32, filter_size=15, activation='relu', name= 'inception_3a_5_5') inception_3a_pool = max_pool_2d(pool2_3_3, kernel_size=12, strides=1, ) inception_3a_pool_1_1 = conv_2d(inception_3a_pool, 32, filter_size=1, activation='relu', name='inception_3a_pool_1_1') # merge the inception_3a__ inception_3a_output = merge([inception_3a_1_1, inception_3a_3_3, inception_3a_5_5, inception_3a_pool_1_1], mode='concat', axis=3) inception_3b_1_1 = conv_2d(inception_3a_output, 128,filter_size=1,activation='relu', name= 'inception_3b_1_1' ) inception_3b_3_3_reduce = conv_2d(inception_3a_output, 128, filter_size=1, activation='relu', name='inception_3b_3_3_reduce') inception_3b_3_3 = conv_2d(inception_3b_3_3_reduce, 192, filter_size=9, activation='relu',name='inception_3b_3_3') inception_3b_5_5_reduce = conv_2d(inception_3a_output, 32, filter_size=1, activation='relu', name = 'inception_3b_5_5_reduce') inception_3b_5_5 = conv_2d(inception_3b_5_5_reduce, 96, filter_size=15, name = 'inception_3b_5_5') inception_3b_pool = max_pool_2d(inception_3a_output, kernel_size=12, strides=1, name='inception_3b_pool') inception_3b_pool_1_1 = conv_2d(inception_3b_pool, 64, filter_size=1,activation='relu', name='inception_3b_pool_1_1') #merge the inception_3b_* inception_3b_output = merge([inception_3b_1_1, inception_3b_3_3, inception_3b_5_5, inception_3b_pool_1_1], mode='concat',axis=3,name='inception_3b_output') pool3_3_3 = max_pool_2d(inception_3b_output, kernel_size=3, strides=2, name='pool3_3_3') inception_4a_1_1 = conv_2d(pool3_3_3, 192, filter_size=1, activation='relu', name='inception_4a_1_1') inception_4a_3_3_reduce = conv_2d(pool3_3_3, 96, filter_size=1, activation='relu', name='inception_4a_3_3_reduce') inception_4a_3_3 = conv_2d(inception_4a_3_3_reduce, 208, filter_size=3, activation='relu', name='inception_4a_3_3') 
inception_4a_5_5_reduce = conv_2d(pool3_3_3, 16, filter_size=1, activation='relu', name='inception_4a_5_5_reduce') inception_4a_5_5 = conv_2d(inception_4a_5_5_reduce, 48, filter_size=5, activation='relu', name='inception_4a_5_5') inception_4a_pool = max_pool_2d(pool3_3_3, kernel_size=3, strides=1, name='inception_4a_pool') inception_4a_pool_1_1 = conv_2d(inception_4a_pool, 64, filter_size=1, activation='relu', name='inception_4a_pool_1_1') inception_4a_output = merge([inception_4a_1_1, inception_4a_3_3, inception_4a_5_5, inception_4a_pool_1_1], mode='concat', axis=3, name='inception_4a_output') inception_4b_1_1 = conv_2d(inception_4a_output, 160, filter_size=1, activation='relu', name='inception_4a_1_1') inception_4b_3_3_reduce = conv_2d(inception_4a_output, 112, filter_size=1, activation='relu', name='inception_4b_3_3_reduce') inception_4b_3_3 = conv_2d(inception_4b_3_3_reduce, 224, filter_size=3, activation='relu', name='inception_4b_3_3') inception_4b_5_5_reduce = conv_2d(inception_4a_output, 24, filter_size=1, activation='relu', name='inception_4b_5_5_reduce') inception_4b_5_5 = conv_2d(inception_4b_5_5_reduce, 64, filter_size=5, activation='relu', name='inception_4b_5_5') inception_4b_pool = max_pool_2d(inception_4a_output, kernel_size=3, strides=1, name='inception_4b_pool') inception_4b_pool_1_1 = conv_2d(inception_4b_pool, 64, filter_size=1, activation='relu', name='inception_4b_pool_1_1') inception_4b_output = merge([inception_4b_1_1, inception_4b_3_3, inception_4b_5_5, inception_4b_pool_1_1], mode='concat', axis=3, name='inception_4b_output') inception_4c_1_1 = conv_2d(inception_4b_output, 128, filter_size=1, activation='relu',name='inception_4c_1_1') inception_4c_3_3_reduce = conv_2d(inception_4b_output, 128, filter_size=1, activation='relu', name='inception_4c_3_3_reduce') inception_4c_3_3 = conv_2d(inception_4c_3_3_reduce, 256, filter_size=3, activation='relu', name='inception_4c_3_3') inception_4c_5_5_reduce = conv_2d(inception_4b_output, 24, 
filter_size=1, activation='relu', name='inception_4c_5_5_reduce') inception_4c_5_5 = conv_2d(inception_4c_5_5_reduce, 64, filter_size=5, activation='relu', name='inception_4c_5_5') inception_4c_pool = max_pool_2d(inception_4b_output, kernel_size=3, strides=1) inception_4c_pool_1_1 = conv_2d(inception_4c_pool, 64, filter_size=1, activation='relu', name='inception_4c_pool_1_1') inception_4c_output = merge([inception_4c_1_1, inception_4c_3_3, inception_4c_5_5, inception_4c_pool_1_1], mode='concat', axis=3,name='inception_4c_output') inception_4d_1_1 = conv_2d(inception_4c_output, 112, filter_size=1, activation='relu', name='inception_4d_1_1') inception_4d_3_3_reduce = conv_2d(inception_4c_output, 144, filter_size=1, activation='relu', name='inception_4d_3_3_reduce') inception_4d_3_3 = conv_2d(inception_4d_3_3_reduce, 288, filter_size=3, activation='relu', name='inception_4d_3_3') inception_4d_5_5_reduce = conv_2d(inception_4c_output, 32, filter_size=1, activation='relu', name='inception_4d_5_5_reduce') inception_4d_5_5 = conv_2d(inception_4d_5_5_reduce, 64, filter_size=5, activation='relu', name='inception_4d_5_5') inception_4d_pool = max_pool_2d(inception_4c_output, kernel_size=3, strides=1, name='inception_4d_pool') inception_4d_pool_1_1 = conv_2d(inception_4d_pool, 64, filter_size=1, activation='relu', name='inception_4d_pool_1_1') inception_4d_output = merge([inception_4d_1_1, inception_4d_3_3, inception_4d_5_5, inception_4d_pool_1_1], mode='concat', axis=3, name='inception_4d_output') inception_4e_1_1 = conv_2d(inception_4d_output, 256, filter_size=1, activation='relu', name='inception_4e_1_1') inception_4e_3_3_reduce = conv_2d(inception_4d_output, 160, filter_size=1, activation='relu', name='inception_4e_3_3_reduce') inception_4e_3_3 = conv_2d(inception_4e_3_3_reduce, 320, filter_size=3, activation='relu', name='inception_4e_3_3') inception_4e_5_5_reduce = conv_2d(inception_4d_output, 32, filter_size=1, activation='relu', name='inception_4e_5_5_reduce') 
inception_4e_5_5 = conv_2d(inception_4e_5_5_reduce, 128, filter_size=5, activation='relu', name='inception_4e_5_5') inception_4e_pool = max_pool_2d(inception_4d_output, kernel_size=3, strides=1, name='inception_4e_pool') inception_4e_pool_1_1 = conv_2d(inception_4e_pool, 128, filter_size=1, activation='relu', name='inception_4e_pool_1_1') inception_4e_output = merge([inception_4e_1_1, inception_4e_3_3, inception_4e_5_5,inception_4e_pool_1_1],axis=3, mode='concat') pool4_3_3 = max_pool_2d(inception_4e_output, kernel_size=3, strides=2, name='pool_3_3') inception_5a_1_1 = conv_2d(pool4_3_3, 256, filter_size=1, activation='relu', name='inception_5a_1_1') inception_5a_3_3_reduce = conv_2d(pool4_3_3, 160, filter_size=1, activation='relu', name='inception_5a_3_3_reduce') inception_5a_3_3 = conv_2d(inception_5a_3_3_reduce, 320, filter_size=3, activation='relu', name='inception_5a_3_3') inception_5a_5_5_reduce = conv_2d(pool4_3_3, 32, filter_size=1, activation='relu', name='inception_5a_5_5_reduce') inception_5a_5_5 = conv_2d(inception_5a_5_5_reduce, 128, filter_size=5, activation='relu', name='inception_5a_5_5') inception_5a_pool = max_pool_2d(pool4_3_3, kernel_size=3, strides=1, name='inception_5a_pool') inception_5a_pool_1_1 = conv_2d(inception_5a_pool, 128, filter_size=1,activation='relu', name='inception_5a_pool_1_1') inception_5a_output = merge([inception_5a_1_1, inception_5a_3_3, inception_5a_5_5, inception_5a_pool_1_1], axis=3,mode='concat') inception_5b_1_1 = conv_2d(inception_5a_output, 384, filter_size=1,activation='relu', name='inception_5b_1_1') inception_5b_3_3_reduce = conv_2d(inception_5a_output, 192, filter_size=1, activation='relu', name='inception_5b_3_3_reduce') inception_5b_3_3 = conv_2d(inception_5b_3_3_reduce, 384, filter_size=3,activation='relu', name='inception_5b_3_3') inception_5b_5_5_reduce = conv_2d(inception_5a_output, 48, filter_size=1, activation='relu', name='inception_5b_5_5_reduce') inception_5b_5_5 = conv_2d(inception_5b_5_5_reduce,128, 
filter_size=5, activation='relu', name='inception_5b_5_5' ) inception_5b_pool = max_pool_2d(inception_5a_output, kernel_size=3, strides=1, name='inception_5b_pool') inception_5b_pool_1_1 = conv_2d(inception_5b_pool, 128, filter_size=1, activation='relu', name='inception_5b_pool_1_1') inception_5b_output = merge([inception_5b_1_1, inception_5b_3_3, inception_5b_5_5, inception_5b_pool_1_1], axis=3, mode='concat') pool5_7_7 = avg_pool_2d(inception_5b_output, kernel_size=7, strides=1) pool5_7_7 = dropout(pool5_7_7, 0.4) loss = fully_connected(pool5_7_7, output,activation='softmax') network = regression(loss, optimizer='momentum', loss='categorical_crossentropy', learning_rate=lr, name='targets') model = tflearn.DNN(network, max_checkpoints=0, tensorboard_verbose=0,tensorboard_dir='log') return model def resnext(width, height, frame_count, lr, output=9, model_name = 'sentnet_color.model'): net = input_data(shape=[None, width, height, 3], name='input') net = tflearn.conv_2d(net, 16, 3, regularizer='L2', weight_decay=0.0001) net = tflearn.layers.conv.resnext_block(net, n, 16, 32) net = tflearn.resnext_block(net, 1, 32, 32, downsample=True) net = tflearn.resnext_block(net, n-1, 32, 32) net = tflearn.resnext_block(net, 1, 64, 32, downsample=True) net = tflearn.resnext_block(net, n-1, 64, 32) net = tflearn.batch_normalization(net) net = tflearn.activation(net, 'relu') net = tflearn.global_avg_pool(net) # Regression net = tflearn.fully_connected(net, output, activation='softmax') opt = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True) net = tflearn.regression(net, optimizer=opt, loss='categorical_crossentropy') model = tflearn.DNN(net, max_checkpoints=0, tensorboard_verbose=0, tensorboard_dir='log') return model def sentnet_color_2d(width, height, frame_count, lr, output=9, model_name = 'sentnet_color.model'): network = input_data(shape=[None, width, height, 3], name='input') network = conv_2d(network, 96, 11, strides=4, activation='relu') network = 
max_pool_2d(network, 3, strides=2) network = local_response_normalization(network) network = conv_2d(network, 256, 5, activation='relu') network = max_pool_2d(network, 3, strides=2) network = local_response_normalization(network) network = conv_2d(network, 384, 3, activation='relu') network = conv_2d(network, 384, 3, activation='relu') network = conv_2d(network, 256, 3, activation='relu') network = max_pool_2d(network, 3, strides=2) network = conv_2d(network, 256, 5, activation='relu') network = max_pool_2d(network, 3, strides=2) network = local_response_normalization(network) network = conv_2d(network, 384, 3, activation='relu') network = conv_2d(network, 384, 3, activation='relu') network = conv_2d(network, 256, 3, activation='relu') network = max_pool_2d(network, 3, strides=2) network = local_response_normalization(network) network = fully_connected(network, 4096, activation='tanh') network = dropout(network, 0.5) network = fully_connected(network, 4096, activation='tanh') network = dropout(network, 0.5) network = fully_connected(network, 4096, activation='tanh') network = dropout(network, 0.5) network = fully_connected(network, 4096, activation='tanh') network = dropout(network, 0.5) network = fully_connected(network, output, activation='softmax') network = regression(network, optimizer='momentum', loss='categorical_crossentropy', learning_rate=lr, name='targets') model = tflearn.DNN(network, max_checkpoints=0, tensorboard_verbose=0, tensorboard_dir='log') return model def inception_v3(width, height, frame_count, lr, output=9, model_name = 'sentnet_color.model'): network = input_data(shape=[None, width, height,3], name='input') conv1_7_7 = conv_2d(network, 64, 7, strides=2, activation='relu', name = 'conv1_7_7_s2') pool1_3_3 = max_pool_2d(conv1_7_7, 3,strides=2) pool1_3_3 = local_response_normalization(pool1_3_3) conv2_3_3_reduce = conv_2d(pool1_3_3, 64,1, activation='relu',name = 'conv2_3_3_reduce') conv2_3_3 = conv_2d(conv2_3_3_reduce, 192,3, 
activation='relu', name='conv2_3_3') conv2_3_3 = local_response_normalization(conv2_3_3) pool2_3_3 = max_pool_2d(conv2_3_3, kernel_size=3, strides=2, name='pool2_3_3_s2') inception_3a_1_1 = conv_2d(pool2_3_3, 64, 1, activation='relu', name='inception_3a_1_1') inception_3a_3_3_reduce = conv_2d(pool2_3_3, 96,1, activation='relu', name='inception_3a_3_3_reduce') inception_3a_3_3 = conv_2d(inception_3a_3_3_reduce, 128,filter_size=3, activation='relu', name = 'inception_3a_3_3') inception_3a_5_5_reduce = conv_2d(pool2_3_3,16, filter_size=1,activation='relu', name ='inception_3a_5_5_reduce' ) inception_3a_5_5 = conv_2d(inception_3a_5_5_reduce, 32, filter_size=5, activation='relu', name= 'inception_3a_5_5') inception_3a_pool = max_pool_2d(pool2_3_3, kernel_size=3, strides=1, ) inception_3a_pool_1_1 = conv_2d(inception_3a_pool, 32, filter_size=1, activation='relu', name='inception_3a_pool_1_1') # merge the inception_3a__ inception_3a_output = merge([inception_3a_1_1, inception_3a_3_3, inception_3a_5_5, inception_3a_pool_1_1], mode='concat', axis=3) inception_3b_1_1 = conv_2d(inception_3a_output, 128,filter_size=1,activation='relu', name= 'inception_3b_1_1' ) inception_3b_3_3_reduce = conv_2d(inception_3a_output, 128, filter_size=1, activation='relu', name='inception_3b_3_3_reduce') inception_3b_3_3 = conv_2d(inception_3b_3_3_reduce, 192, filter_size=3, activation='relu',name='inception_3b_3_3') inception_3b_5_5_reduce = conv_2d(inception_3a_output, 32, filter_size=1, activation='relu', name = 'inception_3b_5_5_reduce') inception_3b_5_5 = conv_2d(inception_3b_5_5_reduce, 96, filter_size=5, name = 'inception_3b_5_5') inception_3b_pool = max_pool_2d(inception_3a_output, kernel_size=3, strides=1, name='inception_3b_pool') inception_3b_pool_1_1 = conv_2d(inception_3b_pool, 64, filter_size=1,activation='relu', name='inception_3b_pool_1_1') #merge the inception_3b_* inception_3b_output = merge([inception_3b_1_1, inception_3b_3_3, inception_3b_5_5, inception_3b_pool_1_1], 
mode='concat',axis=3,name='inception_3b_output') pool3_3_3 = max_pool_2d(inception_3b_output, kernel_size=3, strides=2, name='pool3_3_3') inception_4a_1_1 = conv_2d(pool3_3_3, 192, filter_size=1, activation='relu', name='inception_4a_1_1') inception_4a_3_3_reduce = conv_2d(pool3_3_3, 96, filter_size=1, activation='relu', name='inception_4a_3_3_reduce') inception_4a_3_3 = conv_2d(inception_4a_3_3_reduce, 208, filter_size=3, activation='relu', name='inception_4a_3_3') inception_4a_5_5_reduce = conv_2d(pool3_3_3, 16, filter_size=1, activation='relu', name='inception_4a_5_5_reduce') inception_4a_5_5 = conv_2d(inception_4a_5_5_reduce, 48, filter_size=5, activation='relu', name='inception_4a_5_5') inception_4a_pool = max_pool_2d(pool3_3_3, kernel_size=3, strides=1, name='inception_4a_pool') inception_4a_pool_1_1 = conv_2d(inception_4a_pool, 64, filter_size=1, activation='relu', name='inception_4a_pool_1_1') inception_4a_output = merge([inception_4a_1_1, inception_4a_3_3, inception_4a_5_5, inception_4a_pool_1_1], mode='concat', axis=3, name='inception_4a_output') inception_4b_1_1 = conv_2d(inception_4a_output, 160, filter_size=1, activation='relu', name='inception_4a_1_1') inception_4b_3_3_reduce = conv_2d(inception_4a_output, 112, filter_size=1, activation='relu', name='inception_4b_3_3_reduce') inception_4b_3_3 = conv_2d(inception_4b_3_3_reduce, 224, filter_size=3, activation='relu', name='inception_4b_3_3') inception_4b_5_5_reduce = conv_2d(inception_4a_output, 24, filter_size=1, activation='relu', name='inception_4b_5_5_reduce') inception_4b_5_5 = conv_2d(inception_4b_5_5_reduce, 64, filter_size=5, activation='relu', name='inception_4b_5_5') inception_4b_pool = max_pool_2d(inception_4a_output, kernel_size=3, strides=1, name='inception_4b_pool') inception_4b_pool_1_1 = conv_2d(inception_4b_pool, 64, filter_size=1, activation='relu', name='inception_4b_pool_1_1') inception_4b_output = merge([inception_4b_1_1, inception_4b_3_3, inception_4b_5_5, inception_4b_pool_1_1], 
mode='concat', axis=3, name='inception_4b_output') inception_4c_1_1 = conv_2d(inception_4b_output, 128, filter_size=1, activation='relu',name='inception_4c_1_1') inception_4c_3_3_reduce = conv_2d(inception_4b_output, 128, filter_size=1, activation='relu', name='inception_4c_3_3_reduce') inception_4c_3_3 = conv_2d(inception_4c_3_3_reduce, 256, filter_size=3, activation='relu', name='inception_4c_3_3') inception_4c_5_5_reduce = conv_2d(inception_4b_output, 24, filter_size=1, activation='relu', name='inception_4c_5_5_reduce') inception_4c_5_5 = conv_2d(inception_4c_5_5_reduce, 64, filter_size=5, activation='relu', name='inception_4c_5_5') inception_4c_pool = max_pool_2d(inception_4b_output, kernel_size=3, strides=1) inception_4c_pool_1_1 = conv_2d(inception_4c_pool, 64, filter_size=1, activation='relu', name='inception_4c_pool_1_1') inception_4c_output = merge([inception_4c_1_1, inception_4c_3_3, inception_4c_5_5, inception_4c_pool_1_1], mode='concat', axis=3,name='inception_4c_output') inception_4d_1_1 = conv_2d(inception_4c_output, 112, filter_size=1, activation='relu', name='inception_4d_1_1') inception_4d_3_3_reduce = conv_2d(inception_4c_output, 144, filter_size=1, activation='relu', name='inception_4d_3_3_reduce') inception_4d_3_3 = conv_2d(inception_4d_3_3_reduce, 288, filter_size=3, activation='relu', name='inception_4d_3_3') inception_4d_5_5_reduce = conv_2d(inception_4c_output, 32, filter_size=1, activation='relu', name='inception_4d_5_5_reduce') inception_4d_5_5 = conv_2d(inception_4d_5_5_reduce, 64, filter_size=5, activation='relu', name='inception_4d_5_5') inception_4d_pool = max_pool_2d(inception_4c_output, kernel_size=3, strides=1, name='inception_4d_pool') inception_4d_pool_1_1 = conv_2d(inception_4d_pool, 64, filter_size=1, activation='relu', name='inception_4d_pool_1_1') inception_4d_output = merge([inception_4d_1_1, inception_4d_3_3, inception_4d_5_5, inception_4d_pool_1_1], mode='concat', axis=3, name='inception_4d_output') inception_4e_1_1 = 
conv_2d(inception_4d_output, 256, filter_size=1, activation='relu', name='inception_4e_1_1') inception_4e_3_3_reduce = conv_2d(inception_4d_output, 160, filter_size=1, activation='relu', name='inception_4e_3_3_reduce') inception_4e_3_3 = conv_2d(inception_4e_3_3_reduce, 320, filter_size=3, activation='relu', name='inception_4e_3_3') inception_4e_5_5_reduce = conv_2d(inception_4d_output, 32, filter_size=1, activation='relu', name='inception_4e_5_5_reduce') inception_4e_5_5 = conv_2d(inception_4e_5_5_reduce, 128, filter_size=5, activation='relu', name='inception_4e_5_5') inception_4e_pool = max_pool_2d(inception_4d_output, kernel_size=3, strides=1, name='inception_4e_pool') inception_4e_pool_1_1 = conv_2d(inception_4e_pool, 128, filter_size=1, activation='relu', name='inception_4e_pool_1_1') inception_4e_output = merge([inception_4e_1_1, inception_4e_3_3, inception_4e_5_5,inception_4e_pool_1_1],axis=3, mode='concat') pool4_3_3 = max_pool_2d(inception_4e_output, kernel_size=3, strides=2, name='pool_3_3') inception_5a_1_1 = conv_2d(pool4_3_3, 256, filter_size=1, activation='relu', name='inception_5a_1_1') inception_5a_3_3_reduce = conv_2d(pool4_3_3, 160, filter_size=1, activation='relu', name='inception_5a_3_3_reduce') inception_5a_3_3 = conv_2d(inception_5a_3_3_reduce, 320, filter_size=3, activation='relu', name='inception_5a_3_3') inception_5a_5_5_reduce = conv_2d(pool4_3_3, 32, filter_size=1, activation='relu', name='inception_5a_5_5_reduce') inception_5a_5_5 = conv_2d(inception_5a_5_5_reduce, 128, filter_size=5, activation='relu', name='inception_5a_5_5') inception_5a_pool = max_pool_2d(pool4_3_3, kernel_size=3, strides=1, name='inception_5a_pool') inception_5a_pool_1_1 = conv_2d(inception_5a_pool, 128, filter_size=1,activation='relu', name='inception_5a_pool_1_1') inception_5a_output = merge([inception_5a_1_1, inception_5a_3_3, inception_5a_5_5, inception_5a_pool_1_1], axis=3,mode='concat') inception_5b_1_1 = conv_2d(inception_5a_output, 384, 
filter_size=1,activation='relu', name='inception_5b_1_1') inception_5b_3_3_reduce = conv_2d(inception_5a_output, 192, filter_size=1, activation='relu', name='inception_5b_3_3_reduce') inception_5b_3_3 = conv_2d(inception_5b_3_3_reduce, 384, filter_size=3,activation='relu', name='inception_5b_3_3') inception_5b_5_5_reduce = conv_2d(inception_5a_output, 48, filter_size=1, activation='relu', name='inception_5b_5_5_reduce') inception_5b_5_5 = conv_2d(inception_5b_5_5_reduce,128, filter_size=5, activation='relu', name='inception_5b_5_5' ) inception_5b_pool = max_pool_2d(inception_5a_output, kernel_size=3, strides=1, name='inception_5b_pool') inception_5b_pool_1_1 = conv_2d(inception_5b_pool, 128, filter_size=1, activation='relu', name='inception_5b_pool_1_1') inception_5b_output = merge([inception_5b_1_1, inception_5b_3_3, inception_5b_5_5, inception_5b_pool_1_1], axis=3, mode='concat') pool5_7_7 = avg_pool_2d(inception_5b_output, kernel_size=7, strides=1) pool5_7_7 = dropout(pool5_7_7, 0.4) loss = fully_connected(pool5_7_7, output,activation='softmax') network = regression(loss, optimizer='momentum', loss='categorical_crossentropy', learning_rate=lr, name='targets') model = tflearn.DNN(network, max_checkpoints=0, tensorboard_verbose=0,tensorboard_dir='log') return model def inception_v3_3d(width, height, frame_count, lr, output=9, model_name = 'sentnet_color.model'): network = input_data(shape=[None, width, height,3, 1], name='input') conv1_7_7 = conv_3d(network, 64, 7, strides=2, activation='relu', name = 'conv1_7_7_s2') pool1_3_3 = max_pool_3d(conv1_7_7, 3,strides=2) #pool1_3_3 = local_response_normalization(pool1_3_3) conv2_3_3_reduce = conv_3d(pool1_3_3, 64,1, activation='relu',name = 'conv2_3_3_reduce') conv2_3_3 = conv_3d(conv2_3_3_reduce, 192,3, activation='relu', name='conv2_3_3') #conv2_3_3 = local_response_normalization(conv2_3_3) pool2_3_3 = max_pool_3d(conv2_3_3, kernel_size=3, strides=2, name='pool2_3_3_s2') inception_3a_1_1 = conv_3d(pool2_3_3, 64, 1, 
activation='relu', name='inception_3a_1_1') inception_3a_3_3_reduce = conv_3d(pool2_3_3, 96,1, activation='relu', name='inception_3a_3_3_reduce') inception_3a_3_3 = conv_3d(inception_3a_3_3_reduce, 128,filter_size=3, activation='relu', name = 'inception_3a_3_3') inception_3a_5_5_reduce = conv_3d(pool2_3_3,16, filter_size=1,activation='relu', name ='inception_3a_5_5_reduce' ) inception_3a_5_5 = conv_3d(inception_3a_5_5_reduce, 32, filter_size=5, activation='relu', name= 'inception_3a_5_5') inception_3a_pool = max_pool_3d(pool2_3_3, kernel_size=3, strides=1, ) inception_3a_pool_1_1 = conv_3d(inception_3a_pool, 32, filter_size=1, activation='relu', name='inception_3a_pool_1_1') # merge the inception_3a__ inception_3a_output = merge([inception_3a_1_1, inception_3a_3_3, inception_3a_5_5, inception_3a_pool_1_1], mode='concat', axis=4) inception_3b_1_1 = conv_3d(inception_3a_output, 128,filter_size=1,activation='relu', name= 'inception_3b_1_1' ) inception_3b_3_3_reduce = conv_3d(inception_3a_output, 128, filter_size=1, activation='relu', name='inception_3b_3_3_reduce') inception_3b_3_3 = conv_3d(inception_3b_3_3_reduce, 192, filter_size=3, activation='relu',name='inception_3b_3_3') inception_3b_5_5_reduce = conv_3d(inception_3a_output, 32, filter_size=1, activation='relu', name = 'inception_3b_5_5_reduce') inception_3b_5_5 = conv_3d(inception_3b_5_5_reduce, 96, filter_size=5, name = 'inception_3b_5_5') inception_3b_pool = max_pool_3d(inception_3a_output, kernel_size=3, strides=1, name='inception_3b_pool') inception_3b_pool_1_1 = conv_3d(inception_3b_pool, 64, filter_size=1,activation='relu', name='inception_3b_pool_1_1') #merge the inception_3b_* inception_3b_output = merge([inception_3b_1_1, inception_3b_3_3, inception_3b_5_5, inception_3b_pool_1_1], mode='concat',axis=4,name='inception_3b_output') pool3_3_3 = max_pool_3d(inception_3b_output, kernel_size=3, strides=2, name='pool3_3_3') inception_4a_1_1 = conv_3d(pool3_3_3, 192, filter_size=1, activation='relu', 
name='inception_4a_1_1') inception_4a_3_3_reduce = conv_3d(pool3_3_3, 96, filter_size=1, activation='relu', name='inception_4a_3_3_reduce') inception_4a_3_3 = conv_3d(inception_4a_3_3_reduce, 208, filter_size=3, activation='relu', name='inception_4a_3_3') inception_4a_5_5_reduce = conv_3d(pool3_3_3, 16, filter_size=1, activation='relu', name='inception_4a_5_5_reduce') inception_4a_5_5 = conv_3d(inception_4a_5_5_reduce, 48, filter_size=5, activation='relu', name='inception_4a_5_5') inception_4a_pool = max_pool_3d(pool3_3_3, kernel_size=3, strides=1, name='inception_4a_pool') inception_4a_pool_1_1 = conv_3d(inception_4a_pool, 64, filter_size=1, activation='relu', name='inception_4a_pool_1_1') inception_4a_output = merge([inception_4a_1_1, inception_4a_3_3, inception_4a_5_5, inception_4a_pool_1_1], mode='concat', axis=4, name='inception_4a_output') inception_4b_1_1 = conv_3d(inception_4a_output, 160, filter_size=1, activation='relu', name='inception_4a_1_1') inception_4b_3_3_reduce = conv_3d(inception_4a_output, 112, filter_size=1, activation='relu', name='inception_4b_3_3_reduce') inception_4b_3_3 = conv_3d(inception_4b_3_3_reduce, 224, filter_size=3, activation='relu', name='inception_4b_3_3') inception_4b_5_5_reduce = conv_3d(inception_4a_output, 24, filter_size=1, activation='relu', name='inception_4b_5_5_reduce') inception_4b_5_5 = conv_3d(inception_4b_5_5_reduce, 64, filter_size=5, activation='relu', name='inception_4b_5_5') inception_4b_pool = max_pool_3d(inception_4a_output, kernel_size=3, strides=1, name='inception_4b_pool') inception_4b_pool_1_1 = conv_3d(inception_4b_pool, 64, filter_size=1, activation='relu', name='inception_4b_pool_1_1') inception_4b_output = merge([inception_4b_1_1, inception_4b_3_3, inception_4b_5_5, inception_4b_pool_1_1], mode='concat', axis=4, name='inception_4b_output') inception_4c_1_1 = conv_3d(inception_4b_output, 128, filter_size=1, activation='relu',name='inception_4c_1_1') inception_4c_3_3_reduce = conv_3d(inception_4b_output, 
128, filter_size=1, activation='relu', name='inception_4c_3_3_reduce') inception_4c_3_3 = conv_3d(inception_4c_3_3_reduce, 256, filter_size=3, activation='relu', name='inception_4c_3_3') inception_4c_5_5_reduce = conv_3d(inception_4b_output, 24, filter_size=1, activation='relu', name='inception_4c_5_5_reduce') inception_4c_5_5 = conv_3d(inception_4c_5_5_reduce, 64, filter_size=5, activation='relu', name='inception_4c_5_5') inception_4c_pool = max_pool_3d(inception_4b_output, kernel_size=3, strides=1) inception_4c_pool_1_1 = conv_3d(inception_4c_pool, 64, filter_size=1, activation='relu', name='inception_4c_pool_1_1') inception_4c_output = merge([inception_4c_1_1, inception_4c_3_3, inception_4c_5_5, inception_4c_pool_1_1], mode='concat', axis=4,name='inception_4c_output') inception_4d_1_1 = conv_3d(inception_4c_output, 112, filter_size=1, activation='relu', name='inception_4d_1_1') inception_4d_3_3_reduce = conv_3d(inception_4c_output, 144, filter_size=1, activation='relu', name='inception_4d_3_3_reduce') inception_4d_3_3 = conv_3d(inception_4d_3_3_reduce, 288, filter_size=3, activation='relu', name='inception_4d_3_3') inception_4d_5_5_reduce = conv_3d(inception_4c_output, 32, filter_size=1, activation='relu', name='inception_4d_5_5_reduce') inception_4d_5_5 = conv_3d(inception_4d_5_5_reduce, 64, filter_size=5, activation='relu', name='inception_4d_5_5') inception_4d_pool = max_pool_3d(inception_4c_output, kernel_size=3, strides=1, name='inception_4d_pool') inception_4d_pool_1_1 = conv_3d(inception_4d_pool, 64, filter_size=1, activation='relu', name='inception_4d_pool_1_1') inception_4d_output = merge([inception_4d_1_1, inception_4d_3_3, inception_4d_5_5, inception_4d_pool_1_1], mode='concat', axis=4, name='inception_4d_output') inception_4e_1_1 = conv_3d(inception_4d_output, 256, filter_size=1, activation='relu', name='inception_4e_1_1') inception_4e_3_3_reduce = conv_3d(inception_4d_output, 160, filter_size=1, activation='relu', name='inception_4e_3_3_reduce') 
inception_4e_3_3 = conv_3d(inception_4e_3_3_reduce, 320, filter_size=3, activation='relu', name='inception_4e_3_3') inception_4e_5_5_reduce = conv_3d(inception_4d_output, 32, filter_size=1, activation='relu', name='inception_4e_5_5_reduce') inception_4e_5_5 = conv_3d(inception_4e_5_5_reduce, 128, filter_size=5, activation='relu', name='inception_4e_5_5') inception_4e_pool = max_pool_3d(inception_4d_output, kernel_size=3, strides=1, name='inception_4e_pool') inception_4e_pool_1_1 = conv_3d(inception_4e_pool, 128, filter_size=1, activation='relu', name='inception_4e_pool_1_1') inception_4e_output = merge([inception_4e_1_1, inception_4e_3_3, inception_4e_5_5,inception_4e_pool_1_1],axis=4, mode='concat') pool4_3_3 = max_pool_3d(inception_4e_output, kernel_size=3, strides=2, name='pool_3_3') inception_5a_1_1 = conv_3d(pool4_3_3, 256, filter_size=1, activation='relu', name='inception_5a_1_1') inception_5a_3_3_reduce = conv_3d(pool4_3_3, 160, filter_size=1, activation='relu', name='inception_5a_3_3_reduce') inception_5a_3_3 = conv_3d(inception_5a_3_3_reduce, 320, filter_size=3, activation='relu', name='inception_5a_3_3') inception_5a_5_5_reduce = conv_3d(pool4_3_3, 32, filter_size=1, activation='relu', name='inception_5a_5_5_reduce') inception_5a_5_5 = conv_3d(inception_5a_5_5_reduce, 128, filter_size=5, activation='relu', name='inception_5a_5_5') inception_5a_pool = max_pool_3d(pool4_3_3, kernel_size=3, strides=1, name='inception_5a_pool') inception_5a_pool_1_1 = conv_3d(inception_5a_pool, 128, filter_size=1,activation='relu', name='inception_5a_pool_1_1') inception_5a_output = merge([inception_5a_1_1, inception_5a_3_3, inception_5a_5_5, inception_5a_pool_1_1], axis=4,mode='concat') inception_5b_1_1 = conv_3d(inception_5a_output, 384, filter_size=1,activation='relu', name='inception_5b_1_1') inception_5b_3_3_reduce = conv_3d(inception_5a_output, 192, filter_size=1, activation='relu', name='inception_5b_3_3_reduce') inception_5b_3_3 = conv_3d(inception_5b_3_3_reduce, 384, 
filter_size=3,activation='relu', name='inception_5b_3_3') inception_5b_5_5_reduce = conv_3d(inception_5a_output, 48, filter_size=1, activation='relu', name='inception_5b_5_5_reduce') inception_5b_5_5 = conv_3d(inception_5b_5_5_reduce,128, filter_size=5, activation='relu', name='inception_5b_5_5' ) inception_5b_pool = max_pool_3d(inception_5a_output, kernel_size=3, strides=1, name='inception_5b_pool') inception_5b_pool_1_1 = conv_3d(inception_5b_pool, 128, filter_size=1, activation='relu', name='inception_5b_pool_1_1') inception_5b_output = merge([inception_5b_1_1, inception_5b_3_3, inception_5b_5_5, inception_5b_pool_1_1], axis=4, mode='concat') pool5_7_7 = avg_pool_3d(inception_5b_output, kernel_size=7, strides=1) pool5_7_7 = dropout(pool5_7_7, 0.4) loss = fully_connected(pool5_7_7, output,activation='softmax') network = regression(loss, optimizer='momentum', loss='categorical_crossentropy', learning_rate=lr, name='targets') model = tflearn.DNN(network, checkpoint_path=model_name, max_checkpoints=1, tensorboard_verbose=0,tensorboard_dir='log') return model def sentnet_LSTM_gray(width, height, frame_count, lr, output=9): network = input_data(shape=[None, width, height], name='input') #network = tflearn.input_data(shape=[None, 28, 28], name='input') network = tflearn.lstm(network, 128, return_seq=True) network = tflearn.lstm(network, 128) network = tflearn.fully_connected(network, 9, activation='softmax') network = tflearn.regression(network, optimizer='adam', loss='categorical_crossentropy', name="output1") model = tflearn.DNN(network, checkpoint_path='model_lstm', max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log') return model def sentnet_color(width, height, frame_count, lr, output=9, model_name = 'sentnet_color.model'): network = input_data(shape=[None, width, height,3, 1], name='input') network = conv_3d(network, 96, 11, strides=4, activation='relu') network = max_pool_3d(network, 3, strides=2) #network = local_response_normalization(network) 
network = conv_3d(network, 256, 5, activation='relu') network = max_pool_3d(network, 3, strides=2) #network = local_response_normalization(network) network = conv_3d(network, 384, 3, activation='relu') network = conv_3d(network, 384, 3, activation='relu') network = conv_3d(network, 256, 3, activation='relu') network = max_pool_3d(network, 3, strides=2) network = conv_3d(network, 256, 5, activation='relu') network = max_pool_3d(network, 3, strides=2) #network = local_response_normalization(network) network = conv_3d(network, 384, 3, activation='relu') network = conv_3d(network, 384, 3, activation='relu') network = conv_3d(network, 256, 3, activation='relu') network = max_pool_3d(network, 3, strides=2) #network = local_response_normalization(network) network = fully_connected(network, 4096, activation='tanh') network = dropout(network, 0.5) network = fully_connected(network, 4096, activation='tanh') network = dropout(network, 0.5) network = fully_connected(network, 4096, activation='tanh') network = dropout(network, 0.5) network = fully_connected(network, 4096, activation='tanh') network = dropout(network, 0.5) network = fully_connected(network, output, activation='softmax') network = regression(network, optimizer='momentum', loss='categorical_crossentropy', learning_rate=lr, name='targets') model = tflearn.DNN(network, checkpoint_path=model_name, max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log') return model def sentnet_frames(width, height, frame_count, lr, output=9): network = input_data(shape=[None, width, height,frame_count, 1], name='input') network = conv_3d(network, 96, 11, strides=4, activation='relu') network = max_pool_3d(network, 3, strides=2) #network = local_response_normalization(network) network = conv_3d(network, 256, 5, activation='relu') network = max_pool_3d(network, 3, strides=2) #network = local_response_normalization(network) network = conv_3d(network, 384, 3, activation='relu') network = conv_3d(network, 384, 3, 
activation='relu') network = conv_3d(network, 256, 3, activation='relu') network = max_pool_3d(network, 3, strides=2) network = conv_3d(network, 256, 5, activation='relu') network = max_pool_3d(network, 3, strides=2) #network = local_response_normalization(network) network = conv_3d(network, 384, 3, activation='relu') network = conv_3d(network, 384, 3, activation='relu') network = conv_3d(network, 256, 3, activation='relu') network = max_pool_3d(network, 3, strides=2) #network = local_response_normalization(network) network = fully_connected(network, 4096, activation='tanh') network = dropout(network, 0.5) network = fully_connected(network, 4096, activation='tanh') network = dropout(network, 0.5) network = fully_connected(network, 4096, activation='tanh') network = dropout(network, 0.5) network = fully_connected(network, 4096, activation='tanh') network = dropout(network, 0.5) network = fully_connected(network, output, activation='softmax') network = regression(network, optimizer='momentum', loss='categorical_crossentropy', learning_rate=lr, name='targets') model = tflearn.DNN(network, checkpoint_path='model_alexnet', max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log') return model def sentnet2(width, height, frame_count, lr, output=9): network = input_data(shape=[None, width, height, frame_count, 1], name='input') network = conv_3d(network, 96, 11, strides=4, activation='relu') network = max_pool_3d(network, 3, strides=2) #network = local_response_normalization(network) network = conv_3d(network, 256, 5, activation='relu') network = max_pool_3d(network, 3, strides=2) #network = local_response_normalization(network) network = conv_3d(network, 384, 3, activation='relu') network = conv_3d(network, 384, 3, activation='relu') network = conv_3d(network, 256, 3, activation='relu') network = max_pool_3d(network, 3, strides=2) #network = local_response_normalization(network) network = fully_connected(network, 4096, activation='tanh') network = dropout(network, 
0.5) network = fully_connected(network, 4096, activation='tanh') network = dropout(network, 0.5) network = fully_connected(network, 3, activation='softmax') network = regression(network, optimizer='momentum', loss='categorical_crossentropy', learning_rate=lr, name='targets') model = tflearn.DNN(network, checkpoint_path='model_alexnet', max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log') return model def sentnet(width, height, frame_count, lr, output=9): network = input_data(shape=[None, width, height, frame_count, 1], name='input') network = conv_3d(network, 96, 11, strides=4, activation='relu') network = avg_pool_3d(network, 3, strides=2) #network = local_response_normalization(network) network = conv_3d(network, 256, 5, activation='relu') network = avg_pool_3d(network, 3, strides=2) #network = local_response_normalization(network) network = conv_3d(network, 384, 3, activation='relu') network = conv_3d(network, 384, 3, activation='relu') network = conv_3d(network, 256, 3, activation='relu') network = max_pool_3d(network, 3, strides=2) network = conv_3d(network, 256, 5, activation='relu') network = avg_pool_3d(network, 3, strides=2) #network = local_response_normalization(network) network = conv_3d(network, 384, 3, activation='relu') network = conv_3d(network, 384, 3, activation='relu') network = conv_3d(network, 256, 3, activation='relu') network = avg_pool_3d(network, 3, strides=2) #network = local_response_normalization(network) network = fully_connected(network, 4096, activation='tanh') network = dropout(network, 0.5) network = fully_connected(network, 4096, activation='tanh') network = dropout(network, 0.5) network = fully_connected(network, 4096, activation='tanh') network = dropout(network, 0.5) network = fully_connected(network, 4096, activation='tanh') network = dropout(network, 0.5) network = fully_connected(network, output, activation='softmax') network = regression(network, optimizer='momentum', loss='categorical_crossentropy', 
learning_rate=lr, name='targets') model = tflearn.DNN(network, checkpoint_path='model_alexnet', max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log') return model def alexnet2(width, height, lr, output=3): network = input_data(shape=[None, width, height, 1], name='input') network = conv_2d(network, 96, 11, strides=4, activation='relu') network = max_pool_2d(network, 3, strides=2) network = local_response_normalization(network) network = conv_2d(network, 256, 5, activation='relu') network = max_pool_2d(network, 3, strides=2) network = local_response_normalization(network) network = conv_2d(network, 384, 3, activation='relu') network = conv_2d(network, 384, 3, activation='relu') network = conv_2d(network, 256, 3, activation='relu') network = max_pool_2d(network, 3, strides=2) network = conv_2d(network, 256, 5, activation='relu') network = max_pool_2d(network, 3, strides=2) network = local_response_normalization(network) network = conv_2d(network, 384, 3, activation='relu') network = conv_2d(network, 384, 3, activation='relu') network = conv_2d(network, 256, 3, activation='relu') network = max_pool_2d(network, 3, strides=2) network = local_response_normalization(network) network = fully_connected(network, 4096, activation='tanh') network = dropout(network, 0.5) network = fully_connected(network, 4096, activation='tanh') network = dropout(network, 0.5) network = fully_connected(network, 4096, activation='tanh') network = dropout(network, 0.5) network = fully_connected(network, 4096, activation='tanh') network = dropout(network, 0.5) network = fully_connected(network, output, activation='softmax') network = regression(network, optimizer='momentum', loss='categorical_crossentropy', learning_rate=lr, name='targets') model = tflearn.DNN(network, checkpoint_path='model_alexnet', max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log') return model def sentnet_v0(width, height, frame_count, lr, output=9): network = input_data(shape=[None, width, height, 
frame_count, 1], name='input') network = conv_3d(network, 96, 11, strides=4, activation='relu') network = max_pool_3d(network, 3, strides=2) #network = local_response_normalization(network) network = conv_3d(network, 256, 5, activation='relu') network = max_pool_3d(network, 3, strides=2) #network = local_response_normalization(network) network = conv_3d(network, 384, 3, 3, activation='relu') network = conv_3d(network, 384, 3, 3, activation='relu') network = conv_3d(network, 256, 3, 3, activation='relu') network = max_pool_3d(network, 3, strides=2) #network = local_response_normalization(network) network = fully_connected(network, 4096, activation='tanh') network = dropout(network, 0.5) network = fully_connected(network, 4096, activation='tanh') network = dropout(network, 0.5) network = fully_connected(network, output, activation='softmax') network = regression(network, optimizer='momentum', loss='categorical_crossentropy', learning_rate=lr, name='targets') model = tflearn.DNN(network, checkpoint_path='model_alexnet', max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log') return model def alexnet(width, height, lr, output=3): network = input_data(shape=[None, width, height, 1], name='input') network = conv_2d(network, 96, 11, strides=4, activation='relu') network = max_pool_2d(network, 3, strides=2) network = local_response_normalization(network) network = conv_2d(network, 256, 5, activation='relu') network = max_pool_2d(network, 3, strides=2) network = local_response_normalization(network) network = conv_2d(network, 384, 3, activation='relu') network = conv_2d(network, 384, 3, activation='relu') network = conv_2d(network, 256, 3, activation='relu') network = max_pool_2d(network, 3, strides=2) network = local_response_normalization(network) network = fully_connected(network, 4096, activation='tanh') network = dropout(network, 0.5) network = fully_connected(network, 4096, activation='tanh') network = dropout(network, 0.5) network = fully_connected(network, 
output, activation='softmax') network = regression(network, optimizer='momentum', loss='categorical_crossentropy', learning_rate=lr, name='targets') model = tflearn.DNN(network, checkpoint_path='model_alexnet', max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log') return model
codeparrot/github-code-clean
import collections import typing from . import _inspect, _iterate, _update from . import duration as _duration from . import enums as _enums from . import format as _format from . import indicators as _indicators from . import iterate as iterate_ from . import markups as _markups from . import parentage as _parentage from . import pcollections as _pcollections from . import pitch as _pitch from . import score as _score from . import select as _select from . import tag as _tag from . import timespan as _timespan from . import typings as _typings def after_grace_container(argument): r""" Gets after grace containers attached to component. .. container:: example REGRESSION. Works with grace notes (and containers): >>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice") >>> container = abjad.BeforeGraceContainer("cs'16") >>> abjad.attach(container, music_voice[1]) >>> container = abjad.on_beat_grace_container( ... "g'16 gs' a' as'", music_voice[2:3] ... ) >>> abjad.attach(abjad.Articulation(">"), container[0]) >>> container = abjad.AfterGraceContainer("fs'16") >>> abjad.attach(container, music_voice[3]) >>> staff = abjad.Staff([music_voice]) >>> lilypond_file = abjad.LilyPondFile([r'\include "abjad.ily"', staff]) >>> abjad.show(lilypond_file) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \context Voice = "Music_Voice" { c'4 \grace { cs'16 } d'4 << \context Voice = "On_Beat_Grace_Container" { \set fontSize = #-3 \slash \voiceOne < \tweak font-size 0 \tweak transparent ##t e' g' >16 - \accent [ ( gs'16 a'16 as'16 ) ] } \context Voice = "Music_Voice" { \voiceTwo e'4 } >> \oneVoice \afterGrace f'4 { fs'16 } } } >>> for component in abjad.select.components(staff): ... container = abjad.get.after_grace_container(component) ... 
print(f"{repr(component):30} {repr(container)}") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") None Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') None Note("c'4") None BeforeGraceContainer("cs'16") None Note("cs'16") None Note("d'4") None Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") None OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") None Chord("<e' g'>16") None Note("gs'16") None Note("a'16") None Note("as'16") None Voice("e'4", name='Music_Voice') None Note("e'4") None Note("f'4") AfterGraceContainer("fs'16") AfterGraceContainer("fs'16") None Note("fs'16") None """ return getattr(argument, "_after_grace_container", None) def annotation( argument, annotation: typing.Any, default: typing.Any = None, unwrap: bool = True, ) -> typing.Any: r""" Gets annotation. .. container:: example >>> staff = abjad.Staff("c'4 e' e' f'") >>> abjad.annotate(staff[0], 'default_instrument', abjad.Cello()) >>> abjad.show(staff) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { c'4 e'4 e'4 f'4 } >>> string = 'default_instrument' >>> abjad.get.annotation(staff[0], string) Cello() >>> abjad.get.annotation(staff[1], string) is None True >>> abjad.get.annotation(staff[2], string) is None True >>> abjad.get.annotation(staff[3], string) is None True Returns default when no annotation is found: >>> abjad.get.annotation(staff[3], string, abjad.Violin()) Violin() .. container:: example REGRESSION: annotation is not picked up as effective indicator: >>> prototype = abjad.Instrument >>> abjad.get.effective(staff[0], prototype) is None True >>> abjad.get.effective(staff[1], prototype) is None True >>> abjad.get.effective(staff[2], prototype) is None True >>> abjad.get.effective(staff[3], prototype) is None True """ return _inspect._get_annotation( argument, annotation, default=default, unwrap=unwrap ) def annotation_wrappers(argument): r""" Gets annotation wrappers. .. 
container:: example >>> staff = abjad.Staff("c'4 e' e' f'") >>> abjad.annotate(staff[0], 'default_instrument', abjad.Cello()) >>> abjad.annotate(staff[0], 'default_clef', abjad.Clef('tenor')) >>> abjad.show(staff) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { c'4 e'4 e'4 f'4 } >>> for wrapper in abjad.get.annotation_wrappers(staff[0]): wrapper Wrapper(annotation='default_instrument', context=None, deactivate=False, indicator=Cello(), synthetic_offset=None, tag=Tag()) Wrapper(annotation='default_clef', context=None, deactivate=False, indicator=Clef(name='tenor', hide=False), synthetic_offset=None, tag=Tag()) """ return _inspect._get_annotation_wrappers(argument) def bar_line_crossing(argument) -> bool: r""" Is true when ``argument`` crosses bar line. .. container:: example >>> staff = abjad.Staff("c'4 d'4 e'4") >>> time_signature = abjad.TimeSignature((3, 8)) >>> abjad.attach(time_signature, staff[0]) >>> abjad.show(staff) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \time 3/8 c'4 d'4 e'4 } >>> for note in staff: ... result = abjad.get.bar_line_crossing(note) ... print(note, result) ... c'4 False d'4 True e'4 False """ if not isinstance(argument, _score.Component): raise Exception("can only get indicator on component.") time_signature = _inspect._get_effective(argument, _indicators.TimeSignature) if time_signature is None: time_signature_duration = _duration.Duration(4, 4) else: time_signature_duration = time_signature.duration partial = getattr(time_signature, "partial", 0) partial = partial or 0 start_offset = timespan(argument).start_offset shifted_start = start_offset - partial shifted_start %= time_signature_duration stop_offset = argument._get_duration() + shifted_start if time_signature_duration < stop_offset: return True return False def before_grace_container(argument): r""" Gets before-grace container attached to leaf. .. container:: example REGRESSION. 
Works with grace notes (and containers): >>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice") >>> container = abjad.BeforeGraceContainer("cs'16") >>> abjad.attach(container, music_voice[1]) >>> container = abjad.on_beat_grace_container( ... "g'16 gs' a' as'", music_voice[2:3] ... ) >>> abjad.attach(abjad.Articulation(">"), container[0]) >>> container = abjad.AfterGraceContainer("fs'16") >>> abjad.attach(container, music_voice[3]) >>> staff = abjad.Staff([music_voice]) >>> lilypond_file = abjad.LilyPondFile([r'\include "abjad.ily"', staff]) >>> abjad.show(lilypond_file) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \context Voice = "Music_Voice" { c'4 \grace { cs'16 } d'4 << \context Voice = "On_Beat_Grace_Container" { \set fontSize = #-3 \slash \voiceOne < \tweak font-size 0 \tweak transparent ##t e' g' >16 - \accent [ ( gs'16 a'16 as'16 ) ] } \context Voice = "Music_Voice" { \voiceTwo e'4 } >> \oneVoice \afterGrace f'4 { fs'16 } } } >>> for component in abjad.select.components(staff): ... container = abjad.get.before_grace_container(component) ... print(f"{repr(component):30} {repr(container)}") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") None Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') None Note("c'4") None BeforeGraceContainer("cs'16") None Note("cs'16") None Note("d'4") BeforeGraceContainer("cs'16") Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") None OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") None Chord("<e' g'>16") None Note("gs'16") None Note("a'16") None Note("as'16") None Voice("e'4", name='Music_Voice') None Note("e'4") None Note("f'4") None AfterGraceContainer("fs'16") None Note("fs'16") None """ return getattr(argument, "_before_grace_container", None) def contents(argument) -> list[_score.Component]: r""" Gets contents. .. container:: example REGRESSION. 
Works with grace notes (and containers): >>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice") >>> container = abjad.BeforeGraceContainer("cs'16") >>> abjad.attach(container, music_voice[1]) >>> container = abjad.on_beat_grace_container( ... "g'16 gs' a' as'", music_voice[2:3] ... ) >>> abjad.attach(abjad.Articulation(">"), container[0]) >>> container = abjad.AfterGraceContainer("fs'16") >>> abjad.attach(container, music_voice[3]) >>> staff = abjad.Staff([music_voice]) >>> lilypond_file = abjad.LilyPondFile([r'\include "abjad.ily"', staff]) >>> abjad.show(lilypond_file) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \context Voice = "Music_Voice" { c'4 \grace { cs'16 } d'4 << \context Voice = "On_Beat_Grace_Container" { \set fontSize = #-3 \slash \voiceOne < \tweak font-size 0 \tweak transparent ##t e' g' >16 - \accent [ ( gs'16 a'16 as'16 ) ] } \context Voice = "Music_Voice" { \voiceTwo e'4 } >> \oneVoice \afterGrace f'4 { fs'16 } } } >>> for component in abjad.select.components(staff): ... contents = abjad.get.contents(component) ... print(f"{repr(component)}:") ... for component_ in contents: ... 
print(f" {repr(component_)}") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }"): Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice'): Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') Note("c'4") Note("d'4") Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") Note("f'4") Note("c'4"): Note("c'4") BeforeGraceContainer("cs'16"): BeforeGraceContainer("cs'16") Note("cs'16") Note("cs'16"): Note("cs'16") Note("d'4"): Note("d'4") Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }"): Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") Voice("e'4", name='Music_Voice') OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16"): OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") Chord("<e' g'>16") Note("gs'16") Note("a'16") Note("as'16") Chord("<e' g'>16"): Chord("<e' g'>16") Note("gs'16"): Note("gs'16") Note("a'16"): Note("a'16") Note("as'16"): Note("as'16") Voice("e'4", name='Music_Voice'): Voice("e'4", name='Music_Voice') Note("e'4") Note("e'4"): Note("e'4") Note("f'4"): Note("f'4") AfterGraceContainer("fs'16"): AfterGraceContainer("fs'16") Note("fs'16") Note("fs'16"): Note("fs'16") .. container:: example REGRESSSION. Works with tremolo containers: >>> staff = abjad.Staff() >>> staff.append(abjad.TremoloContainer(2, "c'16 e'")) >>> staff.append("cs'4") >>> staff.append(abjad.TremoloContainer(2, "d'16 f'")) >>> staff.append("ds'4") >>> abjad.show(staff) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \repeat tremolo 2 { c'16 e'16 } cs'4 \repeat tremolo 2 { d'16 f'16 } ds'4 } >>> for component in abjad.select.components(staff): ... contents = abjad.get.contents(component) ... print(f"{repr(component)}:") ... for component_ in contents: ... 
print(f" {repr(component_)}") Staff("{ c'16 e'16 } cs'4 { d'16 f'16 } ds'4"): Staff("{ c'16 e'16 } cs'4 { d'16 f'16 } ds'4") TremoloContainer("c'16 e'16") Note("cs'4") TremoloContainer("d'16 f'16") Note("ds'4") TremoloContainer("c'16 e'16"): TremoloContainer("c'16 e'16") Note("c'16") Note("e'16") Note("c'16"): Note("c'16") Note("e'16"): Note("e'16") Note("cs'4"): Note("cs'4") TremoloContainer("d'16 f'16"): TremoloContainer("d'16 f'16") Note("d'16") Note("f'16") Note("d'16"): Note("d'16") Note("f'16"): Note("f'16") Note("ds'4"): Note("ds'4") """ if not isinstance(argument, _score.Component): raise Exception("can only get contents of component.") result = [] result.append(argument) result.extend(getattr(argument, "components", [])) return result def descendants(argument) -> list[_score.Component]: r""" Gets descendants. .. container:: example REGRESSION. Works with grace notes (and containers): >>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice") >>> container = abjad.BeforeGraceContainer("cs'16") >>> abjad.attach(container, music_voice[1]) >>> container = abjad.on_beat_grace_container( ... "g'16 gs' a' as'", music_voice[2:3] ... ) >>> abjad.attach(abjad.Articulation(">"), container[0]) >>> container = abjad.AfterGraceContainer("fs'16") >>> abjad.attach(container, music_voice[3]) >>> staff = abjad.Staff([music_voice]) >>> lilypond_file = abjad.LilyPondFile([r'\include "abjad.ily"', staff]) >>> abjad.show(lilypond_file) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \context Voice = "Music_Voice" { c'4 \grace { cs'16 } d'4 << \context Voice = "On_Beat_Grace_Container" { \set fontSize = #-3 \slash \voiceOne < \tweak font-size 0 \tweak transparent ##t e' g' >16 - \accent [ ( gs'16 a'16 as'16 ) ] } \context Voice = "Music_Voice" { \voiceTwo e'4 } >> \oneVoice \afterGrace f'4 { fs'16 } } } >>> for component in abjad.select.components(staff): ... descendants = abjad.get.descendants(component) ... 
print(f"{repr(component)}:") ... for component_ in descendants: ... print(f" {repr(component_)}") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }"): Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') Note("c'4") BeforeGraceContainer("cs'16") Note("cs'16") Note("d'4") Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") Chord("<e' g'>16") Note("gs'16") Note("a'16") Note("as'16") Voice("e'4", name='Music_Voice') Note("e'4") Note("f'4") AfterGraceContainer("fs'16") Note("fs'16") Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice'): Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') Note("c'4") BeforeGraceContainer("cs'16") Note("cs'16") Note("d'4") Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") Chord("<e' g'>16") Note("gs'16") Note("a'16") Note("as'16") Voice("e'4", name='Music_Voice') Note("e'4") Note("f'4") AfterGraceContainer("fs'16") Note("fs'16") Note("c'4"): Note("c'4") BeforeGraceContainer("cs'16"): BeforeGraceContainer("cs'16") Note("cs'16") Note("cs'16"): Note("cs'16") Note("d'4"): Note("d'4") Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }"): Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") Chord("<e' g'>16") Note("gs'16") Note("a'16") Note("as'16") Voice("e'4", name='Music_Voice') Note("e'4") OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16"): OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") Chord("<e' g'>16") Note("gs'16") Note("a'16") Note("as'16") Chord("<e' g'>16"): Chord("<e' g'>16") Note("gs'16"): Note("gs'16") Note("a'16"): Note("a'16") Note("as'16"): Note("as'16") Voice("e'4", name='Music_Voice'): Voice("e'4", name='Music_Voice') Note("e'4") Note("e'4"): Note("e'4") Note("f'4"): Note("f'4") 
AfterGraceContainer("fs'16"): AfterGraceContainer("fs'16") Note("fs'16") Note("fs'16"): Note("fs'16") """ if isinstance(argument, _score.Component): argument = [argument] components = [] for item in argument: generator = _iterate._iterate_descendants(item) for component in generator: if component not in components: components.append(component) return components def duration( argument, in_seconds: bool = False, preprolated: bool = False ) -> _duration.Duration: r""" Gets duration. .. container:: example REGRESSION. Works with grace notes (and containers): >>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice") >>> container = abjad.BeforeGraceContainer("cs'16") >>> abjad.attach(container, music_voice[1]) >>> container = abjad.on_beat_grace_container( ... "g'16 gs' a' as'", music_voice[2:3] ... ) >>> abjad.attach(abjad.Articulation(">"), container[0]) >>> container = abjad.AfterGraceContainer("fs'16") >>> abjad.attach(container, music_voice[3]) >>> staff = abjad.Staff([music_voice]) >>> lilypond_file = abjad.LilyPondFile([r'\include "abjad.ily"', staff]) >>> abjad.show(lilypond_file) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \context Voice = "Music_Voice" { c'4 \grace { cs'16 } d'4 << \context Voice = "On_Beat_Grace_Container" { \set fontSize = #-3 \slash \voiceOne < \tweak font-size 0 \tweak transparent ##t e' g' >16 - \accent [ ( gs'16 a'16 as'16 ) ] } \context Voice = "Music_Voice" { \voiceTwo e'4 } >> \oneVoice \afterGrace f'4 { fs'16 } } } >>> for component in abjad.select.components(staff): ... duration = abjad.get.duration(component) ... 
print(f"{repr(component):30} {repr(duration)}") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Duration(1, 1) Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') Duration(1, 1) Note("c'4") Duration(1, 4) BeforeGraceContainer("cs'16") Duration(1, 16) Note("cs'16") Duration(1, 16) Note("d'4") Duration(1, 4) Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") Duration(1, 4) OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") Duration(1, 4) Chord("<e' g'>16") Duration(1, 16) Note("gs'16") Duration(1, 16) Note("a'16") Duration(1, 16) Note("as'16") Duration(1, 16) Voice("e'4", name='Music_Voice') Duration(1, 4) Note("e'4") Duration(1, 4) Note("f'4") Duration(1, 4) AfterGraceContainer("fs'16") Duration(1, 16) Note("fs'16") Duration(1, 16) .. container:: example REGRESSSION. Works with tremolo containers: >>> staff = abjad.Staff() >>> staff.append(abjad.TremoloContainer(2, "c'16 e'")) >>> staff.append("cs'4") >>> staff.append(abjad.TremoloContainer(2, "d'16 f'")) >>> staff.append("ds'4") >>> abjad.show(staff) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \repeat tremolo 2 { c'16 e'16 } cs'4 \repeat tremolo 2 { d'16 f'16 } ds'4 } >>> for component in abjad.select.components(staff): ... duration = abjad.get.duration(component) ... print(f"{repr(component):30} {repr(duration)}") Staff("{ c'16 e'16 } cs'4 { d'16 f'16 } ds'4") Duration(1, 1) TremoloContainer("c'16 e'16") Duration(1, 4) Note("c'16") Duration(1, 8) Note("e'16") Duration(1, 8) Note("cs'4") Duration(1, 4) TremoloContainer("d'16 f'16") Duration(1, 4) Note("d'16") Duration(1, 8) Note("f'16") Duration(1, 8) Note("ds'4") Duration(1, 4) .. container:: example REGRESSION. Works with selections: >>> staff = abjad.Staff("c'4 d' e' f'") >>> abjad.show(staff) # doctest: +SKIP .. 
docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { c'4 d'4 e'4 f'4 } >>> selection = staff[:3] >>> abjad.get.duration(selection) Duration(3, 4) .. container:: example Gets preprolated duration: >>> staff = abjad.Staff(r"\times 2/3 { c'4 ~ c' } \times 2/3 { d' ~ d' }") >>> abjad.show(staff) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \tweak edge-height #'(0.7 . 0) \times 2/3 { c'4 ~ c'4 } \tweak edge-height #'(0.7 . 0) \times 2/3 { d'4 ~ d'4 } } >>> for lt in abjad.select.logical_ties(staff): ... duration = abjad.get.duration(lt) ... preprolated = abjad.get.duration(lt, preprolated=True) ... lt, duration, preprolated (LogicalTie(items=[Note("c'4"), Note("c'4")]), Duration(1, 3), Duration(1, 2)) (LogicalTie(items=[Note("d'4"), Note("d'4")]), Duration(1, 3), Duration(1, 2)) """ return _inspect._get_duration( argument, in_seconds=in_seconds, preprolated=preprolated ) def effective( argument, prototype: _typings.Prototype, *, attributes: typing.Dict = None, default: typing.Any = None, n: int = 0, unwrap: bool = True, ) -> typing.Any: r""" Gets effective indicator. .. container:: example REGRESSION. Works with grace notes (and containers): >>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice") >>> container = abjad.BeforeGraceContainer("cs'16") >>> abjad.attach(container, music_voice[1]) >>> container = abjad.on_beat_grace_container( ... "g'16 gs' a' as'", music_voice[2:3] ... ) >>> abjad.attach(abjad.Clef("alto"), container[0]) >>> abjad.attach(abjad.Articulation(">"), container[0]) >>> container = abjad.AfterGraceContainer("fs'16") >>> abjad.attach(container, music_voice[3]) >>> staff = abjad.Staff([music_voice]) >>> lilypond_file = abjad.LilyPondFile([r'\include "abjad.ily"', staff]) >>> abjad.show(lilypond_file) # doctest: +SKIP .. 
docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \context Voice = "Music_Voice" { c'4 \grace { cs'16 } d'4 << \context Voice = "On_Beat_Grace_Container" { \set fontSize = #-3 \clef "alto" \slash \voiceOne < \tweak font-size 0 \tweak transparent ##t e' g' >16 - \accent [ ( gs'16 a'16 as'16 ) ] } \context Voice = "Music_Voice" { \voiceTwo e'4 } >> \oneVoice \afterGrace f'4 { fs'16 } } } >>> for component in abjad.select.components(staff): ... clef = abjad.get.effective(component, abjad.Clef) ... print(f"{repr(component):30} {repr(clef)}") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") None Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') None Note("c'4") None BeforeGraceContainer("cs'16") None Note("cs'16") None Note("d'4") None Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") Clef(name='alto', hide=False) OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") Clef(name='alto', hide=False) Chord("<e' g'>16") Clef(name='alto', hide=False) Note("gs'16") Clef(name='alto', hide=False) Note("a'16") Clef(name='alto', hide=False) Note("as'16") Clef(name='alto', hide=False) Voice("e'4", name='Music_Voice') Clef(name='alto', hide=False) Note("e'4") Clef(name='alto', hide=False) Note("f'4") Clef(name='alto', hide=False) AfterGraceContainer("fs'16") Clef(name='alto', hide=False) Note("fs'16") Clef(name='alto', hide=False) .. container:: example REGRESSSION. Works with tremolo containers: >>> staff = abjad.Staff() >>> staff.append(abjad.TremoloContainer(2, "c'16 e'")) >>> staff.append("cs'4") >>> staff.append(abjad.TremoloContainer(2, "d'16 f'")) >>> abjad.attach(abjad.Clef("alto"), staff[-1][0]) >>> staff.append("ds'4") >>> abjad.show(staff) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \repeat tremolo 2 { c'16 e'16 } cs'4 \repeat tremolo 2 { \clef "alto" d'16 f'16 } ds'4 } >>> for component in abjad.select.components(staff): ... 
clef = abjad.get.effective(component, abjad.Clef) ... print(f"{repr(component):30} {repr(clef)}") Staff("{ c'16 e'16 } cs'4 { d'16 f'16 } ds'4") None TremoloContainer("c'16 e'16") None Note("c'16") None Note("e'16") None Note("cs'4") None TremoloContainer("d'16 f'16") Clef(name='alto', hide=False) Note("d'16") Clef(name='alto', hide=False) Note("f'16") Clef(name='alto', hide=False) Note("ds'4") Clef(name='alto', hide=False) .. container:: example Arbitrary objects (like strings) can be contexted: >>> staff = abjad.Staff("c'8 d'8 e'8 f'8") >>> abjad.attach('color', staff[1], context='Staff') >>> abjad.show(staff) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { c'8 d'8 e'8 f'8 } >>> for component in abjad.iterate.components(staff): ... string = abjad.get.effective(component, str) ... print(component, repr(string)) ... Staff("c'8 d'8 e'8 f'8") None c'8 None d'8 'color' e'8 'color' f'8 'color' .. container:: example Scans forwards or backwards when ``n`` is set: >>> staff = abjad.Staff("c'8 d'8 e'8 f'8 g'8") >>> abjad.attach('red', staff[0], context='Staff') >>> abjad.attach('blue', staff[2], context='Staff') >>> abjad.attach('yellow', staff[4], context='Staff') >>> abjad.show(staff) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { c'8 d'8 e'8 f'8 g'8 } >>> for n in (-1, 0, 1): ... color = abjad.get.effective(staff[0], str, n=n) ... print(n, repr(color)) ... -1 None 0 'red' 1 'blue' >>> for n in (-1, 0, 1): ... color = abjad.get.effective(staff[1], str, n=n) ... print(n, repr(color)) ... -1 None 0 'red' 1 'blue' >>> for n in (-1, 0, 1): ... color = abjad.get.effective(staff[2], str, n=n) ... print(n, repr(color)) ... -1 'red' 0 'blue' 1 'yellow' >>> for n in (-1, 0, 1): ... color = abjad.get.effective(staff[3], str, n=n) ... print(n, repr(color)) ... -1 'red' 0 'blue' 1 'yellow' >>> for n in (-1, 0, 1): ... color = abjad.get.effective(staff[4], str, n=n) ... 
print(n, repr(color)) ... -1 'blue' 0 'yellow' 1 None .. container:: example Use synthetic offsets to hide a clef before the start of a staff like this: >>> staff = abjad.Staff("c'4 d'4 e'4 f'4") >>> abjad.attach( ... abjad.Clef("treble", hide=True), ... staff[0], ... synthetic_offset=-1, ... ) >>> abjad.attach(abjad.Clef("alto"), staff[0]) >>> abjad.show(staff) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \clef "alto" c'4 d'4 e'4 f'4 } >>> for leaf in staff: ... clef = abjad.get.effective(leaf, abjad.Clef) ... (leaf, clef) ... (Note("c'4"), Clef(name='alto', hide=False)) (Note("d'4"), Clef(name='alto', hide=False)) (Note("e'4"), Clef(name='alto', hide=False)) (Note("f'4"), Clef(name='alto', hide=False)) >>> abjad.get.effective(staff[0], abjad.Clef) Clef(name='alto', hide=False) >>> abjad.get.effective(staff[0], abjad.Clef, n=-1) Clef(name='treble', hide=True) >>> abjad.get.effective(staff[0], abjad.Clef, n=-2) is None True Note that ``hide=True`` is set on the offset clef to prevent duplicate clef commands in LilyPond output. Note also that the order of attachment (offset versus non-offset) makes no difference. .. container:: example Here's how to hide a clef after the end of a staff: >>> staff = abjad.Staff("c'4 d'4 e'4 f'4") >>> abjad.attach(abjad.Clef("treble"), staff[0]) >>> abjad.attach( ... abjad.Clef("alto", hide=True), ... staff[-1], ... synthetic_offset=1, ... ) >>> abjad.show(staff) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \clef "treble" c'4 d'4 e'4 f'4 } >>> for leaf in staff: ... clef = abjad.get.effective(leaf, abjad.Clef) ... (leaf, clef) ... 
(Note("c'4"), Clef(name='treble', hide=False)) (Note("d'4"), Clef(name='treble', hide=False)) (Note("e'4"), Clef(name='treble', hide=False)) (Note("f'4"), Clef(name='treble', hide=False)) >>> abjad.get.effective(staff[-1], abjad.Clef) Clef(name='treble', hide=False) >>> abjad.get.effective(staff[-1], abjad.Clef, n=1) Clef(name='alto', hide=True) >>> abjad.get.effective(staff[-1], abjad.Clef, n=2) is None True .. container:: example Gets effective time signature: >>> staff = abjad.Staff("c'4 d' e' f'") >>> leaves = abjad.select.leaves(staff) >>> abjad.attach(abjad.TimeSignature((3, 8)), leaves[0]) >>> abjad.show(staff) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \time 3/8 c'4 d'4 e'4 f'4 } >>> prototype = abjad.TimeSignature >>> for component in abjad.iterate.components(staff): ... time_signature = abjad.get.effective(component, prototype) ... print(component, time_signature) ... Staff("c'4 d'4 e'4 f'4") 3/8 c'4 3/8 d'4 3/8 e'4 3/8 f'4 3/8 .. container:: example Test attributes like this: >>> voice = abjad.Voice("c'4 d' e' f'") >>> staff = abjad.Staff([voice]) >>> start_text_span = abjad.StartTextSpan() >>> abjad.attach(start_text_span, voice[0]) >>> stop_text_span = abjad.StopTextSpan() >>> abjad.attach(stop_text_span, voice[2]) >>> abjad.show(staff) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \new Voice { c'4 \startTextSpan d'4 e'4 \stopTextSpan f'4 } } >>> for note in abjad.select.notes(staff): ... note, abjad.get.effective(note, abjad.StartTextSpan) ... 
(Note("c'4"), StartTextSpan(command='\\startTextSpan', concat_hspace_left=0.5, concat_hspace_right=None, direction=None, left_broken_text=None, left_text=None, right_padding=None, right_text=None, style=None, tweaks=None)) (Note("d'4"), StartTextSpan(command='\\startTextSpan', concat_hspace_left=0.5, concat_hspace_right=None, direction=None, left_broken_text=None, left_text=None, right_padding=None, right_text=None, style=None, tweaks=None)) (Note("e'4"), StartTextSpan(command='\\startTextSpan', concat_hspace_left=0.5, concat_hspace_right=None, direction=None, left_broken_text=None, left_text=None, right_padding=None, right_text=None, style=None, tweaks=None)) (Note("f'4"), StartTextSpan(command='\\startTextSpan', concat_hspace_left=0.5, concat_hspace_right=None, direction=None, left_broken_text=None, left_text=None, right_padding=None, right_text=None, style=None, tweaks=None)) >>> for note in abjad.select.notes(staff): ... note, abjad.get.effective(note, abjad.StopTextSpan) ... (Note("c'4"), None) (Note("d'4"), None) (Note("e'4"), StopTextSpan(command='\\stopTextSpan', leak=False)) (Note("f'4"), StopTextSpan(command='\\stopTextSpan', leak=False)) >>> attributes = {'parameter': 'TEXT_SPANNER'} >>> for note in abjad.select.notes(staff): ... indicator = abjad.get.effective( ... note, ... object, ... attributes=attributes, ... ) ... note, indicator ... (Note("c'4"), StartTextSpan(command='\\startTextSpan', concat_hspace_left=0.5, concat_hspace_right=None, direction=None, left_broken_text=None, left_text=None, right_padding=None, right_text=None, style=None, tweaks=None)) (Note("d'4"), StartTextSpan(command='\\startTextSpan', concat_hspace_left=0.5, concat_hspace_right=None, direction=None, left_broken_text=None, left_text=None, right_padding=None, right_text=None, style=None, tweaks=None)) (Note("e'4"), StopTextSpan(command='\\stopTextSpan', leak=False)) (Note("f'4"), StopTextSpan(command='\\stopTextSpan', leak=False)) .. container:: example REGRESSION. 
Matching start-beam and stop-beam indicators work correctly: >>> voice = abjad.Voice("c'8 d'8 e'8 f'8 g'4 a'4") >>> abjad.attach(abjad.StartBeam(direction=None, tweaks=None), voice[0]) >>> abjad.attach(abjad.StopBeam(), voice[3]) >>> abjad.show(voice) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(voice) >>> print(string) \new Voice { c'8 [ d'8 e'8 f'8 ] g'4 a'4 } >>> for leaf in abjad.select.leaves(voice): ... start_beam = abjad.get.effective(leaf, abjad.StartBeam) ... stop_beam = abjad.get.effective(leaf, abjad.StopBeam) ... leaf, start_beam, stop_beam (Note("c'8"), StartBeam(direction=None, tweaks=None), None) (Note("d'8"), StartBeam(direction=None, tweaks=None), None) (Note("e'8"), StartBeam(direction=None, tweaks=None), None) (Note("f'8"), StartBeam(direction=None, tweaks=None), StopBeam(leak=False)) (Note("g'4"), StartBeam(direction=None, tweaks=None), StopBeam(leak=False)) (Note("a'4"), StartBeam(direction=None, tweaks=None), StopBeam(leak=False)) # TODO: make this work. .. container:: example REGRESSION. Bar lines work like this: >>> voice = abjad.Voice("c'2 d'2 e'2 f'2") >>> score = abjad.Score([voice]) >>> abjad.attach(abjad.BarLine("||"), voice[1]) >>> abjad.show(score) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(score) >>> print(string) \new Score << \new Voice { c'2 d'2 \bar "||" e'2 f'2 } >> >>> for leaf in abjad.select.leaves(score): ... bar_line = abjad.get.effective(leaf, abjad.BarLine) ... 
leaf, bar_line (Note("c'2"), None) (Note("d'2"), BarLine(abbreviation='||', format_slot='after')) (Note("e'2"), BarLine(abbreviation='||', format_slot='after')) (Note("f'2"), BarLine(abbreviation='||', format_slot='after')) """ if not isinstance(argument, _score.Component): raise Exception("can only get effective on components.") if attributes is not None: assert isinstance(attributes, dict), repr(attributes) result = _inspect._get_effective( argument, prototype, attributes=attributes, n=n, unwrap=unwrap ) if result is None: result = default return result def effective_staff(argument) -> typing.Optional["_score.Staff"]: r""" Gets effective staff. .. container:: example REGRESSION. Works with grace notes (and containers): >>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice") >>> container = abjad.BeforeGraceContainer("cs'16") >>> abjad.attach(container, music_voice[1]) >>> container = abjad.on_beat_grace_container( ... "g'16 gs' a' as'", music_voice[2:3] ... ) >>> abjad.attach(abjad.Articulation(">"), container[0]) >>> container = abjad.AfterGraceContainer("fs'16") >>> abjad.attach(container, music_voice[3]) >>> staff = abjad.Staff([music_voice]) >>> lilypond_file = abjad.LilyPondFile([r'\include "abjad.ily"', staff]) >>> abjad.show(lilypond_file) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \context Voice = "Music_Voice" { c'4 \grace { cs'16 } d'4 << \context Voice = "On_Beat_Grace_Container" { \set fontSize = #-3 \slash \voiceOne < \tweak font-size 0 \tweak transparent ##t e' g' >16 - \accent [ ( gs'16 a'16 as'16 ) ] } \context Voice = "Music_Voice" { \voiceTwo e'4 } >> \oneVoice \afterGrace f'4 { fs'16 } } } >>> for component in abjad.select.components(staff): ... staff = abjad.get.effective_staff(component) ... 
print(f"{repr(component):30} {repr(staff)}") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Note("c'4") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") BeforeGraceContainer("cs'16") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Note("cs'16") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Note("d'4") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Chord("<e' g'>16") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Note("gs'16") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Note("a'16") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Note("as'16") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Voice("e'4", name='Music_Voice') Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Note("e'4") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Note("f'4") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") AfterGraceContainer("fs'16") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Note("fs'16") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") """ if not isinstance(argument, _score.Component): raise Exception("can only get effective staff on components.") staff_change = _inspect._get_effective(argument, _indicators.StaffChange) if staff_change is not None: for component in argument._get_parentage(): root = component effective_staff = 
root[staff_change.staff] return effective_staff for component in argument._get_parentage(): if isinstance(component, _score.Staff): effective_staff = component break return effective_staff def effective_wrapper( argument, prototype: _typings.Prototype, *, attributes: typing.Dict = None, n: int = 0, ): r""" Gets effective wrapper. .. container:: example REGRESSION. Works with grace notes (and containers): >>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice") >>> container = abjad.BeforeGraceContainer("cs'16") >>> abjad.attach(container, music_voice[1]) >>> container = abjad.on_beat_grace_container( ... "g'16 gs' a' as'", music_voice[2:3] ... ) >>> abjad.attach(abjad.Clef("alto"), container[0]) >>> abjad.attach(abjad.Articulation(">"), container[0]) >>> container = abjad.AfterGraceContainer("fs'16") >>> abjad.attach(container, music_voice[3]) >>> staff = abjad.Staff([music_voice]) >>> lilypond_file = abjad.LilyPondFile([r'\include "abjad.ily"', staff]) >>> abjad.show(lilypond_file) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \context Voice = "Music_Voice" { c'4 \grace { cs'16 } d'4 << \context Voice = "On_Beat_Grace_Container" { \set fontSize = #-3 \clef "alto" \slash \voiceOne < \tweak font-size 0 \tweak transparent ##t e' g' >16 - \accent [ ( gs'16 a'16 as'16 ) ] } \context Voice = "Music_Voice" { \voiceTwo e'4 } >> \oneVoice \afterGrace f'4 { fs'16 } } } >>> for component in abjad.select.components(staff): ... wrapper = abjad.get.effective_wrapper(component, abjad.Clef) ... print(f"{repr(component):}") ... 
print(f" {repr(wrapper)}") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") None Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') None Note("c'4") None BeforeGraceContainer("cs'16") None Note("cs'16") None Note("d'4") None Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") Wrapper(annotation=None, context='Staff', deactivate=False, indicator=Clef(name='alto', hide=False), synthetic_offset=None, tag=Tag()) OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") Wrapper(annotation=None, context='Staff', deactivate=False, indicator=Clef(name='alto', hide=False), synthetic_offset=None, tag=Tag()) Chord("<e' g'>16") Wrapper(annotation=None, context='Staff', deactivate=False, indicator=Clef(name='alto', hide=False), synthetic_offset=None, tag=Tag()) Note("gs'16") Wrapper(annotation=None, context='Staff', deactivate=False, indicator=Clef(name='alto', hide=False), synthetic_offset=None, tag=Tag()) Note("a'16") Wrapper(annotation=None, context='Staff', deactivate=False, indicator=Clef(name='alto', hide=False), synthetic_offset=None, tag=Tag()) Note("as'16") Wrapper(annotation=None, context='Staff', deactivate=False, indicator=Clef(name='alto', hide=False), synthetic_offset=None, tag=Tag()) Voice("e'4", name='Music_Voice') Wrapper(annotation=None, context='Staff', deactivate=False, indicator=Clef(name='alto', hide=False), synthetic_offset=None, tag=Tag()) Note("e'4") Wrapper(annotation=None, context='Staff', deactivate=False, indicator=Clef(name='alto', hide=False), synthetic_offset=None, tag=Tag()) Note("f'4") Wrapper(annotation=None, context='Staff', deactivate=False, indicator=Clef(name='alto', hide=False), synthetic_offset=None, tag=Tag()) AfterGraceContainer("fs'16") Wrapper(annotation=None, context='Staff', deactivate=False, indicator=Clef(name='alto', hide=False), synthetic_offset=None, tag=Tag()) Note("fs'16") Wrapper(annotation=None, context='Staff', deactivate=False, indicator=Clef(name='alto', hide=False), 
synthetic_offset=None, tag=Tag()) """ if attributes is not None: assert isinstance(attributes, dict), repr(attributes) return effective(argument, prototype, attributes=attributes, n=n, unwrap=False) def grace(argument) -> bool: r""" Is true when ``argument`` is grace music. Grace music defined equal to grace container, after-grace container and contents of those containers. .. container:: example REGRESSION. Works with grace notes (and containers): >>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice") >>> container = abjad.BeforeGraceContainer("cs'16") >>> abjad.attach(container, music_voice[1]) >>> container = abjad.on_beat_grace_container( ... "g'16 gs' a' as'", music_voice[2:3] ... ) >>> abjad.attach(abjad.Articulation(">"), container[0]) >>> container = abjad.AfterGraceContainer("fs'16") >>> abjad.attach(container, music_voice[3]) >>> staff = abjad.Staff([music_voice]) >>> lilypond_file = abjad.LilyPondFile([r'\include "abjad.ily"', staff]) >>> abjad.show(lilypond_file) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \context Voice = "Music_Voice" { c'4 \grace { cs'16 } d'4 << \context Voice = "On_Beat_Grace_Container" { \set fontSize = #-3 \slash \voiceOne < \tweak font-size 0 \tweak transparent ##t e' g' >16 - \accent [ ( gs'16 a'16 as'16 ) ] } \context Voice = "Music_Voice" { \voiceTwo e'4 } >> \oneVoice \afterGrace f'4 { fs'16 } } } >>> for component in abjad.select.components(staff): ... result = abjad.get.grace(component) ... 
print(f"{repr(component):30} {repr(result)}") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") False Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') False Note("c'4") False BeforeGraceContainer("cs'16") True Note("cs'16") True Note("d'4") False Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") False OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") True Chord("<e' g'>16") True Note("gs'16") True Note("a'16") True Note("as'16") True Voice("e'4", name='Music_Voice') False Note("e'4") False Note("f'4") False AfterGraceContainer("fs'16") True Note("fs'16") True """ return _inspect._get_grace_container(argument) def has_effective_indicator( argument, prototype: _typings.Prototype = None, *, attributes: typing.Dict = None, ) -> bool: r""" Is true when ``argument`` has effective indicator. .. container:: example REGRESSION. Works with grace notes (and containers): >>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice") >>> container = abjad.BeforeGraceContainer("cs'16") >>> abjad.attach(container, music_voice[1]) >>> container = abjad.on_beat_grace_container( ... "g'16 gs' a' as'", music_voice[2:3] ... ) >>> abjad.attach(abjad.Clef("alto"), container[0]) >>> abjad.attach(abjad.Articulation(">"), container[0]) >>> container = abjad.AfterGraceContainer("fs'16") >>> abjad.attach(container, music_voice[3]) >>> staff = abjad.Staff([music_voice]) >>> lilypond_file = abjad.LilyPondFile([r'\include "abjad.ily"', staff]) >>> abjad.show(lilypond_file) # doctest: +SKIP .. 
docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \context Voice = "Music_Voice" { c'4 \grace { cs'16 } d'4 << \context Voice = "On_Beat_Grace_Container" { \set fontSize = #-3 \clef "alto" \slash \voiceOne < \tweak font-size 0 \tweak transparent ##t e' g' >16 - \accent [ ( gs'16 a'16 as'16 ) ] } \context Voice = "Music_Voice" { \voiceTwo e'4 } >> \oneVoice \afterGrace f'4 { fs'16 } } } >>> for component in abjad.select.components(staff): ... function = abjad.get.has_effective_indicator ... result = function(component, abjad.Clef) ... print(f"{repr(component):30} {repr(result)}") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") False Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') False Note("c'4") False BeforeGraceContainer("cs'16") False Note("cs'16") False Note("d'4") False Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") True OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") True Chord("<e' g'>16") True Note("gs'16") True Note("a'16") True Note("as'16") True Voice("e'4", name='Music_Voice') True Note("e'4") True Note("f'4") True AfterGraceContainer("fs'16") True Note("fs'16") True .. container:: example REGRESSSION. Works with tremolo containers: >>> staff = abjad.Staff() >>> staff.append(abjad.TremoloContainer(2, "c'16 e'")) >>> staff.append("cs'4") >>> staff.append(abjad.TremoloContainer(2, "d'16 f'")) >>> abjad.attach(abjad.Clef("alto"), staff[-1][0]) >>> staff.append("ds'4") >>> abjad.show(staff) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \repeat tremolo 2 { c'16 e'16 } cs'4 \repeat tremolo 2 { \clef "alto" d'16 f'16 } ds'4 } >>> for component in abjad.select.components(staff): ... function = abjad.get.has_effective_indicator ... result = function(component, abjad.Clef) ... 
print(f"{repr(component):30} {repr(result)}") Staff("{ c'16 e'16 } cs'4 { d'16 f'16 } ds'4") False TremoloContainer("c'16 e'16") False Note("c'16") False Note("e'16") False Note("cs'4") False TremoloContainer("d'16 f'16") True Note("d'16") True Note("f'16") True Note("ds'4") True """ if not isinstance(argument, _score.Component): raise Exception("can only get effective indicator on component.") if attributes is not None: assert isinstance(attributes, dict), repr(attributes) indicator = _inspect._get_effective(argument, prototype, attributes=attributes) return indicator is not None def has_indicator( argument, prototype: typing.Union[str, _typings.Prototype] = None, *, attributes: typing.Dict = None, ) -> bool: r""" Is true when ``argument`` has one or more indicators. .. container:: example REGRESSION. Works with grace notes (and containers): >>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice") >>> container = abjad.BeforeGraceContainer("cs'16") >>> abjad.attach(container, music_voice[1]) >>> container = abjad.on_beat_grace_container( ... "g'16 gs' a' as'", music_voice[2:3] ... ) >>> abjad.attach(abjad.Clef("alto"), container[0]) >>> abjad.attach(abjad.Articulation(">"), container[0]) >>> container = abjad.AfterGraceContainer("fs'16") >>> abjad.attach(container, music_voice[3]) >>> staff = abjad.Staff([music_voice]) >>> lilypond_file = abjad.LilyPondFile([r'\include "abjad.ily"', staff]) >>> abjad.show(lilypond_file) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \context Voice = "Music_Voice" { c'4 \grace { cs'16 } d'4 << \context Voice = "On_Beat_Grace_Container" { \set fontSize = #-3 \clef "alto" \slash \voiceOne < \tweak font-size 0 \tweak transparent ##t e' g' >16 - \accent [ ( gs'16 a'16 as'16 ) ] } \context Voice = "Music_Voice" { \voiceTwo e'4 } >> \oneVoice \afterGrace f'4 { fs'16 } } } >>> for component in abjad.select.components(staff): ... 
result = abjad.get.has_indicator(component, abjad.Clef) ... print(f"{repr(component):30} {repr(result)}") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") False Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') False Note("c'4") False BeforeGraceContainer("cs'16") False Note("cs'16") False Note("d'4") False Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") False OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") False Chord("<e' g'>16") True Note("gs'16") False Note("a'16") False Note("as'16") False Voice("e'4", name='Music_Voice') False Note("e'4") False Note("f'4") False AfterGraceContainer("fs'16") False Note("fs'16") False .. container:: example REGRESSSION. Works with tremolo containers: >>> staff = abjad.Staff() >>> staff.append(abjad.TremoloContainer(2, "c'16 e'")) >>> staff.append("cs'4") >>> staff.append(abjad.TremoloContainer(2, "d'16 f'")) >>> abjad.attach(abjad.Clef("alto"), staff[-1][0]) >>> staff.append("ds'4") >>> abjad.show(staff) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \repeat tremolo 2 { c'16 e'16 } cs'4 \repeat tremolo 2 { \clef "alto" d'16 f'16 } ds'4 } >>> for component in abjad.select.components(staff): ... result = abjad.get.has_indicator(component, abjad.Clef) ... print(f"{repr(component):30} {repr(result)}") Staff("{ c'16 e'16 } cs'4 { d'16 f'16 } ds'4") False TremoloContainer("c'16 e'16") False Note("c'16") False Note("e'16") False Note("cs'4") False TremoloContainer("d'16 f'16") False Note("d'16") True Note("f'16") False Note("ds'4") False .. container:: example Set ``attributes`` dictionary to test indicator attributes: >>> voice = abjad.Voice("c'4 c'4 c'4 c'4") >>> abjad.attach(abjad.Clef('treble'), voice[0]) >>> abjad.attach(abjad.Clef('alto'), voice[2]) >>> abjad.show(voice) # doctest: +SKIP .. 
docs:: >>> string = abjad.lilypond(voice) >>> print(string) \new Voice { \clef "treble" c'4 c'4 \clef "alto" c'4 c'4 } >>> attributes = {'name': 'alto'} >>> abjad.get.has_indicator(voice[0], abjad.Clef) True >>> abjad.get.has_indicator( ... voice[0], ... abjad.Clef, ... attributes=attributes, ... ) False >>> abjad.get.has_indicator(voice[2], abjad.Clef) True >>> abjad.get.has_indicator( ... voice[2], ... abjad.Clef, ... attributes=attributes, ... ) True """ if isinstance(prototype, _tag.Tag): raise Exception("do not attach tags; use tag=None keyword.") if not isinstance(argument, _score.Component): raise Exception("can only get indicator on component.") if attributes is not None: assert isinstance(attributes, dict), repr(attributes) return argument._has_indicator(prototype=prototype, attributes=attributes) def indicator( argument, prototype: _typings.Prototype = None, *, default: typing.Any = None, unwrap: bool = True, ) -> typing.Any: r""" Gets indicator. .. container:: example REGRESSION. Works with grace notes (and containers): >>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice") >>> container = abjad.BeforeGraceContainer("cs'16") >>> abjad.attach(container, music_voice[1]) >>> container = abjad.on_beat_grace_container( ... "g'16 gs' a' as'", music_voice[2:3] ... ) >>> abjad.attach(abjad.Clef("alto"), container[0]) >>> abjad.attach(abjad.Articulation(">"), container[0]) >>> container = abjad.AfterGraceContainer("fs'16") >>> abjad.attach(container, music_voice[3]) >>> staff = abjad.Staff([music_voice]) >>> lilypond_file = abjad.LilyPondFile([r'\include "abjad.ily"', staff]) >>> abjad.show(lilypond_file) # doctest: +SKIP .. 
docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \context Voice = "Music_Voice" { c'4 \grace { cs'16 } d'4 << \context Voice = "On_Beat_Grace_Container" { \set fontSize = #-3 \clef "alto" \slash \voiceOne < \tweak font-size 0 \tweak transparent ##t e' g' >16 - \accent [ ( gs'16 a'16 as'16 ) ] } \context Voice = "Music_Voice" { \voiceTwo e'4 } >> \oneVoice \afterGrace f'4 { fs'16 } } } >>> for component in abjad.select.components(staff): ... result = abjad.get.indicator(component, abjad.Clef) ... print(f"{repr(component):30} {repr(result)}") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") None Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') None Note("c'4") None BeforeGraceContainer("cs'16") None Note("cs'16") None Note("d'4") None Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") None OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") None Chord("<e' g'>16") Clef(name='alto', hide=False) Note("gs'16") None Note("a'16") None Note("as'16") None Voice("e'4", name='Music_Voice') None Note("e'4") None Note("f'4") None AfterGraceContainer("fs'16") None Note("fs'16") None .. container:: example REGRESSSION. Works with tremolo containers: >>> staff = abjad.Staff() >>> staff.append(abjad.TremoloContainer(2, "c'16 e'")) >>> staff.append("cs'4") >>> staff.append(abjad.TremoloContainer(2, "d'16 f'")) >>> abjad.attach(abjad.Clef("alto"), staff[-1][0]) >>> staff.append("ds'4") >>> abjad.show(staff) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \repeat tremolo 2 { c'16 e'16 } cs'4 \repeat tremolo 2 { \clef "alto" d'16 f'16 } ds'4 } >>> for component in abjad.select.components(staff): ... result = abjad.get.indicator(component, abjad.Clef) ... 
print(f"{repr(component):30} {repr(result)}") Staff("{ c'16 e'16 } cs'4 { d'16 f'16 } ds'4") None TremoloContainer("c'16 e'16") None Note("c'16") None Note("e'16") None Note("cs'4") None TremoloContainer("d'16 f'16") None Note("d'16") Clef(name='alto', hide=False) Note("f'16") None Note("ds'4") None Raises exception when more than one indicator of ``prototype`` attach to ``argument``. Returns default when no indicator of ``prototype`` attaches to ``argument``. """ return _inspect._get_indicator(argument, prototype, default=default, unwrap=unwrap) def indicators( argument, prototype: _typings.Prototype = None, *, attributes: typing.Dict = None, unwrap: bool = True, ) -> typing.List: r""" Get indicators. .. container:: example REGRESSION. Works with grace notes (and containers): >>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice") >>> container = abjad.BeforeGraceContainer("cs'16") >>> abjad.attach(container, music_voice[1]) >>> container = abjad.on_beat_grace_container( ... "g'16 gs' a' as'", music_voice[2:3] ... ) >>> abjad.attach(abjad.Clef("alto"), container[0]) >>> abjad.attach(abjad.Articulation(">"), container[0]) >>> container = abjad.AfterGraceContainer("fs'16") >>> abjad.attach(container, music_voice[3]) >>> staff = abjad.Staff([music_voice]) >>> for note in abjad.select.notes(staff): ... abjad.attach(abjad.Articulation("."), note) >>> lilypond_file = abjad.LilyPondFile([r'\include "abjad.ily"', staff]) >>> abjad.show(lilypond_file) # doctest: +SKIP .. 
docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \context Voice = "Music_Voice" { c'4 - \staccato \grace { cs'16 - \staccato } d'4 - \staccato << \context Voice = "On_Beat_Grace_Container" { \set fontSize = #-3 \clef "alto" \slash \voiceOne < \tweak font-size 0 \tweak transparent ##t e' g' >16 - \accent [ ( gs'16 - \staccato a'16 - \staccato as'16 - \staccato ) ] } \context Voice = "Music_Voice" { \voiceTwo e'4 - \staccato } >> \oneVoice \afterGrace f'4 - \staccato { fs'16 - \staccato } } } >>> for component in abjad.select.components(staff): ... result = abjad.get.indicators(component) ... print(f"{repr(component):30} {repr(result)}") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") [] Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') [] Note("c'4") [Articulation(name='.', direction=None, tweaks=None)] BeforeGraceContainer("cs'16") [] Note("cs'16") [Articulation(name='.', direction=None, tweaks=None)] Note("d'4") [Articulation(name='.', direction=None, tweaks=None)] Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") [] OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") [LilyPondLiteral(argument='\\set fontSize = #-3', format_slot='opening', directed=False, tweaks=None)] Chord("<e' g'>16") [StartBeam(direction=None, tweaks=None), LilyPondLiteral(argument='\\slash', format_slot='opening', directed=False, tweaks=None), StartSlur(direction=None, tweaks=None), LilyPondLiteral(argument='\\voiceOne', format_slot='opening', directed=False, tweaks=None), Clef(name='alto', hide=False), Articulation(name='>', direction=None, tweaks=None)] Note("gs'16") [Articulation(name='.', direction=None, tweaks=None)] Note("a'16") [Articulation(name='.', direction=None, tweaks=None)] Note("as'16") [StopBeam(leak=False), StopSlur(leak=False), Articulation(name='.', direction=None, tweaks=None)] Voice("e'4", name='Music_Voice') [] Note("e'4") [LilyPondLiteral(argument='\\voiceTwo', format_slot='opening', 
directed=False, tweaks=None), Articulation(name='.', direction=None, tweaks=None)] Note("f'4") [LilyPondLiteral(argument='\\oneVoice', format_slot='absolute_before', directed=False, tweaks=None), Articulation(name='.', direction=None, tweaks=None)] AfterGraceContainer("fs'16") [] Note("fs'16") [Articulation(name='.', direction=None, tweaks=None)] .. container:: example REGRESSSION. Works with tremolo containers: >>> staff = abjad.Staff() >>> staff.append(abjad.TremoloContainer(2, "c'16 e'")) >>> staff.append("cs'4") >>> staff.append(abjad.TremoloContainer(2, "d'16 f'")) >>> abjad.attach(abjad.Clef("alto"), staff[-1][0]) >>> staff.append("ds'4") >>> for note in abjad.select.notes(staff): ... abjad.attach(abjad.Articulation("."), note) >>> abjad.show(staff) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \repeat tremolo 2 { c'16 - \staccato e'16 - \staccato } cs'4 - \staccato \repeat tremolo 2 { \clef "alto" d'16 - \staccato f'16 - \staccato } ds'4 - \staccato } >>> for component in abjad.select.components(staff): ... result = abjad.get.indicators(component) ... print(f"{repr(component):30} {repr(result)}") Staff("{ c'16 e'16 } cs'4 { d'16 f'16 } ds'4") [] TremoloContainer("c'16 e'16") [] Note("c'16") [Articulation(name='.', direction=None, tweaks=None)] Note("e'16") [Articulation(name='.', direction=None, tweaks=None)] Note("cs'4") [Articulation(name='.', direction=None, tweaks=None)] TremoloContainer("d'16 f'16") [] Note("d'16") [Clef(name='alto', hide=False), Articulation(name='.', direction=None, tweaks=None)] Note("f'16") [Articulation(name='.', direction=None, tweaks=None)] Note("ds'4") [Articulation(name='.', direction=None, tweaks=None)] """ # TODO: extend to any non-none argument if not isinstance(argument, _score.Component): message = "can only get indicators on component" message += f" (not {argument!r})." 
raise Exception(message) if attributes is not None: assert isinstance(attributes, dict), repr(attributes) result = argument._get_indicators( prototype=prototype, attributes=attributes, unwrap=unwrap ) return list(result) def leaf(argument, n: int = 0) -> typing.Optional["_score.Leaf"]: r""" Gets leaf ``n``. ``n`` constrained to -1, 0, 1 for previous, current, next leaf. .. container:: example >>> staff = abjad.Staff() >>> staff.append(abjad.Voice("c'8 d'8 e'8 f'8")) >>> staff.append(abjad.Voice("g'8 a'8 b'8 c''8")) >>> abjad.show(staff) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \new Voice { c'8 d'8 e'8 f'8 } \new Voice { g'8 a'8 b'8 c''8 } } .. container:: example Gets leaf **FROM** ``argument`` when ``argument`` is a leaf: >>> leaf = staff[0][1] >>> abjad.get.leaf(leaf, -1) Note("c'8") >>> abjad.get.leaf(leaf, 0) Note("d'8") >>> abjad.get.leaf(leaf, 1) Note("e'8") .. container:: example Gets leaf **IN** ``argument`` when ``argument`` is a container: >>> voice = staff[0] >>> abjad.get.leaf(voice, -1) Note("f'8") >>> abjad.get.leaf(voice, 0) Note("c'8") >>> abjad.get.leaf(voice, 1) Note("d'8") .. container:: example REGRESSION. Works with grace notes (and containers): >>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice") >>> container = abjad.BeforeGraceContainer("cs'16") >>> abjad.attach(container, music_voice[1]) >>> container = abjad.on_beat_grace_container( ... "g'16 gs' a' as'", music_voice[2:3] ... ) >>> abjad.attach(abjad.Clef("alto"), container[0]) >>> abjad.attach(abjad.Articulation(">"), container[0]) >>> container = abjad.AfterGraceContainer("fs'16") >>> abjad.attach(container, music_voice[3]) >>> staff = abjad.Staff([music_voice]) >>> lilypond_file = abjad.LilyPondFile([r'\include "abjad.ily"', staff]) >>> abjad.show(lilypond_file) # doctest: +SKIP .. 
docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \context Voice = "Music_Voice" { c'4 \grace { cs'16 } d'4 << \context Voice = "On_Beat_Grace_Container" { \set fontSize = #-3 \clef "alto" \slash \voiceOne < \tweak font-size 0 \tweak transparent ##t e' g' >16 - \accent [ ( gs'16 a'16 as'16 ) ] } \context Voice = "Music_Voice" { \voiceTwo e'4 } >> \oneVoice \afterGrace f'4 { fs'16 } } } >>> for current_leaf in abjad.select.leaves(staff): ... previous_leaf = abjad.get.leaf(current_leaf, -1) ... next_leaf = abjad.get.leaf(current_leaf, 1) ... print(f"previous leaf: {repr(previous_leaf)}") ... print(f"current leaf: {repr(current_leaf)}") ... print(f"next leaf: {repr(next_leaf)}") ... print("---") previous leaf: None current leaf: Note("c'4") next leaf: Note("cs'16") --- previous leaf: Note("c'4") current leaf: Note("cs'16") next leaf: Note("d'4") --- previous leaf: Note("cs'16") current leaf: Note("d'4") next leaf: Chord("<e' g'>16") --- previous leaf: Note("d'4") current leaf: Chord("<e' g'>16") next leaf: Note("gs'16") --- previous leaf: Chord("<e' g'>16") current leaf: Note("gs'16") next leaf: Note("a'16") --- previous leaf: Note("gs'16") current leaf: Note("a'16") next leaf: Note("as'16") --- previous leaf: Note("a'16") current leaf: Note("as'16") next leaf: Note("e'4") --- previous leaf: Note("as'16") current leaf: Note("e'4") next leaf: Note("f'4") --- previous leaf: Note("e'4") current leaf: Note("f'4") next leaf: Note("fs'16") --- previous leaf: Note("f'4") current leaf: Note("fs'16") next leaf: None --- .. container:: example REGRESSSION. Works with tremolo containers: >>> staff = abjad.Staff() >>> staff.append(abjad.TremoloContainer(2, "c'16 e'")) >>> staff.append("cs'4") >>> staff.append(abjad.TremoloContainer(2, "d'16 f'")) >>> staff.append("ds'4") >>> abjad.show(staff) # doctest: +SKIP .. 
docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \repeat tremolo 2 { c'16 e'16 } cs'4 \repeat tremolo 2 { d'16 f'16 } ds'4 } >>> for current_leaf in abjad.select.leaves(staff): ... previous_leaf = abjad.get.leaf(current_leaf, -1) ... next_leaf = abjad.get.leaf(current_leaf, 1) ... print(f"previous leaf: {repr(previous_leaf)}") ... print(f"current leaf: {repr(current_leaf)}") ... print(f"next leaf: {repr(next_leaf)}") ... print("---") previous leaf: None current leaf: Note("c'16") next leaf: Note("e'16") --- previous leaf: Note("c'16") current leaf: Note("e'16") next leaf: Note("cs'4") --- previous leaf: Note("e'16") current leaf: Note("cs'4") next leaf: Note("d'16") --- previous leaf: Note("cs'4") current leaf: Note("d'16") next leaf: Note("f'16") --- previous leaf: Note("d'16") current leaf: Note("f'16") next leaf: Note("ds'4") --- previous leaf: Note("f'16") current leaf: Note("ds'4") next leaf: None --- """ return _iterate._get_leaf(argument, n=n) def lineage(argument) -> "Lineage": r""" Gets lineage. .. container:: example REGRESSION. Works with grace notes (and containers): >>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice") >>> container = abjad.BeforeGraceContainer("cs'16") >>> abjad.attach(container, music_voice[1]) >>> container = abjad.on_beat_grace_container( ... "g'16 gs' a' as'", music_voice[2:3] ... ) >>> abjad.attach(abjad.Articulation(">"), container[0]) >>> container = abjad.AfterGraceContainer("fs'16") >>> abjad.attach(container, music_voice[3]) >>> staff = abjad.Staff([music_voice]) >>> lilypond_file = abjad.LilyPondFile([r'\include "abjad.ily"', staff]) >>> abjad.show(lilypond_file) # doctest: +SKIP .. 
docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \context Voice = "Music_Voice" { c'4 \grace { cs'16 } d'4 << \context Voice = "On_Beat_Grace_Container" { \set fontSize = #-3 \slash \voiceOne < \tweak font-size 0 \tweak transparent ##t e' g' >16 - \accent [ ( gs'16 a'16 as'16 ) ] } \context Voice = "Music_Voice" { \voiceTwo e'4 } >> \oneVoice \afterGrace f'4 { fs'16 } } } >>> for component in abjad.select.components(staff): ... lineage = abjad.get.lineage(component) ... print(f"{repr(component)}:") ... for component_ in lineage: ... print(f" {repr(component_)}") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }"): Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') Note("c'4") BeforeGraceContainer("cs'16") Note("cs'16") Note("d'4") Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") Chord("<e' g'>16") Note("gs'16") Note("a'16") Note("as'16") Voice("e'4", name='Music_Voice') Note("e'4") Note("f'4") AfterGraceContainer("fs'16") Note("fs'16") Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice'): Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') Note("c'4") BeforeGraceContainer("cs'16") Note("cs'16") Note("d'4") Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") Chord("<e' g'>16") Note("gs'16") Note("a'16") Note("as'16") Voice("e'4", name='Music_Voice') Note("e'4") Note("f'4") AfterGraceContainer("fs'16") Note("fs'16") Note("c'4"): Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') Note("c'4") BeforeGraceContainer("cs'16"): Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Voice("c'4 
d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') BeforeGraceContainer("cs'16") Note("cs'16") Note("cs'16"): Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') BeforeGraceContainer("cs'16") Note("cs'16") Note("d'4"): Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') Note("d'4") Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }"): Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") Chord("<e' g'>16") Note("gs'16") Note("a'16") Note("as'16") Voice("e'4", name='Music_Voice') Note("e'4") OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16"): Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") Chord("<e' g'>16") Note("gs'16") Note("a'16") Note("as'16") Chord("<e' g'>16"): Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") Chord("<e' g'>16") Note("gs'16"): Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") Note("gs'16") Note("a'16"): Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 
} } f'4", name='Music_Voice') Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") Note("a'16") Note("as'16"): Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") Note("as'16") Voice("e'4", name='Music_Voice'): Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") Voice("e'4", name='Music_Voice') Note("e'4") Note("e'4"): Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") Voice("e'4", name='Music_Voice') Note("e'4") Note("f'4"): Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') Note("f'4") AfterGraceContainer("fs'16"): Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') AfterGraceContainer("fs'16") Note("fs'16") Note("fs'16"): Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') AfterGraceContainer("fs'16") Note("fs'16") """ if not isinstance(argument, _score.Component): raise Exception("can only get lineage on component.") return Lineage(argument) def logical_tie(argument) -> "_select.LogicalTie": r""" Gets logical tie. .. container:: example REGRESSION. 
Works with grace notes (and containers): >>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice") >>> container = abjad.BeforeGraceContainer("cs'16") >>> abjad.attach(container, music_voice[1]) >>> container = abjad.on_beat_grace_container( ... "g'16 gs' a' as'", music_voice[2:3] ... ) >>> abjad.attach(abjad.Articulation(">"), container[0]) >>> container = abjad.AfterGraceContainer("fs'16") >>> abjad.attach(container, music_voice[3]) >>> staff = abjad.Staff([music_voice]) >>> lilypond_file = abjad.LilyPondFile([r'\include "abjad.ily"', staff]) >>> abjad.show(lilypond_file) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \context Voice = "Music_Voice" { c'4 \grace { cs'16 } d'4 << \context Voice = "On_Beat_Grace_Container" { \set fontSize = #-3 \slash \voiceOne < \tweak font-size 0 \tweak transparent ##t e' g' >16 - \accent [ ( gs'16 a'16 as'16 ) ] } \context Voice = "Music_Voice" { \voiceTwo e'4 } >> \oneVoice \afterGrace f'4 { fs'16 } } } >>> for leaf in abjad.select.leaves(staff): ... lt = abjad.get.logical_tie(leaf) ... print(f"{repr(leaf):30} {repr(lt)}") Note("c'4") LogicalTie(items=[Note("c'4")]) Note("cs'16") LogicalTie(items=[Note("cs'16")]) Note("d'4") LogicalTie(items=[Note("d'4")]) Chord("<e' g'>16") LogicalTie(items=[Chord("<e' g'>16")]) Note("gs'16") LogicalTie(items=[Note("gs'16")]) Note("a'16") LogicalTie(items=[Note("a'16")]) Note("as'16") LogicalTie(items=[Note("as'16")]) Note("e'4") LogicalTie(items=[Note("e'4")]) Note("f'4") LogicalTie(items=[Note("f'4")]) Note("fs'16") LogicalTie(items=[Note("fs'16")]) .. container:: example REGRESSSION. Works with tremolo containers: >>> staff = abjad.Staff() >>> staff.append(abjad.TremoloContainer(2, "c'16 e'")) >>> staff.append("cs'4") >>> staff.append(abjad.TremoloContainer(2, "d'16 f'")) >>> staff.append("ds'4") >>> abjad.show(staff) # doctest: +SKIP .. 
docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \repeat tremolo 2 { c'16 e'16 } cs'4 \repeat tremolo 2 { d'16 f'16 } ds'4 } >>> for leaf in abjad.select.leaves(staff): ... lt = abjad.get.logical_tie(leaf) ... print(f"{repr(leaf):30} {repr(lt)}") Note("c'16") LogicalTie(items=[Note("c'16")]) Note("e'16") LogicalTie(items=[Note("e'16")]) Note("cs'4") LogicalTie(items=[Note("cs'4")]) Note("d'16") LogicalTie(items=[Note("d'16")]) Note("f'16") LogicalTie(items=[Note("f'16")]) Note("ds'4") LogicalTie(items=[Note("ds'4")]) .. container:: example REGRESSSION. Omits spurious rest when user ties from note to rest: >>> staff = abjad.Staff("c'4 r4") >>> # user error; shouldn't tie note to rest: >>> abjad.attach(abjad.Tie(), staff[0]) >>> abjad.show(staff) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { c'4 ~ r4 } >>> abjad.get.logical_tie(staff[0]) LogicalTie(items=[Note("c'4")]) >>> abjad.get.logical_tie(staff[1]) LogicalTie(items=[Rest('r4')]) Omits spurious rest when user repeat-ties into rest from note: >>> staff = abjad.Staff("r4 c'4") >>> # user error; shouldn't tie note to rest: >>> abjad.attach(abjad.RepeatTie(), staff[1]) >>> abjad.show(staff) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { r4 c'4 \repeatTie } >>> abjad.get.logical_tie(staff[0]) LogicalTie(items=[Rest('r4')]) >>> abjad.get.logical_tie(staff[1]) LogicalTie(items=[Note("c'4")]) """ if not isinstance(argument, _score.Leaf): raise Exception("can only get logical tie on leaf.") leaves = _iterate._get_logical_tie_leaves(argument) return _select.LogicalTie(leaves) def markup( argument, *, direction: _enums.VerticalAlignment = None ) -> typing.List[_markups.Markup]: """ Gets markup. 
""" # TODO: extend to any non-none argument if not isinstance(argument, _score.Component): raise Exception("can only get markup on component.") result = argument._get_markup(direction=direction) return list(result) def measure_number(argument) -> int: r""" Gets measure number. .. container:: example REGRESSION. Works with grace notes (and containers): >>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice") >>> container = abjad.BeforeGraceContainer("cs'16") >>> abjad.attach(container, music_voice[1]) >>> container = abjad.on_beat_grace_container( ... "g'16 gs' a' as'", music_voice[2:3] ... ) >>> abjad.attach(abjad.Articulation(">"), container[0]) >>> container = abjad.AfterGraceContainer("fs'16") >>> abjad.attach(container, music_voice[3]) >>> staff = abjad.Staff([music_voice]) >>> lilypond_file = abjad.LilyPondFile([r'\include "abjad.ily"', staff]) >>> abjad.show(lilypond_file) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \context Voice = "Music_Voice" { c'4 \grace { cs'16 } d'4 << \context Voice = "On_Beat_Grace_Container" { \set fontSize = #-3 \slash \voiceOne < \tweak font-size 0 \tweak transparent ##t e' g' >16 - \accent [ ( gs'16 a'16 as'16 ) ] } \context Voice = "Music_Voice" { \voiceTwo e'4 } >> \oneVoice \afterGrace f'4 { fs'16 } } } >>> for component in abjad.select.components(staff): ... measure_number = abjad.get.measure_number(component) ... 
print(f"{repr(component):30} {measure_number}") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") 1 Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') 1 Note("c'4") 1 BeforeGraceContainer("cs'16") 1 Note("cs'16") 1 Note("d'4") 1 Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") 1 OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") 1 Chord("<e' g'>16") 1 Note("gs'16") 1 Note("a'16") 1 Note("as'16") 1 Voice("e'4", name='Music_Voice') 1 Note("e'4") 1 Note("f'4") 1 AfterGraceContainer("fs'16") 1 Note("fs'16") 1 .. container:: example REGRESSION. Measure number of score-initial grace notes is set equal to 0: >>> voice = abjad.Voice("c'4 d' e' f'") >>> container = abjad.BeforeGraceContainer("b16") >>> abjad.attach(container, voice[0]) >>> abjad.show(voice) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(voice) >>> print(string) \new Voice { \grace { b16 } c'4 d'4 e'4 f'4 } >>> for component in abjad.select.components(voice): ... measure_number = abjad.get.measure_number(component) ... print(f"{repr(component):30} {measure_number}") Voice("c'4 d'4 e'4 f'4") 1 BeforeGraceContainer('b16') 0 Note('b16') 0 Note("c'4") 1 Note("d'4") 1 Note("e'4") 1 Note("f'4") 1 .. container:: example REGRESSSION. Works with tremolo containers: >>> staff = abjad.Staff() >>> staff.append(abjad.TremoloContainer(2, "c'16 e'")) >>> staff.append("cs'4") >>> staff.append(abjad.TremoloContainer(2, "d'16 f'")) >>> staff.append("ds'4") >>> abjad.show(staff) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \repeat tremolo 2 { c'16 e'16 } cs'4 \repeat tremolo 2 { d'16 f'16 } ds'4 } >>> for component in abjad.select.components(staff): ... measure_number = abjad.get.measure_number(component) ... 
print(f"{repr(component):30} {measure_number}") Staff("{ c'16 e'16 } cs'4 { d'16 f'16 } ds'4") 1 TremoloContainer("c'16 e'16") 1 Note("c'16") 1 Note("e'16") 1 Note("cs'4") 1 TremoloContainer("d'16 f'16") 1 Note("d'16") 1 Note("f'16") 1 Note("ds'4") 1 """ if not isinstance(argument, _score.Component): raise Exception("can only get measure number on component.") _update._update_measure_numbers(argument) assert isinstance(argument._measure_number, int) return argument._measure_number def parentage(argument) -> "_parentage.Parentage": r""" Gets parentage. .. container:: example >>> music_voice = abjad.Voice("c'4 d' e' f'", name="Music_Voice") >>> container = abjad.BeforeGraceContainer("cs'16") >>> abjad.attach(container, music_voice[1]) >>> container = abjad.on_beat_grace_container( ... "g'16 gs' a' as'", music_voice[2:3] ... ) >>> abjad.attach(abjad.Articulation(">"), container[0]) >>> container = abjad.AfterGraceContainer("fs'16") >>> abjad.attach(container, music_voice[3]) >>> staff = abjad.Staff([music_voice]) >>> lilypond_file = abjad.LilyPondFile([r'\include "abjad.ily"', staff]) >>> abjad.show(lilypond_file) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \context Voice = "Music_Voice" { c'4 \grace { cs'16 } d'4 << \context Voice = "On_Beat_Grace_Container" { \set fontSize = #-3 \slash \voiceOne < \tweak font-size 0 \tweak transparent ##t e' g' >16 - \accent [ ( gs'16 a'16 as'16 ) ] } \context Voice = "Music_Voice" { \voiceTwo e'4 } >> \oneVoice \afterGrace f'4 { fs'16 } } } >>> for component in abjad.select.components(staff): ... parentage = abjad.get.parentage(component) ... print(f"{repr(component)}:") ... for component_ in parentage[:]: ... 
print(f" {repr(component_)}") Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }"): Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice'): Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Note("c'4"): Note("c'4") Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") BeforeGraceContainer("cs'16"): BeforeGraceContainer("cs'16") Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Note("cs'16"): Note("cs'16") BeforeGraceContainer("cs'16") Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Note("d'4"): Note("d'4") Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }"): Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16"): OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Chord("<e' g'>16"): Chord("<e' g'>16") OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') Staff("{ c'4 d'4 { { <e' 
g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Note("gs'16"): Note("gs'16") OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Note("a'16"): Note("a'16") OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Note("as'16"): Note("as'16") OnBeatGraceContainer("<e' g'>16 gs'16 a'16 as'16") Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Voice("e'4", name='Music_Voice'): Voice("e'4", name='Music_Voice') Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Note("e'4"): Note("e'4") Voice("e'4", name='Music_Voice') Container("{ <e' g'>16 gs'16 a'16 as'16 } { e'4 }") Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Note("f'4"): Note("f'4") Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") AfterGraceContainer("fs'16"): AfterGraceContainer("fs'16") Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4 }") Note("fs'16"): Note("fs'16") AfterGraceContainer("fs'16") Voice("c'4 d'4 { { <e' g'>16 gs'16 a'16 as'16 } { e'4 } } f'4", name='Music_Voice') Staff("{ c'4 d'4 { { <e' g'>16 gs'16 a'16 
as'16 } { e'4 } } f'4 }") .. container:: example REGRESSSION. Works with tremolo containers: >>> staff = abjad.Staff() >>> staff.append(abjad.TremoloContainer(2, "c'16 e'")) >>> staff.append("cs'4") >>> staff.append(abjad.TremoloContainer(2, "d'16 f'")) >>> staff.append("ds'4") >>> abjad.show(staff) # doctest: +SKIP .. docs:: >>> string = abjad.lilypond(staff) >>> print(string) \new Staff { \repeat tremolo 2 { c'16 e'16 } cs'4 \repeat tremolo 2 { d'16 f'16 } ds'4 } >>> for component in abjad.select.components(staff): ... parentage = abjad.get.parentage(component) ... print(f"{repr(component)}:") ... print(f" {repr(parentage[:])}") Staff("{ c'16 e'16 } cs'4 { d'16 f'16 } ds'4"): (Staff("{ c'16 e'16 } cs'4 { d'16 f'16 } ds'4"),) TremoloContainer("c'16 e'16"): (TremoloContainer("c'16 e'16"), Staff("{ c'16 e'16 } cs'4 { d'16 f'16 } ds'4")) Note("c'16"): (Note("c'16"), TremoloContainer("c'16 e'16"), Staff("{ c'16 e'16 } cs'4 { d'16 f'16 } ds'4")) Note("e'16"): (Note("e'16"), TremoloContainer("c'16 e'16"), Staff("{ c'16 e'16 } cs'4 { d'16 f'16 } ds'4")) Note("cs'4"): (Note("cs'4"), Staff("{ c'16 e'16 } cs'4 { d'16 f'16 } ds'4")) TremoloContainer("d'16 f'16"): (TremoloContainer("d'16 f'16"), Staff("{ c'16 e'16 } cs'4 { d'16 f'16 } ds'4")) Note("d'16"): (Note("d'16"), TremoloContainer("d'16 f'16"), Staff("{ c'16 e'16 } cs'4 { d'16 f'16 } ds'4")) Note("f'16"): (Note("f'16"), TremoloContainer("d'16 f'16"), Staff("{ c'16 e'16 } cs'4 { d'16 f'16 } ds'4")) Note("ds'4"): (Note("ds'4"), Staff("{ c'16 e'16 } cs'4 { d'16 f'16 } ds'4")) """ if not isinstance(argument, _score.Component): message = "can only get parentage on component
codeparrot/github-code-clean
""" Module contains tools for processing files into DataFrames or other objects """ from __future__ import print_function from pandas.compat import range, lrange, StringIO, lzip, zip, string_types, map from pandas import compat import re import csv import numpy as np from pandas.core.index import Index, MultiIndex from pandas.core.frame import DataFrame import datetime import pandas.core.common as com from pandas.core.config import get_option from pandas.io.date_converters import generic_parser from pandas.io.common import get_filepath_or_buffer from pandas.tseries import tools from pandas.util.decorators import Appender import pandas.lib as lib import pandas.tslib as tslib import pandas.parser as _parser _parser_params = """Also supports optionally iterating or breaking of the file into chunks. Parameters ---------- filepath_or_buffer : string or file handle / StringIO. The string could be a URL. Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is expected. For instance, a local file could be file ://localhost/path/to/table.csv %s lineterminator : string (length 1), default None Character to break file into lines. Only valid with C parser quotechar : string (length 1) The character used to denote the start and end of a quoted item. Quoted items can include the delimiter and it will be ignored. quoting : int or csv.QUOTE_* instance, default None Control field quoting behavior per ``csv.QUOTE_*`` constants. Use one of QUOTE_MINIMAL (0), QUOTE_ALL (1), QUOTE_NONNUMERIC (2) or QUOTE_NONE (3). Default (None) results in QUOTE_MINIMAL behavior. skipinitialspace : boolean, default False Skip spaces after delimiter escapechar : string dtype : Type name or dict of column -> type Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32} compression : {'gzip', 'bz2', None}, default None For on-the-fly decompression of on-disk data dialect : string or csv.Dialect instance, default None If None defaults to Excel dialect. 
Ignored if sep longer than 1 char See csv.Dialect documentation for more details header : int row number(s) to use as the column names, and the start of the data. Defaults to 0 if no ``names`` passed, otherwise ``None``. Explicitly pass ``header=0`` to be able to replace existing names. The header can be a list of integers that specify row locations for a multi-index on the columns E.g. [0,1,3]. Intervening rows that are not specified will be skipped. (E.g. 2 in this example are skipped) skiprows : list-like or integer Row numbers to skip (0-indexed) or number of rows to skip (int) at the start of the file index_col : int or sequence or False, default None Column to use as the row labels of the DataFrame. If a sequence is given, a MultiIndex is used. If you have a malformed file with delimiters at the end of each line, you might consider index_col=False to force pandas to _not_ use the first column as the index (row names) names : array-like List of column names to use. If file contains no header row, then you should explicitly pass header=None prefix : string or None (default) Prefix to add to column numbers when no header, e.g 'X' for X0, X1, ... na_values : list-like or dict, default None Additional strings to recognize as NA/NaN. If dict passed, specific per-column NA values true_values : list Values to consider as True false_values : list Values to consider as False keep_default_na : bool, default True If na_values are specified and keep_default_na is False the default NaN values are overridden, otherwise they're appended to parse_dates : boolean, list of ints or names, list of lists, or dict If True -> try parsing the index. If [1, 2, 3] -> try parsing columns 1, 2, 3 each as a separate date column. If [[1, 3]] -> combine columns 1 and 3 and parse as a single date column. {'foo' : [1, 3]} -> parse columns 1, 3 as date and call result 'foo' A fast-path exists for iso8601-formatted dates. 
keep_date_col : boolean, default False If True and parse_dates specifies combining multiple columns then keep the original columns. date_parser : function Function to use for converting a sequence of string columns to an array of datetime instances. The default uses dateutil.parser.parser to do the conversion. dayfirst : boolean, default False DD/MM format dates, international and European format thousands : str, default None Thousands separator comment : str, default None Indicates remainder of line should not be parsed Does not support line commenting (will return empty line) decimal : str, default '.' Character to recognize as decimal point. E.g. use ',' for European data nrows : int, default None Number of rows of file to read. Useful for reading pieces of large files iterator : boolean, default False Return TextFileReader object chunksize : int, default None Return TextFileReader object for iteration skipfooter : int, default 0 Number of line at bottom of file to skip converters : dict. optional Dict of functions for converting values in certain columns. Keys can either be integers or column labels verbose : boolean, default False Indicate number of NA values placed in non-numeric columns delimiter : string, default None Alternative argument name for sep. Regular expressions are accepted. encoding : string, default None Encoding to use for UTF when reading/writing (ex. 'utf-8') squeeze : boolean, default False If the parsed data only contains one column then return a Series na_filter: boolean, default True Detect missing value markers (empty strings and the value of na_values). In data without any NAs, passing na_filter=False can improve the performance of reading a large file usecols : array-like Return a subset of the columns. Results in much faster parsing time and lower memory usage. 
mangle_dupe_cols: boolean, default True Duplicate columns will be specified as 'X.0'...'X.N', rather than 'X'...'X' tupleize_cols: boolean, default False Leave a list of tuples on columns as is (default is to convert to a Multi Index on the columns) error_bad_lines: boolean, default True Lines with too many fields (e.g. a csv line with too many commas) will by default cause an exception to be raised, and no DataFrame will be returned. If False, then these "bad lines" will dropped from the DataFrame that is returned. (Only valid with C parser). warn_bad_lines: boolean, default True If error_bad_lines is False, and warn_bad_lines is True, a warning for each "bad line" will be output. (Only valid with C parser). infer_datetime_format : boolean, default False If True and parse_dates is enabled for a column, attempt to infer the datetime format to speed up the processing Returns ------- result : DataFrame or TextParser """ _csv_sep = """sep : string, default ',' Delimiter to use. If sep is None, will try to automatically determine this. Regular expressions are accepted. """ _table_sep = """sep : string, default \\t (tab-stop) Delimiter to use. Regular expressions are accepted.""" _read_csv_doc = """ Read CSV (comma-separated) file into DataFrame %s """ % (_parser_params % _csv_sep) _read_table_doc = """ Read general delimited file into DataFrame %s """ % (_parser_params % _table_sep) _fwf_widths = """\ colspecs : list of pairs (int, int) or 'infer'. optional A list of pairs (tuples) giving the extents of the fixed-width fields of each line as half-open intervals (i.e., [from, to[ ). String value 'infer' can be used to instruct the parser to try detecting the column specifications from the first 100 rows of the data (default='infer'). widths : list of ints. optional A list of field widths which can be used instead of 'colspecs' if the intervals are contiguous. 
""" _read_fwf_doc = """ Read a table of fixed-width formatted lines into DataFrame %s Also, 'delimiter' is used to specify the filler character of the fields if it is not spaces (e.g., '~'). """ % (_parser_params % _fwf_widths) def _read(filepath_or_buffer, kwds): "Generic reader of line files." encoding = kwds.get('encoding', None) skipfooter = kwds.pop('skipfooter', None) if skipfooter is not None: kwds['skip_footer'] = skipfooter filepath_or_buffer, _ = get_filepath_or_buffer(filepath_or_buffer, encoding) if kwds.get('date_parser', None) is not None: if isinstance(kwds['parse_dates'], bool): kwds['parse_dates'] = True # Extract some of the arguments (pass chunksize on). iterator = kwds.get('iterator', False) nrows = kwds.pop('nrows', None) chunksize = kwds.get('chunksize', None) # Create the parser. parser = TextFileReader(filepath_or_buffer, **kwds) if nrows is not None: return parser.read(nrows) elif chunksize or iterator: return parser return parser.read() _parser_defaults = { 'delimiter': None, 'doublequote': True, 'escapechar': None, 'quotechar': '"', 'quoting': csv.QUOTE_MINIMAL, 'skipinitialspace': False, 'lineterminator': None, 'header': 'infer', 'index_col': None, 'names': None, 'prefix': None, 'skiprows': None, 'na_values': None, 'true_values': None, 'false_values': None, 'skip_footer': 0, 'converters': None, 'keep_default_na': True, 'thousands': None, 'comment': None, # 'engine': 'c', 'parse_dates': False, 'keep_date_col': False, 'dayfirst': False, 'date_parser': None, 'usecols': None, # 'nrows': None, # 'iterator': False, 'chunksize': None, 'verbose': False, 'encoding': None, 'squeeze': False, 'compression': None, 'mangle_dupe_cols': True, 'tupleize_cols': False, 'infer_datetime_format': False, } _c_parser_defaults = { 'delim_whitespace': False, 'as_recarray': False, 'na_filter': True, 'compact_ints': False, 'use_unsigned': False, 'low_memory': True, 'memory_map': False, 'buffer_lines': None, 'error_bad_lines': True, 'warn_bad_lines': True, 'dtype': 
None, 'decimal': b'.' } _fwf_defaults = { 'colspecs': 'infer', 'widths': None, } _c_unsupported = set(['skip_footer']) _python_unsupported = set(_c_parser_defaults.keys()) def _make_parser_function(name, sep=','): def parser_f(filepath_or_buffer, sep=sep, dialect=None, compression=None, doublequote=True, escapechar=None, quotechar='"', quoting=csv.QUOTE_MINIMAL, skipinitialspace=False, lineterminator=None, header='infer', index_col=None, names=None, prefix=None, skiprows=None, skipfooter=None, skip_footer=0, na_values=None, na_fvalues=None, true_values=None, false_values=None, delimiter=None, converters=None, dtype=None, usecols=None, engine='c', delim_whitespace=False, as_recarray=False, na_filter=True, compact_ints=False, use_unsigned=False, low_memory=_c_parser_defaults['low_memory'], buffer_lines=None, warn_bad_lines=True, error_bad_lines=True, keep_default_na=True, thousands=None, comment=None, decimal=b'.', parse_dates=False, keep_date_col=False, dayfirst=False, date_parser=None, memory_map=False, nrows=None, iterator=False, chunksize=None, verbose=False, encoding=None, squeeze=False, mangle_dupe_cols=True, tupleize_cols=False, infer_datetime_format=False): # Alias sep -> delimiter. 
if delimiter is None: delimiter = sep kwds = dict(delimiter=delimiter, engine=engine, dialect=dialect, compression=compression, doublequote=doublequote, escapechar=escapechar, quotechar=quotechar, quoting=quoting, skipinitialspace=skipinitialspace, lineterminator=lineterminator, header=header, index_col=index_col, names=names, prefix=prefix, skiprows=skiprows, na_values=na_values, na_fvalues=na_fvalues, true_values=true_values, false_values=false_values, keep_default_na=keep_default_na, thousands=thousands, comment=comment, decimal=decimal, parse_dates=parse_dates, keep_date_col=keep_date_col, dayfirst=dayfirst, date_parser=date_parser, nrows=nrows, iterator=iterator, chunksize=chunksize, skipfooter=skipfooter or skip_footer, converters=converters, dtype=dtype, usecols=usecols, verbose=verbose, encoding=encoding, squeeze=squeeze, memory_map=memory_map, na_filter=na_filter, compact_ints=compact_ints, use_unsigned=use_unsigned, delim_whitespace=delim_whitespace, as_recarray=as_recarray, warn_bad_lines=warn_bad_lines, error_bad_lines=error_bad_lines, low_memory=low_memory, buffer_lines=buffer_lines, mangle_dupe_cols=mangle_dupe_cols, tupleize_cols=tupleize_cols, infer_datetime_format=infer_datetime_format) return _read(filepath_or_buffer, kwds) parser_f.__name__ = name return parser_f read_csv = _make_parser_function('read_csv', sep=',') read_csv = Appender(_read_csv_doc)(read_csv) read_table = _make_parser_function('read_table', sep='\t') read_table = Appender(_read_table_doc)(read_table) @Appender(_read_fwf_doc) def read_fwf(filepath_or_buffer, colspecs='infer', widths=None, **kwds): # Check input arguments. if colspecs is None and widths is None: raise ValueError("Must specify either colspecs or widths") elif colspecs not in (None, 'infer') and widths is not None: raise ValueError("You must specify only one of 'widths' and " "'colspecs'") # Compute 'colspecs' from 'widths', if specified. 
# --- tail of read_fwf (its `def` line is above this chunk): translate the
# `widths` list into explicit (start, end) colspecs and dispatch to the
# fixed-width-field engine.
if widths is not None:
    colspecs, col = [], 0
    for w in widths:
        colspecs.append((col, col + w))
        col += w
    kwds['colspecs'] = colspecs

kwds['engine'] = 'python-fwf'
return _read(filepath_or_buffer, kwds)


# common NA values
# no longer excluding inf representations
# '1.#INF','-1.#INF', '1.#INF000000',
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN', '#N/A',
                  'N/A', 'NA', '#NA', 'NULL', 'NaN', '-NaN', 'nan',
                  '-nan', ''])


class TextFileReader(object):
    """
    Passed dialect overrides any of the related parser options

    Front end for the parsing engines: normalizes keyword options,
    instantiates the chosen engine (C or Python), and exposes read(),
    get_chunk() and chunked iteration.
    """

    def __init__(self, f, engine='python', **kwds):
        self.f = f

        # a csv.Dialect instance, when supplied, overrides the individual
        # formatting-related options
        if kwds.get('dialect') is not None:
            dialect = kwds['dialect']
            kwds['delimiter'] = dialect.delimiter
            kwds['doublequote'] = dialect.doublequote
            kwds['escapechar'] = dialect.escapechar
            kwds['skipinitialspace'] = dialect.skipinitialspace
            kwds['quotechar'] = dialect.quotechar
            kwds['quoting'] = dialect.quoting

        # header='infer' resolves to row 0 unless explicit names were passed
        if kwds.get('header', 'infer') == 'infer':
            kwds['header'] = 0 if kwds.get('names') is None else None

        self.orig_options = kwds

        # miscellanea
        self.engine = engine
        self._engine = None

        options = self._get_options_with_defaults(engine)

        self.chunksize = options.pop('chunksize', None)
        self.squeeze = options.pop('squeeze', False)

        # might mutate self.engine
        self.options, self.engine = self._clean_options(options, engine)

        if 'has_index_names' in kwds:
            self.options['has_index_names'] = kwds['has_index_names']

        self._make_engine(self.engine)

    def _get_options_with_defaults(self, engine):
        # Merge user-passed keywords with per-engine defaults; reject
        # non-default C-engine-only options when another engine is in use.
        kwds = self.orig_options

        options = {}
        for argname, default in compat.iteritems(_parser_defaults):
            options[argname] = kwds.get(argname, default)

        for argname, default in compat.iteritems(_c_parser_defaults):
            if argname in kwds:
                value = kwds[argname]
                if engine != 'c' and value != default:
                    raise ValueError('The %r option is not supported with the'
                                     ' %r engine' % (argname, engine))
            else:
                value = default
            options[argname] = value

        if engine == 'python-fwf':
            for argname, default in compat.iteritems(_fwf_defaults):
                options[argname] = kwds.get(argname, default)

        return options

    def _clean_options(self, options, engine):
        # Validate/normalize the merged options and possibly fall back from
        # the C engine to the Python engine (delimiter sniffing, multi-char
        # separators, skip_footer are Python-only).  Returns (options, engine).
        result = options.copy()

        sep = options['delimiter']
        delim_whitespace = options['delim_whitespace']

        if sep is None and not delim_whitespace:
            if engine == 'c':
                print('Using Python parser to sniff delimiter')
                engine = 'python'
        elif sep is not None and len(sep) > 1:
            # wait until regex engine integrated
            if engine not in ('python', 'python-fwf'):
                engine = 'python'

        # C engine not supported yet
        if engine == 'c':
            if options['skip_footer'] > 0:
                engine = 'python'

        # drop options the selected engine does not understand
        if engine == 'c':
            for arg in _c_unsupported:
                del result[arg]
        if 'python' in engine:
            for arg in _python_unsupported:
                del result[arg]

        index_col = options['index_col']
        names = options['names']
        converters = options['converters']
        na_values = options['na_values']
        skiprows = options['skiprows']

        # really delete this one
        keep_default_na = result.pop('keep_default_na')

        if _is_index_col(index_col):
            if not isinstance(index_col, (list, tuple, np.ndarray)):
                index_col = [index_col]
        result['index_col'] = index_col

        names = list(names) if names is not None else names

        # type conversion-related
        if converters is not None:
            if not isinstance(converters, dict):
                raise TypeError('Type converters must be a dict or'
                                ' subclass, input was '
                                'a {0!r}'.format(type(converters).__name__))
        else:
            converters = {}

        # Converting values to NA
        na_values, na_fvalues = _clean_na_values(na_values, keep_default_na)

        # an integer skiprows means "skip the first N rows"
        if com.is_integer(skiprows):
            skiprows = lrange(skiprows)
        skiprows = set() if skiprows is None else set(skiprows)

        # put stuff back
        result['names'] = names
        result['converters'] = converters
        result['na_values'] = na_values
        result['na_fvalues'] = na_fvalues
        result['skiprows'] = skiprows

        return result, engine

    def __iter__(self):
        # yield chunk DataFrames when chunksize is set, otherwise the
        # whole parsed result once
        try:
            if self.chunksize:
                while True:
                    yield self.read(self.chunksize)
            else:
                yield self.read()
        except StopIteration:
            pass

    def _make_engine(self, engine='c'):
        # instantiate the concrete parser backing this reader
        if engine == 'c':
            self._engine = CParserWrapper(self.f, **self.options)
        else:
            if engine == 'python':
                klass = PythonParser
            elif engine == 'python-fwf':
                klass = FixedWidthFieldParser
            self._engine = klass(self.f, **self.options)

    def _failover_to_python(self):
        raise NotImplementedError

    def read(self, nrows=None):
        # Parse (up to nrows) rows and assemble the DataFrame; with
        # as_recarray the engine result is returned untouched, and with
        # squeeze a single-column frame is returned as that column.
        if nrows is not None:
            if self.options.get('skip_footer'):
                raise ValueError('skip_footer not supported for iteration')

        ret = self._engine.read(nrows)

        if self.options.get('as_recarray'):
            return ret

        # May alter columns / col_dict
        index, columns, col_dict = self._create_index(ret)

        df = DataFrame(col_dict, columns=columns, index=index)

        if self.squeeze and len(df.columns) == 1:
            return df[df.columns[0]]
        return df

    def _create_index(self, ret):
        # hook point for subclasses; base implementation is a pass-through
        index, columns, col_dict = ret
        return index, columns, col_dict

    def get_chunk(self, size=None):
        if size is None:
            size = self.chunksize
        return self.read(nrows=size)


def _is_index_col(col):
    # index_col=None and index_col=False both mean "no index column"
    return col is not None and col is not False


class ParserBase(object):
    # Shared machinery for the C and Python engines: option bookkeeping,
    # date parsing, index construction and NA conversion.

    def __init__(self, kwds):
        self.names = kwds.get('names')
        self.orig_names = None
        self.prefix = kwds.pop('prefix', None)

        self.index_col = kwds.get('index_col', None)
        self.index_names = None
        self.col_names = None

        self.parse_dates = kwds.pop('parse_dates', False)
        self.date_parser = kwds.pop('date_parser', None)
        self.dayfirst = kwds.pop('dayfirst', False)
        self.keep_date_col = kwds.pop('keep_date_col', False)

        self.na_values = kwds.get('na_values')
        self.na_fvalues = kwds.get('na_fvalues')
        self.true_values = kwds.get('true_values')
        self.false_values = kwds.get('false_values')
        self.tupleize_cols = kwds.get('tupleize_cols', False)
        self.infer_datetime_format = kwds.pop('infer_datetime_format', False)

        self._date_conv = _make_date_converter(
            date_parser=self.date_parser,
            dayfirst=self.dayfirst,
            infer_datetime_format=self.infer_datetime_format
        )

        # validate header options for mi
        self.header = kwds.get('header')
        if isinstance(self.header, (list, tuple, np.ndarray)):
            if kwds.get('as_recarray'):
                raise ValueError("cannot specify as_recarray when "
                                 "specifying a 
multi-index header")
            if kwds.get('usecols'):
                raise ValueError("cannot specify usecols when "
                                 "specifying a multi-index header")
            if kwds.get('names'):
                raise ValueError("cannot specify names when "
                                 "specifying a multi-index header")

            # validate index_col that only contains integers
            if self.index_col is not None:
                is_sequence = isinstance(self.index_col,
                                         (list, tuple, np.ndarray))
                if not (is_sequence and
                        all(map(com.is_integer, self.index_col)) or
                        com.is_integer(self.index_col)):
                    raise ValueError("index_col must only contain row numbers "
                                     "when specifying a multi-index header")

        self._name_processed = False

    @property
    def _has_complex_date_col(self):
        # True when parse_dates combines multiple source columns
        # (dict spec, or a list whose first element is itself a list)
        return (isinstance(self.parse_dates, dict) or
                (isinstance(self.parse_dates, list) and
                 len(self.parse_dates) > 0 and
                 isinstance(self.parse_dates[0], list)))

    def _should_parse_dates(self, i):
        # Decide whether index level i must go through date conversion,
        # matching parse_dates either by position or by name.
        if isinstance(self.parse_dates, bool):
            return self.parse_dates
        else:
            name = self.index_names[i]
            j = self.index_col[i]

            if np.isscalar(self.parse_dates):
                return (j == self.parse_dates) or (name == self.parse_dates)
            else:
                return (j in self.parse_dates) or (name in self.parse_dates)

    def _extract_multi_indexer_columns(self, header, index_names, col_names,
                                       passed_names=False):
        """ extract and return the names, index_names, col_names
            header is a list-of-lists returned from the parsers """
        if len(header) < 2:
            return header[0], index_names, col_names, passed_names

        # the names are the tuples of the header that are not the index cols
        # 0 is the name of the index, assuming index_col is a list of column
        # numbers
        ic = self.index_col
        if ic is None:
            ic = []

        if not isinstance(ic, (list, tuple, np.ndarray)):
            ic = [ic]
        sic = set(ic)

        # clean the index_names
        index_names = header.pop(-1)
        index_names, names, index_col = _clean_index_names(index_names,
                                                           self.index_col)

        # extract the columns
        field_count = len(header[0])

        def extract(r):
            return tuple([r[i] for i in range(field_count) if i not in sic])

        columns = lzip(*[extract(r) for r in header])
        names = ic + columns

        # if we find 'Unnamed' all of a single level, then our header was too
        # long
        for n in range(len(columns[0])):
            if all(['Unnamed' in c[n] for c in columns]):
                raise _parser.CParserError(
                    "Passed header=[%s] are too many rows for this "
                    "multi_index of columns"
                    % ','.join([str(x) for x in self.header])
                )

        # clean the column names (if we have an index_col)
        if len(ic):
            col_names = [r[0] if len(r[0]) and 'Unnamed' not in r[0] else None
                         for r in header]
        else:
            col_names = [None] * len(header)
            passed_names = True

        return names, index_names, col_names, passed_names

    def _maybe_make_multi_index_columns(self, columns, col_names=None):
        # possibly create a column mi here
        if (not self.tupleize_cols and len(columns) and
                not isinstance(columns, MultiIndex) and
                all([isinstance(c, tuple) for c in columns])):
            columns = MultiIndex.from_tuples(columns, names=col_names)
        return columns

    def _make_index(self, data, alldata, columns, indexnamerow=False):
        # Build the row index (simple, date-combined, or none) and
        # finalize the columns; returns (index, columns).
        if not _is_index_col(self.index_col) or not self.index_col:
            index = None
        elif not self._has_complex_date_col:
            index = self._get_simple_index(alldata, columns)
            index = self._agg_index(index)
        elif self._has_complex_date_col:
            if not self._name_processed:
                (self.index_names, _,
                 self.index_col) = _clean_index_names(list(columns),
                                                      self.index_col)
                self._name_processed = True
            index = self._get_complex_date_index(data, columns)
            index = self._agg_index(index, try_parse_dates=False)

        # add names for the index
        if indexnamerow:
            coffset = len(indexnamerow) - len(columns)
            index = index.set_names(indexnamerow[:coffset])

        # maybe create a mi on the columns
        columns = self._maybe_make_multi_index_columns(columns,
                                                       self.col_names)

        return index, columns

    _implicit_index = False

    def _get_simple_index(self, data, columns):
        # Pop the positional index columns out of `data`/`columns` and
        # return the raw index arrays.  Only integer index_col is valid here.
        def ix(col):
            if not isinstance(col, compat.string_types):
                return col
            raise ValueError('Index %s invalid' % col)

        index = None
        to_remove = []
        index = []
        for idx in self.index_col:
            i = ix(idx)
            to_remove.append(i)
            index.append(data[i])

        # remove index items from content and columns, don't pop in
        # loop
        for i in reversed(sorted(to_remove)):
            data.pop(i)
            if not self._implicit_index:
                columns.pop(i)

        return index

    def _get_complex_date_index(self, data, col_names):
        # As _get_simple_index, but index_col entries may also be column
        # names; requires col_names to resolve integer positions.
        def _get_name(icol):
            if isinstance(icol, compat.string_types):
                return icol

            if col_names is None:
                raise ValueError(('Must supply column order to use %s as '
                                  'index') % str(icol))

            for i, c in enumerate(col_names):
                if i == icol:
                    return c

        index = None
        to_remove = []
        index = []
        for idx in self.index_col:
            name = _get_name(idx)
            to_remove.append(name)
            index.append(data[name])

        # remove index items from content and columns, don't pop in
        # loop
        for c in reversed(sorted(to_remove)):
            data.pop(c)
            col_names.remove(c)

        return index

    def _agg_index(self, index, try_parse_dates=True):
        # Convert each raw index level (date parsing + NA conversion) and
        # assemble them into a MultiIndex.
        arrays = []
        for i, arr in enumerate(index):

            if (try_parse_dates and self._should_parse_dates(i)):
                arr = self._date_conv(arr)

            col_na_values = self.na_values
            col_na_fvalues = self.na_fvalues

            if isinstance(self.na_values, dict):
                col_name = self.index_names[i]
                if col_name is not None:
                    col_na_values, col_na_fvalues = _get_na_values(
                        col_name, self.na_values, self.na_fvalues)

            arr, _ = self._convert_types(arr,
                                         col_na_values | col_na_fvalues)
            arrays.append(arr)

        index = MultiIndex.from_arrays(arrays, names=self.index_names)

        return index

    def _convert_to_ndarrays(self, dct, na_values, na_fvalues, verbose=False,
                             converters=None):
        # Apply per-column converters and NA/type conversion to a dict of
        # raw column arrays; type coercion is skipped for converted columns.
        result = {}
        for c, values in compat.iteritems(dct):
            conv_f = None if converters is None else converters.get(c, None)
            col_na_values, col_na_fvalues = _get_na_values(c, na_values,
                                                           na_fvalues)
            coerce_type = True
            if conv_f is not None:
                values = lib.map_infer(values, conv_f)
                coerce_type = False
            cvals, na_count = self._convert_types(
                values, set(col_na_values) | col_na_fvalues, coerce_type)
            result[c] = cvals
            if verbose and na_count:
                print('Filled %d NA values in column %s' % (na_count, str(c)))
        return result

    def _convert_types(self, values, na_values, try_num_bool=True):
        # Mask NA values and, when requested, attempt numeric then boolean
        # conversion of object arrays; returns (converted, na_count).
        na_count = 0
        if issubclass(values.dtype.type, (np.number, np.bool_)):
            mask = lib.ismember(values, na_values)
            na_count = mask.sum()
            if na_count > 0:
                # integers cannot hold NaN, so upcast to float first
                if com.is_integer_dtype(values):
                    values = values.astype(np.float64)
                np.putmask(values, mask, np.nan)
            return values, na_count

        if try_num_bool:
            try:
                result = lib.maybe_convert_numeric(values, na_values, False)
            except Exception:
                result = values
                if values.dtype == np.object_:
                    na_count = lib.sanitize_objects(result, na_values, False)
        else:
            result = values
            if values.dtype == np.object_:
                na_count = lib.sanitize_objects(values, na_values, False)

        if result.dtype == np.object_ and try_num_bool:
            result = lib.maybe_convert_bool(values,
                                            true_values=self.true_values,
                                            false_values=self.false_values)

        return result, na_count

    def _do_date_conversions(self, names, data):
        # returns data, columns
        if self.parse_dates is not None:
            data, names = _process_date_conversion(
                data, self._date_conv, self.parse_dates,
                self.index_col, self.index_names, names,
                keep_date_col=self.keep_date_col)

        return names, data


class CParserWrapper(ParserBase):
    """
    Thin adapter around the C-extension TextReader that presents the
    ParserBase interface.
    """

    def __init__(self, src, **kwds):
        self.kwds = kwds
        kwds = kwds.copy()

        self.as_recarray = kwds.get('as_recarray', False)

        ParserBase.__init__(self, kwds)

        # the C reader works on utf-8; recode utf-16 sources up front
        if 'utf-16' in (kwds.get('encoding') or ''):
            if isinstance(src, compat.string_types):
                src = open(src, 'rb')
            src = com.UTF8Recoder(src, kwds['encoding'])
            kwds['encoding'] = 'utf-8'

        # #2442
        kwds['allow_leading_cols'] = self.index_col is not False
        self._reader = _parser.TextReader(src, **kwds)

        # XXX
        self.usecols = self._reader.usecols

        passed_names = self.names is None

        if self._reader.header is None:
            self.names = None
        else:
            if len(self._reader.header) > 1:
                # we have a multi index in the columns
                self.names, self.index_names, self.col_names, passed_names = (
                    self._extract_multi_indexer_columns(
                        self._reader.header, self.index_names, self.col_names,
                        passed_names
                    )
                )
            else:
                self.names = list(self._reader.header[0])

        if self.names is None:
            if self.prefix:
                self.names = ['%s%d' % 
(self.prefix, i)
                              for i in range(self._reader.table_width)]
            else:
                self.names = lrange(self._reader.table_width)

        # If the names were inferred (not passed by user) and usecols is
        # defined, then ensure names refers to the used columns, not the
        # document's columns.
        if self.usecols and passed_names:
            col_indices = []
            for u in self.usecols:
                if isinstance(u, string_types):
                    col_indices.append(self.names.index(u))
                else:
                    col_indices.append(u)
            self.names = [n for i, n in enumerate(self.names)
                          if i in col_indices]
            if len(self.names) < len(self.usecols):
                raise ValueError("Usecols do not match names.")

        self._set_noconvert_columns()

        self.orig_names = self.names

        if not self._has_complex_date_col:
            if (self._reader.leading_cols == 0 and
                    _is_index_col(self.index_col)):
                self._name_processed = True
                (index_names, self.names,
                 self.index_col) = _clean_index_names(self.names,
                                                      self.index_col)
                if self.index_names is None:
                    self.index_names = index_names

            if self._reader.header is None and not passed_names:
                self.index_names = [None] * len(self.index_names)

        self._implicit_index = self._reader.leading_cols > 0

    def _set_noconvert_columns(self):
        # Tell the C reader to skip type conversion for columns that will
        # be date-parsed; accepts positions or names from parse_dates.
        names = self.names

        def _set(x):
            if com.is_integer(x):
                self._reader.set_noconvert(x)
            else:
                self._reader.set_noconvert(names.index(x))

        if isinstance(self.parse_dates, list):
            for val in self.parse_dates:
                if isinstance(val, list):
                    for k in val:
                        _set(k)
                else:
                    _set(val)
        elif isinstance(self.parse_dates, dict):
            for val in self.parse_dates.values():
                if isinstance(val, list):
                    for k in val:
                        _set(k)
                else:
                    _set(val)

    def set_error_bad_lines(self, status):
        self._reader.set_error_bad_lines(int(status))

    def read(self, nrows=None):
        # Read from the C reader and post-process: implicit leading index
        # columns, usecols filtering, date conversion, index construction.
        if self.as_recarray:
            # what to do if there are leading columns?
            return self._reader.read(nrows)

        try:
            data = self._reader.read(nrows)
        except StopIteration:
            # first chunk exhausted: signal "no data" with an empty result
            if nrows is None:
                return None, self.names, {}
            else:
                raise

        names = self.names

        if self._reader.leading_cols:
            if self._has_complex_date_col:
                raise NotImplementedError('file structure not yet supported')

            # implicit index, no index names
            arrays = []
            for i in range(self._reader.leading_cols):
                if self.index_col is None:
                    values = data.pop(i)
                else:
                    values = data.pop(self.index_col[i])

                values = self._maybe_parse_dates(values, i,
                                                 try_parse_dates=True)
                arrays.append(values)

            index = MultiIndex.from_arrays(arrays)

            if self.usecols is not None:
                names = self._filter_usecols(names)

            # rename dict keys
            data = sorted(data.items())
            data = dict((k, v) for k, (i, v) in zip(names, data))

            names, data = self._do_date_conversions(names, data)

        else:
            # rename dict keys
            data = sorted(data.items())

            # ugh, mutation
            names = list(self.orig_names)

            if self.usecols is not None:
                names = self._filter_usecols(names)

            # columns as list
            alldata = [x[1] for x in data]

            data = dict((k, v) for k, (i, v) in zip(names, data))

            names, data = self._do_date_conversions(names, data)
            index, names = self._make_index(data, alldata, names)

        # maybe create a mi on the columns
        names = self._maybe_make_multi_index_columns(names, self.col_names)

        return index, names, data

    def _filter_usecols(self, names):
        # hackish
        if self.usecols is not None and len(names) != len(self.usecols):
            names = [name for i, name in enumerate(names)
                     if i in self.usecols or name in self.usecols]
        return names

    def _get_index_names(self):
        names = list(self._reader.header[0])
        idx_names = None

        if self._reader.leading_cols == 0 and self.index_col is not None:
            (idx_names, names,
             self.index_col) = _clean_index_names(names, self.index_col)

        return names, idx_names

    def _maybe_parse_dates(self, values, index, try_parse_dates=True):
        if try_parse_dates and self._should_parse_dates(index):
            values = self._date_conv(values)
        return values


def TextParser(*args, **kwds):
    """
    Converts 
lists of lists/tuples into DataFrames with proper type inference
    and optional (e.g. string to datetime) conversion. Also enables iterating
    lazily over chunks of large files

    Parameters
    ----------
    data : file-like object or list
    delimiter : separator character to use
    dialect : str or csv.Dialect instance, default None
        Ignored if delimiter is longer than 1 character
    names : sequence, default
    header : int, default 0
        Row to use to parse column labels. Defaults to the first row. Prior
        rows will be discarded
    index_col : int or list, default None
        Column or columns to use as the (possibly hierarchical) index
    has_index_names: boolean, default False
        True if the cols defined in index_col have an index name and are
        not in the header
    na_values : iterable, default None
        Custom NA values
    keep_default_na : bool, default True
    thousands : str, default None
        Thousands separator
    comment : str, default None
        Comment out remainder of line
    parse_dates : boolean, default False
    keep_date_col : boolean, default False
    date_parser : function, default None
    skiprows : list of integers
        Row numbers to skip
    skip_footer : int
        Number of line at bottom of file to skip
    encoding : string, default None
        Encoding to use for UTF when reading/writing (ex. 'utf-8')
    squeeze : boolean, default False
        returns Series if only one column
    infer_datetime_format: boolean, default False
        If True and `parse_dates` is True for a column, try to infer the
        datetime format based on the first datetime string. If the format
        can be inferred, there often will be a large parsing speed-up.
    """
    # force the pure-Python engine; the C engine requires a real file source
    kwds['engine'] = 'python'
    return TextFileReader(*args, **kwds)


def count_empty_vals(vals):
    # number of entries that are '' or None (used to detect index-name rows)
    return sum([1 for v in vals if v == '' or v is None])


def _wrap_compressed(f, compression, encoding=None):
    """wraps compressed fileobject in a decompressing fileobject
    NOTE: For all files in Python 3.2 and for bzip'd files under all Python
    versions, this means reading in the entire file and then re-wrapping it in
    StringIO.
    """
    compression = compression.lower()
    encoding = encoding or get_option('display.encoding')

    if compression == 'gzip':
        import gzip

        f = gzip.GzipFile(fileobj=f)
        if compat.PY3_2:
            # 3.2's gzip doesn't support read1
            f = StringIO(f.read().decode(encoding))
        elif compat.PY3:
            from io import TextIOWrapper

            f = TextIOWrapper(f)
        return f
    elif compression == 'bz2':
        import bz2

        # bz2 module can't take file objects, so have to run through
        # decompress manually
        data = bz2.decompress(f.read())
        if compat.PY3:
            data = data.decode(encoding)
        f = StringIO(data)
        return f
    else:
        raise ValueError('do not recognize compression method %s'
                         % compression)


class PythonParser(ParserBase):

    def __init__(self, f, **kwds):
        """
        Workhorse function for processing nested list into DataFrame

        Should be replaced by np.genfromtxt eventually?
        """
        ParserBase.__init__(self, kwds)

        self.data = None
        self.buf = []
        self.pos = 0

        self.encoding = kwds['encoding']
        self.compression = kwds['compression']
        self.skiprows = kwds['skiprows']

        self.skip_footer = kwds['skip_footer']
        self.delimiter = kwds['delimiter']

        self.quotechar = kwds['quotechar']
        self.escapechar = kwds['escapechar']
        self.doublequote = kwds['doublequote']
        self.skipinitialspace = kwds['skipinitialspace']
        self.lineterminator = kwds['lineterminator']
        self.quoting = kwds['quoting']
        self.mangle_dupe_cols = kwds.get('mangle_dupe_cols', True)
        self.usecols = kwds['usecols']

        self.names_passed = kwds['names'] or None

        self.has_index_names = False
        if 'has_index_names' in kwds:
            self.has_index_names = kwds['has_index_names']

        self.verbose = kwds['verbose']
        self.converters = kwds['converters']

        self.thousands = kwds['thousands']
        self.comment = kwds['comment']
        self._comment_lines = []

        if isinstance(f, compat.string_types):
            f = com._get_handle(f, 'r', encoding=self.encoding,
                                compression=self.compression)
        elif self.compression:
            f = _wrap_compressed(f, self.compression, self.encoding)
        # in Python 3, convert BytesIO or fileobjects passed with an encoding
        elif compat.PY3 and 
isinstance(f, compat.BytesIO):
            from io import TextIOWrapper

            f = TextIOWrapper(f, encoding=self.encoding)

        # Set self.data to something that can read lines.
        if hasattr(f, 'readline'):
            self._make_reader(f)
        else:
            self.data = f

        # Get columns in two steps: infer from data, then
        # infer column indices from self.usecols if it is specified.
        self._col_indices = None
        self.columns, self.num_original_columns = self._infer_columns()

        # Now self.columns has the set of columns that we will process.
        # The original set is stored in self.original_columns.
        if len(self.columns) > 1:
            # we are processing a multi index column
            self.columns, self.index_names, self.col_names, _ = (
                self._extract_multi_indexer_columns(
                    self.columns, self.index_names, self.col_names
                )
            )
            # Update list of original names to include all indices.
            self.num_original_columns = len(self.columns)
        else:
            self.columns = self.columns[0]

        # get popped off for index
        self.orig_names = list(self.columns)

        # needs to be cleaned/refactored
        # multiple date column thing turning into a real spaghetti factory
        if not self._has_complex_date_col:
            (index_names, self.orig_names,
             columns_) = self._get_index_name(self.columns)
            self._name_processed = True
            if self.index_names is None:
                self.index_names = index_names

        self._first_chunk = True

        if self.parse_dates:
            self._no_thousands_columns = self._set_no_thousands_columns()
        else:
            self._no_thousands_columns = None

    def _set_no_thousands_columns(self):
        # Create a set of column ids that are not to be stripped of thousands
        # operators.
        noconvert_columns = set()

        def _set(x):
            if com.is_integer(x):
                noconvert_columns.add(x)
            else:
                noconvert_columns.add(self.columns.index(x))

        if isinstance(self.parse_dates, list):
            for val in self.parse_dates:
                if isinstance(val, list):
                    for k in val:
                        _set(k)
                else:
                    _set(val)
        elif isinstance(self.parse_dates, dict):
            for val in self.parse_dates.values():
                if isinstance(val, list):
                    for k in val:
                        _set(k)
                else:
                    _set(val)
        return noconvert_columns

    def _make_reader(self, f):
        # Build self.data: a csv-based row reader for single-char (or
        # sniffed) separators, or a regex splitter for multi-char ones.
        sep = self.delimiter

        if sep is None or len(sep) == 1:
            if self.lineterminator:
                raise ValueError('Custom line terminators not supported in '
                                 'python parser (yet)')

            class MyDialect(csv.Dialect):
                delimiter = self.delimiter
                quotechar = self.quotechar
                escapechar = self.escapechar
                doublequote = self.doublequote
                skipinitialspace = self.skipinitialspace
                quoting = self.quoting
                lineterminator = '\n'

            dia = MyDialect

            sniff_sep = True
            if sep is not None:
                sniff_sep = False
                dia.delimiter = sep
            # attempt to sniff the delimiter
            if sniff_sep:
                line = f.readline()
                while self.pos in self.skiprows:
                    self.pos += 1
                    line = f.readline()

                line = self._check_comments([line])[0]

                self.pos += 1
                sniffed = csv.Sniffer().sniff(line)
                dia.delimiter = sniffed.delimiter

                # the sniffed line is consumed; keep its parsed fields
                # in the buffer so it is not lost
                if self.encoding is not None:
                    self.buf.extend(list(
                        com.UnicodeReader(StringIO(line),
                                          dialect=dia,
                                          encoding=self.encoding)))
                else:
                    self.buf.extend(list(csv.reader(StringIO(line),
                                                    dialect=dia)))

            if self.encoding is not None:
                reader = com.UnicodeReader(f, dialect=dia,
                                           encoding=self.encoding,
                                           strict=True)
            else:
                reader = csv.reader(f, dialect=dia,
                                    strict=True)
        else:
            def _read():
                line = next(f)
                pat = re.compile(sep)
                yield pat.split(line.strip())
                for line in f:
                    yield pat.split(line.strip())
            reader = _read()

        self.data = reader

    def read(self, rows=None):
        # Pull (up to `rows`) parsed lines, then run column/index/date/NA
        # processing; returns (index, columns, data-dict).
        try:
            content = self._get_lines(rows)
        except StopIteration:
            if self._first_chunk:
                content = []
            else:
                raise

        # done with first read, next time raise StopIteration
        self._first_chunk = False

        columns = list(self.orig_names)

        if not len(content):  # pragma: no cover
            # DataFrame with the right metadata, even though it's length 0
            return _get_empty_meta(self.orig_names,
                                   self.index_col,
                                   self.index_names)

        # handle new style for names in index
        count_empty_content_vals = count_empty_vals(content[0])
        indexnamerow = None
        if self.has_index_names and count_empty_content_vals == len(columns):
            indexnamerow = content[0]
            content = content[1:]

        alldata = self._rows_to_cols(content)
        data = self._exclude_implicit_index(alldata)

        columns, data = self._do_date_conversions(self.columns, data)

        data = self._convert_data(data)
        index, columns = self._make_index(data, alldata, columns, indexnamerow)

        return index, columns, data

    def _exclude_implicit_index(self, alldata):
        # Map column names onto data arrays, skipping the leading implicit
        # index columns when present.
        if self._implicit_index:
            excl_indices = self.index_col

            data = {}
            offset = 0
            for i, col in enumerate(self.orig_names):
                while i + offset in excl_indices:
                    offset += 1
                data[col] = alldata[i + offset]
        else:
            data = dict((k, v) for k, v in zip(self.orig_names, alldata))

        return data

    # legacy
    def get_chunk(self, size=None):
        # NOTE(review): relies on self.chunksize, which does not appear to be
        # set anywhere on this class -- confirm against callers.
        if size is None:
            size = self.chunksize
        return self.read(nrows=size)

    def _convert_data(self, data):
        # apply converters
        clean_conv = {}

        for col, f in compat.iteritems(self.converters):
            if isinstance(col, int) and col not in self.orig_names:
                col = self.orig_names[col]
            clean_conv[col] = f

        return self._convert_to_ndarrays(data, self.na_values, self.na_fvalues,
                                         self.verbose, clean_conv)

    def _infer_columns(self):
        # Determine column names (from header row(s), passed names, or a
        # generated prefix/range) plus the original column count.
        names = self.names
        num_original_columns = 0
        clear_buffer = True
        if self.header is not None:
            header = self.header

            # we have a mi columns, so read an extra line
            if isinstance(header, (list, tuple, np.ndarray)):
                have_mi_columns = True
                header = list(header) + [header[-1] + 1]
            else:
                have_mi_columns = False
                header = [header]

            columns = []
            for level, hr in enumerate(header):
                line = self._buffered_line()

                while self.pos <= hr:
                    line = self._next_line()

                unnamed_count = 0
                this_columns = []
                for i, c in enumerate(line):
                    if c == '':
                        # blank header cells become 'Unnamed: ...'
                        if have_mi_columns:
                            this_columns.append('Unnamed: %d_level_%d'
                                                % (i, level))
                        else:
                            this_columns.append('Unnamed: %d' % i)
                        unnamed_count += 1
                    else:
                        this_columns.append(c)

                if not have_mi_columns and self.mangle_dupe_cols:
                    # deduplicate repeated names as name, name.1, name.2, ...
                    counts = {}
                    for i, col in enumerate(this_columns):
                        cur_count = counts.get(col, 0)
                        if cur_count > 0:
                            this_columns[i] = '%s.%d' % (col, cur_count)
                        counts[col] = cur_count + 1
                elif have_mi_columns:
                    # if we have grabbed an extra line, but its not in our
                    # format so save in the buffer, and create an blank extra
                    # line for the rest of the parsing code
                    if hr == header[-1]:
                        lc = len(this_columns)
                        ic = (len(self.index_col)
                              if self.index_col is not None else 0)
                        if lc != unnamed_count and lc - ic > unnamed_count:
                            clear_buffer = False
                            this_columns = [None] * lc
                            self.buf = [self.buf[-1]]

                columns.append(this_columns)
                if len(columns) == 1:
                    num_original_columns = len(this_columns)

            if clear_buffer:
                self._clear_buffer()

            if names is not None:
                if ((self.usecols is not None and
                     len(names) != len(self.usecols)) or
                    (self.usecols is None and
                     len(names) != len(columns[0]))):
                    raise ValueError('Number of passed names did not match '
                                     'number of header fields in the file')
                if len(columns) > 1:
                    raise TypeError('Cannot pass names with multi-index '
                                    'columns')

                if self.usecols is not None:
                    # Set _use_cols. We don't store columns because they are
                    # overwritten.
self._handle_usecols(columns, names)
                else:
                    self._col_indices = None
                    num_original_columns = len(names)
                columns = [names]
            else:
                columns = self._handle_usecols(columns, columns[0])
        else:
            # header is None: generate names (prefix/range) or use the
            # passed names list
            line = self._buffered_line()
            ncols = len(line)
            num_original_columns = ncols
            if not names:
                if self.prefix:
                    columns = [['%s%d' % (self.prefix, i)
                                for i in range(ncols)]]
                else:
                    columns = [lrange(ncols)]
                columns = self._handle_usecols(columns, columns[0])
            else:
                if self.usecols is None or len(names) == num_original_columns:
                    columns = self._handle_usecols([names], names)
                    num_original_columns = len(names)
                else:
                    if self.usecols and len(names) != len(self.usecols):
                        raise ValueError(
                            'Number of passed names did not match number of '
                            'header fields in the file'
                        )
                    # Ignore output but set used columns.
                    self._handle_usecols([names], names)
                    columns = [names]
                    num_original_columns = ncols

        return columns, num_original_columns

    def _handle_usecols(self, columns, usecols_key):
        """
        Sets self._col_indices

        usecols_key is used if there are string usecols.
        """
        if self.usecols is not None:
            if any([isinstance(u, string_types) for u in self.usecols]):
                if len(columns) > 1:
                    raise ValueError("If using multiple headers, usecols must "
                                     "be integers.")
                col_indices = []
                for u in self.usecols:
                    if isinstance(u, string_types):
                        col_indices.append(usecols_key.index(u))
                    else:
                        col_indices.append(u)
            else:
                col_indices = self.usecols

            columns = [[n for i, n in enumerate(column)
                        if i in col_indices] for column in columns]
            self._col_indices = col_indices
        return columns

    def _buffered_line(self):
        """
        Return a line from buffer, filling buffer if required.
        """
        if len(self.buf) > 0:
            return self.buf[0]
        else:
            return self._next_line()

    def _next_line(self):
        # Advance past skiprows, fetch the next raw line, strip comments,
        # append it to the buffer, and return it.
        if isinstance(self.data, list):
            while self.pos in self.skiprows:
                self.pos += 1

            try:
                line = self.data[self.pos]
            except IndexError:
                raise StopIteration
        else:
            while self.pos in self.skiprows:
                next(self.data)
                self.pos += 1
            line = next(self.data)

        line = self._check_comments([line])[0]

        self.pos += 1
        self.buf.append(line)

        return line

    def _check_comments(self, lines):
        # Truncate each row at the first field containing the comment char;
        # text before the comment char within that field is kept.
        if self.comment is None:
            return lines
        ret = []
        for l in lines:
            rl = []
            for x in l:
                if (not isinstance(x, compat.string_types) or
                        self.comment not in x):
                    rl.append(x)
                else:
                    x = x[:x.find(self.comment)]
                    if len(x) > 0:
                        rl.append(x)
                    break
            ret.append(rl)
        return ret

    def _check_thousands(self, lines):
        # Strip the thousands separator from fields that look numeric,
        # except in columns registered in _no_thousands_columns.
        if self.thousands is None:
            return lines
        nonnum = re.compile('[^-^0-9^%s^.]+' % self.thousands)
        ret = []
        for l in lines:
            rl = []
            for i, x in enumerate(l):
                if (not isinstance(x, compat.string_types) or
                    self.thousands not in x or
                    (self._no_thousands_columns and
                     i in self._no_thousands_columns) or
                        nonnum.search(x.strip())):
                    rl.append(x)
                else:
                    rl.append(x.replace(self.thousands, ''))
            ret.append(rl)
        return ret

    def _clear_buffer(self):
        self.buf = []

    _implicit_index = False

    def _get_index_name(self, columns):
        """
        Try several cases to get lines:

        0) There are headers on row 0 and row 1 and their
           total summed lengths equals the length of the next line.
           Treat row 0 as columns and row 1 as indices
        1) Look for implicit index: there are more columns
           on row 1 than row 0. If this is true, assume that row
           1 lists index columns and row 0 lists normal columns.
        2) Get index from the columns if it was listed.
        """
        orig_names = list(columns)
        columns = list(columns)

        try:
            line = self._next_line()
        except StopIteration:
            line = None

        try:
            next_line = self._next_line()
        except StopIteration:
            next_line = None

        # implicitly index_col=0 b/c 1 fewer column names
        implicit_first_cols = 0
        if line is not None:
            # leave it 0, #2442
            # Case 1
            if self.index_col is not False:
                implicit_first_cols = len(line) - self.num_original_columns

            # Case 0
            if next_line is not None:
                if len(next_line) == len(line) + self.num_original_columns:
                    # column and index names on diff rows
                    self.index_col = lrange(len(line))
                    self.buf = self.buf[1:]

                    for c in reversed(line):
                        columns.insert(0, c)

                    # Update list of original names to include all indices.
                    self.num_original_columns = len(next_line)
                    return line, columns, orig_names

        if implicit_first_cols > 0:
            # Case 1
            self._implicit_index = True
            if self.index_col is None:
                self.index_col = lrange(implicit_first_cols)
            index_name = None

        else:
            # Case 2
            (index_name, columns,
             self.index_col) = _clean_index_names(columns, self.index_col)

        return index_name, orig_names, columns

    def _rows_to_cols(self, content):
        # Transpose parsed rows into column arrays, validating row lengths
        # and applying usecols filtering.
        zipped_content = list(lib.to_object_array(content).T)

        col_len = self.num_original_columns
        zip_len = len(zipped_content)

        if self._implicit_index:
            col_len += len(self.index_col)

        if self.skip_footer < 0:
            raise ValueError('skip footer cannot be negative')

        # Loop through rows to verify lengths are correct.
        if col_len != zip_len and self.index_col is not False:
            i = 0
            for (i, l) in enumerate(content):
                if len(l) != col_len:
                    break

            footers = 0
            if self.skip_footer:
                footers = self.skip_footer
            row_num = self.pos - (len(content) - i + footers)

            msg = ('Expected %d fields in line %d, saw %d' %
                   (col_len, row_num + 1, zip_len))
            raise ValueError(msg)

        if self.usecols:
            if self._implicit_index:
                zipped_content = [
                    a for i, a in enumerate(zipped_content)
                    if (i < len(self.index_col) or
                        i - len(self.index_col) in self._col_indices)
                ]
            else:
                zipped_content = [a for i, a in enumerate(zipped_content)
                                  if i in self._col_indices]
        return zipped_content

    def _get_lines(self, rows=None):
        # Collect up to `rows` parsed lines (or all remaining), honouring
        # skiprows/skip_footer, and run comment/thousands cleanup.
        source = self.data
        lines = self.buf
        new_rows = None

        # already fetched some number
        if rows is not None:
            # we already have the lines in the buffer
            if len(self.buf) >= rows:
                new_rows, self.buf = self.buf[:rows], self.buf[rows:]

            # need some lines
            else:
                rows -= len(self.buf)

        if new_rows is None:
            if isinstance(source, list):
                if self.pos > len(source):
                    raise StopIteration
                if rows is None:
                    new_rows = source[self.pos:]
                    new_pos = len(source)
                else:
                    new_rows = source[self.pos:self.pos + rows]
                    new_pos = self.pos + rows

                # Check for stop rows. n.b.: self.skiprows is a set.
if self.skiprows:
                    new_rows = [row for i, row in enumerate(new_rows)
                                if i + self.pos not in self.skiprows]

                lines.extend(new_rows)
                self.pos = new_pos

            else:
                new_rows = []
                try:
                    if rows is not None:
                        for _ in range(rows):
                            new_rows.append(next(source))
                        lines.extend(new_rows)
                    else:
                        rows = 0
                        while True:
                            try:
                                new_rows.append(next(source))
                                rows += 1
                            except csv.Error as inst:
                                # surface an unterminated quoted field as a
                                # clearer error with a row number
                                if 'newline inside string' in str(inst):
                                    row_num = str(self.pos + rows)
                                    msg = ('EOF inside string starting with '
                                           'line ' + row_num)
                                    raise Exception(msg)
                                raise
                except StopIteration:
                    if self.skiprows:
                        new_rows = [row for i, row in enumerate(new_rows)
                                    if self.pos + i not in self.skiprows]
                    lines.extend(new_rows)
                    if len(lines) == 0:
                        raise
                self.pos += len(new_rows)

            self.buf = []
        else:
            lines = new_rows

        if self.skip_footer:
            lines = lines[:-self.skip_footer]

        lines = self._check_comments(lines)
        return self._check_thousands(lines)


def _make_date_converter(date_parser=None, dayfirst=False,
                         infer_datetime_format=False):
    # Build the converter used for parse_dates: tries the supplied
    # date_parser first (vectorized, then row-wise), falling back to the
    # generic to_datetime / try_parse_dates machinery.
    def converter(*date_cols):
        if date_parser is None:
            strs = _concat_date_cols(date_cols)
            try:
                return tools.to_datetime(
                    com._ensure_object(strs),
                    utc=None,
                    box=False,
                    dayfirst=dayfirst,
                    infer_datetime_format=infer_datetime_format
                )
            except:
                return lib.try_parse_dates(strs, dayfirst=dayfirst)
        else:
            try:
                result = date_parser(*date_cols)
                # a scalar result means the parser is not vectorized;
                # fall through to the row-wise paths below
                if isinstance(result, datetime.datetime):
                    raise Exception('scalar parser')
                return result
            except Exception:
                try:
                    return lib.try_parse_dates(_concat_date_cols(date_cols),
                                               parser=date_parser,
                                               dayfirst=dayfirst)
                except Exception:
                    return generic_parser(date_parser, *date_cols)

    return converter


def _process_date_conversion(data_dict, converter, parse_spec,
                             index_col, index_names, columns,
                             keep_date_col=False):
    # Apply the date converter per parse_dates spec: in place for scalar
    # column specs, or combining multiple source columns into new ones.
    # Returns (data_dict, new column list).
    def _isindex(colspec):
        return ((isinstance(index_col, list) and
                 colspec in index_col) or
                (isinstance(index_names, list) and
                 colspec in index_names))

    new_cols = []
    new_data = {}

    orig_names = columns
    columns = list(columns)

    date_cols = set()

    if parse_spec is None or isinstance(parse_spec, bool):
        return data_dict, columns

    if isinstance(parse_spec, list):
        # list of column lists
        for colspec in parse_spec:
            if np.isscalar(colspec):
                if isinstance(colspec, int) and colspec not in data_dict:
                    colspec = orig_names[colspec]
                if _isindex(colspec):
                    continue
                data_dict[colspec] = converter(data_dict[colspec])
            else:
                new_name, col, old_names = _try_convert_dates(
                    converter, colspec, data_dict, orig_names)
                if new_name in data_dict:
                    raise ValueError('New date column already in dict %s' %
                                     new_name)
                new_data[new_name] = col
                new_cols.append(new_name)
                date_cols.update(old_names)

    elif isinstance(parse_spec, dict):
        # dict of new name to column list
        for new_name, colspec in compat.iteritems(parse_spec):
            if new_name in data_dict:
                raise ValueError('Date column %s already in dict' %
                                 new_name)

            _, col, old_names = _try_convert_dates(converter, colspec,
                                                   data_dict, orig_names)

            new_data[new_name] = col
            new_cols.append(new_name)
            date_cols.update(old_names)

    data_dict.update(new_data)
    new_cols.extend(columns)

    if not keep_date_col:
        for c in list(date_cols):
            data_dict.pop(c)
            new_cols.remove(c)

    return data_dict, new_cols


def _try_convert_dates(parser, colspec, data_dict, columns):
    # Combine the columns named in colspec (names or integer positions)
    # through `parser`; returns (joined name, parsed column, source names).
    colset = set(columns)
    colnames = []

    for c in colspec:
        if c in colset:
            colnames.append(c)
        elif isinstance(c, int) and c not in columns:
            colnames.append(str(columns[c]))
        else:
            colnames.append(c)

    new_name = '_'.join([str(x) for x in colnames])
    to_parse = [data_dict[c] for c in colnames if c in data_dict]

    new_col = parser(*to_parse)
    return new_name, new_col, colnames


def _clean_na_values(na_values, keep_default_na=True):
    # Normalize the na_values option into (string NA set/dict, float NA
    # set/dict); the default _NA_VALUES are merged in unless suppressed.
    # NOTE: definition continues past the end of this chunk.
    if na_values is None:
        if keep_default_na:
            na_values = _NA_VALUES
        else:
            na_values = []
        na_fvalues = set()
    elif isinstance(na_values, dict):
        if keep_default_na:
            for k, v in compat.iteritems(na_values):
                v = set(list(v)) | _NA_VALUES
                na_values[k] = v
        na_fvalues = dict([
            (k, _floatify_na_values(v)) for k, v in na_values.items()
        ])
    else:
        if not com.is_list_like(na_values):
            na_values 
= [na_values] na_values = _stringify_na_values(na_values) if keep_default_na: na_values = na_values | _NA_VALUES na_fvalues = _floatify_na_values(na_values) return na_values, na_fvalues def _clean_index_names(columns, index_col): if not _is_index_col(index_col): return None, columns, index_col columns = list(columns) cp_cols = list(columns) index_names = [] # don't mutate index_col = list(index_col) for i, c in enumerate(index_col): if isinstance(c, compat.string_types): index_names.append(c) for j, name in enumerate(cp_cols): if name == c: index_col[i] = j columns.remove(name) break else: name = cp_cols[c] columns.remove(name) index_names.append(name) # hack if isinstance(index_names[0], compat.string_types)\ and 'Unnamed' in index_names[0]: index_names[0] = None return index_names, columns, index_col def _get_empty_meta(columns, index_col, index_names): columns = list(columns) if index_col is not None: index = MultiIndex.from_arrays([[]] * len(index_col), names=index_names) for n in index_col: columns.pop(n) else: index = Index([]) return index, columns, {} def _floatify_na_values(na_values): # create float versions of the na_values result = set() for v in na_values: try: v = float(v) if not np.isnan(v): result.add(v) except: pass return result def _stringify_na_values(na_values): """ return a stringified and numeric for these values """ result = [] for x in na_values: result.append(str(x)) result.append(x) try: v = float(x) # we are like 999 here if v == int(v): v = int(v) result.append("%s.0" % v) result.append(str(v)) result.append(v) except: pass try: result.append(int(x)) except: pass return set(result) def _get_na_values(col, na_values, na_fvalues): if isinstance(na_values, dict): if col in na_values: values = na_values[col] fvalues = na_fvalues[col] return na_values[col], na_fvalues[col] else: return _NA_VALUES, set() else: return na_values, na_fvalues def _get_col_names(colspec, columns): colset = set(columns) colnames = [] for c in colspec: if c in 
colset: colnames.append(c) elif isinstance(c, int): colnames.append(columns[c]) return colnames def _concat_date_cols(date_cols): if len(date_cols) == 1: if compat.PY3: return np.array([compat.text_type(x) for x in date_cols[0]], dtype=object) else: return np.array([ str(x) if not isinstance(x, compat.string_types) else x for x in date_cols[0] ], dtype=object) rs = np.array([' '.join([compat.text_type(y) for y in x]) for x in zip(*date_cols)], dtype=object) return rs class FixedWidthReader(object): """ A reader of fixed-width lines. """ def __init__(self, f, colspecs, delimiter, comment): self.f = f self.buffer = None self.delimiter = '\r\n' + delimiter if delimiter else '\n\r\t ' self.comment = comment if colspecs == 'infer': self.colspecs = self.detect_colspecs() else: self.colspecs = colspecs if not isinstance(self.colspecs, (tuple, list)): raise TypeError("column specifications must be a list or tuple, " "input was a %r" % type(colspecs).__name__) for colspec in self.colspecs: if not (isinstance(colspec, (tuple, list)) and len(colspec) == 2 and isinstance(colspec[0], (int, np.integer)) and isinstance(colspec[1], (int, np.integer))): raise TypeError('Each column specification must be ' '2 element tuple or list of integers') def get_rows(self, n): rows = [] for i, row in enumerate(self.f, 1): rows.append(row) if i >= n: break self.buffer = iter(rows) return rows def detect_colspecs(self, n=100): # Regex escape the delimiters delimiters = ''.join([r'\%s' % x for x in self.delimiter]) pattern = re.compile('([^%s]+)' % delimiters) rows = self.get_rows(n) max_len = max(map(len, rows)) mask = np.zeros(max_len + 1, dtype=int) if self.comment is not None: rows = [row.partition(self.comment)[0] for row in rows] for row in rows: for m in pattern.finditer(row): mask[m.start():m.end()] = 1 shifted = np.roll(mask, 1) shifted[0] = 0 edges = np.where((mask ^ shifted) == 1)[0] return list(zip(edges[::2], edges[1::2])) def next(self): if self.buffer is not None: try: line = 
next(self.buffer) except StopIteration: self.buffer = None line = next(self.f) else: line = next(self.f) # Note: 'colspecs' is a sequence of half-open intervals. return [line[fromm:to].strip(self.delimiter) for (fromm, to) in self.colspecs] # Iterator protocol in Python 3 uses __next__() __next__ = next class FixedWidthFieldParser(PythonParser): """ Specialization that Converts fixed-width fields into DataFrames. See PythonParser for details. """ def __init__(self, f, **kwds): # Support iterators, convert to a list. self.colspecs = kwds.pop('colspecs') PythonParser.__init__(self, f, **kwds) def _make_reader(self, f): self.data = FixedWidthReader(f, self.colspecs, self.delimiter, self.comment)
codeparrot/github-code-clean
# $HeadURL$ __RCSID__ = "$Id$" import datetime, time import types import threading import random from DIRAC.Core.Base.DB import DB from DIRAC import S_OK, S_ERROR, gConfig from DIRAC.FrameworkSystem.Client.MonitoringClient import gMonitor from DIRAC.Core.Utilities import List, ThreadSafe, Time, DEncode from DIRAC.AccountingSystem.private.TypeLoader import TypeLoader from DIRAC.Core.Utilities.ThreadPool import ThreadPool gSynchro = ThreadSafe.Synchronizer() class AccountingDB( DB ): def __init__( self, name = 'Accounting/AccountingDB', readOnly = False ): DB.__init__( self, 'AccountingDB', name ) self.maxBucketTime = 604800 #1 w self.autoCompact = False self.__readOnly = readOnly self.__doingCompaction = False self.__oldBucketMethod = False self.__doingPendingLockTime = 0 self.__deadLockRetries = 2 self.__queuedRecordsLock = ThreadSafe.Synchronizer() self.__queuedRecordsToInsert = [] self.dbCatalog = {} self.dbBucketsLength = {} self.__keysCache = {} maxParallelInsertions = self.getCSOption( "ParallelRecordInsertions", 10 ) self.__threadPool = ThreadPool( 1, maxParallelInsertions ) self.__threadPool.daemonize() self.catalogTableName = _getTableName( "catalog", "Types" ) self._createTables( { self.catalogTableName : { 'Fields' : { 'name' : "VARCHAR(64) UNIQUE NOT NULL", 'keyFields' : "VARCHAR(255) NOT NULL", 'valueFields' : "VARCHAR(255) NOT NULL", 'bucketsLength' : "VARCHAR(255) NOT NULL", }, 'PrimaryKey' : 'name' } } ) self.__loadCatalogFromDB() gMonitor.registerActivity( "registeradded", "Register added", "Accounting", "entries", gMonitor.OP_ACUM ) gMonitor.registerActivity( "insertiontime", "Record insertion time", "Accounting", "seconds", gMonitor.OP_MEAN ) gMonitor.registerActivity( "querytime", "Records query time", "Accounting", "seconds", gMonitor.OP_MEAN ) self.__compactTime = datetime.time( hour = 2, minute = random.randint( 0, 59 ), second = random.randint( 0, 59 ) ) lcd = Time.dateTime() lcd.replace( hour = self.__compactTime.hour + 1, minute = 0, second 
= 0 ) self.__lastCompactionEpoch = Time.toEpoch( lcd ) self.__registerTypes() def __loadTablesCreated( self ): result = self._query( "show tables" ) if not result[ 'OK' ]: return result return S_OK( [ f[0] for f in result[ 'Value' ] ] ) def autoCompactDB( self ): self.autoCompact = True th = threading.Thread( target = self.__periodicAutoCompactDB ) th.setDaemon( 1 ) th.start() def __periodicAutoCompactDB( self ): while self.autoCompact: nct = Time.dateTime() if nct.hour >= self.__compactTime.hour: nct = nct + datetime.timedelta( days = 1 ) nct = nct.replace( hour = self.__compactTime.hour, minute = self.__compactTime.minute, second = self.__compactTime.second ) self.log.info( "Next db compaction will be at %s" % nct ) sleepTime = Time.toEpoch( nct ) - Time.toEpoch() time.sleep( sleepTime ) self.compactBuckets() def __registerTypes( self ): """ Register all types """ retVal = gConfig.getSections( "/DIRAC/Setups" ) if not retVal[ 'OK' ]: return S_ERROR( "Can't get a list of setups: %s" % retVal[ 'Message' ] ) setupsList = retVal[ 'Value' ] objectsLoaded = TypeLoader().getTypes() #Load the files for pythonClassName in sorted( objectsLoaded ): typeClass = objectsLoaded[ pythonClassName ] for setup in setupsList: typeName = "%s_%s" % ( setup, pythonClassName ) typeDef = typeClass().getDefinition() #dbTypeName = "%s_%s" % ( setup, typeName ) definitionKeyFields, definitionAccountingFields, bucketsLength = typeDef[1:] #If already defined check the similarities if typeName in self.dbCatalog: bucketsLength.sort() if bucketsLength != self.dbBucketsLength[ typeName ]: bucketsLength = self.dbBucketsLength[ typeName ] self.log.warn( "Bucket length has changed for type %s" % typeName ) keyFields = [ f[0] for f in definitionKeyFields ] if keyFields != self.dbCatalog[ typeName ][ 'keys' ]: keyFields = self.dbCatalog[ typeName ][ 'keys' ] self.log.error( "Definition fields have changed", "Type %s" % typeName ) valueFields = [ f[0] for f in definitionAccountingFields ] if 
valueFields != self.dbCatalog[ typeName ][ 'values' ]: valueFields = self.dbCatalog[ typeName ][ 'values' ] self.log.error( "Accountable fields have changed", "Type %s" % typeName ) #Try to re register to check all the tables are there retVal = self.registerType( typeName, definitionKeyFields, definitionAccountingFields, bucketsLength ) if not retVal[ 'OK' ]: self.log.error( "Can't register type", "%s: %s" % ( typeName, retVal[ 'Message' ] ) ) #If it has been properly registered, update info elif retVal[ 'Value' ]: #Set the timespan self.dbCatalog[ typeName ][ 'dataTimespan' ] = typeClass().getDataTimespan() self.dbCatalog[ typeName ][ 'definition' ] = { 'keys' : definitionKeyFields, 'values' : definitionAccountingFields } return S_OK() def __loadCatalogFromDB( self ): retVal = self._query( "SELECT `name`, `keyFields`, `valueFields`, `bucketsLength` FROM `%s`" % self.catalogTableName ) if not retVal[ 'OK' ]: raise Exception( retVal[ 'Message' ] ) for typesEntry in retVal[ 'Value' ]: typeName = typesEntry[0] keyFields = List.fromChar( typesEntry[1], "," ) valueFields = List.fromChar( typesEntry[2], "," ) bucketsLength = DEncode.decode( typesEntry[3] )[0] self.__addToCatalog( typeName, keyFields, valueFields, bucketsLength ) def getWaitingRecordsLifeTime( self ): """ Get the time records can live in the IN tables without no retry """ return self.getCSOption( "RecordMaxWaitingTime", 86400 ) def markAllPendingRecordsAsNotTaken( self ): """ Mark all records to be processed as not taken NOTE: ONLY EXECUTE THIS AT THE BEGINNING OF THE DATASTORE SERVICE! 
""" self.log.always( "Marking all records to be processed as not taken" ) for typeName in self.dbCatalog: sqlTableName = _getTableName( "in", typeName ) result = self._update( "UPDATE `%s` SET taken=0" % sqlTableName ) if not result[ 'OK' ]: return result return S_OK() def loadPendingRecords( self ): """ Load all records pending to insertion and generate threaded jobs """ gSynchro.lock() try: now = time.time() if now - self.__doingPendingLockTime <= 3600: return S_OK() self.__doingPendingLockTime = now finally: gSynchro.unlock() self.log.info( "[PENDING] Loading pending records for insertion" ) pending = 0 now = Time.toEpoch() recordsPerSlot = self.getCSOption( "RecordsPerSlot", 100 ) for typeName in self.dbCatalog: self.log.info( "[PENDING] Checking %s" % typeName ) pendingInQueue = self.__threadPool.pendingJobs() emptySlots = max( 0, 3000 - pendingInQueue ) self.log.info( "[PENDING] %s in the queue, %d empty slots" % ( pendingInQueue, emptySlots ) ) if emptySlots < 1: continue emptySlots = min( 100, emptySlots ) sqlTableName = _getTableName( "in", typeName ) sqlFields = [ 'id' ] + self.dbCatalog[ typeName ][ 'typeFields' ] sqlCond = "WHERE taken = 0 or TIMESTAMPDIFF( SECOND, takenSince, UTC_TIMESTAMP() ) > %s" % self.getWaitingRecordsLifeTime() result = self._query( "SELECT %s FROM `%s` %s ORDER BY id ASC LIMIT %d" % ( ", ".join( [ "`%s`" % f for f in sqlFields ] ), sqlTableName, sqlCond, emptySlots * recordsPerSlot ) ) if not result[ 'OK' ]: self.log.error( "[PENDING] Error when trying to get pending records", "for %s : %s" % ( typeName, result[ 'Message' ] ) ) return result self.log.info( "[PENDING] Got %s pending records for type %s" % ( len( result[ 'Value' ] ), typeName ) ) dbData = result[ 'Value' ] idList = [ str( r[0] ) for r in dbData ] #If nothing to do, continue if not idList: continue result = self._update( "UPDATE `%s` SET taken=1, takenSince=UTC_TIMESTAMP() WHERE id in (%s)" % ( sqlTableName, ", ".join( idList ) ) ) if not result[ 'OK' ]: 
self.log.error( "[PENDING] Error when trying set state to waiting records", "for %s : %s" % ( typeName, result[ 'Message' ] ) ) self.__doingPendingLockTime = 0 return result #Group them in groups of 10 recordsToProcess = [] for record in dbData: pending += 1 iD = record[ 0 ] startTime = record[ -2 ] endTime = record[ -1 ] valuesList = list( record[ 1:-2 ] ) recordsToProcess.append( ( iD, typeName, startTime, endTime, valuesList, now ) ) if len( recordsToProcess ) % recordsPerSlot == 0: self.__threadPool.generateJobAndQueueIt( self.__insertFromINTable , args = ( recordsToProcess, ) ) recordsToProcess = [] if recordsToProcess: self.__threadPool.generateJobAndQueueIt( self.__insertFromINTable , args = ( recordsToProcess, ) ) self.log.info( "[PENDING] Got %s records requests for all types" % pending ) self.__doingPendingLockTime = 0 return S_OK() def __addToCatalog( self, typeName, keyFields, valueFields, bucketsLength ): """ Add type to catalog """ self.log.verbose( "Adding to catalog type %s" % typeName, "with length %s" % str( bucketsLength ) ) self.dbCatalog[ typeName ] = { 'keys' : keyFields , 'values' : valueFields, 'typeFields' : [], 'bucketFields' : [], 'dataTimespan' : 0 } self.dbCatalog[ typeName ][ 'typeFields' ].extend( keyFields ) self.dbCatalog[ typeName ][ 'typeFields' ].extend( valueFields ) self.dbCatalog[ typeName ][ 'bucketFields' ] = list( self.dbCatalog[ typeName ][ 'typeFields' ] ) self.dbCatalog[ typeName ][ 'typeFields' ].extend( [ 'startTime', 'endTime' ] ) self.dbCatalog[ typeName ][ 'bucketFields' ].extend( [ 'entriesInBucket', 'startTime', 'bucketLength' ] ) self.dbBucketsLength[ typeName ] = bucketsLength #ADRI: TEST COMPACT BUCKETS #self.dbBucketsLength[ typeName ] = [ ( 31104000, 3600 ) ] def changeBucketsLength( self, typeName, bucketsLength ): gSynchro.lock() try: if not typeName in self.dbCatalog: return S_ERROR( "%s is not a valid type name" % typeName ) bucketsLength.sort() bucketsEncoding = DEncode.encode( bucketsLength ) retVal = 
self._update( "UPDATE `%s` set bucketsLength = '%s' where name = '%s'" % ( self.catalogTableName, bucketsEncoding, typeName ) ) if not retVal[ 'OK' ]: return retVal self.dbBucketsLength[ typeName ] = bucketsLength finally: gSynchro.unlock() return self.regenerateBuckets( typeName ) @gSynchro def registerType( self, name, definitionKeyFields, definitionAccountingFields, bucketsLength ): """ Register a new type """ gMonitor.registerActivity( "registerwaiting:%s" % name, "Records waiting for insertion for %s" % " ".join( name.split( "_" ) ), "Accounting", "records", gMonitor.OP_MEAN ) gMonitor.registerActivity( "registeradded:%s" % name, "Register added for %s" % " ".join( name.split( "_" ) ), "Accounting", "entries", gMonitor.OP_ACUM ) result = self.__loadTablesCreated() if not result[ 'OK' ]: return result tablesInThere = result[ 'Value' ] keyFieldsList = [] valueFieldsList = [] for key in definitionKeyFields: keyFieldsList.append( key[0] ) for value in definitionAccountingFields: valueFieldsList.append( value[0] ) for field in definitionKeyFields: if field in valueFieldsList: return S_ERROR( "Key field %s is also in the list of value fields" % field ) for field in definitionAccountingFields: if field in keyFieldsList: return S_ERROR( "Value field %s is also in the list of key fields" % field ) for bucket in bucketsLength: if type( bucket ) != types.TupleType: return S_ERROR( "Length of buckets should be a list of tuples" ) if len( bucket ) != 2: return S_ERROR( "Length of buckets should have 2d tuples" ) updateDBCatalog = True if name in self.dbCatalog: updateDBCatalog = False tables = {} for key in definitionKeyFields: keyTableName = _getTableName( "key", name, key[0] ) if keyTableName not in tablesInThere: self.log.info( "Table for key %s has to be created" % key[0] ) tables[ keyTableName ] = { 'Fields' : { 'id' : 'INTEGER NOT NULL AUTO_INCREMENT', 'value' : '%s NOT NULL' % key[1] }, 'UniqueIndexes' : { 'valueindex' : [ 'value' ] }, 'PrimaryKey' : 'id' } 
#Registering type fieldsDict = {} bucketFieldsDict = {} inbufferDict = { 'id' : 'INTEGER NOT NULL AUTO_INCREMENT' } bucketIndexes = { 'startTimeIndex' : [ 'startTime' ], 'bucketLengthIndex' : [ 'bucketLength' ] } uniqueIndexFields = ['startTime'] for field in definitionKeyFields: bucketIndexes[ "%sIndex" % field[0] ] = [ field[0] ] uniqueIndexFields.append( field[ 0 ] ) fieldsDict[ field[0] ] = "INTEGER NOT NULL" bucketFieldsDict[ field[0] ] = "INTEGER NOT NULL" inbufferDict[ field[0] ] = field[1] + " NOT NULL" for field in definitionAccountingFields: fieldsDict[ field[0] ] = field[1] + " NOT NULL" bucketFieldsDict[ field[0] ] = "DECIMAL(30,10) NOT NULL" inbufferDict[ field[0] ] = field[1] + " NOT NULL" fieldsDict[ 'startTime' ] = "INT UNSIGNED NOT NULL" fieldsDict[ 'endTime' ] = "INT UNSIGNED NOT NULL" bucketFieldsDict[ 'entriesInBucket' ] = "DECIMAL(30,10) NOT NULL" bucketFieldsDict[ 'startTime' ] = "INT UNSIGNED NOT NULL" inbufferDict[ 'startTime' ] = "INT UNSIGNED NOT NULL" inbufferDict[ 'endTime' ] = "INT UNSIGNED NOT NULL" inbufferDict[ 'taken' ] = "TINYINT(1) DEFAULT 1 NOT NULL" inbufferDict[ 'takenSince' ] = "DATETIME NOT NULL" bucketFieldsDict[ 'bucketLength' ] = "MEDIUMINT UNSIGNED NOT NULL" uniqueIndexFields.append( 'bucketLength' ) bucketTableName = _getTableName( "bucket", name ) if bucketTableName not in tablesInThere: tables[ bucketTableName ] = { 'Fields' : bucketFieldsDict, 'UniqueIndexes' : { 'UniqueConstraint' : uniqueIndexFields } } typeTableName = _getTableName( "type", name ) if typeTableName not in tablesInThere: tables[ typeTableName ] = { 'Fields' : fieldsDict } inTableName = _getTableName( "in", name ) if inTableName not in tablesInThere: tables[ inTableName ] = { 'Fields' : inbufferDict, 'PrimaryKey' : 'id' } if self.__readOnly: if tables: self.log.notice( "ReadOnly mode: Skipping create of tables for %s. 
Removing from memory catalog" % name ) self.log.verbose( "Skipping creation of tables %s" % ", ".join( [ tn for tn in tables ] ) ) try: self.dbCatalog.pop( name ) except KeyError: pass else: self.log.notice( "ReadOnly mode: %s is OK" % name ) return S_OK( not updateDBCatalog ) if tables: retVal = self._createTables( tables ) if not retVal[ 'OK' ]: self.log.error( "Can't create type", "%s: %s" % ( name, retVal[ 'Message' ] ) ) return S_ERROR( "Can't create type %s: %s" % ( name, retVal[ 'Message' ] ) ) if updateDBCatalog: bucketsLength.sort() bucketsEncoding = DEncode.encode( bucketsLength ) self.insertFields( self.catalogTableName, [ 'name', 'keyFields', 'valueFields', 'bucketsLength' ], [ name, ",".join( keyFieldsList ), ",".join( valueFieldsList ), bucketsEncoding ] ) self.__addToCatalog( name, keyFieldsList, valueFieldsList, bucketsLength ) self.log.info( "Registered type %s" % name ) return S_OK( True ) def getRegisteredTypes( self ): """ Get list of registered types """ retVal = self._query( "SELECT `name`, `keyFields`, `valueFields`, `bucketsLength` FROM `%s`" % self.catalogTableName ) if not retVal[ 'OK' ]: return retVal typesList = [] for typeInfo in retVal[ 'Value' ]: typesList.append( [ typeInfo[0], List.fromChar( typeInfo[1] ), List.fromChar( typeInfo[2] ), DEncode.decode( typeInfo[3] ) ] ) return S_OK( typesList ) def getKeyValues( self, typeName, condDict, connObj = False ): """ Get all values for a given key field in a type """ keyValuesDict = {} keyTables = [] sqlCond = [] mainTable = "`%s`" % _getTableName( "bucket", typeName ) typeKeysList = self.dbCatalog[ typeName ][ 'keys' ] for keyName in condDict: if keyName in typeKeysList: keyTable = "`%s`" % _getTableName( "key", typeName, keyName ) if not keyTable in keyTables: keyTables.append( keyTable ) sqlCond.append( "%s.id = %s.`%s`" % ( keyTable, mainTable, keyName ) ) for value in condDict[ keyName ]: sqlCond.append( "%s.value = %s" % ( keyTable, self._escapeString( value )[ 'Value' ] ) ) for 
keyName in typeKeysList: keyTable = "`%s`" % _getTableName( "key", typeName, keyName ) allKeyTables = keyTables if not keyTable in allKeyTables: allKeyTables = list( keyTables ) allKeyTables.append( keyTable ) cmd = "SELECT DISTINCT %s.value FROM %s" % ( keyTable, ", ".join( allKeyTables ) ) if sqlCond: sqlValueLink = "%s.id = %s.`%s`" % ( keyTable, mainTable, keyName ) cmd += ", %s WHERE %s AND %s" % ( mainTable, sqlValueLink, " AND ".join( sqlCond ) ) retVal = self._query( cmd, conn = connObj ) if not retVal[ 'OK' ]: return retVal keyValuesDict[ keyName ] = [ r[0] for r in retVal[ 'Value' ] ] return S_OK( keyValuesDict ) @gSynchro def deleteType( self, typeName ): """ Deletes a type """ if self.__readOnly: return S_ERROR( "ReadOnly mode enabled. No modification allowed" ) if typeName not in self.dbCatalog: return S_ERROR( "Type %s does not exist" % typeName ) self.log.info( "Deleting type", typeName ) tablesToDelete = [] for keyField in self.dbCatalog[ typeName ][ 'keys' ]: tablesToDelete.append( "`%s`" % _getTableName( "key", typeName, keyField ) ) tablesToDelete.insert( 0, "`%s`" % _getTableName( "type", typeName ) ) tablesToDelete.insert( 0, "`%s`" % _getTableName( "bucket", typeName ) ) tablesToDelete.insert( 0, "`%s`" % _getTableName( "in", typeName ) ) retVal = self._query( "DROP TABLE %s" % ", ".join( tablesToDelete ) ) if not retVal[ 'OK' ]: return retVal retVal = self._update( "DELETE FROM `%s` WHERE name='%s'" % ( _getTableName( "catalog", "Types" ), typeName ) ) del( self.dbCatalog[ typeName ] ) return S_OK() def __getIdForKeyValue( self, typeName, keyName, keyValue, conn = False ): """ Finds id number for value in a key table """ retVal = self._escapeString( keyValue ) if not retVal[ 'OK' ]: return retVal keyValue = retVal[ 'Value' ] retVal = self._query( "SELECT `id` FROM `%s` WHERE `value`=%s" % ( _getTableName( "key", typeName, keyName ), keyValue ), conn = conn ) if not retVal[ 'OK' ]: return retVal if len( retVal[ 'Value' ] ) > 0: return S_OK( 
retVal[ 'Value' ][0][0] ) return S_ERROR( "Key id %s for value %s does not exist although it shoud" % ( keyName, keyValue ) ) def __addKeyValue( self, typeName, keyName, keyValue ): """ Adds a key value to a key table if not existant """ #Cast to string just in case if type( keyValue ) != types.StringType: keyValue = str( keyValue ) #No more than 64 chars for keys if len( keyValue ) > 64: keyValue = keyValue[:64] #Look into the cache if typeName not in self.__keysCache: self.__keysCache[ typeName ] = {} typeCache = self.__keysCache[ typeName ] if keyName not in typeCache: typeCache[ keyName ] = {} keyCache = typeCache[ keyName ] if keyValue in keyCache: return S_OK( keyCache[ keyValue ] ) #Retrieve key keyTable = _getTableName( "key", typeName, keyName ) retVal = self.__getIdForKeyValue( typeName, keyName, keyValue ) if retVal[ 'OK' ]: keyCache[ keyValue ] = retVal[ 'Value' ] return retVal #Key is not in there retVal = self._getConnection() if not retVal[ 'OK' ]: return retVal connection = retVal[ 'Value' ] self.log.info( "Value %s for key %s didn't exist, inserting" % ( keyValue, keyName ) ) retVal = self.insertFields( keyTable, [ 'id', 'value' ], [ 0, keyValue ], connection ) if not retVal[ 'OK' ] and retVal[ 'Message' ].find( "Duplicate key" ) == -1: return retVal result = self.__getIdForKeyValue( typeName, keyName, keyValue, connection ) if not result[ 'OK' ]: return result keyCache[ keyValue ] = result[ 'Value' ] return result def calculateBucketLengthForTime( self, typeName, now, when ): """ Get the expected bucket time for a moment in time """ for granuT in self.dbBucketsLength[ typeName ]: nowBucketed = now - now % granuT[1] dif = max( 0, nowBucketed - when ) if dif <= granuT[0]: return granuT[1] return self.maxBucketTime def calculateBuckets( self, typeName, startTime, endTime, nowEpoch = False ): """ Magic function for calculating buckets between two times and the proportional part for each bucket """ if not nowEpoch: nowEpoch = int( Time.toEpoch( 
Time.dateTime() ) ) bucketTimeLength = self.calculateBucketLengthForTime( typeName, nowEpoch, startTime ) currentBucketStart = startTime - startTime % bucketTimeLength if startTime == endTime: return [ ( currentBucketStart, 1, bucketTimeLength ) ] buckets = [] totalLength = endTime - startTime while currentBucketStart < endTime: start = max( currentBucketStart, startTime ) end = min( currentBucketStart + bucketTimeLength, endTime ) proportion = float( end - start ) / totalLength buckets.append( ( currentBucketStart, proportion, bucketTimeLength ) ) currentBucketStart += bucketTimeLength bucketTimeLength = self.calculateBucketLengthForTime( typeName, nowEpoch, currentBucketStart ) return buckets def __insertInQueueTable( self, typeName, startTime, endTime, valuesList ): sqlFields = [ 'id', 'taken', 'takenSince' ] + self.dbCatalog[ typeName ][ 'typeFields' ] sqlValues = [ '0', '0', 'UTC_TIMESTAMP()' ] + valuesList + [ startTime, endTime ] if len( sqlFields ) != len( sqlValues ): numRcv = len( valuesList ) + 2 numExp = len( self.dbCatalog[ typeName ][ 'typeFields' ] ) return S_ERROR( "Fields mismatch for record %s. %s fields and %s expected" % ( typeName, numRcv, numExp ) ) retVal = self.insertFields( _getTableName( "in", typeName ), sqlFields, sqlValues ) if not retVal[ 'OK' ]: return retVal return S_OK( retVal[ 'lastRowId' ] ) def insertRecordBundleThroughQueue( self, recordsToQueue ) : if self.__readOnly: return S_ERROR( "ReadOnly mode enabled. 
No modification allowed" ) recordsToProcess = [] now = Time.toEpoch() for record in recordsToQueue: typeName, startTime, endTime, valuesList = record result = self.__insertInQueueTable( typeName, startTime, endTime, valuesList ) if not result[ 'OK' ]: return result iD = result[ 'Value' ] recordsToProcess.append( ( iD, typeName, startTime, endTime, valuesList, now ) ) return S_OK() def insertRecordThroughQueue( self, typeName, startTime, endTime, valuesList ): """ Insert a record in the intable to be really insterted afterwards """ if self.__readOnly: return S_ERROR( "ReadOnly mode enabled. No modification allowed" ) self.log.info( "Adding record to queue", "for type %s\n [%s -> %s]" % ( typeName, Time.fromEpoch( startTime ), Time.fromEpoch( endTime ) ) ) if not typeName in self.dbCatalog: return S_ERROR( "Type %s has not been defined in the db" % typeName ) result = self.__insertInQueueTable( typeName, startTime, endTime, valuesList ) if not result[ '0K' ]: return result return S_OK() def __insertFromINTable( self, recordTuples ): """ Do the real insert and delete from the in buffer table """ self.log.verbose( "Received bundle to process", "of %s elements" % len( recordTuples ) ) for record in recordTuples: iD, typeName, startTime, endTime, valuesList, insertionEpoch = record result = self.insertRecordDirectly( typeName, startTime, endTime, valuesList ) if not result[ 'OK' ]: self._update( "UPDATE `%s` SET taken=0 WHERE id=%s" % ( _getTableName( "in", typeName ), iD ) ) self.log.error( "Can't insert row", result[ 'Message' ] ) continue result = self._update( "DELETE FROM `%s` WHERE id=%s" % ( _getTableName( "in", typeName ), iD ) ) if not result[ 'OK' ]: self.log.error( "Can't delete row from the IN table", result[ 'Message' ] ) gMonitor.addMark( "insertiontime", Time.toEpoch() - insertionEpoch ) def insertRecordDirectly( self, typeName, startTime, endTime, valuesList ): """ Add an entry to the type contents """ if self.__readOnly: return S_ERROR( "ReadOnly mode 
enabled. No modification allowed" ) gMonitor.addMark( "registeradded", 1 ) gMonitor.addMark( "registeradded:%s" % typeName, 1 ) self.log.info( "Adding record", "for type %s\n [%s -> %s]" % ( typeName, Time.fromEpoch( startTime ), Time.fromEpoch( endTime ) ) ) if not typeName in self.dbCatalog: return S_ERROR( "Type %s has not been defined in the db" % typeName ) #Discover key indexes for keyPos in range( len( self.dbCatalog[ typeName ][ 'keys' ] ) ): keyName = self.dbCatalog[ typeName ][ 'keys' ][ keyPos ] keyValue = valuesList[ keyPos ] retVal = self.__addKeyValue( typeName, keyName, keyValue ) if not retVal[ 'OK' ]: return retVal self.log.verbose( "Value %s for key %s has id %s" % ( keyValue, keyName, retVal[ 'Value' ] ) ) valuesList[ keyPos ] = retVal[ 'Value' ] insertList = list( valuesList ) insertList.append( startTime ) insertList.append( endTime ) retVal = self._getConnection() if not retVal[ 'OK' ]: return retVal connObj = retVal[ 'Value' ] try: retVal = self.insertFields( _getTableName( "type", typeName ), self.dbCatalog[ typeName ][ 'typeFields' ], insertList, conn = connObj ) if not retVal[ 'OK' ]: return retVal #HACK: One more record to split in the buckets to be able to count total entries valuesList.append( 1 ) retVal = self.__startTransaction( connObj ) if not retVal[ 'OK' ]: return retVal retVal = self.__splitInBuckets( typeName, startTime, endTime, valuesList, connObj = connObj ) if not retVal[ 'OK' ]: self.__rollbackTransaction( connObj ) return retVal return self.__commitTransaction( connObj ) finally: connObj.close() def deleteRecord( self, typeName, startTime, endTime, valuesList ): """ Add an entry to the type contents """ if self.__readOnly: return S_ERROR( "ReadOnly mode enabled. 
No modification allowed" )
    self.log.info( "Deleting record record",
                   "for type %s\n [%s -> %s]" % ( typeName, Time.fromEpoch( startTime ), Time.fromEpoch( endTime ) ) )
    if not typeName in self.dbCatalog:
      return S_ERROR( "Type %s has not been defined in the db" % typeName )
    sqlValues = []
    sqlValues.extend( valuesList )
    #Discover key indexes: replace each raw key value by its numeric id in the key table
    for keyPos in range( len( self.dbCatalog[ typeName ][ 'keys' ] ) ):
      keyName = self.dbCatalog[ typeName ][ 'keys' ][ keyPos ]
      keyValue = sqlValues[ keyPos ]
      retVal = self.__addKeyValue( typeName, keyName, keyValue )
      if not retVal[ 'OK' ]:
        return retVal
      self.log.verbose( "Value %s for key %s has id %s" % ( keyValue, keyName, retVal[ 'Value' ] ) )
      sqlValues[ keyPos ] = retVal[ 'Value' ]
    sqlCond = []
    mainTable = _getTableName( "type", typeName )
    sqlValues.extend( [ startTime, endTime ] )
    numKeyFields = len( self.dbCatalog[ typeName ][ 'keys' ] )
    numValueFields = len( self.dbCatalog[ typeName ][ 'values' ] )
    #Build the WHERE condition matching every field of the record
    for i in range( len( sqlValues ) ):
      needToRound = False
      #FLOAT value fields cannot be compared with '=' reliably
      if i >= numKeyFields and i - numKeyFields < numValueFields:
        vIndex = i - numKeyFields
        if self.dbCatalog[ typeName ][ 'definition' ][ 'values' ][vIndex][1].find( "FLOAT" ) > -1:
          needToRound = True
      if needToRound:
        #NOTE(review): compVal is computed here but never appended to sqlCond, so FLOAT
        #fields are silently dropped from the DELETE condition — looks like a latent bug; confirm intent
        compVal = [ "`%s`.`%s`" % ( mainTable, self.dbCatalog[ typeName ][ 'typeFields' ][i] ),
                    "%f" % sqlValues[i] ]
        compVal = [ "CEIL( %s * 1000 )" % v for v in compVal ]
        compVal = "ABS( %s ) <= 1 " % " - ".join( compVal )
      else:
        sqlCond.append( "`%s`.`%s`=%s" % ( mainTable, self.dbCatalog[ typeName ][ 'typeFields' ][i], sqlValues[i] ) )
    retVal = self._getConnection()
    if not retVal[ 'OK' ]:
      return retVal
    connObj = retVal[ 'Value' ]
    #Delete raw record and de-bucketize inside a single transaction
    retVal = self.__startTransaction( connObj )
    if not retVal[ 'OK' ]:
      return retVal
    retVal = self._update( "DELETE FROM `%s` WHERE %s" % ( mainTable, " AND ".join( sqlCond ) ), conn = connObj )
    if not retVal[ 'OK' ]:
      return retVal
    numInsertions = retVal[ 'Value' ]
    #Deleted from type, now the buckets
    #HACK: One more record to split in the buckets to be able to count total entries
    if numInsertions == 0:
      return S_OK( 0 )
    sqlValues.append( 1 )
    retVal = self.__deleteFromBuckets( typeName, startTime, endTime, sqlValues, numInsertions, connObj = connObj )
    if not retVal[ 'OK' ]:
      self.__rollbackTransaction( connObj )
      return retVal
    retVal = self.__commitTransaction( connObj )
    if not retVal[ 'OK' ]:
      self.__rollbackTransaction( connObj )
      return retVal
    return S_OK( numInsertions )

  def __splitInBuckets( self, typeName, startTime, endTime, valuesList, connObj = False ):
    """
    Bucketize a record: spread one raw record proportionally over the time
    buckets that cover [startTime, endTime] and write them to the bucket table.
    valuesList holds key values first, then value fields (order per dbCatalog).
    """
    #Calculate amount of buckets
    buckets = self.calculateBuckets( typeName, startTime, endTime )
    #Separate key values from normal values
    numKeys = len( self.dbCatalog[ typeName ][ 'keys' ] )
    keyValues = valuesList[ :numKeys ]
    valuesList = valuesList[ numKeys: ]
    self.log.verbose( "Splitting entry", " in %s buckets" % len( buckets ) )
    return self.__writeBuckets( typeName, buckets, keyValues, valuesList, connObj = connObj )

  def __deleteFromBuckets( self, typeName, startTime, endTime, valuesList, numInsertions, connObj = False ):
    """
    DeBucketize a record: subtract the record's proportional contribution from
    every bucket it was spread over. Retries each bucket update on deadlock.
    """
    #Calculate amount of buckets
    buckets = self.calculateBuckets( typeName, startTime, endTime, self.__lastCompactionEpoch )
    #Separate key values from normal values
    numKeys = len( self.dbCatalog[ typeName ][ 'keys' ] )
    keyValues = valuesList[ :numKeys ]
    valuesList = valuesList[ numKeys: ]
    self.log.verbose( "Deleting bucketed entry", "from %s buckets" % len( buckets ) )
    for bucketInfo in buckets:
      bucketStartTime = bucketInfo[0]
      bucketProportion = bucketInfo[1]
      bucketLength = bucketInfo[2]
      for _i in range( max( 1, self.__deadLockRetries ) ):
        retVal = self.__extractFromBucket( typeName, bucketStartTime, bucketLength,
                                           keyValues, valuesList, bucketProportion * numInsertions, connObj = connObj )
        if not retVal[ 'OK' ]:
          #If failed because of dead lock try restarting
          #NOTE(review): str.find() returns -1 (truthy) when not found, so this retries
          #on ANY error message without "try restarting transaction" at position 0 — verify
          if retVal[ 'Message' ].find( "try restarting transaction" ):
            continue
          return retVal
        #If OK, break loop
        if retVal[ 'OK' ]:
          break
    return S_OK()

  def getBucketsDef( self, typeName ):
    #Returns the (timespan, bucketLength) list configured for the type
    return self.dbBucketsLength[ typeName ]

  def __generateSQLConditionForKeys( self, typeName, keyValues ):
    """
    Generate sql condition for buckets, values are indexes to real values.
    Returns an " AND "-joined string of equality conditions on the bucket table.
    """
    realCondList = []
    for keyPos in range( len( self.dbCatalog[ typeName ][ 'keys' ] ) ):
      keyField = self.dbCatalog[ typeName ][ 'keys' ][ keyPos ]
      keyValue = keyValues[ keyPos ]
      retVal = self._escapeString( keyValue )
      if not retVal[ 'OK' ]:
        #NOTE(review): returns an S_ERROR dict where callers expect a string — confirm upstream handling
        return retVal
      keyValue = retVal[ 'Value' ]
      realCondList.append( "`%s`.`%s` = %s" % ( _getTableName( "bucket", typeName ), keyField, keyValue ) )
    return " AND ".join( realCondList )

  def __getBucketFromDB( self, typeName, startTime, bucketLength, keyValues, connObj = False ):
    """
    Get a bucket from the DB: selects value fields plus entriesInBucket for the
    bucket identified by (startTime, bucketLength, keyValues).
    """
    tableName = _getTableName( "bucket", typeName )
    sqlFields = []
    for valueField in self.dbCatalog[ typeName ][ 'values' ]:
      sqlFields.append( "`%s`.`%s`" % ( tableName, valueField ) )
    sqlFields.append( "`%s`.`entriesInBucket`" % ( tableName ) )
    cmd = "SELECT %s FROM `%s`" % ( ", ".join( sqlFields ), _getTableName( "bucket", typeName ) )
    cmd += " WHERE `%s`.`startTime`='%s' AND `%s`.`bucketLength`='%s' AND " % ( tableName, startTime, tableName, bucketLength )
    cmd += self.__generateSQLConditionForKeys( typeName, keyValues )
    return self._query( cmd, conn = connObj )

  def __extractFromBucket( self, typeName, startTime, bucketLength, keyValues, bucketValues, proportion, connObj = False ):
    """
    Update a bucket when coming from the raw insert: subtracts
    value*proportion from each field, clamped at 0 with GREATEST().
    """
    tableName = _getTableName( "bucket", typeName )
    cmd = "UPDATE `%s` SET " % tableName
    sqlValList = []
    for pos in range( len( self.dbCatalog[ typeName ][ 'values' ] ) ):
      valueField = self.dbCatalog[ typeName ][ 'values' ][ pos ]
      value = bucketValues[ pos ]
      fullFieldName = "`%s`.`%s`" % ( tableName, valueField )
      sqlValList.append( "%s=GREATEST(0,%s-(%s*%s))" % ( fullFieldName, fullFieldName, value, proportion ) )
    #entriesInBucket counter is decremented by the same proportion (last element of bucketValues)
    sqlValList.append( "`%s`.`entriesInBucket`=GREATEST(0,`%s`.`entriesInBucket`-(%s*%s))" % ( tableName, tableName, bucketValues[-1], proportion ) )
    cmd += ", ".join( sqlValList )
    cmd += " WHERE `%s`.`startTime`='%s' AND `%s`.`bucketLength`='%s' AND " % ( tableName, startTime, tableName, bucketLength )
    cmd += self.__generateSQLConditionForKeys( typeName, keyValues )
    return self._update( cmd, conn = connObj )

  def __writeBuckets( self, typeName, buckets, keyValues, valuesList, connObj = False ):
    """
    Insert or update a bucket. Uses a single multi-row
    INSERT ... ON DUPLICATE KEY UPDATE so existing buckets accumulate values.
    Retries on deadlock up to __deadLockRetries times.
    """
#    tableName = _getTableName( "bucket", typeName )
    #INSERT PART OF THE QUERY
    sqlFields = [ '`startTime`', '`bucketLength`', '`entriesInBucket`' ]
    for keyPos in range( len( self.dbCatalog[ typeName ][ 'keys' ] ) ):
      sqlFields.append( "`%s`" % self.dbCatalog[ typeName ][ 'keys' ][ keyPos ] )
    sqlUpData = [ "`entriesInBucket`=`entriesInBucket`+VALUES(`entriesInBucket`)" ]
    for valPos in range( len( self.dbCatalog[ typeName ][ 'values' ] ) ):
      valueField = "`%s`" % self.dbCatalog[ typeName ][ 'values' ][ valPos ]
      sqlFields.append( valueField )
      sqlUpData.append( "%s=%s+VALUES(%s)" % ( valueField, valueField, valueField ) )
    valuesGroups = []
    for bucketInfo in buckets:
      bStartTime = bucketInfo[0]
      bProportion = bucketInfo[1]
      bLength = bucketInfo[2]
      #valuesList[-1] is the entries counter appended by the caller
      sqlValues = [ bStartTime, bLength, "(%s*%s)" % ( valuesList[-1], bProportion )]
      for keyPos in range( len( self.dbCatalog[ typeName ][ 'keys' ] ) ):
        sqlValues.append( keyValues[ keyPos ] )
      for valPos in range( len( self.dbCatalog[ typeName ][ 'values' ] ) ):
#        value = valuesList[ valPos ]
        sqlValues.append( "(%s*%s)" % ( valuesList[ valPos ], bProportion ) )
      valuesGroups.append( "( %s )" % ",".join( str( val ) for val in sqlValues ) )
    cmd = "INSERT INTO `%s` ( %s ) " % ( _getTableName( "bucket", typeName ), ", ".join( sqlFields ) )
    cmd += "VALUES %s " % ", ".join( valuesGroups)
    cmd += "ON DUPLICATE KEY UPDATE %s" % ", ".join( sqlUpData )
    for _i in range( max( 1, self.__deadLockRetries ) ):
      result = self._update( cmd, conn = connObj )
      if not result[ 'OK' ]:
        #If failed because of dead lock try restarting
        #NOTE(review): same str.find() truthiness issue as in __deleteFromBuckets — verify
        if result[ 'Message' ].find( "try restarting transaction" ):
          continue
        return result
      #If OK, break loop
      if result[ 'OK' ]:
        return result
    return S_ERROR( "Cannot update bucket: %s" % result[ 'Message' ] )

  def __checkFieldsExistsInType( self, typeName, fields, tableType ):
    """
    Check wether a list of fields exist for a given typeName.
    Returns the list of missing field names (empty when all exist).
    """
    missing = []
    tableFields = self.dbCatalog[ typeName ][ '%sFields' % tableType ]
    for key in fields:
      if key not in tableFields:
        missing.append( key )
    return missing

  def __checkIncomingFieldsForQuery( self, typeName, selectFields, condDict, groupFields, orderFields, tableType ):
    #Validate every field referenced by a query against the type's catalog
    missing = self.__checkFieldsExistsInType( typeName, selectFields[1], tableType )
    if missing:
      return S_ERROR( "Value keys %s are not defined" % ", ".join( missing ) )
    missing = self.__checkFieldsExistsInType( typeName, condDict, tableType )
    if missing:
      return S_ERROR( "Condition keys %s are not defined" % ", ".join( missing ) )
    if groupFields:
      missing = self.__checkFieldsExistsInType( typeName, groupFields[1], tableType )
      if missing:
        return S_ERROR( "Group fields %s are not defined" % ", ".join( missing ) )
    if orderFields:
      missing = self.__checkFieldsExistsInType( typeName, orderFields[1], tableType )
      if missing:
        return S_ERROR( "Order fields %s are not defined" % ", ".join( missing ) )
    return S_OK()

  def retrieveRawRecords( self, typeName, startTime, endTime, condDict, orderFields, connObj = False ):
    """
    Get RAW data from the DB: all key and value fields plus startTime/endTime,
    unaggregated, from the raw "type" table.
    """
    if typeName not in self.dbCatalog:
      return S_ERROR( "Type %s not defined" % typeName )
    selectFields = [ [ "%s", "%s" ], [ "startTime", "endTime" ] ]
    for tK in ( 'keys', 'values' ):
      for key in self.dbCatalog[ typeName ][ tK ]:
        selectFields[ 0 ].append( "%s" )
        selectFields[ 1 ].append( key )
    selectFields[ 0 ] = ", ".join( selectFields[ 0 ] )
    return self.__queryType( typeName, startTime, endTime, selectFields, condDict, False, orderFields, "type" )

  def retrieveBucketedData( self, typeName, startTime, endTime, selectFields, condDict, groupFields, orderFields, connObj = False ):
    """
    Get data from the DB
    Parameters:
     - typeName -> typeName
     - startTime & endTime -> int epoch objects. Do I need to explain the meaning?
     - selectFields -> tuple containing a string and a list of fields:
                       ( "SUM(%s), %s/%s", ( "field1name", "field2name", "field3name" ) )
     - condDict -> conditions for the query
                   key -> name of the field
                   value -> list of possible values
     - groupFields -> list of fields to group by
                      ( "%s, %s, %s", ( "field1name", "field2name", "field3name" ) )
     - orderFields -> list of fields to order by
                      ( "%s, %s, %s", ( "field1name", "field2name", "field3name" ) )
    """
    if typeName not in self.dbCatalog:
      return S_ERROR( "Type %s is not defined" % typeName )
    startQueryEpoch = time.time()
    if len( selectFields ) < 2:
      return S_ERROR( "selectFields has to be a list containing a string and a list of fields" )
    retVal = self.__checkIncomingFieldsForQuery( typeName, selectFields, condDict, groupFields, orderFields, "bucket" )
    if not retVal[ 'OK' ]:
      return retVal
    #Align startTime to the bucket boundary so partial buckets are not missed
    nowEpoch = Time.toEpoch( Time.dateTime () )
    bucketTimeLength = self.calculateBucketLengthForTime( typeName, nowEpoch , startTime )
    startTime = startTime - startTime % bucketTimeLength
    result = self.__queryType( typeName, startTime, endTime, selectFields,
                               condDict, groupFields, orderFields, "bucket", connObj = connObj )
    gMonitor.addMark( "querytime", Time.toEpoch() - startQueryEpoch )
    return result

  def __queryType( self, typeName, startTime, endTime, selectFields, condDict, groupFields, orderFields, tableType, connObj = False ):
    """
    Execute a query over a main table ("type" for raw records or "bucket" for
    bucketed data). Builds the SELECT, joins to the key tables as needed, and
    applies time, condition, grouping and ordering clauses.
    """
    tableName = _getTableName( tableType, typeName )
    cmd = "SELECT"
    sqlLinkList = []
    #Check if groupFields and orderFields are in ( "%s", ( field1, ) ) form
    if groupFields:
      try:
        groupFields[0] % tuple( groupFields[1] )
      except Exception, e:
        return S_ERROR( "Cannot format properly group string: %s" % str( e ) )
    if orderFields:
      try:
        orderFields[0] % tuple( orderFields[1] )
      except Exception, e:
        return S_ERROR( "Cannot format properly order string: %s" % str( e ) )
    #Calculate fields to retrieve: key fields are resolved through their key tables
    realFieldList = []
    for rawFieldName in selectFields[1]:
      keyTable = _getTableName( "key", typeName, rawFieldName )
      if rawFieldName in self.dbCatalog[ typeName ][ 'keys' ]:
        realFieldList.append( "`%s`.`value`" % keyTable )
        List.appendUnique( sqlLinkList, "`%s`.`%s` = `%s`.`id`" % ( tableName, rawFieldName, keyTable ) )
      else:
        realFieldList.append( "`%s`.`%s`" % ( tableName, rawFieldName ) )
    try:
      cmd += " %s" % selectFields[0] % tuple( realFieldList )
    except Exception, e:
      return S_ERROR( "Error generating select fields string: %s" % str( e ) )
    #Calculate tables needed: join a key table only when the key is actually referenced
    sqlFromList = [ "`%s`" % tableName ]
    for key in self.dbCatalog[ typeName ][ 'keys' ]:
      if key in condDict or key in selectFields[1] \
          or ( groupFields and key in groupFields[1] ) \
          or ( orderFields and key in orderFields[1] ):
        sqlFromList.append( "`%s`" % _getTableName( "key", typeName, key ) )
    cmd += " FROM %s" % ", ".join( sqlFromList )
    #Calculate time conditions
    sqlTimeCond = []
    if startTime:
      if tableType == 'bucket':
        #HACK because MySQL and UNIX do not start epoch at the same time
        startTime = startTime + 3600
        startTime = self.calculateBuckets( typeName, startTime, startTime )[0][0]
      sqlTimeCond.append( "`%s`.`startTime` >= %s" % ( tableName, startTime ) )
    if endTime:
      if tableType == "bucket":
        #Buckets only carry startTime; align the end limit to a bucket boundary
        endTimeSQLVar = "startTime"
        endTime = endTime + 3600
        endTime = self.calculateBuckets( typeName, endTime, endTime )[0][0]
      else:
        endTimeSQLVar = "endTime"
      sqlTimeCond.append( "`%s`.`%s` <= %s" % ( tableName, endTimeSQLVar, endTime ) )
    cmd += " WHERE %s" % " AND ".join( sqlTimeCond )
    #Calculate conditions: each key gets an OR-group over its allowed values
    sqlCondList = []
    for keyName in condDict:
      sqlORList = []
      if keyName in self.dbCatalog[ typeName ][ 'keys' ]:
        List.appendUnique( sqlLinkList, "`%s`.`%s` = `%s`.`id`" % ( tableName, keyName, _getTableName( "key", typeName, keyName ) ) )
      if type( condDict[ keyName ] ) not in ( types.ListType, types.TupleType ):
        condDict[ keyName ] = [ condDict[ keyName ] ]
      for keyValue in condDict[ keyName ]:
        retVal = self._escapeString( keyValue )
        if not retVal[ 'OK' ]:
          return retVal
        keyValue = retVal[ 'Value' ]
        if keyName in self.dbCatalog[ typeName ][ 'keys' ]:
          sqlORList.append( "`%s`.`value` = %s" % ( _getTableName( "key", typeName, keyName ), keyValue ) )
        else:
          sqlORList.append( "`%s`.`%s` = %s" % ( tableName, keyName, keyValue ) )
      sqlCondList.append( "( %s )" % " OR ".join( sqlORList ) )
    if sqlCondList:
      cmd += " AND %s" % " AND ".join( sqlCondList )
    #Calculate grouping and sorting; NOTE(review): this mutates the caller's
    #groupFields/orderFields lists in place — confirm callers do not reuse them
    for preGenFields in ( groupFields, orderFields ):
      if preGenFields:
        for i in range( len( preGenFields[1] ) ):
          field = preGenFields[1][i]
          if field in self.dbCatalog[ typeName ][ 'keys' ]:
            List.appendUnique( sqlLinkList, "`%s`.`%s` = `%s`.`id`" % ( tableName, field, _getTableName( "key", typeName, field ) ) )
            if preGenFields[0] != "%s":
              # The default grouping was changed
              preGenFields[1][i] = "`%s`.Value" % _getTableName( "key", typeName, field )
            else:
              # The default grouping is maintained
              preGenFields[1][i] = "`%s`.`%s`" % ( tableName, field )
    if sqlLinkList:
      cmd += " AND %s" % " AND ".join( sqlLinkList )
    if groupFields:
      cmd += " GROUP BY %s" % ( groupFields[0] % tuple( groupFields[1] ) )
    if orderFields:
      cmd += " ORDER BY %s" % ( orderFields[0] % tuple( orderFields[1] ) )
    self.log.verbose( cmd )
    return self._query( cmd, conn = connObj )

  def compactBuckets( self, typeFilter = False ):
    """
    Compact buckets for all defined types. Guarded by gSynchro so only one
    compaction runs at a time; optionally restricted by a typeName substring.
    """
    if self.__readOnly:
      return S_ERROR( "ReadOnly mode enabled. No modification allowed" )
    gSynchro.lock()
    try:
      if self.__doingCompaction:
        return S_OK()
      self.__doingCompaction = True
    finally:
      gSynchro.unlock()
    #Slow (paged) compaction is hardcoded on
    slow = True
    for typeName in self.dbCatalog:
      if typeFilter and typeName.find( typeFilter ) == -1:
        self.log.info( "[COMPACT] Skipping %s" % typeName )
        continue
      if self.dbCatalog[ typeName ][ 'dataTimespan' ] > 0:
        self.log.info( "[COMPACT] Deleting records older that timespan for type %s" % typeName )
        self.__deleteRecordsOlderThanDataTimespan( typeName )
      self.log.info( "[COMPACT] Compacting %s" % typeName )
      if slow:
        self.__slowCompactBucketsForType( typeName )
      else:
        self.__compactBucketsForType( typeName )
    self.log.info( "[COMPACT] Compaction finished" )
    self.__lastCompactionEpoch = int( Time.toEpoch() )
    gSynchro.lock()
    try:
      if self.__doingCompaction:
        self.__doingCompaction = False
    finally:
      gSynchro.unlock()
    return S_OK()

  def __selectForCompactBuckets( self, typeName, timeLimit, bucketLength, nextBucketLength, connObj = False ):
    """
    Nasty SQL query to get ideal buckets using grouping by date
    calculations and adding value contents. Groups old small buckets into the
    time slots of the next (larger) bucket length.
    """
    tableName = _getTableName( "bucket", typeName )
    selectSQL = "SELECT "
    sqlSelectList = []
    for field in self.dbCatalog[ typeName ][ 'keys' ]:
      sqlSelectList.append( "`%s`.`%s`" % ( tableName, field ) )
    for field in self.dbCatalog[ typeName ][ 'values' ]:
      sqlSelectList.append( "SUM( `%s`.`%s` )" % ( tableName, field ) )
    sqlSelectList.append( "SUM( `%s`.`entriesInBucket` )" % ( tableName ) )
    sqlSelectList.append( "MIN( `%s`.`startTime` )" % tableName )
    sqlSelectList.append( "MAX( `%s`.`startTime` )" % tableName )
    selectSQL += ", ".join( sqlSelectList )
    selectSQL += " FROM `%s`" % tableName
    selectSQL += " WHERE `%s`.`startTime` < '%s' AND" % ( tableName, timeLimit )
    selectSQL += " `%s`.`bucketLength` = %s" % ( tableName, bucketLength )
    #MAGIC bucketing
    sqlGroupList = [ _bucketizeDataField( "`%s`.`startTime`" % tableName, nextBucketLength ) ]
    for field in self.dbCatalog[ typeName ][ 'keys' ]:
      sqlGroupList.append( "`%s`.`%s`" % ( tableName, field ) )
    selectSQL += " GROUP BY %s" % ", ".join( sqlGroupList )
    return self._query( selectSQL, conn = connObj )

  def __deleteForCompactBuckets( self, typeName, timeLimit, bucketLength, connObj = False ):
    """
    Delete compacted buckets: removes all buckets of a given length older than timeLimit.
    """
    tableName = _getTableName( "bucket", typeName )
    deleteSQL = "DELETE FROM `%s` WHERE " % tableName
    deleteSQL += "`%s`.`startTime` < '%s' AND " % ( tableName, timeLimit )
    deleteSQL += "`%s`.`bucketLength` = %s" % ( tableName, bucketLength )
    return self._update( deleteSQL, conn = connObj )

  def __compactBucketsForType( self, typeName ):
    """
    Compact all buckets for a given type: for each bucket-length tier, select
    aggregated data, delete the old fine-grained buckets and re-split the
    aggregates into coarser buckets.
    """
    nowEpoch = Time.toEpoch()
    #retVal = self.__startTransaction( connObj )
    #if not retVal[ 'OK' ]:
    #  return retVal
    for bPos in range( len( self.dbBucketsLength[ typeName ] ) - 1 ):
      self.log.info( "[COMPACT] Query %d of %d" % ( bPos + 1, len( self.dbBucketsLength[ typeName ] ) - 1 ) )
      secondsLimit = self.dbBucketsLength[ typeName ][ bPos ][0]
      bucketLength = self.dbBucketsLength[ typeName ][ bPos ][1]
      timeLimit = ( nowEpoch - nowEpoch % bucketLength ) - secondsLimit
      nextBucketLength = self.dbBucketsLength[ typeName ][ bPos + 1 ][1]
      self.log.info( "[COMPACT] Compacting data newer that %s with bucket size %s" % ( Time.fromEpoch( timeLimit ), bucketLength ) )
      #Retrieve the data
      retVal = self.__selectForCompactBuckets( typeName, timeLimit, bucketLength, nextBucketLength )
      if not retVal[ 'OK' ]:
        #self.__rollbackTransaction( connObj )
        return retVal
      bucketsData = retVal[ 'Value' ]
      self.log.info( "[COMPACT] Got %d records to compact" % len( bucketsData ) )
      if len( bucketsData ) == 0:
        continue
      #NOTE(review): delete happens before re-insert with no transaction, so a crash
      #between the two loses the compacted data — confirm this is acceptable
      retVal = self.__deleteForCompactBuckets( typeName, timeLimit, bucketLength )
      if not retVal[ 'OK' ]:
        #self.__rollbackTransaction( connObj )
        return retVal
      self.log.info( "[COMPACT] Compacting %s records %s seconds size for %s" % ( len( bucketsData ), bucketLength, typeName ) )
      #Add data
      for record in bucketsData:
        startTime = record[-2]
        endTime = record[-1]
        valuesList = record[:-2]
        retVal = self.__splitInBuckets( typeName, startTime, endTime, valuesList )
        if not retVal[ 'OK' ]:
          #self.__rollbackTransaction( connObj )
          self.log.error( "[COMPACT] Error while compacting data for record", "%s: %s" % ( typeName, retVal[ 'Value' ] ) )
      self.log.info( "[COMPACT] Finished compaction %d of %d" % ( bPos, len( self.dbBucketsLength[ typeName ] ) - 1 ) )
    #return self.__commitTransaction( connObj )
    return S_OK()

  def __slowCompactBucketsForType( self, typeName ):
    """
    Compact all buckets for a given type, paging through individual buckets in
    chunks of querySize to avoid long table locks.
    """
    nowEpoch = Time.toEpoch()
    for bPos in range( len( self.dbBucketsLength[ typeName ] ) - 1 ):
      self.log.info( "[COMPACT] Query %d of %d" % ( bPos, len( self.dbBucketsLength[ typeName ] ) - 1 ) )
      secondsLimit = self.dbBucketsLength[ typeName ][ bPos ][0]
      bucketLength = self.dbBucketsLength[ typeName ][ bPos ][1]
      timeLimit = ( nowEpoch - nowEpoch % bucketLength ) - secondsLimit
      nextBucketLength = self.dbBucketsLength[ typeName ][ bPos + 1 ][1]
      self.log.info( "[COMPACT] Compacting data newer that %s with bucket size %s for %s" % ( Time.fromEpoch( timeLimit ), bucketLength, typeName ) )
      querySize = 10000
      previousRecordsSelected = querySize
      totalCompacted = 0
      #Keep paging while a full page was returned
      while previousRecordsSelected == querySize:
        #Retrieve the data
        self.log.info( "[COMPACT] Retrieving buckets to compact newer that %s with size %s" % ( Time.fromEpoch( timeLimit ), bucketLength ) )
        roundStartTime = time.time()
        result = self.__selectIndividualForCompactBuckets( typeName, timeLimit, bucketLength,
                                                           nextBucketLength, querySize )
        if not result[ 'OK' ]:
          #self.__rollbackTransaction( connObj )
          return result
        bucketsData = result[ 'Value' ]
        previousRecordsSelected = len( bucketsData )
        selectEndTime = time.time()
        self.log.info( "[COMPACT] Got %d buckets (%d done) (took %.2f secs)" % ( previousRecordsSelected, totalCompacted, selectEndTime - roundStartTime ) )
        if len( bucketsData ) == 0:
          break
        result = self.__deleteIndividualForCompactBuckets( typeName, bucketsData )
        if not result[ 'OK' ]:
          #self.__rollbackTransaction( connObj )
          return result
        #Only re-split the buckets that were actually deleted
        bucketsData = result[ 'Value' ]
        deleteEndTime = time.time()
        self.log.info( "[COMPACT] Deleted %s out-of-bounds buckets (took %.2f secs)" % ( len( bucketsData ), deleteEndTime - selectEndTime ) )
        #Add data
        for record in bucketsData:
          #record layout: keys..., values..., entriesInBucket, startTime, bucketLength
          startTime = record[-2]
          endTime = record[-2] + record[-1]
          valuesList = record[:-2]
          retVal = self.__splitInBuckets( typeName, startTime, endTime, valuesList )
          if not retVal[ 'OK' ]:
            self.log.error( "[COMPACT] Error while compacting data for buckets", "%s: %s" % ( typeName, retVal[ 'Value' ] ) )
        totalCompacted += len( bucketsData )
        insertElapsedTime = time.time() - deleteEndTime
        self.log.info( "[COMPACT] Records compacted (took %.2f secs, %.2f secs/bucket)" % ( insertElapsedTime, insertElapsedTime / len( bucketsData ) ) )
      self.log.info( "[COMPACT] Finised compaction %d of %d" % ( bPos, len( self.dbBucketsLength[ typeName ] ) - 1 ) )
    #return self.__commitTransaction( connObj )
    return S_OK()

  def __selectIndividualForCompactBuckets( self, typeName, timeLimit, bucketLength, nextBucketLength, querySize, connObj = False ):
    """
    Nasty SQL query to get ideal buckets using grouping by date
    calculations and adding value contents. Unlike __selectForCompactBuckets,
    returns individual buckets (no aggregation), limited to querySize rows.
    """
    tableName = _getTableName( "bucket", typeName )
    selectSQL = "SELECT "
    sqlSelectList = []
    for field in self.dbCatalog[ typeName ][ 'keys' ]:
      sqlSelectList.append( "`%s`.`%s`" % ( tableName, field ) )
    for field in self.dbCatalog[ typeName ][ 'values' ]:
      sqlSelectList.append( "`%s`.`%s`" % ( tableName, field ) )
    sqlSelectList.append( "`%s`.`entriesInBucket`" % ( tableName ) )
    sqlSelectList.append( "`%s`.`startTime`" % tableName )
    sqlSelectList.append( "`%s`.bucketLength" % ( tableName ) )
    selectSQL += ", ".join( sqlSelectList )
    selectSQL += " FROM `%s`" % tableName
    selectSQL += " WHERE `%s`.`startTime` < '%s' AND" % ( tableName, timeLimit )
    selectSQL += " `%s`.`bucketLength` = %s" % ( tableName, bucketLength )
    #MAGIC bucketing
    selectSQL += " LIMIT %d" % querySize
    return self._query( selectSQL, conn = connObj )

  def __deleteIndividualForCompactBuckets( self, typeName, bucketsData, connObj = False ):
    """
    Delete compacted buckets one by one, batched in OR-joined DELETEs of
    deleteQueryLimit records. Returns the list of buckets actually deleted.
    """
    tableName = _getTableName( "bucket", typeName )
    keyFields = self.dbCatalog[ typeName ][ 'keys' ]
    deleteQueryLimit = 50
    deletedBuckets = []
    for bLimit in range( 0, len( bucketsData ) , deleteQueryLimit ):
      delCondsSQL = []
      for record in bucketsData[ bLimit : bLimit + deleteQueryLimit ]:
        condSQL = []
        for iPos in range( len( keyFields ) ):
          field = keyFields[ iPos ]
          condSQL.append( "`%s`.`%s` = %s" % ( tableName, field, record[ iPos ] ) )
        condSQL.append( "`%s`.`startTime` = %d" % ( tableName, record[-2] ) )
        condSQL.append( "`%s`.`bucketLength` = %d" % ( tableName, record[-1] ) )
        delCondsSQL.append( "(%s)" % " AND ".join( condSQL ) )
      delSQL = "DELETE FROM `%s` WHERE %s" % ( tableName, " OR ".join( delCondsSQL ) )
      result = self._update( delSQL, conn = connObj )
      if not result[ 'OK' ]:
        #Best effort: log and keep going with the remaining batches
        self.log.error( "Cannot delete individual records for compaction", result[ 'Message' ] )
      else:
        deletedBuckets.extend( bucketsData[ bLimit : bLimit + deleteQueryLimit ] )
    return S_OK( deletedBuckets )

  def __deleteRecordsOlderThanDataTimespan( self, typeName ):
    """
    IF types define dataTimespan, then records older than datatimespan seconds will be deleted
    automatically. Deletes in chunks of deleteLimit rows with a 1s pause to
    limit DB load; timespans shorter than 30 days are ignored.
    """
    dataTimespan = self.dbCatalog[ typeName ][ 'dataTimespan' ]
    if dataTimespan < 86400 * 30:
      return
    for table, field in ( ( _getTableName( "type", typeName ), 'endTime' ),
                          ( _getTableName( "bucket", typeName ), 'startTime + %s' % self.dbBucketsLength[ typeName ][-1][1] ) ):
      self.log.info( "[COMPACT] Deleting old records for table %s" % table )
      deleteLimit = 100000
      deleted = deleteLimit
      while deleted >= deleteLimit:
        sqlCmd = "DELETE FROM `%s` WHERE %s < UNIX_TIMESTAMP()-%d LIMIT %d" % ( table, field, dataTimespan, deleteLimit )
        result = self._update( sqlCmd )
        if not result[ 'OK' ]:
          self.log.error( "[COMPACT] Cannot delete old records", "Table: %s Timespan: %s Error: %s" % ( table, dataTimespan, result[ 'Message' ] ) )
          break
        self.log.info( "[COMPACT] Deleted %d records for %s table" % ( result[ 'Value' ], table ) )
        deleted = result[ 'Value' ]
        time.sleep( 1 )

  def regenerateBuckets( self, typeName ):
    """
    Rebuild the whole bucket table for a type from the raw "type" table:
    deletes all existing buckets and re-splits every raw record.
    """
    if self.__readOnly:
      return S_ERROR( "ReadOnly mode enabled. No modification allowed" )
    #Delete old entries if any
    if self.dbCatalog[ typeName ][ 'dataTimespan' ] > 0:
      self.log.info( "[REBUCKET] Deleting records older that timespan for type %s" % typeName )
      self.__deleteRecordsOlderThanDataTimespan( typeName )
      self.log.info( "[REBUCKET] Done deleting old records" )
    rawTableName = _getTableName( "type", typeName )
    #retVal = self.__startTransaction( connObj )
    #if not retVal[ 'OK' ]:
    #  return retVal
    self.log.info( "[REBUCKET] Deleting buckets for %s" % typeName )
    retVal = self._update( "DELETE FROM `%s`" % _getTableName( "bucket", typeName ) )
    if not retVal[ 'OK' ]:
      return retVal
    #Generate the common part of the query
    #SELECT fields
    startTimeTableField = "`%s`.startTime" % rawTableName
    endTimeTableField = "`%s`.endTime" % rawTableName
    #Select strings and sum select strings
    sqlSUMSelectList = []
    sqlSelectList = []
    for field in self.dbCatalog[ typeName ][ 'keys' ]:
      sqlSUMSelectList.append( "`%s`.`%s`" % ( rawTableName, field ) )
      sqlSelectList.append( "`%s`.`%s`" % ( rawTableName, field ) )
    for field in self.dbCatalog[ typeName ][ 'values' ]:
      sqlSUMSelectList.append( "SUM( `%s`.`%s` )" % ( rawTableName, field ) )
      sqlSelectList.append( "`%s`.`%s`" % ( rawTableName, field ) )
    sumSelectString = ", ".join( sqlSUMSelectList )
    selectString = ", ".join( sqlSelectList )
    #Grouping fields
    sqlGroupList = []
    for field in self.dbCatalog[ typeName ][ 'keys' ]:
      sqlGroupList.append( "`%s`.`%s`" % ( rawTableName, field ) )
    groupingString = ", ".join( sqlGroupList )
    #List to contain all queries
    sqlQueries = []
    dateInclusiveConditions = []
    countedField = "`%s`.`%s`" % ( rawTableName, self.dbCatalog[ typeName ][ 'keys' ][0] )
    lastTime = Time.toEpoch()
    #Iterate for all ranges: one pair of queries per bucket-length tier,
    #walking backwards in time from now
    for iRange in range( len( self.dbBucketsLength[ typeName ] ) ):
      bucketTimeSpan = self.dbBucketsLength[ typeName ][iRange][0]
      bucketLength = self.dbBucketsLength[ typeName ][iRange][1]
      startRangeTime = lastTime - bucketTimeSpan
      endRangeTime = lastTime
      lastTime -= bucketTimeSpan
      bucketizedStart = _bucketizeDataField( startTimeTableField, bucketLength )
      bucketizedEnd = _bucketizeDataField( endTimeTableField, bucketLength )
      timeSelectString = "MIN(%s), MAX(%s)" % ( startTimeTableField, endTimeTableField )
      #Is the last bucket?
      if iRange == len( self.dbBucketsLength[ typeName ] ) - 1:
        whereString = "%s <= %d" % ( endTimeTableField, endRangeTime )
      else:
        whereString = "%s > %d AND %s <= %d" % ( startTimeTableField, startRangeTime,
                                                 endTimeTableField, endRangeTime )
      sameBucketCondition = "(%s) = (%s)" % ( bucketizedStart, bucketizedEnd )
      #Records that fit in a bucket
      sqlQuery = "SELECT %s, %s, COUNT(%s) FROM `%s` WHERE %s AND %s GROUP BY %s, %s" % ( timeSelectString,
                                                                                          sumSelectString,
                                                                                          countedField,
                                                                                          rawTableName,
                                                                                          whereString,
                                                                                          sameBucketCondition,
                                                                                          groupingString,
                                                                                          bucketizedStart )
      sqlQueries.append( sqlQuery )
      #Records that fit in more than one bucket
      sqlQuery = "SELECT %s, %s, %s, 1 FROM `%s` WHERE %s AND NOT %s" % ( startTimeTableField,
                                                                          endTimeTableField,
                                                                          selectString,
                                                                          rawTableName,
                                                                          whereString,
                                                                          sameBucketCondition )
      sqlQueries.append( sqlQuery )
      dateInclusiveConditions.append( "( %s )" % whereString )
    #Query for records that are in between two ranges
    sqlQuery = "SELECT %s, %s, %s, 1 FROM `%s` WHERE NOT %s" % ( startTimeTableField, endTimeTableField,
                                                                 selectString, rawTableName,
                                                                 " AND NOT ".join( dateInclusiveConditions ) )
    sqlQueries.append( sqlQuery )
    self.log.info( "[REBUCKET] Retrieving data for rebuilding buckets for type %s..." % ( typeName ) )
    queryNum = 0
    for sqlQuery in sqlQueries:
      self.log.info( "[REBUCKET] Executing query #%s..." % queryNum )
      queryNum += 1
      retVal = self._query( sqlQuery )
      if not retVal[ 'OK' ]:
        self.log.error( "[REBUCKET] Can't retrieve data for rebucketing", retVal[ 'Message' ] )
        #self.__rollbackTransaction( connObj )
        return retVal
      rawData = retVal[ 'Value' ]
      self.log.info( "[REBUCKET] Retrieved %s records" % len( rawData ) )
      rebucketedRecords = 0
      startQuery = time.time()
      startBlock = time.time()
      numRecords = len( rawData )
      for entry in rawData:
        startT = entry[0]
        endT = entry[1]
        values = entry[2:]
        retVal = self.__splitInBuckets( typeName, startT, endT, values )
        if not retVal[ 'OK' ]:
          #self.__rollbackTransaction( connObj )
          return retVal
        rebucketedRecords += 1
        #Progress/ETA logging every 1000 records
        if rebucketedRecords % 1000 == 0:
          queryAvg = rebucketedRecords / float( time.time() - startQuery )
          blockAvg = 1000 / float( time.time() - startBlock )
          startBlock = time.time()
          perDone = 100 * rebucketedRecords / float ( numRecords )
          expectedEnd = str( datetime.timedelta( seconds = int( ( numRecords - rebucketedRecords ) / blockAvg ) ) )
          self.log.info( "[REBUCKET] Rebucketed %.2f%% %s (%.2f r/s block %.2f r/s query | ETA %s )..." % ( perDone, typeName, blockAvg, queryAvg, expectedEnd ) )
    #return self.__commitTransaction( connObj )
    return S_OK()

  def __startTransaction( self, connObj ):
    #Begin a transaction on the given connection
    return self._query( "START TRANSACTION", conn = connObj )

  def __commitTransaction( self, connObj ):
    #Commit the current transaction on the given connection
    return self._query( "COMMIT", conn = connObj )

  def __rollbackTransaction( self, connObj ):
    #Roll back the current transaction on the given connection
    return self._query( "ROLLBACK", conn = connObj )


def _bucketizeDataField( dataField, bucketLength ):
  #SQL expression aligning a time field down to its bucket boundary
  return "%s - ( %s %% %s )" % ( dataField, dataField, bucketLength )


def _getTableName( tableType, typeName, keyName = None ):
  """
  Generate table name. tableType is "type", "bucket" or "key"; keyName is
  required (and only valid) for key tables.
  """
  if not keyName:
    return "ac_%s_%s" % ( tableType, typeName )
  elif tableType == "key" :
    return "ac_%s_%s_%s" % ( tableType, typeName, keyName )
  else:
    raise Exception( "Call to _getTableName with tableType as key but with no keyName" )
codeparrot/github-code-clean
# The level editor GUI.
# The following workflow is expected:
# 1) User load a level
# 2) main windows display the scene layout
#    right-side-top-dock display:
#    - level scene tree
#    - level tree (a list in fact)
#    - level resources tree (a list in fact)
# 3) user select an element in one of the tree, related properties are displayed in
#    right-side-down-dock property list
# 4) user edit properties in property list
#
# Later on, provides property edition via scene layout display
# Add toolbar to create new element
#
# In memory, we keep track of two things:
# - updated level
# - specific text/fx resources
import xml.etree.ElementTree #@UnresolvedImport
import os.path
import glob #@UnresolvedImport
import subprocess #@UnresolvedImport
import louie
import wogfile
import metaworld
import metawog
import metaworldui
import metatreeui
import metaelementui
import levelview
import wogeditor_rc #@UnusedImport
from shutil import copy2 #@UnresolvedImport
from PyQt4 import QtCore, QtGui #@UnresolvedImport
from PyQt4.QtCore import Qt #@UnresolvedImport
import qthelper
import editleveldialog
import newleveldialog_ui
import errors
from utils import * #@UnusedWildImport
from datetime import datetime

# Application-wide constants
YAML_FORMAT = True
LOG_TO_FILE = False
APP_NAME_UPPER = 'DFG-AMY-EDITOR'
APP_NAME_LOWER = 'dfg-amy-editor'
APP_NAME_PROPER = 'Amy In Da Farm! Editor'
STR_DIR_STUB = 'levels'
CURRENT_VERSION = "v0.1"
CREATED_BY = 'Created by ' + APP_NAME_PROPER + ' ' + CURRENT_VERSION
# Issue severity levels used when validating levels
ISSUE_LEVEL_NONE = 0
ISSUE_LEVEL_ADVICE = 1
ISSUE_LEVEL_WARNING = 2
ISSUE_LEVEL_CRITICAL = 4
MAXRECENTFILES = 4

#@DaB New actions for Add item toolbar
def _appendChildTag( parent_element, new_element_meta , mandatory_attributes, keepid = False ):
    """Adds the specified child tag to the specified element and update the tree view.

       Mandatory attributes missing from mandatory_attributes are filled in with
       generated identifiers or their declared init/default values. Returns the
       new child element; unless keepid is True, it also becomes the selection.
    """
    assert parent_element is not None
    # build the list of attributes with their initial values.
    for attribute_meta in new_element_meta.attributes:
        if attribute_meta.mandatory:
            if attribute_meta.type == metaworld.IDENTIFIER_TYPE:
                try:
                    given_id = mandatory_attributes[attribute_meta.name]
                except KeyError:
                    given_id = None
                if given_id is None or not keepid:
                    init_value = parent_element.world.generate_unique_identifier( attribute_meta )
                    mandatory_attributes[attribute_meta.name] = init_value
            else:
                init_value = attribute_meta.init
                if init_value is not None:
                    if attribute_meta.name not in mandatory_attributes:
                        mandatory_attributes[attribute_meta.name] = init_value
        if ( attribute_meta.default is not None and not attribute_meta.mandatory ):
            if attribute_meta.name not in mandatory_attributes:
                init_value = attribute_meta.default
                mandatory_attributes[attribute_meta.name] = init_value
    # Notes: when the element is added, the ElementAdded signal will cause the
    # corresponding item to be inserted into the tree.
    child_element = parent_element.make_child( new_element_meta, mandatory_attributes )
    # Select new item in tree view
    if not keepid:
        child_element.world.set_selection( child_element )
    return child_element

class AddItemFactory( object ):
    # Callable factory bound to a toolbar action: when invoked, adds a new
    # element of type itemtag under the configured parent at the center of
    # the active view.
    def __init__( self, window, parent, itemtag, attrib ):
        self.window = window
        self.itemtag = itemtag
        self.attrib = attrib
        self.parent = parent

    def _element_has_children(self, element):
        # NOTE(review): the 'element' argument is ignored; the lookup uses
        # self.parent instead — confirm whether that is intentional
        meta_element = metawog.TREE_LEVEL_SCENE.find_immediate_child_by_tag( self.parent )
        return meta_element.children_count > 0

    def __call__( self ):
        assert self.parent is not None
        model = self.window.getCurrentModel()
        if model:
            window = self.window.mdiArea.activeSubWindow()
            if window:
                cview = window.widget()
                # Place the new item at the center of the visible scene area
                cp = cview.mapToScene( cview.width()*0.5, cview.height()*0.5 )
                offsetx, offsety = 0, 0
                if self.parent == 'level':
                    root = model.level_root
                elif self.parent == 'scene':
                    root = model.scene_root
                elif self.parent == 'resource':
                    root = model.resource_root
                elif self._element_has_children( self.parent ):
                    # Child of a composite geometry: find a suitable CG parent
                    # among the selected elements (or their parents)
                    thisworld = cview.world
                    selected_elements = thisworld.selected_elements
                    cgparent = None
                    for element in selected_elements:
                        meta_element = metawog.TREE_LEVEL_SCENE.find_immediate_child_by_tag( element.tag )
                        if meta_element.children_count > 0:
                            cgparent = element
                            break
                        else:
                            # check to see if they are part of a cg
                            pelement = element.parent
                            if pelement is not None:
                                if self._element_has_children( pelement.tag ):
                                    cgparent = pelement
                                    break
                    if cgparent is None:
                        QtGui.QMessageBox.warning( window, 'No composite geometry parent',
                                                   'You must select a CompositeGeom item to add this child to' )
                        return
                    root = cgparent
                    # Child coordinates are relative to the CG center
                    offsetx, offsety = root.get_native( 'center' )
                else:
                    print "Unknown Parent in AddItemFactory", self.parent
                    return
                rootmbt = root.meta.find_immediate_child_by_tag( self.itemtag )
                if rootmbt is not None:
                    # Initialize the first XY attribute with the view-center position
                    # (y is negated: scene y axis is inverted relative to level coords)
                    for attribute_meta in rootmbt.attributes:
                        if attribute_meta.type == metaworld.XY_TYPE:
                            self.attrib[attribute_meta.name] = str( cp.x() - offsetx ) + "," + str( -( cp.y() + offsety ) )
                            break
                    _appendChildTag( root, rootmbt, self.attrib )

def tr( context, message ):
    # Shorthand for Qt translation lookup
    return QtCore.QCoreApplication.translate( context, message )

def find_element_in_tree( root_element, element ):
    """Searchs the specified element in the root_element children and returns all its parent, and its index in its immediate parent.
       Returns None if the element is not found, otherwise returns a tuple ([parent_elements], child_index)
       root_element, element: must provides the interface xml.etree.ElementTree.
    """
    for index, child_element in enumerate( root_element ):
        if child_element is element:
            return ( [root_element], index )
        found = find_element_in_tree( child_element, element )
        if found is not None:
            found_parents, found_index = found
            found_parents.insert( 0, root_element )
            return found_parents, found_index
    return None

def flattened_element_children( element ):
    """Returns a list of all the element children, including its grand-children..."""
    children = []
    for child_element in element:
        children.append( child_element )
        children.extend( flattened_element_children( child_element ) )
    return children

class GameModelException( Exception ):
    # Raised for game-model loading/processing failures
    pass

class PixmapCache( object ):
    """A global pixmap cache the cache the pixmap associated to each element.
       Maintains the cache up to date by listening for element events.
    """
    def __init__( self, amy_dir, universe ):
        self._amy_dir = amy_dir
        # path -> QPixmap cache and path -> file modification date, for invalidation
        self._pixmaps_by_path = {}
        self._filedate_by_path = {}
        self.__event_synthetizer = metaworld.ElementEventsSynthetizer( universe,
                                                                       None,
                                                                       self._on_element_updated,
                                                                       self._on_element_about_to_be_removed )

    def get_pixmap( self, image_id ):
        """Returns a pixmap corresponding to the image id (actually image path).
           The pixmap is loaded if not present in the cache.
           None is returned on failure to load the pixmap.
""" image_path = image_id pixmap = self._pixmaps_by_path.get( image_path ) if pixmap: return pixmap path = os.path.join( self._amy_dir, image_path + '.png' ) if not os.path.isfile( path ): print 'Warning: invalid image path "%(path)s"' % { 'path': image_path } else: return self._addToCache( path, image_id ) return None def _addToCache( self, path, image_id ): img = QtGui.QImage() image_path = image_id if not img.load( path ): data = file( path, 'rb' ).read() if not img.loadFromData( data ): if image_path in self._pixmaps_by_path.keys(): del self._pixmaps_by_path[image_path] del self._filedate_by_path[image_path] print 'Warning: failed to load image "%(path)s"' % { 'path' : image_path } return None # assume source image is in premultiplied alpha format # so, after converting image to ARGB32_Premultiplied # we need to restore its pixels to the pixels of original image img2 = img.convertToFormat( QtGui.QImage.Format_ARGB32_Premultiplied ) if img.hasAlphaChannel(): #img = img.convertToFormat( QtGui.QImage.Format_ARGB32 ) w = img.width() for y in xrange( img.height() ): pixels = img.scanLine( y ) pixels.setsize( 4 * w ) pixels_new = img2.scanLine( y ) pixels_new.setsize( 4 * w ) pixels_new[:] = pixels[:] self._pixmaps_by_path[image_path] = img2 self._filedate_by_path[image_path] = os.path.getmtime( path ) return img2 def refresh( self ): # check each file in the cache... 
# if it's out of date then reload for image_path, filedate in self._filedate_by_path.items(): path = os.path.normpath( os.path.join( self._amy_dir, image_path + '.png' ) ) if not os.path.isfile( path ): if image_path in self._pixmaps_by_path.keys(): del self._pixmaps_by_path[image_path] del self._filedate_by_path[image_path] print 'Warning: File is missing %s' % path elif os.path.getmtime( path ) > filedate: # refresh self._addToCache( path, {'id':path, 'path':image_path} ) def _on_element_about_to_be_removed( self, element, index_in_parent ): #IGNORE:W0 if element.tag == 'Image': if element.get( 'path', '' ) in self._pixmaps_by_element: del self._pixmaps_by_element[element.get( 'path', '' )] def _on_element_updated( self, element, name, new_value, old_value ): #IGNORE:W0613 if element.tag == 'Image': if old_value in self._pixmaps_by_element: del self._pixmaps_by_element[old_value] class GameModel( QtCore.QObject ): def __init__( self, amy_path, window ): """Loads text and global resources. Loads Levels. The following signals are provided: QtCore.SIGNAL('selectedObjectChanged(PyQt_PyObject,PyQt_PyObject,PyQt_PyObject)') """ QtCore.QObject.__init__( self ) self._window = window self._amy_path = amy_path if ON_PLATFORM == PLATFORM_MAC: # on Mac # amydir is Contents\resources\game\ self._amy_dir = os.path.join( self._amy_path, u'Contents', u'Resources', u'game' ) else: self._amy_dir = os.path.split( amy_path )[0] metaworld.AMY_PATH = self._amy_dir self._res_dir = os.path.join( self._amy_dir, u'Data' ) # On MAC # enumerate all files in res folder # convert all .png.binltl to .png if ON_PLATFORM == PLATFORM_MAC: window.statusBar().showMessage( self.tr( "Checking graphics files..." 
) ) skipped, processed, found = 0, 0, 0 lresdir = len( self._res_dir ) toconvert = [] for ( path, dirs, files ) in os.walk( self._res_dir ): #@UnusedVariable for name in files: if name.endswith( '.png.binltl' ): found += 1 output_path = os.path.join( path, name[:-11] ) + '.png' if not os.path.isfile( output_path ): toconvert.append( [os.path.join( path, name ), output_path, os.path.join( path, name )[lresdir:]] ) processed += 1 else: skipped += 1 #print "png.binltl found",found,'processed',processed,'skipped',skipped if processed > 0: progress = QtGui.QProgressDialog( "", QtCore.QString(), 0, processed, window ); progress.setWindowTitle( window.tr( "Converting PNG.BINLTL files to PNG..." ) ); progress.setWindowModality( Qt.WindowModal ); progress.setMinimumWidth( 300 ) progress.forceShow() for filepair in toconvert: if progress.wasCanceled(): break progress.setValue( progress.value() + 1 ); progress.setLabelText( filepair[2] ) wogfile.pngbinltl2png( filepair[0], filepair[1] ) progress.setValue( progress.value() + 1 ); window.statusBar().showMessage( self.tr( "Game Model : Initializing" ) ) self._universe = metaworld.Universe() self.global_world = self._universe.make_world( metawog.WORLD_GLOBAL, 'game' ) window.statusBar().showMessage( self.tr( "Game Model : Loading Properties XMLs" ) ) self._readonly_resources = set() self._levels = self._loadDirList( os.path.join( self._res_dir, 'levels' ), filename_filter = '%s.scene' ) self.models_by_name = {} self.__is_dirty = False self.modified_worlds_to_check = set() louie.connect( self._onElementAdded, metaworld.ElementAdded ) louie.connect( self._onElementAboutToBeRemoved, metaworld.ElementAboutToBeRemoved ) louie.connect( self._onElementUpdated, metaworld.AttributeUpdated ) self.pixmap_cache = PixmapCache( self._amy_dir, self._universe ) window.statusBar().showMessage( self.tr( "Game Model : Complete" ) ) @property def is_dirty( self ): worlds = self.modified_worlds_to_check self.modified_worlds_to_check = set() for world 
in worlds: if world: self.__is_dirty = self.__is_dirty or world.is_dirty return self.__is_dirty def getResourcePath( self, game_dir_relative_path ): return os.path.join( self._amy_dir, game_dir_relative_path ) def _loadTree( self, world, meta_tree, directory, file_name ): path = os.path.join( directory, file_name ) if not os.path.isfile( path ): raise GameModelException( tr( 'LoadData', 'File "%1" does not exist. You likely provided an incorrect Amy In Da Farm! directory.' ).arg( path ) ) data = wogfile.decrypt_file_data( path ) try: if YAML_FORMAT: new_tree = world.make_tree_from_yaml( meta_tree, data ) else: new_tree = world.make_tree_from_xml( meta_tree, data ) except IOError, e: raise GameModelException( unicode( e ) + u' in file ' + file_name ) new_tree.setFilename( path ) return new_tree def _loadUnPackedTree( self, world, meta_tree, directory, file_name ): input_path = os.path.join( directory, file_name ) data = file( input_path, 'rb' ).read() try: if YAML_FORMAT: new_tree = world.make_tree_from_yaml( meta_tree, data ) else: new_tree = world.make_tree_from_xml( meta_tree, data ) except IOError, e: raise GameModelException( unicode( e ) + u' in file ' + file_name ) new_tree.setFilename( input_path ) return new_tree def _saveUnPackedTree( self, directory, file_name, tree ): if not os.path.isdir( directory ): os.makedirs( directory ) output_path = os.path.join( directory, file_name ) if YAML_FORMAT: data = '## ' + CREATED_BY + '\n' + tree.to_yaml() else: data = tree.to_xml() data = '<!-- ' + CREATED_BY + ' -->\n' + data.replace( '><', '>\n<' ) file( output_path, 'wb' ).write( data ) tree.setFilename( output_path ) def _saveTree( self, directory, file_name, tree ): if not os.path.isdir( directory ): os.makedirs( directory ) path = os.path.join( directory, file_name ) if YAML_FORMAT: data = '## ' + CREATED_BY + '\n' + tree.to_yaml() else: data = tree.to_xml() data = '<!-- ' + CREATED_BY + ' -->\n' + data.replace( '><', '>\n<' ) wogfile.encrypt_file_data( path, 
data ) tree.setFilename( path ) def _loadDirList( self, directory, filename_filter ): if not os.path.isdir( directory ): raise GameModelException( tr( 'LoadLevelList', 'Directory "%1" does not exist. You likely provided an incorrect Amy In Da Farm! directory.' ).arg( directory ) ) def is_valid_dir( entry ): """Accepts the directory only if it contains a specified file.""" dir_path = os.path.join( directory, entry ) if os.path.isdir( dir_path ): try: filter_file_path = filename_filter % entry except TypeError: filter_file_path = filename_filter if os.path.isfile( os.path.join( dir_path, filter_file_path ) ): return True return False dirs = [ entry for entry in os.listdir( directory ) if is_valid_dir( entry ) ] dirs.sort( key = unicode.lower ) return dirs def _loadFileList( self, directory, filename_filter ): if not os.path.isdir( directory ): raise GameModelException( tr( 'LoadFileList', 'Directory "%1" does not exist. You likely provided an incorrect Amy In Da Farm! directory.' ).arg( directory ) ) def is_valid_file( entry ): """Accepts the directory only if it contains a specified file.""" if entry.endswith( filename_filter ): file_path = os.path.join( directory, entry ) return os.path.isfile( file_path ) return False files = [ entry for entry in os.listdir( directory ) if is_valid_file( entry ) ] files.sort( key = unicode.lower ) return files @property def names( self ): return self._levels def getModel( self, name ): if name not in self.models_by_name: folder = os.path.join( self._res_dir, STR_DIR_STUB, name ) world = self.global_world.make_world( metawog.WORLD_LEVEL, name, LevelWorld, self ) self._loadUnPackedTree( world, metawog.TREE_LEVEL_GAME, folder, name + '.level' ) self._loadUnPackedTree( world, metawog.TREE_LEVEL_SCENE, folder, name + '.scene' ) self._loadUnPackedTree( world, metawog.TREE_LEVEL_RESOURCE, folder, name + '.resrc' ) if world.isReadOnly: world.clean_dirty_tracker() world.clear_undo_queue() self.models_by_name[name] = world return 
self.models_by_name[name] def selectLevel( self, name ): """Activate the specified level and load it if required. Returns the activated LevelWorld. """ model = self.getModel( name ) assert model is not None louie.send( metaworldui.ActiveWorldChanged, self._universe, model ) return model def _onElementAdded( self, element, index_in_parent ): #IGNORE:W0613 self.modified_worlds_to_check.add( element.world ) def _onElementUpdated( self, element, attribute_name, new_value, old_value ): #IGNORE:W0613 self.modified_worlds_to_check.add( element.world ) def _onElementAboutToBeRemoved( self, element, index_in_parent ): #IGNORE:W0613 self.modified_worlds_to_check.add( element.world ) def hasModifiedReadOnly( self ): """Checks if the user has modified read-only """ for model in self.models_by_name.itervalues(): if model.is_dirty and model.isReadOnly: return True return False def playLevel( self, level_model ): """Starts Amy to test the specified level.""" # remove PYTHONPATH from the environment of new process env = os.environ.copy() if 'PYTHONPATH' in env: del env['PYTHONPATH'] if ON_PLATFORM == PLATFORM_MAC: #print "ON MAC - Save and Play" #Then run the program file itself with no command-line parameters #print "launch ",os.path.join(self._amy_path,u'Contents',u'MacOS',u'Amy In Da Farm') subprocess.Popen( os.path.join( self._amy_path, u'Contents', u'MacOS', u'Amy In Da Farm' ), cwd = self._amy_dir, env = env ) else: #pid = subprocess.Popen( self._amy_path, cwd = self._amy_dir ).pid try: subprocess.Popen( [self._amy_path, level_model.name], cwd = self._amy_dir, env = env ) except: # debug build have executable in different place, try to use it exe_path = os.path.join( os.path.dirname( self._amy_dir ), '_Debug', 'Launcher.exe' ) subprocess.Popen( [exe_path, level_model.name], cwd = self._amy_dir, env = env ) # Don't wait for process end... # @Todo ? Monitor process so that only one can be launched ??? 
    def newLevel( self, name ):
        """Creates a new blank level with the specified name, from the
        metawog templates. May fail with an IOError or OSError."""
        return self._addNewLevel( name,
                                  self._universe.make_unattached_tree_from_xml( metawog.TREE_LEVEL_GAME, metawog.LEVEL_GAME_TEMPLATE ),
                                  self._universe.make_unattached_tree_from_xml( metawog.TREE_LEVEL_SCENE, metawog.LEVEL_SCENE_TEMPLATE ),
                                  self._universe.make_unattached_tree_from_xml( metawog.TREE_LEVEL_RESOURCE, metawog.LEVEL_RESOURCE_TEMPLATE ) )

    def cloneLevel( self, cloned_name, new_name ):
        #Clone an existing level and its resources.
        model = self.getModel( cloned_name )
        dir = os.path.join( self._res_dir, STR_DIR_STUB, new_name )
        if not os.path.isdir( dir ):
            os.mkdir( dir )
            os.mkdir( os.path.join( dir, 'animations' ) )
            os.mkdir( os.path.join( dir, 'fx' ) )
            os.mkdir( os.path.join( dir, 'scripts' ) )
            os.mkdir( os.path.join( dir, 'textures' ) )
            os.mkdir( os.path.join( dir, 'sounds' ) )
        #new cloning method... #2
        # worked for balls... might be going back to the old Nitrozark way..
        # which didn't work right... Hmmm.!
        #get xml from existing
        #make unattached trees from it
        new_level_tree = self._universe.make_unattached_tree_from_xml( metawog.TREE_LEVEL_GAME, model.level_root.tree.to_xml() )
        new_scene_tree = self._universe.make_unattached_tree_from_xml( metawog.TREE_LEVEL_SCENE, model.scene_root.tree.to_xml() )
        new_res_tree = self._universe.make_unattached_tree_from_xml( metawog.TREE_LEVEL_RESOURCE, model.resource_root.tree.to_xml() )
        #change stuff
        #TODO: copy level related resources to new folder and change their paths in scene
        #        self._res_swap( new_level_tree.root, '_' + cloned_name.upper() + '_', '_' + new_name.upper() + '_' )
        #        self._res_swap( new_scene_tree.root, '_' + cloned_name.upper() + '_', '_' + new_name.upper() + '_' )
        #save out new trees
        self._saveUnPackedTree( dir, new_name + '.level', new_level_tree )
        self._saveUnPackedTree( dir, new_name + '.scene', new_scene_tree )
        self._saveUnPackedTree( dir, new_name + '.resrc', new_res_tree )
        self._levels.append( unicode( new_name ) )
        self._levels.sort( key = unicode.lower )
        self.__is_dirty = True

#    def _res_swap( self, element, find, replace ):
#        for attribute in element.meta.attributes:
#            if attribute.type == metaworld.REFERENCE_TYPE:
#                if attribute.reference_family in ['image', 'sound', 'TEXT_LEVELNAME_STR']:
#                    value = element.get( attribute.name, None )
#                    if value is not None:
#                        rv = ','.join( [v.replace( find, replace, 1 ) for v in value.split( ',' )] )
#                        element.set( attribute.name, rv )
#        for child in element.getchildren():
#            self._res_swap( child, find, replace )

    def _isOriginalFile( self, filename, extension ):
        """Was meant to report whether filename is one of the game's original
        (shipped) files, by walking self._files_tree."""
        # NOTE(review): this early return makes everything below dead code, so
        # the method always answers False (every file treated as custom).
        # Possibly disabled on purpose because _files_tree is not loaded —
        # TODO confirm before re-enabling.
        return False
        path_bits = filename.replace( '\\', '/' ).split( "/" )
        if len( path_bits ) == 1:
            print filename, path_bits
            return False
        path_bits.pop( 0 )
        file = path_bits.pop( len( path_bits ) - 1 )
        root_element = self._files_tree.root
        return self._seekFile( root_element, path_bits, file, extension )

    def _seekFile( self, element, path, file, ext ):
        """Recursively walks <folder>/<file> elements looking for file with
        type ext under the given path components. Consumes `path` in place."""
        if path == []:
            for fileitem in element.findall( 'file' ):
                if fileitem.get( 'name' ) == file:
                    if fileitem.get( 'type' ) == ext:
                        return True
            return False
        else:
            for folder in element.findall( 'folder' ):
                if folder.get( 'name' ) == path[0]:
                    path.pop( 0 )
                    return self._seekFile( folder, path, file, ext )
            return False

    def _addNewLevel( self, name, level_tree, scene_tree, resource_tree ):
        """Adds a new level using the specified level, scene and resource trees.
        The level directory is created, but the level xml files will not be
        saved immediately.
        """
        dir_path = os.path.join( self._res_dir, STR_DIR_STUB, name )
        if not os.path.isdir( dir_path ):
            os.mkdir( dir_path )
            os.mkdir( os.path.join( dir_path, 'animations' ) )
            os.mkdir( os.path.join( dir_path, 'fx' ) )
            os.mkdir( os.path.join( dir_path, 'scripts' ) )
            os.mkdir( os.path.join( dir_path, 'textures' ) )
            os.mkdir( os.path.join( dir_path, 'sounds' ) )
        # Creates and register the new level
        world = self.global_world.make_world( metawog.WORLD_LEVEL, name, LevelWorld, self, is_dirty = True )
        treestoadd = [level_tree, scene_tree, resource_tree]
        world.add_tree( treestoadd )
        self.models_by_name[name] = world
        self._levels.append( unicode( name ) )
        self._levels.sort( key = unicode.lower )
        self.__is_dirty = True

class ThingWorld( metaworld.World, metaworldui.SelectedElementsTracker, metaworldui.ElementIssueTracker, metaworldui.UndoWorldTracker ):
    """Base world combining selection tracking, issue tracking and a
    100-entry undo history on top of metaworld.World."""
    def __init__( self, universe, world_meta, name, game_model, is_dirty = False ):
        metaworld.World.__init__( self, universe, world_meta, name )
        metaworldui.SelectedElementsTracker.__init__( self, self )
        metaworldui.ElementIssueTracker.__init__( self, self )
        metaworldui.UndoWorldTracker.__init__( self, self, 100 )
        self.game_model = game_model

    @property
    def name( self ):
        return self.key

class LevelWorld( ThingWorld ):
    """World for a single level: its .level, .scene and .resrc trees, plus
    dirty tracking and the save-time issue checks."""
    def __init__( self, universe, world_meta, name, game_model, is_dirty = False ):
        ThingWorld.__init__( self, universe, world_meta, name, game_model, is_dirty = is_dirty )
        self.__dirty_tracker = metaworldui.DirtyWorldTracker( self, is_dirty )
        self._importError = None
        # per-tree issue report text and severity masks, rebuilt by has*_issue()
        self._sceneissues = ''
        self._levelissues = ''
        self._resrcissues = ''
        self._globalissues = ''
        self._scene_issue_level = ISSUE_LEVEL_NONE
        self._level_issue_level = ISSUE_LEVEL_NONE
        self._resrc_issue_level = ISSUE_LEVEL_NONE
        self._global_issue_level = ISSUE_LEVEL_NONE
        self._view = None

    @property
    def level_root( self ):
        return self.find_tree( metawog.TREE_LEVEL_GAME ).root

    @property
    def scene_root( self ):
        return self.find_tree( metawog.TREE_LEVEL_SCENE ).root

    @property
    def resource_root( self ):
        return self.find_tree( metawog.TREE_LEVEL_RESOURCE ).root

    @property
    def is_dirty( self ):
        return self.__dirty_tracker.is_dirty

    @property
    def isReadOnly( self ):
        # original (shipped) levels must not be overwritten
        return self.name.lower() in metawog.LEVELS_ORIGINAL_LOWER

    @property
    def view( self ):
        return self._view

    def setView ( self, newview ):
        self._view = newview

    #@DaB - Issue checking used when saving the level
    def hasIssues ( self ):
        """Checks all 3 element trees for outstanding issues.
        Returns a bit mask of ISSUE_LEVEL_* flags (0 when clean)."""
        tIssue = ISSUE_LEVEL_NONE
        if self.element_issue_level( self.scene_root ):
            tIssue |= ISSUE_LEVEL_CRITICAL
        if self.element_issue_level( self.level_root ):
            tIssue |= ISSUE_LEVEL_CRITICAL
        if self.element_issue_level( self.resource_root ):
            tIssue |= ISSUE_LEVEL_CRITICAL
        #If we have a tree Issue.. don't perform the extra checks
        #because that can cause rt errors (because of the tree issues)
        #and then we don't see a popup.
        if tIssue == ISSUE_LEVEL_CRITICAL:
            #ensure old issues don't get redisplayed if we do "bail" here
            self._sceneissues = ''
            self._levelissues = ''
            self._resrcissues = ''
            self._globalissues = ''
            return tIssue
        if self.haslevel_issue():
            tIssue |= self._level_issue_level
        if self.hasscene_issue():
            tIssue |= self._scene_issue_level
        if self.hasresrc_issue():
            tIssue |= self._resrc_issue_level
        if self.hasglobal_issue():
            tIssue |= self._global_issue_level
        return tIssue

    def getIssues ( self ):
        """Returns an HTML 'report' of outstanding issues, for the popup
        message. Relies on has*_issue() having been run (via hasIssues)."""
        txtIssue = ''
        if self.element_issue_level( self.scene_root ):
            txtIssue = txtIssue + '<p>Scene Tree:<br>' + self.element_issue_report( self.scene_root ) + '</p>'
        if self.scene_issue_report != '':
            txtIssue += '<p>Scene Checks:<br>' + self.scene_issue_report + '</p>'
        if self.element_issue_level( self.level_root ):
            txtIssue = txtIssue + '<p>Level Tree:<br>' + self.element_issue_report( self.level_root ) + '</p>'
        if self.level_issue_report != '':
            txtIssue += '<p>Level Checks:<br>' + self.level_issue_report + '</p>'
        if self.element_issue_level( self.resource_root ):
            txtIssue = txtIssue + '<p>Resource Tree:<br>' + self.element_issue_report( self.resource_root ) + '</p>'
        if self.resrc_issue_report != '':
            txtIssue += '<p>Resource Checks:<br>' + self.resrc_issue_report + '</p>'
        if self.global_issue_report != '':
            txtIssue += '<p>Global Checks:<br>' + self.global_issue_report + '</p>'
        return txtIssue

    #@DaB Additional Checking Level,Scene,Resource (at tree level)
    def hasglobal_issue( self ):
        # check for issues across trees
        #if there's a levelexit it must be within the scene bounds
        self._globalissues = ''
        self._global_issue_level = ISSUE_LEVEL_NONE
        levelexit = self.level_root.find( 'levelexit' )
        if levelexit is not None:
            exit_posx, exit_posy = levelexit.get_native( 'pos' )
            minx, maxx = self.scene_root.get_native( 'minx' ), self.scene_root.get_native( 'maxx' )
            miny, maxy = self.scene_root.get_native( 'miny' ), self.scene_root.get_native( 'maxy' )
            if exit_posx > maxx or exit_posx < minx or exit_posy > maxy or exit_posy < miny:
                # exit outside scene bounds warning
                self.addGlobalError( 401, None )
        return self._global_issue_level != ISSUE_LEVEL_NONE

    def haslevel_issue( self ):
        # rules for "DUMBASS" proofing (would normally use a much ruder word)
        root = self.level_root
        self._levelissues = ''
        self._level_issue_level = ISSUE_LEVEL_NONE
        normal_camera = False
        widescreen_camera = False
        #must have 1 normal camera and 1 widescreen camera
        for camera in root.findall( 'camera' ):
            c_aspect = camera.get( 'aspect' )
            if c_aspect == 'normal':
                normal_camera = True
            elif c_aspect == 'widescreen':
                widescreen_camera = True
            #only Single poi travel time check
            # NOTE(review): reaches into the private _children list of the
            # camera element rather than a public accessor — TODO confirm API.
            if len( camera._children ) == 1:
                if camera._children[0].get_native( 'traveltime', 0 ) > 1:
                    self.addLevelError( 101, c_aspect )
        if not normal_camera:
            self.addLevelError( 102, None )
        if not widescreen_camera:
            self.addLevelError( 103, None )
        # NOTE(review): end_conditions is hard-coded empty, so error 111 can
        # never fire — looks like an unfinished check carried over.
        end_conditions = []
        if len( end_conditions ) > 1:
            self.addLevelError( 111, ','.join( end_conditions ) )
        return self._level_issue_level != ISSUE_LEVEL_NONE

    def addSceneError( self, error_num, subst ):
        # append error error_num (with optional substitution) to the scene report
        error = errors.ERROR_INFO[error_num]
        self._scene_issue_level, self._sceneissues = self.addError( self._scene_issue_level, self._sceneissues, error, error_num, subst )

    def addLevelError( self, error_num, subst ):
        # append error error_num (with optional substitution) to the level report
        error = errors.ERROR_INFO[error_num]
        self._level_issue_level, self._levelissues = self.addError( self._level_issue_level, self._levelissues, error, error_num, subst )

    def addResourceError( self, error_num, subst ):
        # append error error_num (with optional substitution) to the resource report
        error = errors.ERROR_INFO[error_num]
        self._resrc_issue_level, self._resrcissues = self.addError( self._resrc_issue_level, self._resrcissues, error, error_num, subst )

    def addGlobalError( self, error_num, subst ):
        # append error error_num (with optional substitution) to the global report
        error = errors.ERROR_INFO[error_num]
        self._global_issue_level, self._globalissues = self.addError( self._global_issue_level, self._globalissues, error, error_num, subst )

    def addError( self, err_level, err_message, error, error_num, err_subst ):
        """Formats one error entry (from errors.ERROR_INFO) into err_message
        and ORs its severity into err_level. Returns the updated pair."""
        err_level |= error[0]
        err_message += errors.ERROR_FRONT[error[0]]
        if err_subst is not None:
            err_message += error[1] % err_subst
        else:
            err_message += error[1]
        err_message += errors.ERROR_MORE_INFO % error_num
        err_message += "<br>"
        return err_level, err_message

    def hasscene_issue( self ):
        # TODO: check SceneLayer tiling applied to only pow2 textures
        #rules
        root = self.scene_root
        self._scene_issue_level = ISSUE_LEVEL_NONE
        self._sceneissues = ''
        #motor attached to static body
        motorbodys = set()
        for motor in root.findall( 'motor' ):
            motorbodys.add( motor.get( 'body' ) )
        hingebodys = set()
        for hinge in root.findall( 'hinge' ):
            hingebodys.add( hinge.get( 'body1' ) )
            body2 = hinge.get( 'body2', '' )
            if body2 != '':
                hingebodys.add( hinge.get( 'body2' ) )
        rotspeedbodys = set()
        geomitems = []
        for geomitem in root.findall( 'rectangle' ):
            geomitems.append( geomitem )
        for geomitem in root.findall( 'circle' ):
            geomitems.append( geomitem )
#        # mass checks on rectangle and circles
#        for geomitem in geomitems:
#            geomstatic = geomitem.get_native( 'static', False )
#            #static / masscheck!
#            if not geomstatic:
#                if geomitem.get_native( 'mass', 0 ) <= 0:
#                    self.addSceneError( 1, geomitem.get( 'id', '' ) )
        # check on composite geoms
        geomchildren = set()
        for geomitem in root.findall( 'compositegeom' ):
            geomitems.append( geomitem )
#            geomstatic = geomitem.get_native( 'static', False )
#            if not geomstatic:
#                if geomitem.get_native( 'rotation', 0 ) != 0:
#                    self.addSceneError( 2, geomitem.get( 'id', '' ) )
            nchildren = 0
            for geomchild in geomitem.getchildren():
                nchildren += 1
                geomchildren.add( geomchild.get( 'id', '' ) )
#                if not geomstatic:
#                    if geomchild.get_native( 'mass', 0.0 ) <= 0:
#                        self.addSceneError( 3, ( geomitem.get( 'id', '' ), geomchild.get( 'id', '' ) ) )
#                    if geomchild.get( 'image' ):
#                        self.addSceneError( 4, geomchild.get( 'id', '' ) )
#            if nchildren == 0:
#                if not geomstatic:
#                    self.addSceneError( 5, geomitem.get( 'id', '' ) )
#                else:
#                    self.addSceneError( 6, geomitem.get( 'id', '' ) )
        # Get any radial forcefields.. ready for next check
        rfflist = {}
        for rff in root.findall( 'radialforcefield' ):
            rffid = rff.get( 'id', len( rfflist ) )
            rfflist[rffid] = rff.get_native( 'center' )
        # check on ALL geometry bodies
#        for geomitem in geomitems:
#            id = geomitem.get( 'id', '' )
#            if geomitem.get_native( 'rotspeed', 0 ) != 0:
#                rotspeedbodys.add( id )
#            geomstatic = geomitem.get_native( 'static', False )
#            #static vs motor check
#            if geomstatic and id in motorbodys:
#                self.addSceneError( 7, id )
#
#            if not geomstatic:
#                gx, gy = geomitem.get_native( 'center', ( 0, 0 ) )
#                for rffid, rffpos in rfflist.items():
#                    if abs( gx - rffpos[0] + gy - rffpos[1] ) < 0.001:
#                        self.addSceneError( 8, ( id, rffid ) )
        # finally some checks on unfixed spinning things
        # NOTE(review): the rotspeed scan above is commented out, so
        # rotspeedbodys is always empty here and only motor bodies are checked.
        spinning = motorbodys | rotspeedbodys
        spinningnohinge = spinning - hingebodys
        for body in spinningnohinge:
            self.addSceneError( 9, body )
        hingedchildren = hingebodys & geomchildren
        for hingedchild in hingedchildren:
            self.addSceneError( 10, hingedchild )
        #linearforcefield can have center but no size
        #but CANNOT have size, but no center
        for lff in root.findall( 'linearforcefield' ):
            if lff.get( 'size' ) is not None:
                if lff.get( 'center', '' ) == '':
                    self.addSceneError( 11, lff.get( 'id', '' ) )
        return self._scene_issue_level != ISSUE_LEVEL_NONE

    def _get_all_resource_ids( self, root, tag ):
        """Returns the set of resource ids (path + stripped extension) for all
        elements of the given tag under root."""
        resource_ids = set()
        for resource in root.findall( './/' + tag ):
            resource_ids.add( resource.get( 'path' ) + resource.attribute_meta( 'path' ).strip_extension )
        return resource_ids

    def _get_unused_resources( self ):
        """Returns declared Image/Sound resources not referenced by the scene
        or level trees."""
        used = self._get_used_resources()
        resources = self._get_all_resource_ids( self.resource_root, "Image" ) | self._get_all_resource_ids( self.resource_root, "Sound" )
        unused = resources - used
        return unused

    def _remove_unused_resources( self, element, unused ):
        """Removes (undo-suspended) every descendant of element whose PATH
        attribute is in the unused set."""
        self.suspend_undo()
        to_remove = []
        def _recursive_remove( element ):
            for attribute_meta in element.meta.attributes:
                if attribute_meta.type == metaworld.PATH_TYPE:
                    if element.get( attribute_meta.name ) + attribute_meta.strip_extension in unused:
                        to_remove.append( element )
            for child in element:
                _recursive_remove( child )
        _recursive_remove( element )
        for element in to_remove:
            element.parent.remove( element )
        self.activate_undo()

    def _get_used_resources( self ):
        """Returns the set of resource ids referenced from the scene and level
        trees (any PATH-typed attribute)."""
        used = set()
        #go through scene and level root
        #store the resource id of any that do
        for root in ( self.scene_root, self.level_root ):
            for element in root:
                for attribute_meta in element.meta.attributes:
                    if attribute_meta.type == metaworld.PATH_TYPE:
                        if element.get( attribute_meta.name ):
                            used.add( element.get( attribute_meta.name ) + attribute_meta.strip_extension )
        return used

    def hasresrc_issue( self ):
        """Checks the resource tree: wrong on-disk extension case (Windows)
        and unused Image/Sound declarations."""
        root = self.resource_root
        self._resrcissues = ''
        self._resrc_issue_level = ISSUE_LEVEL_NONE
        # confirm every file referenced exists
        used_resources = self._get_used_resources()
        image_resources = set()
        for resource in root.findall( './/Image' ):
            image_resources.add( resource.get( 'path' ) )
            full_filename = os.path.join( self.game_model._amy_dir, resource.get( 'path' ) + resource.attribute_meta( 'path' ).strip_extension )
            if ON_PLATFORM == PLATFORM_WIN:
                #confirm extension on drive is lower case
                real_filename = getRealFilename( full_filename )
                real_ext = os.path.splitext( real_filename )[1]
                if real_ext != ".png":
                    self.addResourceError( 201, resource.get( 'path' ) + real_ext )
        unused_images = image_resources.difference( used_resources )
        if len( unused_images ) != 0:
            for unused in unused_images:
                self.addResourceError( 202, unused )
        sound_resources = set()
        for resource in root.findall( './/Sound' ):
            sound_resources.add( resource.get( 'path' ) )
            full_filename = os.path.join( self.game_model._amy_dir, resource.get( 'path' ) + ".ogg" )
            if ON_PLATFORM == PLATFORM_WIN:
                #confirm extension on drive is lower case
                real_filename = getRealFilename( full_filename )
                real_ext = os.path.splitext( real_filename )[1]
                if real_ext != ".ogg":
                    self.addResourceError( 203, resource.get( 'path' ) + real_ext )
        unused_sounds = sound_resources.difference( used_resources )
        if len( unused_sounds ) != 0:
            for unused in unused_sounds:
                self.addResourceError( 204, unused )
        return self._resrc_issue_level != ISSUE_LEVEL_NONE

    @property
    def scene_issue_report( self ):
        return self._sceneissues

    @property
    def level_issue_report( self ):
        return self._levelissues

    @property
    def resrc_issue_report( self ):
        return self._resrcissues

    @property
    def global_issue_report( self ):
        return self._globalissues

    def _isNumber( self, input ):
        # True when input parses as a float
        try:
            f = float( input ) #@UnusedVariable
            return True
        except ValueError:
            return False

    def _cleanleveltree( self ):
        pass

    def _cleanscenetree( self ):
        # move hinges then motors to the end of the scene so they serialize
        # after the bodies they reference (undo-suspended)
        self.suspend_undo()
        for hinge in self.scene_root.findall( 'hinge' ):
            self.scene_root.remove( hinge )
            self.scene_root.append( hinge )
        for motor in self.scene_root.findall( 'motor' ):
            self.scene_root.remove( motor )
            self.scene_root.append( motor )
        self.activate_undo()

    def _cleanresourcetree( self ):
        #removes any unused resources from the resource and text resource trees
        self.suspend_undo()
        root = self.resource_root
        #ensure cAsE sensitive path is stored in resource file
        #Only required on windows...
        #If path was not CaSe SenSitivE match on Linux / Mac would be File not found earlier
        if ON_PLATFORM == PLATFORM_WIN:
            for resource in root.findall( './/Image' ):
                full_filename = os.path.normpath( os.path.join( self.game_model._amy_dir, resource.get( 'path' ) + ".png" ) )
                if os.path.exists( full_filename ):
                    #confirm extension on drive is lower case
                    # +1 skips the path separator after the game dir
                    len_wogdir = len( os.path.normpath( self.game_model._amy_dir ) ) + 1
                    real_filename = os.path.normpath( getRealFilename( full_filename ) )
                    real_file = os.path.splitext( real_filename )[0][len_wogdir:]
                    full_file = os.path.splitext( full_filename )[0][len_wogdir:]
                    if real_file != full_file:
                        print "Correcting Path", resource.get( 'id' ), full_file, "-->", real_file
                        resource.attribute_meta( 'path' ).set( resource, real_file )
            for resource in root.findall( './/Sound' ):
                full_filename = os.path.normpath( os.path.join( self.game_model._amy_dir, resource.get( 'path' ) + ".ogg" ) )
                if os.path.exists( full_filename ):
                    #confirm extension on drive is lower case
                    # NOTE(review): unlike the Image branch above, this omits
                    # the '+ 1', so the stored path keeps a leading separator.
                    # Looks like a copy/paste slip — TODO confirm.
                    len_wogdir = len( os.path.normpath( self.game_model._amy_dir ) )
                    real_filename = os.path.normpath( getRealFilename( full_filename ) )
                    real_file = os.path.splitext( real_filename )[0][len_wogdir:]
                    full_file = os.path.splitext( full_filename )[0][len_wogdir:]
                    if real_file != full_file:
                        print "Correcting Path", resource.get( 'id' ), full_file, "-->", real_file
                        resource.attribute_meta( 'path' ).set( resource, real_file )
        self.activate_undo()

    def saveModifiedElements( self ):
        """Saves whichever of the scene/level/resource trees are dirty, then
        clears the dirty tracker. Changes to read-only levels are discarded."""
        if not self.isReadOnly:
            # Discards change made on read-only level
            name = self.name
            dir = os.path.join( self.game_model._res_dir, STR_DIR_STUB, name )
            if not os.path.isdir( dir ):
                os.mkdir( dir )
                os.mkdir( os.path.join( dir, 'animations' ) )
                os.mkdir( os.path.join( dir, 'fx' ) )
                os.mkdir( os.path.join( dir, 'scripts' ) )
                os.mkdir( os.path.join( dir, 'textures' ) )
                os.mkdir( os.path.join( dir, 'sounds' ) )
            if self.__dirty_tracker.is_dirty_tree( metawog.TREE_LEVEL_GAME ):
                if not self.element_issue_level( self.level_root ):
                    #clean tree caused an infinite loop when there was a missing ball
                    # so only clean trees with no issues
                    self._cleanleveltree()
                self.game_model._saveUnPackedTree( dir, name + '.level', self.level_root.tree )
            if self.__dirty_tracker.is_dirty_tree( metawog.TREE_LEVEL_RESOURCE ):
                self.game_model._saveUnPackedTree( dir, name + '.resrc', self.resource_root.tree )
            # ON Mac
            # Convert all "custom" png to .png.binltl
            # Only works with REAL PNG
            if ON_PLATFORM == PLATFORM_MAC:
                for image in self.resource_root.findall( './/Image' ):
                    if not self.game_model._isOriginalFile( image.get( 'path' ), 'png' ):
                        in_path = os.path.join( self.game_model._amy_dir, image.get( 'path' ) )
                        out_path = in_path + '.png.binltl'
                        in_path += '.png'
                        wogfile.png2pngbinltl( in_path, out_path )
            if self.__dirty_tracker.is_dirty_tree( metawog.TREE_LEVEL_SCENE ):
                if not self.element_issue_level( self.scene_root ):
                    # so only clean trees with no issues
                    self._cleanscenetree()
                self.game_model._saveUnPackedTree( dir, name + '.scene', self.scene_root.tree )
        self.__dirty_tracker.clean()

    def clean_dirty_tracker( self ):
        self.__dirty_tracker.clean()

    def getImagePixmap( self, image_id ):
        # fetch from the global pixmap cache; None on failure
        pixmap = self.game_model.pixmap_cache.get_pixmap( image_id )
        if pixmap is None:
            print 'Warning: invalid image reference:|', image_id, '|'
        return pixmap

    def updateResources( self ):
        """Ensures all image/sound resources present in the level directory
        are in the resource tree.
        Adds new resources to the resource tree if required.
""" game_dir = os.path.normpath( self.game_model._amy_dir ) dir = os.path.join( game_dir, 'Data', STR_DIR_STUB, self.name ) if not os.path.isdir( dir ): print 'Warning: level directory does not exist' return [] resource_element = self.resource_root.find( './/Resources' ) if resource_element is None: print 'Warning: root element not found in resource tree' return [] added_elements = [] for tag, extension, subfolder in ( ( 'Image', 'png', 'textures' ), ( 'Sound', 'ogg', 'sounds' ) ): known_paths = set() for element in self.resource_root.findall( './/' + tag ): path = os.path.normpath( os.path.splitext( element.get( 'path', '' ).lower() )[0] ) # known path are related to wog top dir in unix format & lower case without the file extension known_paths.add( path ) existing_paths = glob.glob( os.path.join( dir, subfolder, '*.' + extension ) ) for existing_path in existing_paths: existing_path = existing_path[len( game_dir ) + 1:] # makes path relative to top dir existing_path = os.path.splitext( existing_path )[0] # strip file extension path = os.path.normpath( existing_path ).lower() if path not in known_paths: resource_path = existing_path.replace( "\\", "/" ) meta_element = metawog.TREE_LEVEL_RESOURCE.find_element_meta_by_tag( tag ) new_resource = metaworld.Element( meta_element, {'path':resource_path} ) resource_element.append( new_resource ) added_elements.append( new_resource ) return added_elements #@DaB New Functionality - Import resources direct from files def importError( self ): return self._importError def importResources( self, importedfiles, res_dir ): """Import Resources direct from files into the level If files are located outside the Wog/res folder it copies them png -> Data/levels/{name}/textures ogg -> Data/levels/{name}/sounds """ self._importError = None res_dir = os.path.normpath( res_dir ) game_dir = os.path.split( res_dir )[0] resource_element = self.resource_root.find( './/Resources' ) if resource_element is None: print 'Warning: root element not 
found in resource tree' return [] all_local = True includesogg = False for file in importedfiles: file = os.path.normpath( file ) # "Are you Local?" # Check if the files were imported from outside the Res folder fileext = os.path.splitext( file )[1][1:4] if fileext.lower() == "ogg": includesogg = True if file[:len( res_dir )] != res_dir: all_local = False if not all_local and self.isReadOnly: self._importError = ["Cannot import external files...!", "You cannot import external files into the original levels.\nIf you really want to do this... Clone the level first!"] return [] if not all_local: level_path = os.path.join( res_dir, STR_DIR_STUB, self.name ) if not os.path.isdir( level_path ): os.mkdir( level_path ) os.mkdir( os.path.join( level_path, 'animations' ) ) os.mkdir( os.path.join( level_path, 'fx' ) ) os.mkdir( os.path.join( level_path, 'scripts' ) ) os.mkdir( os.path.join( level_path, 'textures' ) ) os.mkdir( os.path.join( level_path, 'sounds' ) ) if includesogg: #' confirm / create import folder' music_path = os.path.join( res_dir, STR_DIR_STUB, 'sounds', self.name ) if not os.path.isdir( music_path ): os.mkdir( music_path ) localfiles = [] resmap = {'png':( 'Image', 'textures' ), 'ogg':( 'Sound', 'sounds' )} for file in importedfiles: # "Are you Local?" fileext = os.path.splitext( file )[1][1:4] if file[:len( res_dir )] != res_dir: #@DaB - Ensure if the file is copied that it's new extension is always lower case fname = os.path.splitext( os.path.split( file )[1] )[0] fileext = fileext.lower() newfile = os.path.join( res_dir, STR_DIR_STUB, self.name, resmap[fileext][1], fname + "." 
+ fileext ) copy2( file, newfile ) localfiles.append( newfile ) else: #@DaB - File Extension Capitalization Check if fileext != fileext.lower(): #Must be png or ogg to be compatible with LINUX and MAC self._importError = ["File Extension CAPITALIZATION Warning!", "To be compatible with Linux and Mac - All file extensions must be lower case.\nYou should rename the file below, and then import it again.\n\n" + file + " skipped!"] else: localfiles.append( file ) added_elements = [] known_paths = {'Image':set(), 'Sound':set()} for ext in resmap: for element in self.resource_root.findall( './/' + resmap[ext][0] ): path = os.path.normpath( os.path.splitext( element.get( 'path', '' ).lower() )[0] ) # known path are related to wog top dir in unix format & lower case without the file extension known_paths[resmap[ext][0]].add( path ) for file in localfiles: file = file[len( game_dir ) + 1:] # makes path relative to top dir filei = os.path.splitext( file ) path = os.path.normpath( filei[0] ).lower() ext = filei[1][1:4] if path not in known_paths[resmap[ext][0]]: resource_path = filei[0].replace( "\\", "/" ) meta_element = metawog.TREE_LEVEL_RESOURCE.find_element_meta_by_tag( resmap[ext][0] ) new_resource = metaworld.Element( meta_element, {'path':resource_path} ) resource_element.append( new_resource ) added_elements.append( new_resource ) return added_elements class MainWindow( QtGui.QMainWindow ): def __init__( self, parent = None ): QtGui.QMainWindow.__init__( self, parent ) self.setWindowIcon( QtGui.QIcon( ":/images/icon.png" ) ) self.setAttribute( Qt.WA_DeleteOnClose ) self.actionTimer = None self.statusTimer = None self._amy_path = None # Path to 'amy' executable self.recentfiles = None self.createMDIArea() self.createActions() self.createMenus() self.createToolBars() self.createStatusBar() self.createDockWindows() self.setWindowTitle( self.tr( "Amy In Da Farm! 
Editor" ) ) self._readSettings() self._game_model = None if self._amy_path: #Check that the stored path is still valid if not os.path.exists( self._amy_path ): self.changeAmyDir() else: self._reloadGameModel() else: # if amy_path is missing, prompt for it. self.changeAmyDir() def changeAmyDir( self ): amy_path = QtGui.QFileDialog.getOpenFileName( self, self.tr( 'Select Amy In Da Farm! program in the folder you want to edit' ), r'', self.tr( 'Amy In Da Farm! (Amy*)' ) ) if amy_path.isEmpty(): # user canceled action return self._amy_path = os.path.normpath( unicode( amy_path ) ) #print "_amy_path=",self._amy_path self._reloadGameModel() def _reloadGameModel( self ): try: self._game_model = GameModel( self._amy_path, self ) except GameModelException, e: QtGui.QMessageBox.warning( self, self.tr( "Loading Amy In Da Farm! levels (" + APP_NAME_PROPER + " " + CURRENT_VERSION + ")" ), unicode( e ) ) def _updateRecentFiles( self ): if self.recentFiles is None: numRecentFiles = 0 else: numRecentFiles = min( len( self.recentFiles ), MAXRECENTFILES ) for i in range( 0, numRecentFiles ): self.recentfile_actions[i].setText( self.recentFiles[i] ) self.recentfile_actions[i].setVisible( True ) for i in range( numRecentFiles, MAXRECENTFILES ): self.recentfile_actions[i].setVisible( False ) self.separatorRecent.setVisible( numRecentFiles > 0 ); def _setRecentFile( self, filename ): self.recentFiles.removeAll( filename ) self.recentFiles.prepend( filename ) if len( self.recentFiles ) > MAXRECENTFILES: self.recentFiles = self.recentFiles[:MAXRECENTFILES] self._updateRecentFiles() def on_recentfile_action( self ): action = self.sender() name = unicode( action.text() ) if self.open_level_view_by_name( name ): self._setRecentFile( name ) def editLevel( self ): if self._game_model: dialog = QtGui.QDialog() ui = editleveldialog.Ui_EditLevelDialog() ui.setupUi( dialog , set( self._game_model.names ), metawog.LEVELS_ORIGINAL ) if dialog.exec_() and ui.levelList.currentItem: settings = 
QtCore.QSettings() settings.beginGroup( "MainWindow" ) settings.setValue( "level_filter", ui.comboBox.currentIndex() ) settings.endGroup() name = unicode( ui.levelList.currentItem().text() ) if self.open_level_view_by_name( name ): self._setRecentFile( name ) def open_level_view_by_name( self, name ): try: world = self._game_model.selectLevel( name ) except GameModelException, e: QtGui.QMessageBox.warning( self, self.tr( "Failed to load level! (" + APP_NAME_PROPER + " " + CURRENT_VERSION + ")" ), unicode( e ) ) else: sub_window = self._findWorldMDIView( world ) if sub_window: self.mdiArea.setActiveSubWindow( sub_window ) else: self._addGraphicView( world ) return True return False def _addGraphicView( self, world ): """Adds a new MDI GraphicView window for the specified level.""" level_view = levelview.LevelGraphicView( world, self.view_actions, self.common_actions ) sub_window = self.mdiArea.addSubWindow( level_view ) self.connect( level_view, QtCore.SIGNAL( 'mouseMovedInScene(PyQt_PyObject,PyQt_PyObject)' ), self._updateMouseScenePosInStatusBar ) self.connect( sub_window, QtCore.SIGNAL( 'aboutToActivate()' ), level_view.selectLevelOnSubWindowActivation ) world.set_selection( world.scene_root ) world.setView( level_view ) level_view.show() def _updateMouseScenePosInStatusBar( self, x, y ): """Called whenever the mouse move in the LevelView.""" # Round displayed coordinate to 2dp (0.01) x = round( x, 2 ) y = -round( y, 2 ) # Reverse transformation done when mapping to scene (in Qt 0 = top, in WOG 0 = bottom) self._mousePositionLabel.setText( self.tr( 'x: %1 y: %2' ).arg( x ).arg( y ) ) def _findWorldMDIView( self, world ): """Search for an existing MDI window for level name. Return the LevelGraphicView widget, or None if not found.""" for window in self.mdiArea.subWindowList(): sub_window = window.widget() if sub_window.world == world: return window return None def get_active_view( self ): """Returns the view of the active MDI window. 
Returns None if no view is active. """ window = self.mdiArea.activeSubWindow() if window: return window.widget() return None def getCurrentModel( self ): """Returns the level model of the active MDI window.""" window = self.mdiArea.activeSubWindow() if window: return window.widget().getModel() return None #@DaB - New save routines to save ONLY the current Level def saveCurrent( self ): if self._game_model: model = self.getCurrentModel() if model is not None: if model.isReadOnly: if model.is_dirty: QtGui.QMessageBox.warning( self, self.tr( "Can not save Amy In Da Farm! standard levels!" ), self.tr( 'You can not save changes made to levels that come with Amy In Da Farm!.\n' 'Instead, clone the level using the "Clone selected level" tool.\n' 'Do so now, or your change will be lost once you quit the editor' ) ) return False return True else: #Check for issues try: model.saveModifiedElements() self.statusBar().showMessage( self.tr( "Saved " + model.name ), 2000 ) return True except ( IOError, OSError ), e: QtGui.QMessageBox.warning( self, self.tr( "Failed saving levels (" + APP_NAME_PROPER + " " + CURRENT_VERSION + ")" ), unicode( e ) ) return False def saveIT( self ): if self.saveCurrent(): QtGui.QApplication.setOverrideCursor( Qt.WaitCursor ) model = self.getCurrentModel() issue_level = model.hasIssues() QtGui.QApplication.restoreOverrideCursor() if issue_level >= ISSUE_LEVEL_WARNING: txtIssue = self.tr( """<p>There are unresolved issues with this level that may cause problems.<br> You should fix these before you try to play or make a goomod.</p>""" ) txtIssue = txtIssue + self.tr( model.getIssues() ) txtIssue = txtIssue + self.tr( '<br>The level has been saved!' ) QtGui.QMessageBox.warning( self, self.tr( "This level has issues!" 
), txtIssue ) def saveAndPlayLevel( self ): #@DaB only save current level, and don't "play" if it has "Issues" if self.saveCurrent(): model = self.getCurrentModel() if model: issue_level = model.hasIssues() if issue_level >= ISSUE_LEVEL_CRITICAL: txtIssue = self.tr( """<p>There are CRITICAL issues with this level that will cause World of Goo to crash.<br> You must fix these before you try to play the level.</p>""" ) txtIssue = txtIssue + self.tr( model.getIssues() ) txtIssue = txtIssue + self.tr( '<br>The level has been saved!' ) QtGui.QMessageBox.warning( self, self.tr( "This level has CRITICAL issues!" ), txtIssue ) elif issue_level > ISSUE_LEVEL_NONE: txtIssue = self.tr( """<p>There are Advice/Warnings for this level that may cause problems.<br> You should fix these before you try to play the level.</p>""" ) txtIssue = txtIssue + self.tr( model.getIssues() ) txtIssue = txtIssue + self.tr( '<br>Click OK to Play anyway, or click Cancel to go back.' ) ret = QtGui.QMessageBox.warning( self, self.tr( "This level has warnings!" ), txtIssue, QtGui.QMessageBox.Ok | QtGui.QMessageBox.Cancel ) if ret == QtGui.QMessageBox.Ok: self._game_model.playLevel( model ) else: self._game_model.playLevel( model ) else: self.statusBar().showMessage( self.tr( "You must select a level to play" ), 2000 ) def newLevel( self ): """Creates a new blank level.""" new_name = self._pickNewName( is_cloning = False ) if new_name: try: self._game_model.newLevel( new_name ) world = self._game_model.selectLevel( new_name ) self._addGraphicView( world ) except ( IOError, OSError ), e: QtGui.QMessageBox.warning( self, self.tr( "Failed to create the new level! 
(" + APP_NAME_PROPER + " " + CURRENT_VERSION + ")" ), unicode( e ) ) def _pickNewName( self, is_cloning = False ): if self._game_model: dialog = QtGui.QDialog() ui = newleveldialog_ui.Ui_NewLevelDialog() ui.setupUi( dialog ) reg_ex = QtCore.QRegExp( '[A-Za-z][0-9A-Za-z_][0-9A-Za-z_]+' ) validator = QtGui.QRegExpValidator( reg_ex, dialog ) ui.levelName.setValidator( validator ) if is_cloning: dialog.setWindowTitle( tr( "NewLevelDialog", "Cloning Level" ) ) if dialog.exec_(): new_name = str( ui.levelName.text() ) existing_names = [name.lower() for name in self._game_model.names] if new_name.lower() not in existing_names: return new_name QtGui.QMessageBox.warning( self, self.tr( "Can not create level!" ), self.tr( "There is already a level named '%1'" ).arg( new_name ) ) return None def cloneLevel( self ): """Clone the selected level.""" current_model = self.getCurrentModel() if current_model: new_name = self._pickNewName( is_cloning = True ) if new_name: try: self._game_model.cloneLevel( current_model.name, new_name ) world = self._game_model.selectLevel( new_name ) self._addGraphicView( world ) self._setRecentFile( new_name ) except ( IOError, OSError ), e: QtGui.QMessageBox.warning( self, self.tr( "Failed to create the new cloned level! (" + APP_NAME_PROPER + " " + CURRENT_VERSION + ")" ), unicode( e ) ) def updateResources( self ): """Adds the required resource in the level based on existing file.""" model = self.getCurrentModel() if model: model.game_model.pixmap_cache.refresh() added_resource_elements = model.updateResources() if added_resource_elements: model.set_selection( added_resource_elements ) model._view.refreshFromModel() def cleanResources( self ): model = self.getCurrentModel() if model: unused = model._get_unused_resources() unusedlist = '' for id in unused: unusedlist += id + '\n' if unusedlist != '': unusedlist = "The following resources are unused\n" + unusedlist + "\nAre you sure you want to remove them?" 
ret = QtGui.QMessageBox.warning( self, self.tr( "Remove unused resources" ), unusedlist, QtGui.QMessageBox.Ok | QtGui.QMessageBox.Cancel ) if ret == QtGui.QMessageBox.Ok: model._remove_unused_resources( model.resource_root, unused ) else: QtGui.QMessageBox.warning( self, self.tr( "Remove unused resources" ), self.tr( "There are no unused resources\n" ) ) def importResources( self ): """Adds the required resource in the level based on existing file.""" model = self.getCurrentModel() if model: #game_dir = os.path.normpath( os.path.split( self._amy_path )[0] ) #res_dir = os.path.join( game_dir, 'res' ) dir = os.path.join( self._game_model._res_dir, STR_DIR_STUB ) files = QtGui.QFileDialog.getOpenFileNames( self, self.tr( 'Select the Images to import...' ), dir, self.tr( 'Images (*.png)' ) ) if files.isEmpty(): # user canceled action return safefiles = [] for file in files: safefiles.append( os.path.normpath( str( file ) ) ) added_resource_elements = model.importResources( safefiles, self._game_model._res_dir ) if added_resource_elements: model.set_selection( added_resource_elements ) else: ie = model.importError() if ie is not None: QtGui.QMessageBox.warning( self, self.tr( ie[0] ), self.tr( ie[1] ) ) def about( self ): QtGui.QMessageBox.about( self, self.tr( "About Amy In Da Farm! Level Editor " + CURRENT_VERSION ), self.tr( """<p>Amy In Da Farm! 
Level Editor helps you create new levels for Amy In Da Farm!.<p> <p>Developer Page, Sources and Reference Guide:<br> <a href="http://github.com/reven86/dfg-amy-editor">http://github.com/reven86/dfg-amy-editor</a></p> <p>Copyright 2010, Andrew Karpushin &lt;andrew.karpushin at dreamfarmgames.com&gt;</p> <p>&nbsp;<br>Original based on World Of Goo Level Editor (WooGLE) by DaftasBrush: (v0.77)</p> <p>Copyright 2010, DaftasBrush<br> <a href="http://goofans.com/download/utility/world-of-goo-level-editor">http://goofans.com/download/utility/world-of-goo-level-editor</a></p> <p>&nbsp;<br>Original Sourceforge project: (v0.5) <a href="http://www.sourceforge.net/projects/wogedit">http://www.sourceforge.net/projects/wogedit</a><br> Copyright 2008-2009, NitroZark &lt;nitrozark at users.sourceforget.net&gt;</p>""" ) ) def on_cut_action( self ): elements = self.on_copy_action( is_cut_action = True ) if elements: for element in elements: if element.meta.read_only: #Messagebox QtGui.QMessageBox.warning( self, self.tr( "Cannot Cut read only element!" 
), self.tr( 'This element is read only.\n' 'It cannot be cut' ) ) return self.on_delete_action( is_cut_action = True ) self.statusBar().showMessage( self.tr( 'Element "%s" cut to clipboard' % elements[0].tag ), 1000 ) def on_copy_action( self, is_cut_action = False ): world = self.getCurrentModel() if world: elements = list( world.selected_elements ) on_clipboard = set() clipboard_element = xml.etree.ElementTree._ElementInterface( 'WooGLEClipboard', {} ) for element in elements: on_clipboard.add( element.tag ) xml_data = element.to_xml_with_meta() clipboard_element.append( xml.etree.ElementTree.fromstring( xml_data ) ) clipboard = QtGui.QApplication.clipboard() if len( on_clipboard ) == 1: clipboard_element.set( 'type', list( on_clipboard )[0] ) else: clipboard_element.set( 'type', "Various" ) scene = self.get_active_view().scene() # bounding rect of selected items i = 0 for item in scene.selectedItems(): if i == 0: brect = item.mapToScene( item.boundingRect() ).boundingRect() mybrect = [brect.left(), brect.right(), brect.bottom(), brect.top()] else: brect = item.mapToScene( item.boundingRect() ).boundingRect() if brect.left() < mybrect[0]: mybrect[0] = brect.left() if brect.right() > mybrect[1]: mybrect[1] = brect.right() if brect.bottom() < mybrect[2]: mybrect[2] = brect.bottom() if brect.top() > mybrect[3]: mybrect[3] = brect.top() i += 1 clipboard_element.set( 'posx', str( ( mybrect[0] + mybrect[1] ) * 0.5 ) ) clipboard_element.set( 'posy', str( -( mybrect[2] + mybrect[3] ) * 0.5 ) ) xml_data = xml.etree.ElementTree.tostring( clipboard_element, 'utf-8' ) clipboard.setText( xml_data ) if not is_cut_action: self.statusBar().showMessage( self.tr( '%d Element "%s" copied to clipboard' % ( len( elements ), clipboard_element.get( 'type' ) ) ), 1000 ) self.common_actions['paste'].setText( "Paste In Place (" + clipboard_element.get( 'type' ) + ")" ) self.common_actions['pastehere'].setText( "Paste Here (" + clipboard_element.get( 'type' ) + ")" ) return elements def 
on_pasteto_action( self ): clipboard = QtGui.QApplication.clipboard() xml_data = unicode( clipboard.text() ) world = self.getCurrentModel() if world is None or not xml_data: return clipboard_element = xml.etree.ElementTree.fromstring( xml_data ) view = self.get_active_view() paste_posx, paste_posy = view._last_pos.x(), -view._last_pos.y() copy_posx, copy_posy = float( clipboard_element.get( 'posx', 0 ) ), float( clipboard_element.get( 'posy', 0 ) ) pasted_elements = [] for clip_child in clipboard_element.getchildren(): xml_data = xml.etree.ElementTree.tostring( clip_child, 'utf-8' ) for element in [tree.root for tree in world.trees]: child_elements = element.make_detached_child_from_xml( xml_data ) if child_elements: pasted_elements.extend( child_elements ) for child_element in child_elements: # find the pos attribute in the meta # set it to view._last_release_at pos_attribute = self._getPositionAttribute( child_element ) if pos_attribute is not None: old_pos = pos_attribute.get_native( child_element, ( 0, 0 ) ) if clipboard_element.__len__() == 1: pos_attribute.set_native( child_element, [view._last_pos.x(), -view._last_pos.y()] ) else: pos_attribute.set_native( child_element, [old_pos[0] + paste_posx - copy_posx, old_pos[1] + paste_posy - copy_posy] ) element.safe_identifier_insert( len( element ), child_element ) break if len( pasted_elements ) >= 1: world.set_selection( pasted_elements ) def _getPositionAttribute( self, element ): for attribute_meta in element.meta.attributes: if attribute_meta.type == metaworld.XY_TYPE: if attribute_meta.position: return attribute_meta return None def on_paste_action( self ): clipboard = QtGui.QApplication.clipboard() xml_data = unicode( clipboard.text() ) world = self.getCurrentModel() if world is None or not xml_data: return elements = list( world.selected_elements ) if len( elements ) == 0: # Allow pasting to root when no selection elements = [tree.root for tree in world.trees] # Try to paste in one of the selected 
elements. Stop when succeed clipboard_element = xml.etree.ElementTree.fromstring( xml_data ) pasted_elements = [] for clip_child in clipboard_element.getchildren(): xml_data = xml.etree.ElementTree.tostring( clip_child, 'utf-8' ) for element in elements: while element is not None: child_elements = element.make_detached_child_from_xml( xml_data ) if child_elements: for child_element in child_elements: element.safe_identifier_insert( len( element ), child_element ) pasted_elements.extend( child_elements ) break element = element.parent if len( pasted_elements ) >= 1: element.world.set_selection( pasted_elements ) def on_undo_action( self ): world = self.getCurrentModel() if world is None: return world.undo() def on_redo_action( self ): world = self.getCurrentModel() if world is None: return world.redo() def on_delete_action( self, is_cut_action = False ): world = self.getCurrentModel() if world is None: return deleted_elements = [] previous_element = None for element in list( world.selected_elements ): if element.meta.read_only: #messagebox QtGui.QMessageBox.warning( self, self.tr( "Cannot delete read only element!" 
), self.tr( 'This element is read only.\n' 'It cannot be deleted' ) ) return 0 elif not element.is_root(): if element.previous_element() not in list( world.selected_elements ): previous_element = element.previous_element() deleted_elements.append( element.tag ) element.parent.remove( element ) if is_cut_action: return len( deleted_elements ) if deleted_elements: self.statusBar().showMessage( self.tr( 'Deleted %d element(s)' % len( deleted_elements ) ), 1000 ) world.set_selection( previous_element ) def _on_view_tool_actived( self, tool_name ): active_view = self.get_active_view() if active_view is not None: active_view.tool_activated( tool_name ) def on_pan_tool_action( self ): self._on_view_tool_actived( levelview.TOOL_PAN ) def on_move_tool_action( self ): self._on_view_tool_actived( levelview.TOOL_MOVE ) def onRefreshAction( self ): """Called multiple time per second. Used to refresh enabled flags of actions.""" has_amy_dir = self._game_model is not None #@DaB - Now that save and "save and play" only act on the # current level it's better if that toolbars buttons # change state based on the current level, rather than all levels currentModel = self.getCurrentModel() is_selected = currentModel is not None can_select = is_selected and self.view_actions[levelview.TOOL_MOVE].isChecked() if is_selected: can_save = has_amy_dir and currentModel.is_dirty element_is_selected = can_select and len( currentModel.selected_elements ) > 0 can_import = is_selected and not currentModel.isReadOnly can_undo = currentModel.can_undo can_redo = currentModel.can_redo if currentModel.is_dirty: if currentModel.isReadOnly: self.mdiArea.activeSubWindow().setWindowIcon( QtGui.QIcon ( ':/images/nosave.png' ) ) else: self.mdiArea.activeSubWindow().setWindowIcon( QtGui.QIcon ( ':/images/dirty.png' ) ) else: self.mdiArea.activeSubWindow().setWindowIcon( QtGui.QIcon ( ':/images/clean.png' ) ) else: can_save = False element_is_selected = False can_import = False can_undo = False can_redo = False 
self.editLevelAction.setEnabled( has_amy_dir ) self.newLevelAction.setEnabled( has_amy_dir ) self.cloneLevelAction.setEnabled( is_selected ) self.saveAction.setEnabled( can_save and True or False ) self.playAction.setEnabled( is_selected ) #Edit Menu / ToolBar self.common_actions['cut'].setEnabled ( element_is_selected ) self.common_actions['copy'].setEnabled ( element_is_selected ) self.common_actions['paste'].setEnabled ( is_selected ) self.common_actions['delete'].setEnabled ( element_is_selected ) self.undoAction.setEnabled ( can_undo ) self.redoAction.setEnabled ( can_redo ) #Resources self.importResourcesAction.setEnabled ( can_import ) self.cleanResourcesAction.setEnabled ( can_import ) self.updateResourcesAction.setEnabled( can_import ) self.addItemToolBar.setEnabled( can_select ) self.showhideToolBar.setEnabled( is_selected ) active_view = self.get_active_view() enabled_view_tools = set() if active_view: enabled_view_tools = active_view.get_enabled_view_tools() for name, action in self.view_actions.iteritems(): is_enabled = name in enabled_view_tools action.setEnabled( is_enabled ) if self.view_action_group.checkedAction() is None: self.view_actions[levelview.TOOL_MOVE].setChecked( True ) def _on_refresh_element_status( self ): # broadcast the event to all ElementIssueTracker louie.send_minimal( metaworldui.RefreshElementIssues ) def createMDIArea( self ): self.mdiArea = QtGui.QMdiArea() self.mdiArea.setViewMode( QtGui.QMdiArea.TabbedView ) for thing in self.mdiArea.findChildren( QtGui.QTabBar ): thing.setTabsClosable( True ) self.connect ( thing, QtCore.SIGNAL( "tabCloseRequested(int)" ), self.on_closeTab ) self.setCentralWidget( self.mdiArea ) def on_closeTab( self, index ): sub = self.mdiArea.subWindowList()[index] sub.close() def createActions( self ): self.changeAmyDirAction = qthelper.action( self, handler = self.changeAmyDir, icon = ":/images/open.png", text = "&Change Amy In Da Farm! 
directory...", shortcut = QtGui.QKeySequence.Open, status_tip = "Change Amy In Da Farm! top-directory" ) self.editLevelAction = qthelper.action( self, handler = self.editLevel, icon = ":/images/icon-amy-level.png", text = "&Edit existing level...", shortcut = "Ctrl+L", status_tip = "Select a level to edit" ) self.newLevelAction = qthelper.action( self, handler = self.newLevel, icon = ":/images/icon-amy-new-level2.png", text = "&New level...", shortcut = QtGui.QKeySequence.New, status_tip = "Creates a new level" ) self.cloneLevelAction = qthelper.action( self, handler = self.cloneLevel, icon = ":/images/icon-amy-clone-level.png", text = "&Clone selected level...", shortcut = "Ctrl+D", status_tip = "Clone the selected level" ) self.saveAction = qthelper.action( self, handler = self.saveIT, icon = ":/images/save.png", text = "&Save...", shortcut = QtGui.QKeySequence.Save, status_tip = "Saves the Level" ) self.playAction = qthelper.action( self, handler = self.saveAndPlayLevel, icon = ":/images/play.png", text = "&Save and play Level...", shortcut = "Ctrl+P", status_tip = "Save and play the selected level" ) self.updateResourcesAction = qthelper.action( self, handler = self.updateResources, icon = ":/images/update-level-resources.png", text = "&Update level resources...", shortcut = "Ctrl+U", status_tip = "Adds automatically all .png & .ogg files in the level directory to the level resources" ) self.cleanResourcesAction = qthelper.action( self, handler = self.cleanResources, icon = ":/images/cleanres.png", text = "&Clean Resources", status_tip = "Removes any unused resource from the level." 
) self.importResourcesAction = qthelper.action( self, handler = self.importResources, icon = ":/images/importres.png", text = "&Import images...", shortcut = "Ctrl+I", status_tip = "Adds images (png) to the level resources" ) self.quitAct = qthelper.action( self, handler = self.close, text = "&Quit", shortcut = "Ctrl+Q", status_tip = "Quit the application" ) self.aboutAct = qthelper.action( self, handler = self.about, icon = ":/images/icon.png", text = "&About", status_tip = "Show the application's About box" ) self.recentfile_actions = [qthelper.action( self, handler = self.on_recentfile_action, visible = False ) for i in range( 0, MAXRECENTFILES )] #@UnusedVariable self.common_actions = { 'cut': qthelper.action( self, handler = self.on_cut_action, icon = ":/images/cut.png", text = "Cu&t", shortcut = QtGui.QKeySequence.Cut ), 'copy': qthelper.action( self, handler = self.on_copy_action, icon = ":/images/copy.png", text = "&Copy", shortcut = QtGui.QKeySequence.Copy ), 'paste': qthelper.action( self, handler = self.on_paste_action, icon = ":/images/paste.png", text = "Paste &In Place", shortcut = "Ctrl+Shift+V" ), 'pastehere': qthelper.action( self, handler = self.on_pasteto_action, icon = ":/images/paste.png", text = "&Paste Here", shortcut = QtGui.QKeySequence.Paste ), 'delete': qthelper.action( self, handler = self.on_delete_action, icon = ":/images/delete.png", text = "&Delete", shortcut = QtGui.QKeySequence.Delete ) } self.undoAction = qthelper.action( self, handler = self.on_undo_action, icon = ":/images/undo.png", text = "&Undo", shortcut = QtGui.QKeySequence.Undo ) self.redoAction = qthelper.action( self, handler = self.on_redo_action, icon = ":/images/redo.png", text = "&Redo", shortcut = QtGui.QKeySequence.Redo ) class ShowHideFactory( object ): def __init__( self, window, elements ): self.window = window self.elements = elements def __call__( self ): lv = self.window.get_active_view() if lv is not None: for elementtype in self.elements: currentstate = 
lv.get_element_state( elementtype ) newstate = 2 - currentstate lv.set_element_state( elementtype, newstate ) lv.refreshFromModel() self.showhide_actions = { 'camera': qthelper.action( self, handler = ShowHideFactory( self , ['camera', 'poi'] ), text = "Show/Hide Camera" , icon = ":/images/show-camera.png" ), 'fields': qthelper.action( self, handler = ShowHideFactory( self , ['linearforcefield', 'radialforcefield'] ), text = "Show/Hide Forcefields", icon = ":/images/show-physic.png" ), 'geom': qthelper.action( self, handler = ShowHideFactory( self , ['rectangle', 'circle', 'compositegeom', 'levelexit', 'line', 'hinge'] ), text = "Show/Hide Geometry" , icon = ":/images/show-geom.png" ), 'gfx': qthelper.action( self, handler = ShowHideFactory( self , ['scenelayer', 'pixmap'] ), text = "Show/Hide Graphics" , icon = ":/images/show-gfx.png" ), 'labels': qthelper.action( self, handler = ShowHideFactory( self , ['label'] ), text = "Show/Hide Labels" , icon = ":/images/show-label.png" ) } self.view_action_group = QtGui.QActionGroup( self ) self.view_actions = { levelview.TOOL_PAN: qthelper.action( self, handler = self.on_pan_tool_action, icon = ":/images/zoom.png", text = "&Zoom and Pan view (F)", shortcut = 'F', checkable = True ), levelview.TOOL_MOVE: qthelper.action( self, handler = self.on_move_tool_action, icon = ":/images/tool-move.png", text = "&Select, Move and Resize", shortcut = 'T', checked = True, checkable = True ) } for action in self.view_actions.itervalues(): self.view_action_group.addAction( action ) self.additem_actions = { 'line':qthelper.action( self, handler = AddItemFactory( self, 'scene', 'line', {} ), icon = ":/images/addline.png", text = "&Add a Line" ), 'rectangle':qthelper.action( self, handler = AddItemFactory( self, 'scene', 'rectangle', {} ), icon = ":/images/addrect.png", text = "&Add Rectangle" ), 'circle': qthelper.action( self, handler = AddItemFactory( self, 'scene', 'circle', {} ), icon = ":/images/addcircle.png", text = "&Add Circle" ), 
'image': qthelper.action( self, handler = AddItemFactory( self, 'scene', 'scenelayer', {} ), icon = ":/images/group-image.png", text = "&Add Image (SceneLayer)" ), 'compgeom': qthelper.action( self, handler = AddItemFactory( self, 'scene', 'compositegeom', {} ), icon = ":/images/compgeom.png", text = "&Add Composite Geometry (Parent)" ), 'childrect':qthelper.action( self, handler = AddItemFactory( self, 'compositegeom', 'rectangle', {} ), icon = ":/images/childrect.png", text = "&Add Child Rectangle" ), 'childcircle':qthelper.action( self, handler = AddItemFactory( self, 'compositegeom', 'circle', {} ), icon = ":/images/childcircle.png", text = "&Add Child Circle" ), 'hinge': qthelper.action( self, handler = AddItemFactory( self, 'scene', 'hinge', {} ), icon = ":/images/hinge.png", text = "&Add Hinge" ), 'lff': qthelper.action( self, handler = AddItemFactory( self, 'scene', 'linearforcefield', {'size':'100,100'} ), icon = ":/images/lff.png", text = "&Add Linear force Field" ), 'rff': qthelper.action( self, handler = AddItemFactory( self, 'scene', 'radialforcefield', {} ), icon = ":/images/rff.png", text = "&Add Radial force Field" ), 'label': qthelper.action( self, handler = AddItemFactory( self, 'scene', 'label', {} ), icon = ":/images/label.png", text = "&Add Label" ) } self.actionTimer = QtCore.QTimer( self ) self.connect( self.actionTimer, QtCore.SIGNAL( "timeout()" ), self.onRefreshAction ) self.actionTimer.start( 250 ) # Refresh action enabled flag every 250ms. self.statusTimer = QtCore.QTimer( self ) self.connect( self.statusTimer, QtCore.SIGNAL( "timeout()" ), self._on_refresh_element_status ) self.statusTimer.start( 300 ) # Refresh element status every 300ms. 
def createMenus( self ): self.fileMenu = self.menuBar().addMenu( self.tr( "&File" ) ) self.fileMenu.addAction( self.newLevelAction ) self.fileMenu.addAction( self.editLevelAction ) self.fileMenu.addAction( self.cloneLevelAction ) self.fileMenu.addAction( self.saveAction ) self.fileMenu.addAction( self.playAction ) self.fileMenu.addSeparator() self.fileMenu.addAction( self.changeAmyDirAction ) self.separatorRecent = self.fileMenu.addSeparator() for recentaction in self.recentfile_actions: self.fileMenu.addAction( recentaction ) self.fileMenu.addSeparator() self.fileMenu.addAction( self.quitAct ) self.editMenu = self.menuBar().addMenu( self.tr( "&Edit" ) ) self.editMenu.addAction( self.undoAction ) self.editMenu.addAction( self.redoAction ) self.editMenu.addSeparator() self.editMenu.addAction( self.common_actions['cut'] ) self.editMenu.addAction( self.common_actions['copy'] ) self.editMenu.addAction( self.common_actions['paste'] ) self.editMenu.addAction( self.common_actions['pastehere'] ) self.editMenu.addSeparator() self.editMenu.addAction( self.common_actions['delete'] ) self.menuBar().addSeparator() self.resourceMenu = self.menuBar().addMenu( self.tr( "&Resources" ) ) self.resourceMenu.addAction( self.updateResourcesAction ) self.resourceMenu.addAction( self.importResourcesAction ) self.resourceMenu.addSeparator() self.resourceMenu.addAction( self.cleanResourcesAction ) self.resourceMenu.addSeparator() self.menuBar().addSeparator() # @todo add Windows menu. Take MDI example as model. 
self.helpMenu = self.menuBar().addMenu( self.tr( "&Help" ) ) self.helpMenu.addAction( self.aboutAct ) def createToolBars( self ): self.fileToolBar = self.addToolBar( self.tr( "File" ) ) self.fileToolBar.setObjectName( "fileToolbar" ) # self.fileToolBar.addAction(self.changeAmyDirAction) self.fileToolBar.addAction( self.newLevelAction ) self.fileToolBar.addAction( self.editLevelAction ) self.fileToolBar.addAction( self.cloneLevelAction ) self.fileToolBar.addSeparator() self.fileToolBar.addAction( self.saveAction ) self.fileToolBar.addAction( self.playAction ) self.fileToolBar.addSeparator() self.editToolbar = self.addToolBar( self.tr( "Edit" ) ) self.editToolbar.setObjectName( "editToolbar" ) self.editToolbar.addAction( self.undoAction ) self.editToolbar.addAction( self.redoAction ) self.editToolbar.addSeparator() self.editToolbar.addAction( self.common_actions['cut'] ) self.editToolbar.addAction( self.common_actions['copy'] ) self.editToolbar.addAction( self.common_actions['paste'] ) self.editToolbar.addSeparator() self.editToolbar.addAction( self.common_actions['delete'] ) self.resourceToolBar = self.addToolBar( self.tr( "Resources" ) ) self.resourceToolBar.setObjectName( "resourceToolbar" ) self.resourceToolBar.addAction( self.updateResourcesAction ) self.resourceToolBar.addAction( self.importResourcesAction ) self.resourceToolBar.addSeparator() self.resourceToolBar.addAction( self.cleanResourcesAction ) self.resourceToolBar.addSeparator() self.levelViewToolBar = self.addToolBar( self.tr( "Level View" ) ) self.levelViewToolBar.setObjectName( "levelViewToolbar" ) for name in ( 'move', 'pan' ): action = self.view_actions[name] self.levelViewToolBar.addAction( action ) self.addItemToolBar = QtGui.QToolBar( self.tr( "Add Item" ) ) self.addItemToolBar.setObjectName( "addItemToolbar" ) self.addToolBar( Qt.LeftToolBarArea, self.addItemToolBar ) additem_action_list = ['line', 'rectangle', 'circle', 'image', 'compgeom', 'childrect', 'childcircle', 'hinge', 'sep1', 'lff', 
'rff', 'sep2', 'label' ] for name in additem_action_list: if name not in self.additem_actions: self.addItemToolBar.addSeparator() else: self.addItemToolBar.addAction( self.additem_actions[name] ) self.showhideToolBar = self.addToolBar( self.tr( "Show/Hide" ) ) self.showhideToolBar.setObjectName( "showhideToolbar" ) for elementtype in ( 'camera', 'fields', 'geom', 'gfx', 'labels' ): self.showhideToolBar.addAction( self.showhide_actions[elementtype] ) def createStatusBar( self ): self.statusBar().showMessage( self.tr( "Ready" ) ) self._mousePositionLabel = QtGui.QLabel() self.statusBar().addPermanentWidget( self._mousePositionLabel ) def createElementTreeView( self, name, tree_meta, sibling_tabbed_dock = None ): dock = QtGui.QDockWidget( self.tr( name ), self ) dock.setObjectName( name + '_tab' ) dock.setAllowedAreas( Qt.RightDockWidgetArea ) element_tree_view = metatreeui.MetaWorldTreeView( self.common_actions, self.group_icons, dock ) tree_model = metatreeui.MetaWorldTreeModel( tree_meta, self.group_icons, element_tree_view ) element_tree_view.setModel( tree_model ) dock.setWidget( element_tree_view ) self.addDockWidget( Qt.RightDockWidgetArea, dock ) if sibling_tabbed_dock: # Stacks the dock widget together self.tabifyDockWidget( sibling_tabbed_dock, dock ) dock.setFeatures( QtGui.QDockWidget.NoDockWidgetFeatures ) self.tree_view_by_element_world[tree_meta] = element_tree_view return dock, element_tree_view def createDockWindows( self ): self.group_icons = {} for group in 'camera game image physic resource shape text info material rect circle compgeom line anim'.split(): self.group_icons[group] = QtGui.QIcon( ":/images/group-%s.png" % group ) self.tree_view_by_element_world = {} # map of all tree views scene_dock, self.sceneTree = self.createElementTreeView( 'Scene', metawog.TREE_LEVEL_SCENE ) level_dock, self.levelTree = self.createElementTreeView( 'Level', metawog.TREE_LEVEL_GAME, scene_dock ) resource_dock, self.levelResourceTree = self.createElementTreeView( 
'Resource', #@UnusedVariable metawog.TREE_LEVEL_RESOURCE, level_dock ) scene_dock.raise_() # Makes the scene the default active tab dock = QtGui.QDockWidget( self.tr( "Properties" ), self ) dock.setAllowedAreas( Qt.RightDockWidgetArea ) dock.setFeatures( QtGui.QDockWidget.NoDockWidgetFeatures ) dock.setObjectName( 'properties' ) self.propertiesList = metaelementui.MetaWorldPropertyListView( self.statusBar(), dock ) self.propertiesListModel = metaelementui.MetaWorldPropertyListModel( 0, 2, self.propertiesList ) # nb rows, nb cols self.propertiesList.setModel( self.propertiesListModel ) dock.setWidget( self.propertiesList ) self.addDockWidget( Qt.RightDockWidgetArea, dock ) def _readSettings( self ): """Reads setting from previous session & restore window state.""" settings = QtCore.QSettings() settings.beginGroup( "MainWindow" ) self._amy_path = unicode( settings.value( "amy_path", QtCore.QVariant( u'' ) ).toString() ) if self._amy_path == u'.': self._amy_path = u'' elif self._amy_path != u'': self._amy_path = os.path.normpath( self._amy_path ) if settings.value( "wasMaximized", False ).toBool(): self.showMaximized() else: self.resize( settings.value( "size", QtCore.QVariant( QtCore.QSize( 640, 480 ) ) ).toSize() ) self.move( settings.value( "pos", QtCore.QVariant( QtCore.QPoint( 200, 200 ) ) ).toPoint() ) windowstate = settings.value( "windowState", None ); if windowstate is not None: self.restoreState( windowstate.toByteArray() ) self.recentFiles = settings.value( "recent_files" ).toStringList() self._updateRecentFiles() settings.endGroup() def _writeSettings( self ): """Persists the session window state for future restoration.""" # Settings should be stored in HKEY_CURRENT_USER\Software\WOGCorp\WOG Editor settings = QtCore.QSettings() #@todo makes helper to avoid QVariant conversions settings.beginGroup( "MainWindow" ) settings.setValue( "amy_path", QtCore.QVariant( QtCore.QString( self._amy_path or u'' ) ) ) settings.setValue( "wasMaximized", QtCore.QVariant( 
self.isMaximized() ) ) settings.setValue( "size", QtCore.QVariant( self.size() ) ) settings.setValue( "pos", QtCore.QVariant( self.pos() ) ) settings.setValue( "windowState", self.saveState() ) settings.setValue( "recent_files", self.recentFiles ) settings.endGroup() def closeEvent( self, event ): """Called when user close the main window.""" #@todo check if user really want to quit for subwin in self.mdiArea.subWindowList(): if not subwin.close(): event.ignore() return self._writeSettings() self.actionTimer.stop self.statusTimer.stop QtGui.QMainWindow.closeEvent( self, event ) event.accept() if __name__ == "__main__": app = QtGui.QApplication( sys.argv ) # Set keys for settings app.setOrganizationName( "DreamFarmGames" ) app.setOrganizationDomain( "dreamfarmgames.com" ) app.setApplicationName( "Amy In Da Farm! Editor" ) if LOG_TO_FILE: saveout = sys.stdout saveerr = sys.stderr fout = open( APP_NAME_LOWER + '.log', 'a' ) sys.stdout = fout sys.stderr = fout print "" print "------------------------------------------------------" print APP_NAME_PROPER + " started ", datetime.now(), "File Logging Enabled" mainwindow = MainWindow() mainwindow.show() appex = app.exec_() if LOG_TO_FILE: sys.stdout = saveout sys.stderr = saveerr fout.close() sys.exit( appex )
# --- corpus separator: codeparrot/github-code-clean (boundary between two concatenated source files) ---
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Base Estimator class.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import copy import os import tempfile import numpy as np import six from google.protobuf import message from tensorflow.core.framework import summary_pb2 from tensorflow.core.protobuf import config_pb2 from tensorflow.core.protobuf import rewriter_config_pb2 from tensorflow.python.client import session as tf_session from tensorflow.python.data.ops import dataset_ops from tensorflow.python.eager import context from tensorflow.python.estimator import model_fn as model_fn_lib from tensorflow.python.estimator import run_config from tensorflow.python.estimator.export import export as export_helpers from tensorflow.python.estimator.export import export_output from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import random_seed from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import metrics as metrics_lib from tensorflow.python.ops import resources from tensorflow.python.ops import variables from tensorflow.python.platform import gfile from tensorflow.python.platform import tf_logging as logging from 
tensorflow.python.saved_model import builder as saved_model_builder from tensorflow.python.saved_model import constants from tensorflow.python.summary import summary from tensorflow.python.summary.writer import writer_cache from tensorflow.python.training import device_setter from tensorflow.python.training import distribute as distribute_lib from tensorflow.python.training import evaluation from tensorflow.python.training import monitored_session from tensorflow.python.training import saver from tensorflow.python.training import training from tensorflow.python.training import training_util from tensorflow.python.training import warm_starting_util from tensorflow.python.util import compat from tensorflow.python.util import compat_internal from tensorflow.python.util import function_utils from tensorflow.python.util import nest from tensorflow.python.util.tf_export import tf_export _VALID_MODEL_FN_ARGS = set( ['features', 'labels', 'mode', 'params', 'self', 'config']) @tf_export('estimator.Estimator') class Estimator(object): """Estimator class to train and evaluate TensorFlow models. The `Estimator` object wraps a model which is specified by a `model_fn`, which, given inputs and a number of other parameters, returns the ops necessary to perform training, evaluation, or predictions. All outputs (checkpoints, event files, etc.) are written to `model_dir`, or a subdirectory thereof. If `model_dir` is not set, a temporary directory is used. The `config` argument can be passed `RunConfig` object containing information about the execution environment. It is passed on to the `model_fn`, if the `model_fn` has a parameter named "config" (and input functions in the same manner). If the `config` parameter is not passed, it is instantiated by the `Estimator`. Not passing config means that defaults useful for local execution are used. 
`Estimator` makes config available to the model (for instance, to allow specialization based on the number of workers available), and also uses some of its fields to control internals, especially regarding checkpointing. The `params` argument contains hyperparameters. It is passed to the `model_fn`, if the `model_fn` has a parameter named "params", and to the input functions in the same manner. `Estimator` only passes params along, it does not inspect it. The structure of `params` is therefore entirely up to the developer. None of `Estimator`'s methods can be overridden in subclasses (its constructor enforces this). Subclasses should use `model_fn` to configure the base class, and may add methods implementing specialized functionality. """ def __init__(self, model_fn, model_dir=None, config=None, params=None, warm_start_from=None): """Constructs an `Estimator` instance. See @{$estimators} for more information. To warm-start an `Estimator`: ```python estimator = tf.estimator.DNNClassifier( feature_columns=[categorical_feature_a_emb, categorical_feature_b_emb], hidden_units=[1024, 512, 256], warm_start_from="/path/to/checkpoint/dir") ``` For more details on warm-start configuration, see @{tf.estimator.WarmStartSettings$WarmStartSettings}. Args: model_fn: Model function. Follows the signature: * Args: * `features`: This is the first item returned from the `input_fn` passed to `train`, `evaluate`, and `predict`. This should be a single `Tensor` or `dict` of same. * `labels`: This is the second item returned from the `input_fn` passed to `train`, `evaluate`, and `predict`. This should be a single `Tensor` or `dict` of same (for multi-head models). If mode is `ModeKeys.PREDICT`, `labels=None` will be passed. If the `model_fn`'s signature does not accept `mode`, the `model_fn` must still be able to handle `labels=None`. * `mode`: Optional. Specifies if this training, evaluation or prediction. See `ModeKeys`. * `params`: Optional `dict` of hyperparameters. 
Will receive what is passed to Estimator in `params` parameter. This allows to configure Estimators from hyper parameter tuning. * `config`: Optional configuration object. Will receive what is passed to Estimator in `config` parameter, or the default `config`. Allows updating things in your `model_fn` based on configuration such as `num_ps_replicas`, or `model_dir`. * Returns: `EstimatorSpec` model_dir: Directory to save model parameters, graph and etc. This can also be used to load checkpoints from the directory into a estimator to continue training a previously saved model. If `PathLike` object, the path will be resolved. If `None`, the model_dir in `config` will be used if set. If both are set, they must be same. If both are `None`, a temporary directory will be used. config: Configuration object. params: `dict` of hyper parameters that will be passed into `model_fn`. Keys are names of parameters, values are basic python types. warm_start_from: Optional string filepath to a checkpoint or SavedModel to warm-start from, or a `tf.estimator.WarmStartSettings` object to fully configure warm-starting. If the string filepath is provided instead of a `WarmStartSettings`, then all variables are warm-started, and it is assumed that vocabularies and Tensor names are unchanged. Raises: ValueError: parameters of `model_fn` don't match `params`. ValueError: if this is called via a subclass and if that class overrides a member of `Estimator`. """ Estimator._assert_members_are_not_overridden(self) if config is None: self._config = run_config.RunConfig() logging.info('Using default config.') else: if not isinstance(config, run_config.RunConfig): raise ValueError( 'config must be an instance of RunConfig, but provided %s.' % config) self._config = config # The distribute field contains an instance of DistributionStrategy. self._distribution = self._config.train_distribute # Model directory. 
model_dir = compat_internal.path_to_str(model_dir) if (model_dir is not None) and (self._config.model_dir is not None): if model_dir != self._config.model_dir: # TODO(alanyee): remove this suppression after it is no longer needed # pylint: disable=g-doc-exception raise ValueError( "model_dir are set both in constructor and RunConfig, but with " "different values. In constructor: '{}', in RunConfig: " "'{}' ".format(model_dir, self._config.model_dir)) # pylint: enable=g-doc-exception self._model_dir = model_dir or self._config.model_dir if self._model_dir is None: self._model_dir = tempfile.mkdtemp() logging.warning('Using temporary folder as model directory: %s', self._model_dir) if self._config.model_dir is None: self._config = self._config.replace(model_dir=self._model_dir) logging.info('Using config: %s', str(vars(self._config))) if self._config.session_config is None: rewrite_opts = rewriter_config_pb2.RewriterConfig( meta_optimizer_iterations=rewriter_config_pb2.RewriterConfig.ONE) graph_opts = config_pb2.GraphOptions(rewrite_options=rewrite_opts) self._session_config = config_pb2.ConfigProto( allow_soft_placement=True, graph_options=graph_opts) else: self._session_config = self._config.session_config self._device_fn = ( self._config.device_fn or _get_replica_device_setter(self._config)) if model_fn is None: raise ValueError('model_fn must be provided to Estimator.') _verify_model_fn_args(model_fn, params) self._model_fn = model_fn self._params = copy.deepcopy(params or {}) # pylint: disable=protected-access self._warm_start_settings = _get_default_warm_start_settings( warm_start_from) # pylint: enable=protected-access @property def model_dir(self): return self._model_dir @property def config(self): return copy.deepcopy(self._config) @property def params(self): return copy.deepcopy(self._params) @property def model_fn(self): """Returns the model_fn which is bound to self.params. 
Returns: The model_fn with following signature: `def model_fn(features, labels, mode, config)` """ def public_model_fn(features, labels, mode, config): return self._call_model_fn(features, labels, mode, config) return public_model_fn # TODO(ispir): support a list of names def get_variable_value(self, name): """Returns value of the variable given by name. Args: name: string or a list of string, name of the tensor. Returns: Numpy array - value of the tensor. Raises: ValueError: If the Estimator has not produced a checkpoint yet. """ _check_checkpoint_available(self.model_dir) with context.graph_mode(): return training.load_variable(self.model_dir, name) def get_variable_names(self): """Returns list of all variable names in this model. Returns: List of names. Raises: ValueError: If the Estimator has not produced a checkpoint yet. """ _check_checkpoint_available(self.model_dir) with context.graph_mode(): return [name for name, _ in training.list_variables(self.model_dir)] def latest_checkpoint(self): """Finds the filename of latest saved checkpoint file in `model_dir`. Returns: The full path to the latest checkpoint or `None` if no checkpoint was found. """ with context.graph_mode(): return saver.latest_checkpoint(self.model_dir) def train(self, input_fn, hooks=None, steps=None, max_steps=None, saving_listeners=None): """Trains a model given training data input_fn. Args: input_fn: A function that provides input data for training as minibatches. See @{$premade_estimators#create_input_functions} for more information. The function should construct and return one of the following: * A 'tf.data.Dataset' object: Outputs of `Dataset` object must be a tuple (features, labels) with same constraints as below. * A tuple (features, labels): Where `features` is a `Tensor` or a dictionary of string feature name to `Tensor` and `labels` is a `Tensor` or a dictionary of string label name to `Tensor`. Both `features` and `labels` are consumed by `model_fn`. 
They should satisfy the expectation of `model_fn` from inputs. hooks: List of `SessionRunHook` subclass instances. Used for callbacks inside the training loop. steps: Number of steps for which to train model. If `None`, train forever or train until input_fn generates the `OutOfRange` error or `StopIteration` exception. 'steps' works incrementally. If you call two times train(steps=10) then training occurs in total 20 steps. If `OutOfRange` or `StopIteration` occurs in the middle, training stops before 20 steps. If you don't want to have incremental behavior please set `max_steps` instead. If set, `max_steps` must be `None`. max_steps: Number of total steps for which to train model. If `None`, train forever or train until input_fn generates the `OutOfRange` error or `StopIteration` exception. If set, `steps` must be `None`. If `OutOfRange` or `StopIteration` occurs in the middle, training stops before `max_steps` steps. Two calls to `train(steps=100)` means 200 training iterations. On the other hand, two calls to `train(max_steps=100)` means that the second call will not do any iteration since first call did all 100 steps. saving_listeners: list of `CheckpointSaverListener` objects. Used for callbacks that run immediately before or after checkpoint savings. Returns: `self`, for chaining. Raises: ValueError: If both `steps` and `max_steps` are not `None`. ValueError: If either `steps` or `max_steps` is <= 0. 
""" with context.graph_mode(): if (steps is not None) and (max_steps is not None): raise ValueError('Can not provide both steps and max_steps.') if steps is not None and steps <= 0: raise ValueError('Must specify steps > 0, given: {}'.format(steps)) if max_steps is not None and max_steps <= 0: raise ValueError( 'Must specify max_steps > 0, given: {}'.format(max_steps)) if max_steps is not None: start_step = _load_global_step_from_checkpoint_dir(self._model_dir) if max_steps <= start_step: logging.info('Skipping training since max_steps has already saved.') return self hooks = _check_hooks_type(hooks) hooks.extend(self._convert_train_steps_to_hooks(steps, max_steps)) saving_listeners = _check_listeners_type(saving_listeners) loss = self._train_model(input_fn, hooks, saving_listeners) logging.info('Loss for final step: %s.', loss) return self def _convert_train_steps_to_hooks(self, steps, max_steps): if steps is not None or max_steps is not None: return [training.StopAtStepHook(steps, max_steps)] else: return [] def eval_dir(self, name=None): """Shows directory name where evaluation metrics are dumped. Args: name: Name of the evaluation if user needs to run multiple evaluations on different data sets, such as on training data vs test data. Metrics for different evaluations are saved in separate folders, and appear separately in tensorboard. Returns: A string which is the path of directory contains evaluation metrics. """ return os.path.join(self._model_dir, 'eval' if not name else 'eval_' + name) def evaluate(self, input_fn, steps=None, hooks=None, checkpoint_path=None, name=None): """Evaluates the model given evaluation data input_fn. For each step, calls `input_fn`, which returns one batch of data. Evaluates until: - `steps` batches are processed, or - `input_fn` raises an end-of-input exception (`OutOfRangeError` or `StopIteration`). Args: input_fn: A function that constructs the input data for evaluation. 
See @{$premade_estimators#create_input_functions} for more information. The function should construct and return one of the following: * A 'tf.data.Dataset' object: Outputs of `Dataset` object must be a tuple (features, labels) with same constraints as below. * A tuple (features, labels): Where `features` is a `Tensor` or a dictionary of string feature name to `Tensor` and `labels` is a `Tensor` or a dictionary of string label name to `Tensor`. Both `features` and `labels` are consumed by `model_fn`. They should satisfy the expectation of `model_fn` from inputs. steps: Number of steps for which to evaluate model. If `None`, evaluates until `input_fn` raises an end-of-input exception. hooks: List of `SessionRunHook` subclass instances. Used for callbacks inside the evaluation call. checkpoint_path: Path of a specific checkpoint to evaluate. If `None`, the latest checkpoint in `model_dir` is used. If there are no checkpoints in `model_dir`, evaluation is run with newly initialized `Variables` instead of restored from checkpoint. name: Name of the evaluation if user needs to run multiple evaluations on different data sets, such as on training data vs test data. Metrics for different evaluations are saved in separate folders, and appear separately in tensorboard. Returns: A dict containing the evaluation metrics specified in `model_fn` keyed by name, as well as an entry `global_step` which contains the value of the global step for which this evaluation was performed. Raises: ValueError: If `steps <= 0`. ValueError: If no model has been trained, namely `model_dir`, or the given `checkpoint_path` is empty. """ with context.graph_mode(): hooks = _check_hooks_type(hooks) hooks.extend(self._convert_eval_steps_to_hooks(steps)) # Check that model has been trained (if nothing has been set explicitly). 
if not checkpoint_path: latest_path = saver.latest_checkpoint(self._model_dir) if not latest_path: logging.info('Could not find trained model in model_dir: {}, running ' 'initialization to evaluate.'.format(self._model_dir)) checkpoint_path = latest_path with ops.Graph().as_default(): (scaffold, update_op, eval_dict, all_hooks) = self._evaluate_build_graph( input_fn, hooks, checkpoint_path) return self._evaluate_run( checkpoint_path=checkpoint_path, scaffold=scaffold, update_op=update_op, eval_dict=eval_dict, all_hooks=all_hooks, output_dir=self.eval_dir(name)) def _convert_eval_steps_to_hooks(self, steps): if steps is None: return [] if steps <= 0: raise ValueError('Must specify steps > 0, given: {}'.format(steps)) return [evaluation._StopAfterNEvalsHook(num_evals=steps)] # pylint: disable=protected-access def predict(self, input_fn, predict_keys=None, hooks=None, checkpoint_path=None, yield_single_examples=True): """Yields predictions for given features. Args: input_fn: A function that constructs the features. Prediction continues until `input_fn` raises an end-of-input exception (`OutOfRangeError` or `StopIteration`). See @{$premade_estimators#create_input_functions} for more information. The function should construct and return one of the following: * A 'tf.data.Dataset' object: Outputs of `Dataset` object must have same constraints as below. * features: A `Tensor` or a dictionary of string feature name to `Tensor`. features are consumed by `model_fn`. They should satisfy the expectation of `model_fn` from inputs. * A tuple, in which case the first item is extracted as features. predict_keys: list of `str`, name of the keys to predict. It is used if the `EstimatorSpec.predictions` is a `dict`. If `predict_keys` is used then rest of the predictions will be filtered from the dictionary. If `None`, returns all. hooks: List of `SessionRunHook` subclass instances. Used for callbacks inside the prediction call. 
checkpoint_path: Path of a specific checkpoint to predict. If `None`, the latest checkpoint in `model_dir` is used. If there are no checkpoints in `model_dir`, prediction is run with newly initialized `Variables` instead of restored from checkpoint. yield_single_examples: If False, yield the whole batch as returned by the `model_fn` instead of decomposing the batch into individual elements. This is useful if `model_fn` returns some tensors whose first dimension is not equal to the batch size. Yields: Evaluated values of `predictions` tensors. Raises: ValueError: Could not find a trained model in `model_dir`. ValueError: If batch length of predictions is not the same and `yield_single_examples` is True. ValueError: If there is a conflict between `predict_keys` and `predictions`. For example if `predict_keys` is not `None` but `EstimatorSpec.predictions` is not a `dict`. """ with context.graph_mode(): hooks = _check_hooks_type(hooks) # Check that model has been trained. if not checkpoint_path: checkpoint_path = saver.latest_checkpoint(self._model_dir) if not checkpoint_path: logging.info('Could not find trained model in model_dir: {}, running ' 'initialization to predict.'.format(self._model_dir)) with ops.Graph().as_default() as g: random_seed.set_random_seed(self._config.tf_random_seed) self._create_and_assert_global_step(g) features, input_hooks = self._get_features_from_input_fn( input_fn, model_fn_lib.ModeKeys.PREDICT) estimator_spec = self._call_model_fn( features, None, model_fn_lib.ModeKeys.PREDICT, self.config) # Call to warm_start has to be after model_fn is called. 
self._maybe_warm_start(checkpoint_path) predictions = self._extract_keys( estimator_spec.predictions, predict_keys) all_hooks = list(input_hooks) all_hooks.extend(hooks) all_hooks.extend(list(estimator_spec.prediction_hooks or [])) with training.MonitoredSession( session_creator=training.ChiefSessionCreator( checkpoint_filename_with_path=checkpoint_path, master=self._config.master, scaffold=estimator_spec.scaffold, config=self._session_config), hooks=all_hooks) as mon_sess: while not mon_sess.should_stop(): preds_evaluated = mon_sess.run(predictions) if not yield_single_examples: yield preds_evaluated elif not isinstance(predictions, dict): for pred in preds_evaluated: yield pred else: for i in range(self._extract_batch_length(preds_evaluated)): yield { key: value[i] for key, value in six.iteritems(preds_evaluated) } def _assert_members_are_not_overridden(self): """Asserts members of `Estimator` are not overridden.""" allowed_overrides = set([ '_call_input_fn', '_create_global_step', '_convert_train_steps_to_hooks', '_convert_eval_steps_to_hooks', '_tf_api_names', '_validate_features_in_predict_input', '_call_model_fn', '_add_meta_graph_for_mode' ]) estimator_members = set([m for m in Estimator.__dict__.keys() if not m.startswith('__')]) subclass_members = set(self.__class__.__dict__.keys()) common_members = estimator_members & subclass_members - allowed_overrides overridden_members = [ m for m in common_members if Estimator.__dict__[m] != self.__class__.__dict__[m]] if overridden_members: raise ValueError( 'Subclasses of Estimator cannot override members of Estimator. ' '{} does override {}'.format(self.__class__, overridden_members)) def export_savedmodel( self, export_dir_base, serving_input_receiver_fn, assets_extra=None, as_text=False, checkpoint_path=None, strip_default_attrs=False): # pylint: disable=line-too-long """Exports inference graph as a SavedModel into given dir. 
For a detailed guide, see @{$saved_model#using_savedmodel_with_estimators$Using SavedModel with Estimators}. This method builds a new graph by first calling the serving_input_receiver_fn to obtain feature `Tensor`s, and then calling this `Estimator`'s model_fn to generate the model graph based on those features. It restores the given checkpoint (or, lacking that, the most recent checkpoint) into this graph in a fresh session. Finally it creates a timestamped export directory below the given export_dir_base, and writes a `SavedModel` into it containing a single `MetaGraphDef` saved from this session. The exported `MetaGraphDef` will provide one `SignatureDef` for each element of the export_outputs dict returned from the model_fn, named using the same keys. One of these keys is always signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY, indicating which signature will be served when a serving request does not specify one. For each signature, the outputs are provided by the corresponding `ExportOutput`s, and the inputs are always the input receivers provided by the serving_input_receiver_fn. Extra assets may be written into the SavedModel via the assets_extra argument. This should be a dict, where each key gives a destination path (including the filename) relative to the assets.extra directory. The corresponding value gives the full path of the source file to be copied. For example, the simple case of copying a single file without renaming it is specified as `{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`. Args: export_dir_base: A string containing a directory in which to create timestamped subdirectories containing exported SavedModels. serving_input_receiver_fn: A function that takes no argument and returns a `ServingInputReceiver` or `TensorServingInputReceiver`. assets_extra: A dict specifying how to populate the assets.extra directory within the exported SavedModel, or `None` if no extra assets are needed. 
as_text: whether to write the SavedModel proto in text format. checkpoint_path: The checkpoint path to export. If `None` (the default), the most recent checkpoint found within the model directory is chosen. strip_default_attrs: Boolean. If `True`, default-valued attributes will be removed from the NodeDefs. For a detailed guide, see [Stripping Default-Valued Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes). Returns: The string path to the exported directory. Raises: ValueError: if no serving_input_receiver_fn is provided, no export_outputs are provided, or no checkpoint can be found. """ # pylint: enable=line-too-long return self._export_saved_model_for_mode( export_dir_base, serving_input_receiver_fn, assets_extra=assets_extra, as_text=as_text, checkpoint_path=checkpoint_path, strip_default_attrs=strip_default_attrs, mode=model_fn_lib.ModeKeys.PREDICT) def _export_saved_model_for_mode( self, export_dir_base, input_receiver_fn, assets_extra=None, as_text=False, checkpoint_path=None, strip_default_attrs=False, mode=model_fn_lib.ModeKeys.PREDICT): # pylint: disable=line-too-long """Exports a single train/eval/predict graph as a SavedModel. This method is a wrapper for _export_all_saved_models, and wraps a raw input_receiver_fn in a dictionary to pass in to that function. See _export_all_saved_models for full docs. See tf.contrib.estimator.export_saved_model_for_mode for the currently exposed version of this function. Args: export_dir_base: A string containing a directory in which to create timestamped subdirectories containing exported SavedModels. input_receiver_fn: a function that takes no argument and returns the appropriate subclass of `InputReceiver`. assets_extra: A dict specifying how to populate the assets.extra directory within the exported SavedModel, or `None` if no extra assets are needed. as_text: whether to write the SavedModel proto in text format. 
checkpoint_path: The checkpoint path to export. If `None` (the default), the most recent checkpoint found within the model directory is chosen. strip_default_attrs: Boolean. If `True`, default-valued attributes will be removed from the NodeDefs. For a detailed guide, see [Stripping Default-Valued Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes). mode: tf.estimator.ModeKeys value indicating with mode will be exported. Returns: The string path to the exported directory. Raises: ValueError: if input_receiver_fn is None, no export_outputs are provided, or no checkpoint can be found. """ # pylint: enable=line-too-long if not input_receiver_fn: raise ValueError('An input_receiver_fn must be defined.') input_receiver_fn_map = {mode: input_receiver_fn} return self._export_all_saved_models( export_dir_base, input_receiver_fn_map, assets_extra=assets_extra, as_text=as_text, checkpoint_path=checkpoint_path, strip_default_attrs=strip_default_attrs) def _export_all_saved_models( self, export_dir_base, input_receiver_fn_map, assets_extra=None, as_text=False, checkpoint_path=None, strip_default_attrs=False): # pylint: disable=line-too-long """Exports a SavedModel containing MetaGraphDefs for each requested mode. See tf.contrib.estimator.export_all_saved_models for the currently exposed version of this function. For each mode passed in via the input_receiver_fn_map, this method builds a new graph by calling the input_receiver_fn to obtain feature and label `Tensor`s. Next, this method calls the `Estimator`'s model_fn in the passed mode to generate the model graph based on those features and labels, and restores the given checkpoint (or, lacking that, the most recent checkpoint) into the graph. 
Only one of the modes is used for saving variables to the SavedModel (order of preference: TRAIN, EVAL, then PREDICT), such that up to three MetaGraphDefs are saved with a single set of variables in a single SavedModel directory. For the variables and MetaGraphDefs, a timestamped export directory below export_dir_base, and writes a `SavedModel` into it containing the `MetaGraphDef` for the given mode and its associated signatures. For prediction, the exported `MetaGraphDef` will provide one `SignatureDef` for each element of the export_outputs dict returned from the model_fn, named using the same keys. One of these keys is always signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY, indicating which signature will be served when a serving request does not specify one. For each signature, the outputs are provided by the corresponding `ExportOutput`s, and the inputs are always the input receivers provided by the serving_input_receiver_fn. For training and evaluation, the train_op is stored in an extra collection, and loss, metrics, and predictions are included in a SignatureDef for the mode in question. Extra assets may be written into the SavedModel via the assets_extra argument. This should be a dict, where each key gives a destination path (including the filename) relative to the assets.extra directory. The corresponding value gives the full path of the source file to be copied. For example, the simple case of copying a single file without renaming it is specified as `{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`. Args: export_dir_base: A string containing a directory in which to create timestamped subdirectories containing exported SavedModels. input_receiver_fn_map: dict of tf.estimator.ModeKeys to input_receiver_fn mappings, where the input_receiver_fn is a function that takes no argument and returns the appropriate subclass of `InputReceiver`. 
assets_extra: A dict specifying how to populate the assets.extra directory within the exported SavedModel, or `None` if no extra assets are needed. as_text: whether to write the SavedModel proto in text format. checkpoint_path: The checkpoint path to export. If `None` (the default), the most recent checkpoint found within the model directory is chosen. strip_default_attrs: Boolean. If `True`, default-valued attributes will be removed from the NodeDefs. For a detailed guide, see [Stripping Default-Valued Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes). Returns: A dict of tf.estimator.ModeKeys value to string path for each exported directory. Raises: ValueError: if any input_receiver_fn is None, no export_outputs are provided, or no checkpoint can be found. """ # pylint: enable=line-too-long # TODO(b/65561022): Consider allowing multiple input_receiver_fns per mode. with context.graph_mode(): if not checkpoint_path: # Locate the latest checkpoint checkpoint_path = saver.latest_checkpoint(self._model_dir) if not checkpoint_path: raise ValueError("Couldn't find trained model at %s." % self._model_dir) export_dir = export_helpers.get_timestamped_export_dir(export_dir_base) temp_export_dir = export_helpers.get_temp_export_dir(export_dir) builder = saved_model_builder.SavedModelBuilder(temp_export_dir) save_variables = True # Note that the order in which we run here matters, as the first # mode we pass through will be used to save the variables. We run TRAIN # first, as that is also the mode used for checkpoints, and therefore # we are not likely to have vars in PREDICT that are not in the checkpoint # created by TRAIN. 
if input_receiver_fn_map.get(model_fn_lib.ModeKeys.TRAIN): self._add_meta_graph_for_mode( builder, input_receiver_fn_map, checkpoint_path, strip_default_attrs, save_variables, mode=model_fn_lib.ModeKeys.TRAIN) save_variables = False if input_receiver_fn_map.get(model_fn_lib.ModeKeys.EVAL): self._add_meta_graph_for_mode( builder, input_receiver_fn_map, checkpoint_path, strip_default_attrs, save_variables, mode=model_fn_lib.ModeKeys.EVAL) save_variables = False if input_receiver_fn_map.get(model_fn_lib.ModeKeys.PREDICT): self._add_meta_graph_for_mode( builder, input_receiver_fn_map, checkpoint_path, strip_default_attrs, save_variables, mode=model_fn_lib.ModeKeys.PREDICT) save_variables = False if save_variables: raise ValueError('No valid modes for exporting found. Got {}.'.format( input_receiver_fn_map.keys())) builder.save(as_text) # Add the extra assets if assets_extra: assets_extra_path = os.path.join(compat.as_bytes(temp_export_dir), compat.as_bytes('assets.extra')) for dest_relative, source in assets_extra.items(): dest_absolute = os.path.join(compat.as_bytes(assets_extra_path), compat.as_bytes(dest_relative)) dest_path = os.path.dirname(dest_absolute) gfile.MakeDirs(dest_path) gfile.Copy(source, dest_absolute) gfile.Rename(temp_export_dir, export_dir) return export_dir def _add_meta_graph_for_mode(self, builder, input_receiver_fn_map, checkpoint_path, strip_default_attrs, save_variables=True, mode=model_fn_lib.ModeKeys.PREDICT, export_tags=None): # pylint: disable=line-too-long """Loads variables and adds them along with a MetaGraphDef for saving. Args: builder: instance of SavedModelBuilder that will be used for saving. input_receiver_fn_map: dict of tf.estimator.ModeKeys to input_receiver_fn mappings, where the input_receiver_fn is a function that takes no argument and returns the appropriate subclass of `InputReceiver`. checkpoint_path: The checkpoint path to export. 
If `None` (the default), the most recent checkpoint found within the model directory is chosen. strip_default_attrs: Boolean. If `True`, default-valued attributes will be removed from the NodeDefs. For a detailed guide, see [Stripping Default-Valued Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes). save_variables: bool, whether variables should be saved. If False, just the MetaGraphDef will be saved. Note that save_variables should only be True for the first call to this function, and the SavedModelBuilder will raise an error if that is not the case. mode: tf.estimator.ModeKeys value indicating which mode will be exported. export_tags: The set of tags with which to save `MetaGraphDef`. If None, a default set will be selected to matched the passed mode. """ # pylint: enable=line-too-long if export_tags is None: export_tags = model_fn_lib.EXPORT_TAG_MAP[mode] input_receiver_fn = input_receiver_fn_map[mode] with ops.Graph().as_default() as g: self._create_and_assert_global_step(g) random_seed.set_random_seed(self._config.tf_random_seed) input_receiver = input_receiver_fn() # Call the model_fn and collect the export_outputs. 
estimator_spec = self._call_model_fn( features=input_receiver.features, labels=getattr(input_receiver, 'labels', None), mode=mode, config=self.config) export_outputs = self._get_export_outputs_for_spec(estimator_spec) # Build the SignatureDefs from receivers and all outputs signature_def_map = export_helpers.build_all_signature_defs( input_receiver.receiver_tensors, export_outputs, getattr(input_receiver, 'receiver_tensors_alternatives', None), serving_only=(mode == model_fn_lib.ModeKeys.PREDICT)) with tf_session.Session(config=self._session_config) as session: local_init_op = ( estimator_spec.scaffold.local_init_op or monitored_session.Scaffold.default_local_init_op()) saver_for_restore = estimator_spec.scaffold.saver or saver.Saver( sharded=True) try: saver_for_restore.restore(session, checkpoint_path) except errors.NotFoundError as e: msg = ('Could not load all requested variables from the checkpoint. ' 'Please make sure your model_fn does not expect variables ' 'that were not saved in the checkpoint.\n\n' 'Encountered error with mode `{}` while restoring checkpoint ' 'from: `{}`. Full Traceback:\n\n{}').format( mode, checkpoint_path, e) raise ValueError(msg) # We add the train op explicitly for now, so that we don't have to # change the Builder public interface. Note that this is a no-op # for prediction, where train_op is None. builder._add_train_op(estimator_spec.train_op) # pylint: disable=protected-access meta_graph_kwargs = dict( tags=export_tags, signature_def_map=signature_def_map, assets_collection=ops.get_collection( ops.GraphKeys.ASSET_FILEPATHS), strip_default_attrs=strip_default_attrs, legacy_init_op=local_init_op) if save_variables: builder.add_meta_graph_and_variables( session, **meta_graph_kwargs) else: builder.add_meta_graph(**meta_graph_kwargs) def _get_export_outputs_for_spec(self, estimator_spec): """Given an EstimatorSpec, determine what our export outputs should be. 
EstimatorSpecs contain export_outputs that are used for serving, but for training and eval graphs, we must wrap the tensors of interest in appropriate ExportOutput objects. Args: estimator_spec: EstimatorSpec object that will be exported. Returns: a dict mapping export_output_name to ExportOutput object. Raises: ValueError: if an appropriate ExportOutput cannot be found for the passed EstimatorSpec.mode """ mode = estimator_spec.mode if mode == model_fn_lib.ModeKeys.PREDICT: outputs = estimator_spec.export_outputs else: if mode == model_fn_lib.ModeKeys.TRAIN: output_class = export_output.TrainOutput elif mode == model_fn_lib.ModeKeys.EVAL: output_class = export_output.EvalOutput else: raise ValueError( 'Export output type not found for mode: {}'.format(mode)) export_out = output_class( loss=estimator_spec.loss, predictions=estimator_spec.predictions, metrics=estimator_spec.eval_metric_ops) outputs = {mode: export_out} return outputs def _get_features_from_input_fn(self, input_fn, mode): """Extracts the `features` from return values of `input_fn`.""" result = self._call_input_fn(input_fn, mode) input_hooks = [] if isinstance(result, dataset_ops.Dataset): iterator = result.make_initializable_iterator() input_hooks.append(_DatasetInitializerHook(iterator)) result = iterator.get_next() if isinstance(result, (list, tuple)): # Unconditionally drop the label (the second element of result). result = result[0] self._validate_features_in_predict_input(result) return result, input_hooks def _validate_features_in_predict_input(self, result): if not _has_dataset_or_queue_runner(result): logging.warning('Input graph does not use tf.data.Dataset or contain a ' 'QueueRunner. That means predict yields forever. 
' 'This is probably a mistake.') def _get_features_and_labels_from_input_fn(self, input_fn, mode): """Extracts the `features` and labels from return values of `input_fn`.""" input_hooks = [] if self._distribution is not None and mode == model_fn_lib.ModeKeys.TRAIN: result = self._distribution.distribute_dataset( lambda: self._call_input_fn(input_fn, mode)) iterator = result.make_initializable_iterator() input_hooks.append(_DatasetInitializerHook(iterator)) result = iterator.get_next() else: result = self._call_input_fn(input_fn, mode) if isinstance(result, dataset_ops.Dataset): iterator = result.make_initializable_iterator() input_hooks.append(_DatasetInitializerHook(iterator)) result = iterator.get_next() if isinstance(result, (list, tuple)): if len(result) != 2: raise ValueError( 'input_fn should return (features, labels) as a len 2 tuple.') return result[0], result[1], input_hooks return result, None, input_hooks def _extract_batch_length(self, preds_evaluated): """Extracts batch length of predictions.""" batch_length = None for key, value in six.iteritems(preds_evaluated): batch_length = batch_length or value.shape[0] if value.shape[0] != batch_length: raise ValueError('Batch length of predictions should be same. %s has ' 'different batch length than others.' % key) return batch_length def _extract_keys(self, predictions, predict_keys): """Extracts `predict_keys` from `predictions`.""" if not predict_keys: return predictions if not isinstance(predictions, dict): raise ValueError( 'predict_keys argument is not valid in case of non-dict predictions.') existing_keys = predictions.keys() predictions = { key: value for key, value in six.iteritems(predictions) if key in predict_keys } if not predictions: raise ValueError('Expected to run at least one output from %s, ' 'provided %s.' % (existing_keys, predict_keys)) return predictions def _create_global_step(self, graph): """Creates the global step tensor in graph. 
The global step tensor must be an integer type with name 'global_step' and be added to the collection @{tf.GraphKeys.GLOBAL_STEP}. Args: graph: The graph in which to create the global step tensor. Returns: The global step `Tensor`. """ return training.create_global_step(graph) def _create_and_assert_global_step(self, graph): """Creates and asserts properties of the global step. Args: graph: The graph in which to create the global step tensor. Returns: The global step `Tensor`. """ step = self._create_global_step(graph) assert step == training.get_global_step() assert step.dtype.is_integer return step def _call_input_fn(self, input_fn, mode): """Calls the input function. Args: input_fn: The input function. mode: ModeKeys Returns: Either features or (features, labels) where features and labels are: features - `Tensor` or dictionary of string feature name to `Tensor`. labels - `Tensor` or dictionary of `Tensor` with labels. Raises: ValueError: if input_fn takes invalid arguments. """ input_fn_args = function_utils.fn_args(input_fn) kwargs = {} if 'mode' in input_fn_args: kwargs['mode'] = mode if 'params' in input_fn_args: kwargs['params'] = self.params if 'config' in input_fn_args: kwargs['config'] = self.config with ops.device('/cpu:0'): return input_fn(**kwargs) def _call_model_fn(self, features, labels, mode, config): """Calls model function. Args: features: features dict. labels: labels dict. mode: ModeKeys config: RunConfig Returns: An `EstimatorSpec` object. Raises: ValueError: if model_fn returns invalid objects. 
""" model_fn_args = function_utils.fn_args(self._model_fn) kwargs = {} if 'labels' in model_fn_args: kwargs['labels'] = labels else: if labels is not None: raise ValueError( 'model_fn does not take labels, but input_fn returns labels.') if 'mode' in model_fn_args: kwargs['mode'] = mode if 'params' in model_fn_args: kwargs['params'] = self.params if 'config' in model_fn_args: kwargs['config'] = config logging.info('Calling model_fn.') model_fn_results = self._model_fn(features=features, **kwargs) logging.info('Done calling model_fn.') if not isinstance(model_fn_results, model_fn_lib.EstimatorSpec): raise ValueError('model_fn should return an EstimatorSpec.') return model_fn_results def _train_model(self, input_fn, hooks, saving_listeners): if self._distribution: return self._train_model_distributed(input_fn, hooks, saving_listeners) else: return self._train_model_default(input_fn, hooks, saving_listeners) def _train_model_default(self, input_fn, hooks, saving_listeners): worker_hooks = [] with ops.Graph().as_default() as g, g.device(self._device_fn): random_seed.set_random_seed(self._config.tf_random_seed) global_step_tensor = self._create_and_assert_global_step(g) training_util._get_or_create_global_step_read() # pylint: disable=protected-access features, labels, input_hooks = ( self._get_features_and_labels_from_input_fn( input_fn, model_fn_lib.ModeKeys.TRAIN)) worker_hooks.extend(input_hooks) estimator_spec = self._call_model_fn( features, labels, model_fn_lib.ModeKeys.TRAIN, self.config) return self._train_with_estimator_spec(estimator_spec, worker_hooks, hooks, global_step_tensor, saving_listeners) def _train_model_distributed(self, input_fn, hooks, saving_listeners): self._distribution.configure(self._session_config) worker_hooks = [] with ops.Graph().as_default() as g: with self._distribution.scope(): random_seed.set_random_seed(self._config.tf_random_seed) features, labels, input_hooks = ( self._get_features_and_labels_from_input_fn( input_fn, 
model_fn_lib.ModeKeys.TRAIN)) worker_hooks.extend(input_hooks) global_step_tensor = self._create_and_assert_global_step(g) # The default destination for the global_step_tensor fetch call is the # CPU. global_step_read_tensor = self._distribution.fetch(global_step_tensor) # we want to add to the global collection in the main thread not the # tower threads. ops.add_to_collection(training_util.GLOBAL_STEP_READ_KEY, global_step_read_tensor) grouped_estimator_spec = self._distribution.call_for_each_tower( self._call_model_fn, features, labels, # although this will be None it seems model_fn_lib.ModeKeys.TRAIN, self.config) # TODO(anjalisridhar): Figure out how to resolve the following scaffold # parameters: init_feed_dict, init_fn. scaffold_list = self._distribution.unwrap( grouped_estimator_spec.scaffold) init_feed_dict = [ s.init_feed_dict for s in scaffold_list if s.init_feed_dict is not None ] if init_feed_dict: init_feed_dict = self._distribution.group(init_feed_dict) else: init_feed_dict = None init_fn = [s.init_fn for s in scaffold_list if s.init_fn is not None] if init_fn: init_fn = self._distribution.group(init_fn) else: init_fn = None init_op = [s.init_op for s in scaffold_list if s.init_op is not None] if init_op: init_op = self._distribution.group(init_op) else: init_op = None ready_op = self._distribution.call_for_each_tower( create_per_tower_ready_op, grouped_estimator_spec.scaffold) if ready_op is not None: ready_op = self._distribution.group(ready_op) else: ready_op = None ready_for_local_init_op = self._distribution.call_for_each_tower( create_per_tower_ready_for_local_init_op, grouped_estimator_spec.scaffold) if ready_for_local_init_op is not None: ready_for_local_init_op = self._distribution.group( ready_for_local_init_op) else: ready_for_local_init_op = None local_init_op = [ s.local_init_op for s in scaffold_list if s.local_init_op is not None ] if local_init_op: local_init_op = self._distribution.group(local_init_op) else: local_init_op = None 
summary_op = [ s.summary_op for s in scaffold_list if s.summary_op is not None ] if summary_op: summary_op = self._distribution.group(summary_op) else: summary_op = None scaffold = monitored_session.Scaffold( init_op=init_op, ready_op=ready_op, ready_for_local_init_op=ready_for_local_init_op, local_init_op=local_init_op, summary_op=summary_op, init_feed_dict=init_feed_dict, init_fn=init_fn) def get_hooks_from_the_first_device(per_device_hooks): hooks_list = self._distribution.unwrap(per_device_hooks) assert hooks_list return hooks_list[0] training_hooks = get_hooks_from_the_first_device( grouped_estimator_spec.training_hooks) training_chief_hooks = get_hooks_from_the_first_device( grouped_estimator_spec.training_chief_hooks) estimator_spec = model_fn_lib.EstimatorSpec( mode=grouped_estimator_spec.mode, loss=self._distribution.unwrap( self._distribution.reduce(distribute_lib.get_loss_reduction(), grouped_estimator_spec.loss, destinations='/device:CPU:0'))[0], train_op=self._distribution.group(grouped_estimator_spec.train_op), training_hooks=training_hooks, training_chief_hooks=training_chief_hooks, scaffold=scaffold) return self._train_with_estimator_spec(estimator_spec, worker_hooks, hooks, global_step_read_tensor, saving_listeners) def _train_with_estimator_spec(self, estimator_spec, worker_hooks, hooks, global_step_tensor, saving_listeners): """Train a model with the given Estimator Spec.""" if self._warm_start_settings: logging.info('Warm-starting with WarmStartSettings: %s' % (self._warm_start_settings,)) warm_starting_util.warm_start(*self._warm_start_settings) # Check if the user created a loss summary, and add one if they didn't. # We assume here that the summary is called 'loss'. If it is not, we will # make another one with the name 'loss' to ensure it shows up in the right # graph in TensorBoard. 
    # Add a 'loss' scalar summary only if the model_fn did not already create
    # one, so the loss always shows up exactly once in TensorBoard.
    if not any([x.op.name == 'loss'
                for x in ops.get_collection(ops.GraphKeys.SUMMARIES)]):
      summary.scalar('loss', estimator_spec.loss)
    ops.add_to_collection(ops.GraphKeys.LOSSES, estimator_spec.loss)
    worker_hooks.extend(hooks)
    # Abort training as soon as the loss becomes NaN.
    worker_hooks.append(
        training.NanTensorHook(estimator_spec.loss)
    )
    # Periodically log loss and step, unless step logging is disabled in the
    # run config (log_step_count_steps is None).
    if self._config.log_step_count_steps is not None:
      worker_hooks.append(
          training.LoggingTensorHook(
              {
                  'loss': estimator_spec.loss,
                  'step': global_step_tensor
              },
              every_n_iter=self._config.log_step_count_steps)
      )
    worker_hooks.extend(estimator_spec.training_hooks)

    # Install a default sharded Saver only when neither the scaffold nor the
    # SAVERS collection already provides one.
    if not (estimator_spec.scaffold.saver or
            ops.get_collection(ops.GraphKeys.SAVERS)):
      ops.add_to_collection(
          ops.GraphKeys.SAVERS,
          training.Saver(
              sharded=True,
              max_to_keep=self._config.keep_checkpoint_max,
              keep_checkpoint_every_n_hours=(
                  self._config.keep_checkpoint_every_n_hours),
              defer_build=True,
              save_relative_paths=True))

    chief_hooks = []
    all_hooks = worker_hooks + list(estimator_spec.training_chief_hooks)
    # Collect any checkpoint-saver hooks supplied by the user or the model_fn.
    saver_hooks = [
        h for h in all_hooks if isinstance(h, training.CheckpointSaverHook)]
    # If checkpointing is enabled by the config and no saver hook exists yet,
    # create one that runs only on the chief.
    if (self._config.save_checkpoints_secs or
        self._config.save_checkpoints_steps):
      if not saver_hooks:
        chief_hooks = [
            training.CheckpointSaverHook(
                self._model_dir,
                save_secs=self._config.save_checkpoints_secs,
                save_steps=self._config.save_checkpoints_steps,
                scaffold=estimator_spec.scaffold)
        ]
        saver_hooks = [chief_hooks[0]]
    # saving_listeners can only fire from a CheckpointSaverHook, so one must
    # exist at this point.
    if saving_listeners:
      if not saver_hooks:
        raise ValueError(
            'There should be a CheckpointSaverHook to use saving_listeners. '
            'Please set one of the RunConfig.save_checkpoints_steps or '
            'RunConfig.save_checkpoints_secs.')
      else:
        # It is expected to have one CheckpointSaverHook. If multiple, we pick
        # up the first one to add listener.
saver_hooks[0]._listeners.extend(saving_listeners) # pylint: disable=protected-access with training.MonitoredTrainingSession( master=self._config.master, is_chief=self._config.is_chief, checkpoint_dir=self._model_dir, scaffold=estimator_spec.scaffold, hooks=worker_hooks, chief_only_hooks=( tuple(chief_hooks) + tuple(estimator_spec.training_chief_hooks)), save_checkpoint_secs=0, # Saving is handled by a hook. save_summaries_steps=self._config.save_summary_steps, config=self._session_config, log_step_count_steps=self._config.log_step_count_steps) as mon_sess: loss = None while not mon_sess.should_stop(): _, loss = mon_sess.run([estimator_spec.train_op, estimator_spec.loss]) return loss def _evaluate_build_graph(self, input_fn, hooks=None, checkpoint_path=None): """Builds the graph and related hooks to run evaluation.""" random_seed.set_random_seed(self._config.tf_random_seed) global_step_tensor = self._create_and_assert_global_step( ops.get_default_graph()) features, labels, input_hooks = ( self._get_features_and_labels_from_input_fn(input_fn, model_fn_lib.ModeKeys.EVAL)) estimator_spec = self._call_model_fn( features, labels, model_fn_lib.ModeKeys.EVAL, self.config) # Call to warm_start has to be after model_fn is called. 
    self._maybe_warm_start(checkpoint_path)

    # The name used for the mean-loss metric is reserved: Estimator adds its
    # own entry under that key just below.
    if model_fn_lib.LOSS_METRIC_KEY in estimator_spec.eval_metric_ops:
      raise ValueError(
          'Metric with name "%s" is not allowed, because Estimator ' %
          (model_fn_lib.LOSS_METRIC_KEY) +
          'already defines a default metric with the same name.')
    estimator_spec.eval_metric_ops[
        model_fn_lib.LOSS_METRIC_KEY] = metrics_lib.mean(estimator_spec.loss)

    update_op, eval_dict = _extract_metric_update_ops(
        estimator_spec.eval_metric_ops)

    # 'global_step' is likewise reserved; it is reported automatically below.
    if ops.GraphKeys.GLOBAL_STEP in eval_dict:
      raise ValueError(
          'Metric with name `global_step` is not allowed, because Estimator '
          'already defines a default metric with the same name.')
    eval_dict[ops.GraphKeys.GLOBAL_STEP] = global_step_tensor

    all_hooks = list(input_hooks)
    all_hooks.extend(hooks)
    all_hooks.extend(list(estimator_spec.evaluation_hooks or []))

    return estimator_spec.scaffold, update_op, eval_dict, all_hooks

  def _evaluate_run(self, checkpoint_path, scaffold, update_op, eval_dict,
                    all_hooks, output_dir):
    """Run evaluation."""
    eval_results = evaluation._evaluate_once(  # pylint: disable=protected-access
        checkpoint_path=checkpoint_path,
        master=self._config.evaluation_master,
        scaffold=scaffold,
        eval_ops=update_op,
        final_ops=eval_dict,
        hooks=all_hooks,
        config=self._session_config)

    # Persist the metric values (keyed by name, plus global step) as summaries
    # in output_dir so they appear in TensorBoard.
    _write_dict_to_summary(
        output_dir=output_dir,
        dictionary=eval_results,
        current_global_step=eval_results[ops.GraphKeys.GLOBAL_STEP])

    return eval_results

  def _maybe_warm_start(self, checkpoint_path):
    # Warm-start only when no explicit checkpoint was provided and warm-start
    # settings were configured on this Estimator.
    if not checkpoint_path and self._warm_start_settings:
      logging.info('Warm-starting with WarmStartSettings: %s' %
                   (self._warm_start_settings,))
      warm_starting_util.warm_start(*self._warm_start_settings)


def create_per_tower_ready_op(scaffold):
  """Create a Scaffold.ready_op inside a tower."""
  if scaffold.ready_op:
    return scaffold.ready_op

  def default_ready_op():
    return array_ops.concat([
        variables.report_uninitialized_variables(),
        resources.report_uninitialized_resources()
    ], 0)

  return monitored_session.Scaffold.get_or_default(
      'ready_op',
ops.GraphKeys.READY_OP, default_ready_op) def create_per_tower_ready_for_local_init_op(scaffold): """Create a Scaffold.ready_for_local_init_op inside a tower.""" if scaffold.ready_for_local_init_op: return scaffold.ready_for_local_init_op def default_ready_for_local_init_op(): return variables.report_uninitialized_variables( variables.global_variables()) return monitored_session.Scaffold.get_or_default( 'ready_for_local_init_op', ops.GraphKeys.READY_FOR_LOCAL_INIT_OP, default_ready_for_local_init_op) def _check_checkpoint_available(model_dir): latest_path = saver.latest_checkpoint(model_dir) if not latest_path: raise ValueError( 'Could not find trained model in model_dir: {}.'.format(model_dir)) def _check_hooks_type(hooks): """Returns hooks if all are SessionRunHook, raises TypeError otherwise.""" hooks = list(hooks or []) for h in hooks: if not isinstance(h, training.SessionRunHook): raise TypeError('Hooks must be a SessionRunHook, given: {}'.format(h)) return hooks def _check_listeners_type(saving_listeners): """Check listeners type.""" listeners = list(saving_listeners or []) for l in listeners: if not isinstance(l, training.CheckpointSaverListener): raise TypeError( 'saving_listeners must be a list of CheckpointSaverListener, ' 'given: {}'.format(l)) return listeners def _get_replica_device_setter(config): """Creates a replica device setter if required as a default device_fn. `Estimator` uses ReplicaDeviceSetter as a default device placer. It sets the distributed related arguments such as number of ps_replicas based on given config. Args: config: A `RunConfig` instance. Returns: A replica device setter, or None. 
""" if config.task_type: worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id) else: worker_device = '/job:worker' if config.num_ps_replicas > 0: return training.replica_device_setter( ps_tasks=config.num_ps_replicas, worker_device=worker_device, merge_devices=True, ps_ops=list(device_setter.STANDARD_PS_OPS), cluster=config.cluster_spec) else: return None def _verify_model_fn_args(model_fn, params): """Verifies model fn arguments.""" args = set(function_utils.fn_args(model_fn)) if 'features' not in args: raise ValueError('model_fn (%s) must include features argument.' % model_fn) if params is not None and 'params' not in args: raise ValueError('model_fn (%s) does not include params argument, ' 'but params (%s) is passed to Estimator.' % (model_fn, params)) if params is None and 'params' in args: logging.warning('Estimator\'s model_fn (%s) includes params ' 'argument, but params are not passed to Estimator.', model_fn) non_valid_args = list(args - _VALID_MODEL_FN_ARGS) if non_valid_args: raise ValueError('model_fn (%s) has following not expected args: %s' % (model_fn, non_valid_args)) def _load_global_step_from_checkpoint_dir(checkpoint_dir): try: checkpoint_reader = training.NewCheckpointReader( training.latest_checkpoint(checkpoint_dir)) return checkpoint_reader.get_tensor(ops.GraphKeys.GLOBAL_STEP) except: # pylint: disable=bare-except return 0 def _extract_metric_update_ops(eval_dict): """Separate update operations from metric value operations.""" update_ops = [] value_ops = {} # Sort metrics lexicographically so graph is identical every time. for name, metric_ops in sorted(six.iteritems(eval_dict)): value_ops[name] = metric_ops[0] update_ops.append(metric_ops[1]) if update_ops: update_op = control_flow_ops.group(*update_ops) else: update_op = None return update_op, value_ops def _dict_to_str(dictionary): """Get a `str` representation of a `dict`. Args: dictionary: The `dict` to be represented as `str`. 
Returns: A `str` representing the `dictionary`. """ return ', '.join('%s = %s' % (k, v) for k, v in sorted(six.iteritems(dictionary)) if not isinstance(v, six.binary_type)) def _write_dict_to_summary(output_dir, dictionary, current_global_step): """Writes a `dict` into summary file in given output directory. Args: output_dir: `str`, directory to write the summary file in. dictionary: the `dict` to be written to summary file. current_global_step: `int`, the current global step. """ logging.info('Saving dict for global step %d: %s', current_global_step, _dict_to_str(dictionary)) summary_writer = writer_cache.FileWriterCache.get(output_dir) summary_proto = summary_pb2.Summary() for key in dictionary: if dictionary[key] is None: continue if key == 'global_step': continue if (isinstance(dictionary[key], np.float32) or isinstance(dictionary[key], float)): summary_proto.value.add(tag=key, simple_value=float(dictionary[key])) elif (isinstance(dictionary[key], np.int64) or isinstance(dictionary[key], np.int32) or isinstance(dictionary[key], int)): summary_proto.value.add(tag=key, simple_value=int(dictionary[key])) elif isinstance(dictionary[key], six.binary_type): try: summ = summary_pb2.Summary.FromString(dictionary[key]) for i, _ in enumerate(summ.value): summ.value[i].tag = '%s/%d' % (key, i) summary_proto.value.extend(summ.value) except message.DecodeError: logging.warn('Skipping summary for %s, cannot parse string to Summary.', key) continue else: logging.warn( 'Skipping summary for %s, must be a float, np.float32, np.int64, ' 'np.int32 or int or a serialized string of Summary.', key) summary_writer.add_summary(summary_proto, current_global_step) summary_writer.flush() def _has_dataset_or_queue_runner(maybe_tensor): """Returns True if TF dataset or QueueRunner has been used.""" # Check TF dataset first. Here, we use a simple algorithm to check the top # level Tensors only, which should be sufficient for most users. 
  tensors = [x for x in nest.flatten(maybe_tensor) if isinstance(x, ops.Tensor)]
  if any([t.op.type == 'IteratorGetNext' for t in tensors]):
    return True

  # Now, check queue.
  # NOTE: this returns the QUEUE_RUNNERS collection itself (possibly empty),
  # not a strict bool — callers should rely only on its truthiness.
  return ops.get_default_graph().get_collection(ops.GraphKeys.QUEUE_RUNNERS)


class _DatasetInitializerHook(training.SessionRunHook):
  """SessionRunHook that runs a dataset iterator's initializer.

  `begin()` captures the initializer op while the graph is still being built;
  `after_create_session()` runs it once per newly created session.
  """

  def __init__(self, iterator):
    self._iterator = iterator

  def begin(self):
    self._initializer = self._iterator.initializer

  def after_create_session(self, session, coord):
    del coord  # unused
    session.run(self._initializer)


VocabInfo = warm_starting_util.VocabInfo  # pylint: disable=invalid-name
tf_export('estimator.VocabInfo', allow_multiple_exports=True)(VocabInfo)


@tf_export('estimator.WarmStartSettings')
class WarmStartSettings(
    collections.namedtuple('WarmStartSettings', [
        'ckpt_to_initialize_from',
        'vars_to_warm_start',
        'var_name_to_vocab_info',
        'var_name_to_prev_var_name',
    ])):
  """Settings for warm-starting in Estimators.

  Example Use with canned `DNNEstimator`:

  ```
  emb_vocab_file = tf.feature_column.embedding_column(
      tf.feature_column.categorical_column_with_vocabulary_file(
          "sc_vocab_file", "new_vocab.txt", vocab_size=100),
      dimension=8)
  emb_vocab_list = tf.feature_column.embedding_column(
      tf.feature_column.categorical_column_with_vocabulary_list(
          "sc_vocab_list", vocabulary_list=["a", "b"]),
      dimension=8)
  estimator = tf.estimator.DNNClassifier(
      hidden_units=[128, 64], feature_columns=[emb_vocab_file, emb_vocab_list],
      warm_start_from=ws)
  ```

  where `ws` could be defined as:

  Warm-start all weights in the model (input layer and hidden weights).
  Either the directory or a specific checkpoint can be provided (in the case
  of the former, the latest checkpoint will be used):

  ```
  ws = WarmStartSettings(ckpt_to_initialize_from="/tmp")
  ws = WarmStartSettings(ckpt_to_initialize_from="/tmp/model-1000")
  ```

  Warm-start only the embeddings (input layer):

  ```
  ws = WarmStartSettings(ckpt_to_initialize_from="/tmp",
                         vars_to_warm_start=".*input_layer.*")
  ```

  Warm-start all weights but the embedding parameters corresponding to
  `sc_vocab_file` have a different vocab from the one used in the current
  model:

  ```
  vocab_info = tf.estimator.VocabInfo(
      new_vocab=sc_vocab_file.vocabulary_file,
      new_vocab_size=sc_vocab_file.vocabulary_size,
      num_oov_buckets=sc_vocab_file.num_oov_buckets,
      old_vocab="old_vocab.txt"
  )
  ws = WarmStartSettings(
      ckpt_to_initialize_from="/tmp",
      var_name_to_vocab_info={
          "input_layer/sc_vocab_file_embedding/embedding_weights": vocab_info
      })
  ```

  Warm-start only `sc_vocab_file` embeddings (and no other variables), which
  have a different vocab from the one used in the current model:

  ```
  vocab_info = tf.estimator.VocabInfo(
      new_vocab=sc_vocab_file.vocabulary_file,
      new_vocab_size=sc_vocab_file.vocabulary_size,
      num_oov_buckets=sc_vocab_file.num_oov_buckets,
      old_vocab="old_vocab.txt"
  )
  ws = WarmStartSettings(
      ckpt_to_initialize_from="/tmp",
      vars_to_warm_start=None,
      var_name_to_vocab_info={
          "input_layer/sc_vocab_file_embedding/embedding_weights": vocab_info
      })
  ```

  Warm-start all weights but the parameters corresponding to `sc_vocab_file`
  have a different vocab from the one used in the current checkpoint, and only
  100 of those entries were used:

  ```
  vocab_info = tf.estimator.VocabInfo(
      new_vocab=sc_vocab_file.vocabulary_file,
      new_vocab_size=sc_vocab_file.vocabulary_size,
      num_oov_buckets=sc_vocab_file.num_oov_buckets,
      old_vocab="old_vocab.txt",
      old_vocab_size=100
  )
  ws = WarmStartSettings(
      ckpt_to_initialize_from="/tmp",
      var_name_to_vocab_info={
          "input_layer/sc_vocab_file_embedding/embedding_weights": vocab_info
      })
  ```

  Warm-start all weights but the parameters corresponding to `sc_vocab_file`
  have a different vocab from the one used in the current checkpoint, and the
  parameters corresponding to `sc_vocab_list` have a different name from the
  current checkpoint:

  ```
  vocab_info = tf.estimator.VocabInfo(
      new_vocab=sc_vocab_file.vocabulary_file,
      new_vocab_size=sc_vocab_file.vocabulary_size,
      num_oov_buckets=sc_vocab_file.num_oov_buckets,
      old_vocab="old_vocab.txt",
      old_vocab_size=100
  )
  ws = WarmStartSettings(
      ckpt_to_initialize_from="/tmp",
      var_name_to_vocab_info={
          "input_layer/sc_vocab_file_embedding/embedding_weights": vocab_info
      },
      var_name_to_prev_var_name={
          "input_layer/sc_vocab_list_embedding/embedding_weights":
              "old_tensor_name"
      })
  ```

  Attributes:
    ckpt_to_initialize_from: [Required] A string specifying the directory with
      checkpoint file(s) or path to checkpoint from which to warm-start the
      model parameters.
    vars_to_warm_start: [Optional] One of the following:

      - A regular expression (string) that captures which variables to
        warm-start (see tf.get_collection). This expression will only consider
        variables in the TRAINABLE_VARIABLES collection.
      - A list of Variables to warm-start.
      - A list of strings, each representing a full variable name to
        warm-start.
      - `None`, in which case only variables specified in
        `var_name_to_vocab_info` will be warm-started.

      Defaults to `'.*'`, which warm-starts all variables in the
      TRAINABLE_VARIABLES collection. Note that this excludes variables such
      as accumulators and moving statistics from batch norm.
    var_name_to_vocab_info: [Optional] Dict of variable names (strings) to
      VocabInfo. The variable names should be "full" variables, not the names
      of the partitions. If not explicitly provided, the variable is assumed
      to have no vocabulary.
    var_name_to_prev_var_name: [Optional] Dict of variable names (strings) to
      name of the previously-trained variable in `ckpt_to_initialize_from`.
  If not explicitly provided, the name of the variable is assumed to be same
  between previous checkpoint and current model.
  """

  def __new__(cls,
              ckpt_to_initialize_from,
              vars_to_warm_start='.*',
              var_name_to_vocab_info=None,
              var_name_to_prev_var_name=None):
    if not ckpt_to_initialize_from:
      raise ValueError(
          '`ckpt_to_initialize_from` MUST be set in WarmStartSettings')
    return super(WarmStartSettings, cls).__new__(
        cls,
        ckpt_to_initialize_from,
        vars_to_warm_start,
        # Normalize `None` to empty dicts so downstream code can look up
        # entries without None-checks.
        var_name_to_vocab_info or {},
        var_name_to_prev_var_name or {},
    )


def _get_default_warm_start_settings(warm_start_from):
  """Returns default WarmStartSettings.

  Args:
    warm_start_from: Either a string representing the filepath of a checkpoint
      or SavedModel to initialize from, or an instance of WarmStartSettings.

  Returns:
    Either None or an instance of WarmStartSettings.

  Raises:
    ValueError: If warm_start_from is not None but is neither a string nor an
      instance of WarmStartSettings.
  """
  if warm_start_from is None:
    return None
  if isinstance(warm_start_from, (six.string_types, six.binary_type)):
    # Infer that this is a SavedModel if export_path +
    # 'variables/variables.index' exists, and if so, construct the
    # WarmStartSettings pointing to export_path + 'variables/variables'.
    if gfile.Exists(os.path.join(compat.as_bytes(warm_start_from),
                                 compat.as_bytes('variables/variables.index'))):
      logging.info('Warm-starting from a SavedModel')
      return WarmStartSettings(ckpt_to_initialize_from=os.path.join(
          compat.as_bytes(warm_start_from),
          compat.as_bytes('{}/{}'.format(constants.VARIABLES_DIRECTORY,
                                         constants.VARIABLES_FILENAME))))
    return WarmStartSettings(ckpt_to_initialize_from=warm_start_from)
  elif isinstance(warm_start_from, WarmStartSettings):
    return warm_start_from
  else:
    raise ValueError('warm_start_from must be a string or a WarmStartSettings, '
                     'instead got {}'.format(type(warm_start_from)))
codeparrot/github-code-clean
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Built-in loss functions.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import functools import six from tensorflow.python.autograph.core import ag_ctx from tensorflow.python.autograph.impl import api as autograph from tensorflow.python.distribute import distribution_strategy_context from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.framework import smart_cond from tensorflow.python.framework import tensor_spec from tensorflow.python.framework import tensor_util from tensorflow.python.keras import backend as K from tensorflow.python.keras.utils import losses_utils from tensorflow.python.keras.utils import tf_utils from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object from tensorflow.python.keras.utils.generic_utils import serialize_keras_object from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn from tensorflow.python.ops.losses import losses_impl from tensorflow.python.ops.ragged import ragged_map_ops from tensorflow.python.ops.ragged import ragged_tensor 
from tensorflow.python.ops.ragged import ragged_util
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import keras_export
from tensorflow.tools.docs import doc_controls


@keras_export('keras.losses.Loss')
class Loss(object):
  """Loss base class.

  To be implemented by subclasses:
  * `call()`: Contains the logic for loss calculation using `y_true`, `y_pred`.

  Example subclass implementation:

  ```python
  class MeanSquaredError(Loss):

    def call(self, y_true, y_pred):
      y_pred = tf.convert_to_tensor_v2(y_pred)
      y_true = tf.cast(y_true, y_pred.dtype)
      return tf.reduce_mean(math_ops.square(y_pred - y_true), axis=-1)
  ```

  When used with `tf.distribute.Strategy`, outside of built-in training loops
  such as `tf.keras` `compile` and `fit`, please use 'SUM' or 'NONE' reduction
  types, and reduce losses explicitly in your training loop. Using 'AUTO' or
  'SUM_OVER_BATCH_SIZE' will raise an error. Please see this custom training
  [tutorial](https://www.tensorflow.org/tutorials/distribute/custom_training)
  for more details on this.

  You can implement 'SUM_OVER_BATCH_SIZE' using global batch size like:

  ```python
  with strategy.scope():
    loss_obj = tf.keras.losses.CategoricalCrossentropy(
        reduction=tf.keras.losses.Reduction.NONE)
    ....
    loss = (tf.reduce_sum(loss_obj(labels, predictions)) *
            (1. / global_batch_size))
  ```
  """

  def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name=None):
    """Initializes `Loss` class.

    Args:
      reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
        loss. Default value is `AUTO`. `AUTO` indicates that the reduction
        option will be determined by the usage context. For almost all cases
        this defaults to `SUM_OVER_BATCH_SIZE`. When used with
        `tf.distribute.Strategy`, outside of built-in training loops such as
        `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
        will raise an error. Please see this custom training [tutorial](
          https://www.tensorflow.org/tutorials/distribute/custom_training)
        for more details.
      name: Optional name for the op.
    """
    losses_utils.ReductionV2.validate(reduction)
    self.reduction = reduction
    self.name = name
    # SUM_OVER_BATCH is only allowed in losses managed by `fit` or
    # CannedEstimators.
    self._allow_sum_over_batch_size = False
    self._set_name_scope()

  def _set_name_scope(self):
    """Creates a valid `name_scope` name."""
    if self.name is None:
      self._name_scope = self.__class__.__name__
    elif self.name == '<lambda>':
      # Lambdas report '<lambda>' as their name; '<>' is not accepted in a
      # scope name.
      self._name_scope = 'lambda'
    else:
      # E.g. '_my_loss' => 'my_loss'
      self._name_scope = self.name.strip('_')

  def __call__(self, y_true, y_pred, sample_weight=None):
    """Invokes the `Loss` instance.

    Args:
      y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`, except
        sparse loss functions such as sparse categorical crossentropy where
        shape = `[batch_size, d0, .. dN-1]`
      y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`
      sample_weight: Optional `sample_weight` acts as a coefficient for the
        loss. If a scalar is provided, then the loss is simply scaled by the
        given value. If `sample_weight` is a tensor of size `[batch_size]`,
        then the total loss for each sample of the batch is rescaled by the
        corresponding element in the `sample_weight` vector. If the shape of
        `sample_weight` is `[batch_size, d0, .. dN-1]` (or can be broadcasted
        to this shape), then each loss element of `y_pred` is scaled by the
        corresponding value of `sample_weight`. (Note on`dN-1`: all loss
          functions reduce by 1 dimension, usually axis=-1.)

    Returns:
      Weighted loss float `Tensor`. If `reduction` is `NONE`, this has
        shape `[batch_size, d0, .. dN-1]`; otherwise, it is scalar. (Note
        `dN-1` because all loss functions reduce by 1 dimension, usually
        axis=-1.)

    Raises:
      ValueError: If the shape of `sample_weight` is invalid.
    """
    # If we are wrapping a lambda function strip '<>' from the name as it is not
    # accepted in scope name.
    graph_ctx = tf_utils.graph_context_for_symbolic_tensors(
        y_true, y_pred, sample_weight)
    with K.name_scope(self._name_scope), graph_ctx:
      if context.executing_eagerly():
        call_fn = self.call
      else:
        # In graph mode, route `call` through autograph so Python control
        # flow in subclasses is converted.
        call_fn = autograph.tf_convert(self.call, ag_ctx.control_status_ctx())
      losses = call_fn(y_true, y_pred)
      return losses_utils.compute_weighted_loss(
          losses, sample_weight, reduction=self._get_reduction())

  @classmethod
  def from_config(cls, config):
    """Instantiates a `Loss` from its config (output of `get_config()`).

    Args:
        config: Output of `get_config()`.

    Returns:
        A `Loss` instance.
    """
    return cls(**config)

  def get_config(self):
    """Returns the config dictionary for a `Loss` instance."""
    return {'reduction': self.reduction, 'name': self.name}

  @abc.abstractmethod
  @doc_controls.for_subclass_implementers
  def call(self, y_true, y_pred):
    """Invokes the `Loss` instance.

    Args:
      y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`, except
        sparse loss functions such as sparse categorical crossentropy where
        shape = `[batch_size, d0, .. dN-1]`
      y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`

    Returns:
      Loss values with the shape `[batch_size, d0, .. dN-1]`.
    """
    raise NotImplementedError('Must be implemented in subclasses.')

  def _get_reduction(self):
    """Handles `AUTO` reduction cases and returns the reduction value."""
    # AUTO / SUM_OVER_BATCH_SIZE are disallowed under a distribution strategy
    # outside of `fit`-managed losses (see `_allow_sum_over_batch_size`).
    if (not self._allow_sum_over_batch_size and
        distribution_strategy_context.has_strategy() and
        (self.reduction == losses_utils.ReductionV2.AUTO or
         self.reduction == losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE)):
      raise ValueError(
          'Please use `tf.keras.losses.Reduction.SUM` or '
          '`tf.keras.losses.Reduction.NONE` for loss reduction when losses are '
          'used with `tf.distribute.Strategy` outside of the built-in training '
          'loops. You can implement '
          '`tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` using global batch '
          'size like:\n```\nwith strategy.scope():\n'
          '    loss_obj = tf.keras.losses.CategoricalCrossentropy('
          'reduction=tf.keras.losses.Reduction.NONE)\n....\n'
          '    loss = tf.reduce_sum(loss_obj(labels, predictions)) * '
          '(1. / global_batch_size)\n```\nPlease see '
          'https://www.tensorflow.org/tutorials/distribute/custom_training'
          ' for more details.')

    if self.reduction == losses_utils.ReductionV2.AUTO:
      return losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE
    return self.reduction


class LossFunctionWrapper(Loss):
  """Wraps a loss function in the `Loss` class."""

  def __init__(self,
               fn,
               reduction=losses_utils.ReductionV2.AUTO,
               name=None,
               **kwargs):
    """Initializes `LossFunctionWrapper` class.

    Args:
      fn: The loss function to wrap, with signature `fn(y_true, y_pred,
        **kwargs)`.
      reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
        loss. Default value is `AUTO`. `AUTO` indicates that the reduction
        option will be determined by the usage context. For almost all cases
        this defaults to `SUM_OVER_BATCH_SIZE`. When used with
        `tf.distribute.Strategy`, outside of built-in training loops such as
        `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
        will raise an error. Please see this custom training [tutorial](
          https://www.tensorflow.org/tutorials/distribute/custom_training)
        for more details.
      name: (Optional) name for the loss.
      **kwargs: The keyword arguments that are passed on to `fn`.
    """
    super(LossFunctionWrapper, self).__init__(reduction=reduction, name=name)
    self.fn = fn
    # Extra kwargs are forwarded to `fn` on every `call`.
    self._fn_kwargs = kwargs

  def call(self, y_true, y_pred):
    """Invokes the `LossFunctionWrapper` instance.

    Args:
      y_true: Ground truth values.
      y_pred: The predicted values.

    Returns:
      Loss values per sample.
    """
    if tensor_util.is_tf_type(y_pred) and tensor_util.is_tf_type(y_true):
      # Align ranks of y_pred/y_true before delegating to the wrapped fn.
      y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(y_pred, y_true)

    ag_fn = autograph.tf_convert(self.fn, ag_ctx.control_status_ctx())
    return ag_fn(y_true, y_pred, **self._fn_kwargs)

  def get_config(self):
    config = {}
    for k, v in six.iteritems(self._fn_kwargs):
      # Tensors/variables are evaluated to plain values for serialization.
      config[k] = K.eval(v) if tf_utils.is_tensor_or_variable(v) else v
    base_config = super(LossFunctionWrapper, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))


@keras_export('keras.losses.MeanSquaredError')
class MeanSquaredError(LossFunctionWrapper):
  """Computes the mean of squares of errors between labels and predictions.

  `loss = square(y_true - y_pred)`

  Standalone usage:

  >>> y_true = [[0., 1.], [0., 0.]]
  >>> y_pred = [[1., 1.], [1., 0.]]
  >>> # Using 'auto'/'sum_over_batch_size' reduction type.
  >>> mse = tf.keras.losses.MeanSquaredError()
  >>> mse(y_true, y_pred).numpy()
  0.5

  >>> # Calling with 'sample_weight'.
  >>> mse(y_true, y_pred, sample_weight=[0.7, 0.3]).numpy()
  0.25

  >>> # Using 'sum' reduction type.
  >>> mse = tf.keras.losses.MeanSquaredError(
  ...     reduction=tf.keras.losses.Reduction.SUM)
  >>> mse(y_true, y_pred).numpy()
  1.0

  >>> # Using 'none' reduction type.
  >>> mse = tf.keras.losses.MeanSquaredError(
  ...     reduction=tf.keras.losses.Reduction.NONE)
  >>> mse(y_true, y_pred).numpy()
  array([0.5, 0.5], dtype=float32)

  Usage with the `compile()` API:

  ```python
  model.compile(optimizer='sgd', loss=tf.keras.losses.MeanSquaredError())
  ```
  """

  def __init__(self,
               reduction=losses_utils.ReductionV2.AUTO,
               name='mean_squared_error'):
    """Initializes `MeanSquaredError` instance.

    Args:
      reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
        loss. Default value is `AUTO`. `AUTO` indicates that the reduction
        option will be determined by the usage context. For almost all cases
        this defaults to `SUM_OVER_BATCH_SIZE`.
        When used with `tf.distribute.Strategy`, outside of built-in training
        loops such as `tf.keras` `compile` and `fit`, using `AUTO` or
        `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom
        training [tutorial](
          https://www.tensorflow.org/tutorials/distribute/custom_training)
        for more details.
      name: Optional name for the op. Defaults to 'mean_squared_error'.
    """
    # Delegates computation to the functional `mean_squared_error`.
    super(MeanSquaredError, self).__init__(
        mean_squared_error, name=name, reduction=reduction)


@keras_export('keras.losses.MeanAbsoluteError')
class MeanAbsoluteError(LossFunctionWrapper):
  """Computes the mean of absolute difference between labels and predictions.

  `loss = abs(y_true - y_pred)`

  Standalone usage:

  >>> y_true = [[0., 1.], [0., 0.]]
  >>> y_pred = [[1., 1.], [1., 0.]]
  >>> # Using 'auto'/'sum_over_batch_size' reduction type.
  >>> mae = tf.keras.losses.MeanAbsoluteError()
  >>> mae(y_true, y_pred).numpy()
  0.5

  >>> # Calling with 'sample_weight'.
  >>> mae(y_true, y_pred, sample_weight=[0.7, 0.3]).numpy()
  0.25

  >>> # Using 'sum' reduction type.
  >>> mae = tf.keras.losses.MeanAbsoluteError(
  ...     reduction=tf.keras.losses.Reduction.SUM)
  >>> mae(y_true, y_pred).numpy()
  1.0

  >>> # Using 'none' reduction type.
  >>> mae = tf.keras.losses.MeanAbsoluteError(
  ...     reduction=tf.keras.losses.Reduction.NONE)
  >>> mae(y_true, y_pred).numpy()
  array([0.5, 0.5], dtype=float32)

  Usage with the `compile()` API:

  ```python
  model.compile(optimizer='sgd', loss=tf.keras.losses.MeanAbsoluteError())
  ```
  """

  def __init__(self,
               reduction=losses_utils.ReductionV2.AUTO,
               name='mean_absolute_error'):
    """Initializes `MeanAbsoluteError` instance.

    Args:
      reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
        loss. Default value is `AUTO`. `AUTO` indicates that the reduction
        option will be determined by the usage context. For almost all cases
        this defaults to `SUM_OVER_BATCH_SIZE`.
        When used with `tf.distribute.Strategy`, outside of built-in training
        loops such as `tf.keras` `compile` and `fit`, using `AUTO` or
        `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom
        training [tutorial](
          https://www.tensorflow.org/tutorials/distribute/custom_training)
        for more details.
      name: Optional name for the op. Defaults to 'mean_absolute_error'.
    """
    # Delegates computation to the functional `mean_absolute_error`.
    super(MeanAbsoluteError, self).__init__(
        mean_absolute_error, name=name, reduction=reduction)


@keras_export('keras.losses.MeanAbsolutePercentageError')
class MeanAbsolutePercentageError(LossFunctionWrapper):
  """Computes the mean absolute percentage error between `y_true` and `y_pred`.

  `loss = 100 * abs(y_true - y_pred) / y_true`

  Standalone usage:

  >>> y_true = [[2., 1.], [2., 3.]]
  >>> y_pred = [[1., 1.], [1., 0.]]
  >>> # Using 'auto'/'sum_over_batch_size' reduction type.
  >>> mape = tf.keras.losses.MeanAbsolutePercentageError()
  >>> mape(y_true, y_pred).numpy()
  50.

  >>> # Calling with 'sample_weight'.
  >>> mape(y_true, y_pred, sample_weight=[0.7, 0.3]).numpy()
  20.

  >>> # Using 'sum' reduction type.
  >>> mape = tf.keras.losses.MeanAbsolutePercentageError(
  ...     reduction=tf.keras.losses.Reduction.SUM)
  >>> mape(y_true, y_pred).numpy()
  100.

  >>> # Using 'none' reduction type.
  >>> mape = tf.keras.losses.MeanAbsolutePercentageError(
  ...     reduction=tf.keras.losses.Reduction.NONE)
  >>> mape(y_true, y_pred).numpy()
  array([25., 75.], dtype=float32)

  Usage with the `compile()` API:

  ```python
  model.compile(optimizer='sgd',
                loss=tf.keras.losses.MeanAbsolutePercentageError())
  ```
  """

  def __init__(self,
               reduction=losses_utils.ReductionV2.AUTO,
               name='mean_absolute_percentage_error'):
    """Initializes `MeanAbsolutePercentageError` instance.

    Args:
      reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
        loss. Default value is `AUTO`. `AUTO` indicates that the reduction
        option will be determined by the usage context. For almost all cases
        this defaults to `SUM_OVER_BATCH_SIZE`.
        When used with `tf.distribute.Strategy`, outside of built-in training
        loops such as `tf.keras` `compile` and `fit`, using `AUTO` or
        `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom
        training [tutorial](
          https://www.tensorflow.org/tutorials/distribute/custom_training)
        for more details.
      name: Optional name for the op. Defaults to
        'mean_absolute_percentage_error'.
    """
    # Delegates computation to the functional `mean_absolute_percentage_error`.
    super(MeanAbsolutePercentageError, self).__init__(
        mean_absolute_percentage_error, name=name, reduction=reduction)


@keras_export('keras.losses.MeanSquaredLogarithmicError')
class MeanSquaredLogarithmicError(LossFunctionWrapper):
  """Computes the mean squared logarithmic error between `y_true` and `y_pred`.

  `loss = square(log(y_true + 1.) - log(y_pred + 1.))`

  Standalone usage:

  >>> y_true = [[0., 1.], [0., 0.]]
  >>> y_pred = [[1., 1.], [1., 0.]]
  >>> # Using 'auto'/'sum_over_batch_size' reduction type.
  >>> msle = tf.keras.losses.MeanSquaredLogarithmicError()
  >>> msle(y_true, y_pred).numpy()
  0.240

  >>> # Calling with 'sample_weight'.
  >>> msle(y_true, y_pred, sample_weight=[0.7, 0.3]).numpy()
  0.120

  >>> # Using 'sum' reduction type.
  >>> msle = tf.keras.losses.MeanSquaredLogarithmicError(
  ...     reduction=tf.keras.losses.Reduction.SUM)
  >>> msle(y_true, y_pred).numpy()
  0.480

  >>> # Using 'none' reduction type.
  >>> msle = tf.keras.losses.MeanSquaredLogarithmicError(
  ...     reduction=tf.keras.losses.Reduction.NONE)
  >>> msle(y_true, y_pred).numpy()
  array([0.240, 0.240], dtype=float32)

  Usage with the `compile()` API:

  ```python
  model.compile(optimizer='sgd',
                loss=tf.keras.losses.MeanSquaredLogarithmicError())
  ```
  """

  def __init__(self,
               reduction=losses_utils.ReductionV2.AUTO,
               name='mean_squared_logarithmic_error'):
    """Initializes `MeanSquaredLogarithmicError` instance.

    Args:
      reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
        loss. Default value is `AUTO`. `AUTO` indicates that the reduction
        option will be determined by the usage context.
        For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used
        with `tf.distribute.Strategy`, outside of built-in training loops such
        as `tf.keras` `compile` and `fit`, using `AUTO` or
        `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom
        training [tutorial](
          https://www.tensorflow.org/tutorials/distribute/custom_training)
        for more details.
      name: Optional name for the op. Defaults to
        'mean_squared_logarithmic_error'.
    """
    # Delegates computation to the functional `mean_squared_logarithmic_error`.
    super(MeanSquaredLogarithmicError, self).__init__(
        mean_squared_logarithmic_error, name=name, reduction=reduction)


@keras_export('keras.losses.BinaryCrossentropy')
class BinaryCrossentropy(LossFunctionWrapper):
  """Computes the cross-entropy loss between true labels and predicted labels.

  Use this cross-entropy loss for binary (0 or 1) classification applications.
  The loss function requires the following inputs:

  - `y_true` (true label): This is either 0 or 1.
  - `y_pred` (predicted value): This is the model's prediction, i.e, a single
    floating-point value which either represents a
    [logit](https://en.wikipedia.org/wiki/Logit), (i.e, value in [-inf, inf]
    when `from_logits=True`) or a probability (i.e, value in [0., 1.] when
    `from_logits=False`).

  **Recommended Usage:** (set `from_logits=True`)

  With `tf.keras` API:

  ```python
  model.compile(
    loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
    ....
  )
  ```

  As a standalone function:

  >>> # Example 1: (batch_size = 1, number of samples = 4)
  >>> y_true = [0, 1, 0, 0]
  >>> y_pred = [-18.6, 0.51, 2.94, -12.8]
  >>> bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)
  >>> bce(y_true, y_pred).numpy()
  0.865

  >>> # Example 2: (batch_size = 2, number of samples = 4)
  >>> y_true = [[0, 1], [0, 0]]
  >>> y_pred = [[-18.6, 0.51], [2.94, -12.8]]
  >>> # Using default 'auto'/'sum_over_batch_size' reduction type.
  >>> bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)
  >>> bce(y_true, y_pred).numpy()
  0.865
  >>> # Using 'sample_weight' attribute
  >>> bce(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy()
  0.243
  >>> # Using 'sum' reduction` type.
  >>> bce = tf.keras.losses.BinaryCrossentropy(from_logits=True,
  ...     reduction=tf.keras.losses.Reduction.SUM)
  >>> bce(y_true, y_pred).numpy()
  1.730
  >>> # Using 'none' reduction type.
  >>> bce = tf.keras.losses.BinaryCrossentropy(from_logits=True,
  ...     reduction=tf.keras.losses.Reduction.NONE)
  >>> bce(y_true, y_pred).numpy()
  array([0.235, 1.496], dtype=float32)

  **Default Usage:** (set `from_logits=False`)

  >>> # Make the following updates to the above "Recommended Usage" section
  >>> # 1. Set `from_logits=False`
  >>> tf.keras.losses.BinaryCrossentropy() # OR ...('from_logits=False')
  >>> # 2. Update `y_pred` to use probabilities instead of logits
  >>> y_pred = [0.6, 0.3, 0.2, 0.8] # OR [[0.6, 0.3], [0.2, 0.8]]
  """

  def __init__(self,
               from_logits=False,
               label_smoothing=0,
               reduction=losses_utils.ReductionV2.AUTO,
               name='binary_crossentropy'):
    """Initializes `BinaryCrossentropy` instance.

    Args:
      from_logits: Whether to interpret `y_pred` as a tensor of
        [logit](https://en.wikipedia.org/wiki/Logit) values. By default, we
        assume that `y_pred` contains probabilities (i.e., values in [0, 1]).
        **Note - Using from_logits=True may be more numerically stable.
      label_smoothing: Float in [0, 1]. When 0, no smoothing occurs. When > 0,
        we compute the loss between the predicted labels and a smoothed
        version of the true labels, where the smoothing squeezes the labels
        towards 0.5. Larger values of `label_smoothing` correspond to heavier
        smoothing.
      reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
        loss. Default value is `AUTO`. `AUTO` indicates that the reduction
        option will be determined by the usage context. For almost all cases
        this defaults to `SUM_OVER_BATCH_SIZE`.
        When used with `tf.distribute.Strategy`, outside of built-in training
        loops such as `tf.keras` `compile` and `fit`, using `AUTO` or
        `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom
        training [tutorial](
          https://www.tensorflow.org/tutorials/distribute/custom_training)
        for more details.
      name: (Optional) Name for the op. Defaults to 'binary_crossentropy'.
    """
    # Delegates computation to the functional `binary_crossentropy`.
    super(BinaryCrossentropy, self).__init__(
        binary_crossentropy,
        name=name,
        reduction=reduction,
        from_logits=from_logits,
        label_smoothing=label_smoothing)
    # Also kept as a public attribute, in addition to being forwarded to the
    # wrapped fn above.
    self.from_logits = from_logits


@keras_export('keras.losses.CategoricalCrossentropy')
class CategoricalCrossentropy(LossFunctionWrapper):
  """Computes the crossentropy loss between the labels and predictions.

  Use this crossentropy loss function when there are two or more label
  classes. We expect labels to be provided in a `one_hot` representation. If
  you want to provide labels as integers, please use
  `SparseCategoricalCrossentropy` loss. There should be `# classes` floating
  point values per feature.

  In the snippet below, there is `# classes` floating pointing values per
  example. The shape of both `y_pred` and `y_true` are
  `[batch_size, num_classes]`.

  Standalone usage:

  >>> y_true = [[0, 1, 0], [0, 0, 1]]
  >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
  >>> # Using 'auto'/'sum_over_batch_size' reduction type.
  >>> cce = tf.keras.losses.CategoricalCrossentropy()
  >>> cce(y_true, y_pred).numpy()
  1.177

  >>> # Calling with 'sample_weight'.
  >>> cce(y_true, y_pred, sample_weight=tf.constant([0.3, 0.7])).numpy()
  0.814

  >>> # Using 'sum' reduction type.
  >>> cce = tf.keras.losses.CategoricalCrossentropy(
  ...     reduction=tf.keras.losses.Reduction.SUM)
  >>> cce(y_true, y_pred).numpy()
  2.354

  >>> # Using 'none' reduction type.
  >>> cce = tf.keras.losses.CategoricalCrossentropy(
  ...     reduction=tf.keras.losses.Reduction.NONE)
  >>> cce(y_true, y_pred).numpy()
  array([0.0513, 2.303], dtype=float32)

  Usage with the `compile()` API:

  ```python
  model.compile(optimizer='sgd',
                loss=tf.keras.losses.CategoricalCrossentropy())
  ```
  """

  def __init__(self,
               from_logits=False,
               label_smoothing=0,
               reduction=losses_utils.ReductionV2.AUTO,
               name='categorical_crossentropy'):
    """Initializes `CategoricalCrossentropy` instance.

    Args:
      from_logits: Whether `y_pred` is expected to be a logits tensor. By
        default, we assume that `y_pred` encodes a probability distribution.
        **Note - Using from_logits=True is more numerically stable.**
      label_smoothing: Float in [0, 1]. When > 0, label values are smoothed,
        meaning the confidence on label values are relaxed. For example, if
        `0.1`, use `0.1 / num_classes` for non-target labels and
        `0.9 + 0.1 / num_classes` for target labels.
      reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
        loss. Default value is `AUTO`. `AUTO` indicates that the reduction
        option will be determined by the usage context. For almost all cases
        this defaults to `SUM_OVER_BATCH_SIZE`. When used with
        `tf.distribute.Strategy`, outside of built-in training loops such as
        `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
        will raise an error. Please see this custom training [tutorial](
          https://www.tensorflow.org/tutorials/distribute/custom_training)
        for more details.
      name: Optional name for the op. Defaults to 'categorical_crossentropy'.
    """
    # Delegates computation to the functional `categorical_crossentropy`.
    super(CategoricalCrossentropy, self).__init__(
        categorical_crossentropy,
        name=name,
        reduction=reduction,
        from_logits=from_logits,
        label_smoothing=label_smoothing)


@keras_export('keras.losses.SparseCategoricalCrossentropy')
class SparseCategoricalCrossentropy(LossFunctionWrapper):
  """Computes the crossentropy loss between the labels and predictions.

  Use this crossentropy loss function when there are two or more label
  classes. We expect labels to be provided as integers.
@keras_export('keras.losses.SparseCategoricalCrossentropy')
class SparseCategoricalCrossentropy(LossFunctionWrapper):
  """Computes the crossentropy loss between the labels and predictions.

  Use this crossentropy loss function when there are two or more label
  classes and labels are provided as integers. For `one-hot` labels, use
  `CategoricalCrossentropy` instead. There should be `# classes` floating
  point values per feature for `y_pred` and a single floating point value
  per feature for `y_true`: the shape of `y_true` is `[batch_size]` and the
  shape of `y_pred` is `[batch_size, num_classes]`.

  Standalone usage:

  >>> y_true = [1, 2]
  >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
  >>> scce = tf.keras.losses.SparseCategoricalCrossentropy()
  >>> scce(y_true, y_pred).numpy()
  1.177

  Usage with the `compile()` API:

  ```python
  model.compile(optimizer='sgd',
                loss=tf.keras.losses.SparseCategoricalCrossentropy())
  ```
  """

  def __init__(self,
               from_logits=False,
               reduction=losses_utils.ReductionV2.AUTO,
               name='sparse_categorical_crossentropy'):
    """Initializes `SparseCategoricalCrossentropy` instance.

    Args:
      from_logits: Whether `y_pred` is expected to be a logits tensor. By
        default, we assume that `y_pred` encodes a probability distribution.
        **Note - Using from_logits=True may be more numerically stable.**
      reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
        loss. Default value is `AUTO`, which resolves to `SUM_OVER_BATCH_SIZE`
        in almost all cases. When used with `tf.distribute.Strategy`, outside
        of built-in training loops such as `tf.keras` `compile` and `fit`,
        using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see
        this custom training [tutorial](
        https://www.tensorflow.org/tutorials/distribute/custom_training)
        for more details.
      name: Optional name for the op. Defaults to
        'sparse_categorical_crossentropy'.
    """
    super(SparseCategoricalCrossentropy, self).__init__(
        sparse_categorical_crossentropy,
        name=name,
        reduction=reduction,
        from_logits=from_logits)
When used with `tf.distribute.Strategy`, outside of built-in training loops such as `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom training [tutorial]( https://www.tensorflow.org/tutorials/distribute/custom_training) for more details. name: Optional name for the op. Defaults to 'hinge'. """ super(Hinge, self).__init__(hinge, name=name, reduction=reduction) @keras_export('keras.losses.SquaredHinge') class SquaredHinge(LossFunctionWrapper): """Computes the squared hinge loss between `y_true` and `y_pred`. `loss = square(maximum(1 - y_true * y_pred, 0))` `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are provided we will convert them to -1 or 1. Standalone usage: >>> y_true = [[0., 1.], [0., 0.]] >>> y_pred = [[0.6, 0.4], [0.4, 0.6]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> h = tf.keras.losses.SquaredHinge() >>> h(y_true, y_pred).numpy() 1.86 >>> # Calling with 'sample_weight'. >>> h(y_true, y_pred, sample_weight=[1, 0]).numpy() 0.73 >>> # Using 'sum' reduction type. >>> h = tf.keras.losses.SquaredHinge( ... reduction=tf.keras.losses.Reduction.SUM) >>> h(y_true, y_pred).numpy() 3.72 >>> # Using 'none' reduction type. >>> h = tf.keras.losses.SquaredHinge( ... reduction=tf.keras.losses.Reduction.NONE) >>> h(y_true, y_pred).numpy() array([1.46, 2.26], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.SquaredHinge()) ``` """ def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='squared_hinge'): """Initializes `SquaredHinge` instance. Args: reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. 
When used with `tf.distribute.Strategy`, outside of built-in training loops such as `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom training [tutorial]( https://www.tensorflow.org/tutorials/distribute/custom_training) for more details. name: Optional name for the op. Defaults to 'squared_hinge'. """ super(SquaredHinge, self).__init__( squared_hinge, name=name, reduction=reduction) @keras_export('keras.losses.CategoricalHinge') class CategoricalHinge(LossFunctionWrapper): """Computes the categorical hinge loss between `y_true` and `y_pred`. `loss = maximum(neg - pos + 1, 0)` where `neg=maximum((1-y_true)*y_pred) and pos=sum(y_true*y_pred)` Standalone usage: >>> y_true = [[0, 1], [0, 0]] >>> y_pred = [[0.6, 0.4], [0.4, 0.6]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> h = tf.keras.losses.CategoricalHinge() >>> h(y_true, y_pred).numpy() 1.4 >>> # Calling with 'sample_weight'. >>> h(y_true, y_pred, sample_weight=[1, 0]).numpy() 0.6 >>> # Using 'sum' reduction type. >>> h = tf.keras.losses.CategoricalHinge( ... reduction=tf.keras.losses.Reduction.SUM) >>> h(y_true, y_pred).numpy() 2.8 >>> # Using 'none' reduction type. >>> h = tf.keras.losses.CategoricalHinge( ... reduction=tf.keras.losses.Reduction.NONE) >>> h(y_true, y_pred).numpy() array([1.2, 1.6], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.CategoricalHinge()) ``` """ def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='categorical_hinge'): """Initializes `CategoricalHinge` instance. Args: reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. 
When used with `tf.distribute.Strategy`, outside of built-in training loops such as `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom training [tutorial]( https://www.tensorflow.org/tutorials/distribute/custom_training) for more details. name: Optional name for the op. Defaults to 'categorical_hinge'. """ super(CategoricalHinge, self).__init__( categorical_hinge, name=name, reduction=reduction) @keras_export('keras.losses.Poisson') class Poisson(LossFunctionWrapper): """Computes the Poisson loss between `y_true` and `y_pred`. `loss = y_pred - y_true * log(y_pred)` Standalone usage: >>> y_true = [[0., 1.], [0., 0.]] >>> y_pred = [[1., 1.], [0., 0.]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> p = tf.keras.losses.Poisson() >>> p(y_true, y_pred).numpy() 0.5 >>> # Calling with 'sample_weight'. >>> p(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy() 0.4 >>> # Using 'sum' reduction type. >>> p = tf.keras.losses.Poisson( ... reduction=tf.keras.losses.Reduction.SUM) >>> p(y_true, y_pred).numpy() 0.999 >>> # Using 'none' reduction type. >>> p = tf.keras.losses.Poisson( ... reduction=tf.keras.losses.Reduction.NONE) >>> p(y_true, y_pred).numpy() array([0.999, 0.], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.Poisson()) ``` """ def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='poisson'): """Initializes `Poisson` instance. Args: reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used with `tf.distribute.Strategy`, outside of built-in training loops such as `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. 
Please see this custom training [tutorial]( https://www.tensorflow.org/tutorials/distribute/custom_training) for more details. name: Optional name for the op. Defaults to 'poisson'. """ super(Poisson, self).__init__(poisson, name=name, reduction=reduction) @keras_export('keras.losses.LogCosh') class LogCosh(LossFunctionWrapper): """Computes the logarithm of the hyperbolic cosine of the prediction error. `logcosh = log((exp(x) + exp(-x))/2)`, where x is the error `y_pred - y_true`. Standalone usage: >>> y_true = [[0., 1.], [0., 0.]] >>> y_pred = [[1., 1.], [0., 0.]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> l = tf.keras.losses.LogCosh() >>> l(y_true, y_pred).numpy() 0.108 >>> # Calling with 'sample_weight'. >>> l(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy() 0.087 >>> # Using 'sum' reduction type. >>> l = tf.keras.losses.LogCosh( ... reduction=tf.keras.losses.Reduction.SUM) >>> l(y_true, y_pred).numpy() 0.217 >>> # Using 'none' reduction type. >>> l = tf.keras.losses.LogCosh( ... reduction=tf.keras.losses.Reduction.NONE) >>> l(y_true, y_pred).numpy() array([0.217, 0.], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.LogCosh()) ``` """ def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='log_cosh'): """Initializes `LogCosh` instance. Args: reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used with `tf.distribute.Strategy`, outside of built-in training loops such as `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom training [tutorial]( https://www.tensorflow.org/tutorials/distribute/custom_training) for more details. name: Optional name for the op. Defaults to 'log_cosh'. 
""" super(LogCosh, self).__init__(log_cosh, name=name, reduction=reduction) @keras_export('keras.losses.KLDivergence') class KLDivergence(LossFunctionWrapper): """Computes Kullback-Leibler divergence loss between `y_true` and `y_pred`. `loss = y_true * log(y_true / y_pred)` See: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence Standalone usage: >>> y_true = [[0, 1], [0, 0]] >>> y_pred = [[0.6, 0.4], [0.4, 0.6]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> kl = tf.keras.losses.KLDivergence() >>> kl(y_true, y_pred).numpy() 0.458 >>> # Calling with 'sample_weight'. >>> kl(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy() 0.366 >>> # Using 'sum' reduction type. >>> kl = tf.keras.losses.KLDivergence( ... reduction=tf.keras.losses.Reduction.SUM) >>> kl(y_true, y_pred).numpy() 0.916 >>> # Using 'none' reduction type. >>> kl = tf.keras.losses.KLDivergence( ... reduction=tf.keras.losses.Reduction.NONE) >>> kl(y_true, y_pred).numpy() array([0.916, -3.08e-06], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.KLDivergence()) ``` """ def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='kl_divergence'): """Initializes `KLDivergence` instance. Args: reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used with `tf.distribute.Strategy`, outside of built-in training loops such as `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom training [tutorial]( https://www.tensorflow.org/tutorials/distribute/custom_training) for more details. name: Optional name for the op. Defaults to 'kl_divergence'. 
""" super(KLDivergence, self).__init__( kl_divergence, name=name, reduction=reduction) @keras_export('keras.losses.Huber') class Huber(LossFunctionWrapper): """Computes the Huber loss between `y_true` and `y_pred`. For each value x in `error = y_true - y_pred`: ``` loss = 0.5 * x^2 if |x| <= d loss = 0.5 * d^2 + d * (|x| - d) if |x| > d ``` where d is `delta`. See: https://en.wikipedia.org/wiki/Huber_loss Standalone usage: >>> y_true = [[0, 1], [0, 0]] >>> y_pred = [[0.6, 0.4], [0.4, 0.6]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> h = tf.keras.losses.Huber() >>> h(y_true, y_pred).numpy() 0.155 >>> # Calling with 'sample_weight'. >>> h(y_true, y_pred, sample_weight=[1, 0]).numpy() 0.09 >>> # Using 'sum' reduction type. >>> h = tf.keras.losses.Huber( ... reduction=tf.keras.losses.Reduction.SUM) >>> h(y_true, y_pred).numpy() 0.31 >>> # Using 'none' reduction type. >>> h = tf.keras.losses.Huber( ... reduction=tf.keras.losses.Reduction.NONE) >>> h(y_true, y_pred).numpy() array([0.18, 0.13], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.Huber()) ``` """ def __init__(self, delta=1.0, reduction=losses_utils.ReductionV2.AUTO, name='huber_loss'): """Initializes `Huber` instance. Args: delta: A float, the point where the Huber loss function changes from a quadratic to linear. reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used with `tf.distribute.Strategy`, outside of built-in training loops such as `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom training [tutorial]( https://www.tensorflow.org/tutorials/distribute/custom_training) for more details. name: Optional name for the op. Defaults to 'huber_loss'. 
""" super(Huber, self).__init__( huber, name=name, reduction=reduction, delta=delta) @keras_export('keras.metrics.mean_squared_error', 'keras.metrics.mse', 'keras.metrics.MSE', 'keras.losses.mean_squared_error', 'keras.losses.mse', 'keras.losses.MSE') @dispatch.add_dispatch_support def mean_squared_error(y_true, y_pred): """Computes the mean squared error between labels and predictions. After computing the squared distance between the inputs, the mean value over the last dimension is returned. `loss = mean(square(y_true - y_pred), axis=-1)` Standalone usage: >>> y_true = np.random.randint(0, 2, size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.mean_squared_error(y_true, y_pred) >>> assert loss.shape == (2,) >>> assert np.array_equal( ... loss.numpy(), np.mean(np.square(y_true - y_pred), axis=-1)) Args: y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. Returns: Mean squared error values. shape = `[batch_size, d0, .. dN-1]`. """ y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred) y_true = math_ops.cast(y_true, y_pred.dtype) return K.mean(math_ops.squared_difference(y_pred, y_true), axis=-1) def _ragged_tensor_apply_loss(loss_fn, y_true, y_pred): """Apply a loss function on a per batch basis. Args: loss_fn: The loss function y_true: truth values (RaggedTensor) y_pred: predicted values (RaggedTensor) Returns: Loss-function result. A dense tensor if the output has a single dimension (per-batch loss value); a ragged tensor otherwise. """ def rt_is_equiv_dense(rt): """Returns true if this RaggedTensor has the same row_lenghts across all ragged dimensions and thus can be converted to a dense tensor without loss of information. 
Args: rt: RaggedTensor """ return math_ops.reduce_all([ math_ops.equal( math_ops.reduce_variance(math_ops.cast(row_lens, K.floatx())), constant_op.constant([0.])) for row_lens in rt.nested_row_lengths() ]) def _convert_to_dense(inputs): return tuple(rt.to_tensor() for rt in inputs) def _wrapper(inputs): _, y_pred = inputs if isinstance(y_pred, ragged_tensor.RaggedTensor): return control_flow_ops.cond( rt_is_equiv_dense(y_pred), lambda: loss_fn(*_convert_to_dense(inputs)), lambda: loss_fn(*inputs)) return loss_fn(*inputs) lshape = y_pred.shape.as_list()[1:-1] if len(lshape) > 0: spec = ragged_tensor.RaggedTensorSpec(shape=lshape, dtype=y_pred.dtype) else: spec = tensor_spec.TensorSpec(shape=[], dtype=y_pred.dtype) nested_splits_list = [rt.nested_row_splits for rt in (y_true, y_pred)] assertion_list = ragged_util.assert_splits_match(nested_splits_list) with ops.control_dependencies(assertion_list): return ragged_map_ops.map_fn(_wrapper, elems=(y_true, y_pred), dtype=spec) @dispatch.dispatch_for_types(mean_squared_error, ragged_tensor.RaggedTensor) def _ragged_tensor_mse(y_true, y_pred): """ Implements support for handling RaggedTensors. Args: y_true: RaggedTensor truth values. shape = `[batch_size, d0, .. dN]`. y_pred: RaggedTensor predicted values. shape = `[batch_size, d0, .. dN]`. Returns: Mean squared error values. shape = `[batch_size, d0, .. dN-1]`. When the number of dimensions of the batch feature vector [d0, .. dN] is greater than one the return value is a RaggedTensor. Otherwise a Dense tensor with dimensions [batch_size] is returned. """ return _ragged_tensor_apply_loss(mean_squared_error, y_true, y_pred) @keras_export('keras.metrics.mean_absolute_error', 'keras.metrics.mae', 'keras.metrics.MAE', 'keras.losses.mean_absolute_error', 'keras.losses.mae', 'keras.losses.MAE') @dispatch.add_dispatch_support def mean_absolute_error(y_true, y_pred): """Computes the mean absolute error between labels and predictions. 
`loss = mean(abs(y_true - y_pred), axis=-1)` Standalone usage: >>> y_true = np.random.randint(0, 2, size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.mean_absolute_error(y_true, y_pred) >>> assert loss.shape == (2,) >>> assert np.array_equal( ... loss.numpy(), np.mean(np.abs(y_true - y_pred), axis=-1)) Args: y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. Returns: Mean absolute error values. shape = `[batch_size, d0, .. dN-1]`. """ y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred) y_true = math_ops.cast(y_true, y_pred.dtype) return K.mean(math_ops.abs(y_pred - y_true), axis=-1) @dispatch.dispatch_for_types(mean_absolute_error, ragged_tensor.RaggedTensor) def _ragged_tensor_mae(y_true, y_pred): """ RaggedTensor adapter for mean_absolute_error""" return _ragged_tensor_apply_loss(mean_absolute_error, y_true, y_pred) @keras_export('keras.metrics.mean_absolute_percentage_error', 'keras.metrics.mape', 'keras.metrics.MAPE', 'keras.losses.mean_absolute_percentage_error', 'keras.losses.mape', 'keras.losses.MAPE') @dispatch.add_dispatch_support def mean_absolute_percentage_error(y_true, y_pred): """Computes the mean absolute percentage error between `y_true` and `y_pred`. `loss = 100 * mean(abs((y_true - y_pred) / y_true), axis=-1)` Standalone usage: >>> y_true = np.random.random(size=(2, 3)) >>> y_true = np.maximum(y_true, 1e-7) # Prevent division by zero >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.mean_absolute_percentage_error(y_true, y_pred) >>> assert loss.shape == (2,) >>> assert np.array_equal( ... loss.numpy(), ... 100. * np.mean(np.abs((y_true - y_pred) / y_true), axis=-1)) Args: y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. Returns: Mean absolute percentage error values. shape = `[batch_size, d0, .. dN-1]`. 
""" y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred) y_true = math_ops.cast(y_true, y_pred.dtype) diff = math_ops.abs( (y_true - y_pred) / K.maximum(math_ops.abs(y_true), K.epsilon())) return 100. * K.mean(diff, axis=-1) @dispatch.dispatch_for_types(mean_absolute_percentage_error, ragged_tensor.RaggedTensor) def _ragged_tensor_mape(y_true, y_pred): """ Support RaggedTensors.""" return _ragged_tensor_apply_loss(mean_absolute_percentage_error, y_true, y_pred) @keras_export('keras.metrics.mean_squared_logarithmic_error', 'keras.metrics.msle', 'keras.metrics.MSLE', 'keras.losses.mean_squared_logarithmic_error', 'keras.losses.msle', 'keras.losses.MSLE') @dispatch.add_dispatch_support def mean_squared_logarithmic_error(y_true, y_pred): """Computes the mean squared logarithmic error between `y_true` and `y_pred`. `loss = mean(square(log(y_true + 1) - log(y_pred + 1)), axis=-1)` Standalone usage: >>> y_true = np.random.randint(0, 2, size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.mean_squared_logarithmic_error(y_true, y_pred) >>> assert loss.shape == (2,) >>> y_true = np.maximum(y_true, 1e-7) >>> y_pred = np.maximum(y_pred, 1e-7) >>> assert np.allclose( ... loss.numpy(), ... np.mean( ... np.square(np.log(y_true + 1.) - np.log(y_pred + 1.)), axis=-1)) Args: y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. Returns: Mean squared logarithmic error values. shape = `[batch_size, d0, .. dN-1]`. """ y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred) y_true = math_ops.cast(y_true, y_pred.dtype) first_log = math_ops.log(K.maximum(y_pred, K.epsilon()) + 1.) second_log = math_ops.log(K.maximum(y_true, K.epsilon()) + 1.) 
return K.mean(math_ops.squared_difference(first_log, second_log), axis=-1) @dispatch.dispatch_for_types(mean_squared_logarithmic_error, ragged_tensor.RaggedTensor) def _ragged_tensor_msle(y_true, y_pred): """ Implements support for handling RaggedTensors.""" return _ragged_tensor_apply_loss(mean_squared_logarithmic_error, y_true, y_pred) def _maybe_convert_labels(y_true): """Converts binary labels into -1/1.""" are_zeros = math_ops.equal(y_true, 0) are_ones = math_ops.equal(y_true, 1) is_binary = math_ops.reduce_all(math_ops.logical_or(are_zeros, are_ones)) def _convert_binary_labels(): # Convert the binary labels to -1 or 1. return 2. * y_true - 1. updated_y_true = smart_cond.smart_cond(is_binary, _convert_binary_labels, lambda: y_true) return updated_y_true @keras_export('keras.metrics.squared_hinge', 'keras.losses.squared_hinge') @dispatch.add_dispatch_support def squared_hinge(y_true, y_pred): """Computes the squared hinge loss between `y_true` and `y_pred`. `loss = mean(square(maximum(1 - y_true * y_pred, 0)), axis=-1)` Standalone usage: >>> y_true = np.random.choice([-1, 1], size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.squared_hinge(y_true, y_pred) >>> assert loss.shape == (2,) >>> assert np.array_equal( ... loss.numpy(), ... np.mean(np.square(np.maximum(1. - y_true * y_pred, 0.)), axis=-1)) Args: y_true: The ground truth values. `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are provided we will convert them to -1 or 1. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. Returns: Squared hinge loss values. shape = `[batch_size, d0, .. dN-1]`. """ y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred) y_true = math_ops.cast(y_true, y_pred.dtype) y_true = _maybe_convert_labels(y_true) return K.mean( math_ops.square(math_ops.maximum(1. 
- y_true * y_pred, 0.)), axis=-1) @keras_export('keras.metrics.hinge', 'keras.losses.hinge') @dispatch.add_dispatch_support def hinge(y_true, y_pred): """Computes the hinge loss between `y_true` and `y_pred`. `loss = mean(maximum(1 - y_true * y_pred, 0), axis=-1)` Standalone usage: >>> y_true = np.random.choice([-1, 1], size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.hinge(y_true, y_pred) >>> assert loss.shape == (2,) >>> assert np.array_equal( ... loss.numpy(), ... np.mean(np.maximum(1. - y_true * y_pred, 0.), axis=-1)) Args: y_true: The ground truth values. `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are provided they will be converted to -1 or 1. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. Returns: Hinge loss values. shape = `[batch_size, d0, .. dN-1]`. """ y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred) y_true = math_ops.cast(y_true, y_pred.dtype) y_true = _maybe_convert_labels(y_true) return K.mean(math_ops.maximum(1. - y_true * y_pred, 0.), axis=-1) @keras_export('keras.losses.categorical_hinge') @dispatch.add_dispatch_support def categorical_hinge(y_true, y_pred): """Computes the categorical hinge loss between `y_true` and `y_pred`. `loss = maximum(neg - pos + 1, 0)` where `neg=maximum((1-y_true)*y_pred) and pos=sum(y_true*y_pred)` Standalone usage: >>> y_true = np.random.randint(0, 3, size=(2,)) >>> y_true = tf.keras.utils.to_categorical(y_true, num_classes=3) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.categorical_hinge(y_true, y_pred) >>> assert loss.shape == (2,) >>> pos = np.sum(y_true * y_pred, axis=-1) >>> neg = np.amax((1. - y_true) * y_pred, axis=-1) >>> assert np.array_equal(loss.numpy(), np.maximum(0., neg - pos + 1.)) Args: y_true: The ground truth values. `y_true` values are expected to be either `{-1, +1}` or `{0, 1}` (i.e. a one-hot-encoded tensor). y_pred: The predicted values. 
Returns: Categorical hinge loss values. """ y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred) y_true = math_ops.cast(y_true, y_pred.dtype) pos = math_ops.reduce_sum(y_true * y_pred, axis=-1) neg = math_ops.reduce_max((1. - y_true) * y_pred, axis=-1) zero = math_ops.cast(0., y_pred.dtype) return math_ops.maximum(neg - pos + 1., zero) @keras_export('keras.losses.huber', v1=[]) @dispatch.add_dispatch_support def huber(y_true, y_pred, delta=1.0): """Computes Huber loss value. For each value x in `error = y_true - y_pred`: ``` loss = 0.5 * x^2 if |x| <= d loss = 0.5 * d^2 + d * (|x| - d) if |x| > d ``` where d is `delta`. See: https://en.wikipedia.org/wiki/Huber_loss Args: y_true: tensor of true targets. y_pred: tensor of predicted targets. delta: A float, the point where the Huber loss function changes from a quadratic to linear. Returns: Tensor with one scalar loss entry per sample. """ y_pred = math_ops.cast(y_pred, dtype=K.floatx()) y_true = math_ops.cast(y_true, dtype=K.floatx()) delta = math_ops.cast(delta, dtype=K.floatx()) error = math_ops.subtract(y_pred, y_true) abs_error = math_ops.abs(error) half = ops.convert_to_tensor_v2_with_dispatch(0.5, dtype=abs_error.dtype) return K.mean( array_ops.where_v2( abs_error <= delta, half * math_ops.pow(error, 2), half * math_ops.pow(delta, 2) + delta * (abs_error - delta)), axis=-1) @keras_export('keras.losses.log_cosh', 'keras.losses.logcosh', 'keras.metrics.log_cosh', 'keras.metrics.logcosh') @dispatch.add_dispatch_support def log_cosh(y_true, y_pred): """Logarithm of the hyperbolic cosine of the prediction error. `log(cosh(x))` is approximately equal to `(x ** 2) / 2` for small `x` and to `abs(x) - log(2)` for large `x`. This means that 'logcosh' works mostly like the mean squared error, but will not be so strongly affected by the occasional wildly incorrect prediction. 
  Standalone usage:

  >>> y_true = np.random.random(size=(2, 3))
  >>> y_pred = np.random.random(size=(2, 3))
  >>> loss = tf.keras.losses.logcosh(y_true, y_pred)
  >>> assert loss.shape == (2,)
  >>> x = y_pred - y_true
  >>> assert np.allclose(
  ...     loss.numpy(),
  ...     np.mean(x + np.log(np.exp(-2. * x) + 1.) - math_ops.log(2.), axis=-1),
  ...     atol=1e-5)

  Args:
    y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
    y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.

  Returns:
    Logcosh error values. shape = `[batch_size, d0, .. dN-1]`.
  """
  y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)

  def _logcosh(x):
    # Numerically stable form of log(cosh(x)): x + softplus(-2x) - log(2).
    return x + nn.softplus(-2. * x) - math_ops.cast(math_ops.log(2.), x.dtype)

  return K.mean(_logcosh(y_pred - y_true), axis=-1)


@keras_export('keras.metrics.categorical_crossentropy',
              'keras.losses.categorical_crossentropy')
@dispatch.add_dispatch_support
def categorical_crossentropy(y_true,
                             y_pred,
                             from_logits=False,
                             label_smoothing=0):
  """Computes the categorical crossentropy loss.

  Standalone usage:

  >>> y_true = [[0, 1, 0], [0, 0, 1]]
  >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
  >>> loss = tf.keras.losses.categorical_crossentropy(y_true, y_pred)
  >>> assert loss.shape == (2,)
  >>> loss.numpy()
  array([0.0513, 2.303], dtype=float32)

  Args:
    y_true: Tensor of one-hot true targets.
    y_pred: Tensor of predicted targets.
    from_logits: Whether `y_pred` is expected to be a logits tensor. By default,
      we assume that `y_pred` encodes a probability distribution.
    label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. For
      example, if `0.1`, use `0.1 / num_classes` for non-target labels and
      `0.9 + 0.1 / num_classes` for target labels.

  Returns:
    Categorical crossentropy loss value.
  """
  y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  label_smoothing = ops.convert_to_tensor_v2_with_dispatch(
      label_smoothing, dtype=K.floatx())

  def _smooth_labels():
    # Redistribute `label_smoothing` mass uniformly over the class axis.
    num_classes = math_ops.cast(array_ops.shape(y_true)[-1], y_pred.dtype)
    return y_true * (1.0 - label_smoothing) + (label_smoothing / num_classes)

  # smart_cond skips building the smoothing subgraph when label_smoothing == 0.
  y_true = smart_cond.smart_cond(label_smoothing, _smooth_labels,
                                 lambda: y_true)
  return K.categorical_crossentropy(y_true, y_pred, from_logits=from_logits)


@dispatch.dispatch_for_types(categorical_crossentropy,
                             ragged_tensor.RaggedTensor)
def _ragged_tensor_categorical_crossentropy(y_true,
                                            y_pred,
                                            from_logits=False,
                                            label_smoothing=0):
  """Implements support for handling RaggedTensors.

  Expected shape: (batch, sequence_len, n_classes) with sequence_len
  being variable per batch.
  Return shape: (batch, sequence_len).

  When used by CategoricalCrossentropy() with the default reduction
  (SUM_OVER_BATCH_SIZE), the reduction averages the loss over the
  number of elements independent of the batch. E.g. if the RaggedTensor
  has 2 batches with [2, 1] values respectively the resulting loss is
  the sum of the individual loss values divided by 3.
  """
  fn = functools.partial(
      categorical_crossentropy,
      from_logits=from_logits,
      label_smoothing=label_smoothing)
  return _ragged_tensor_apply_loss(fn, y_true, y_pred)


@keras_export('keras.metrics.sparse_categorical_crossentropy',
              'keras.losses.sparse_categorical_crossentropy')
@dispatch.add_dispatch_support
def sparse_categorical_crossentropy(y_true, y_pred, from_logits=False, axis=-1):
  """Computes the sparse categorical crossentropy loss.

  Standalone usage:

  >>> y_true = [1, 2]
  >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
  >>> loss = tf.keras.losses.sparse_categorical_crossentropy(y_true, y_pred)
  >>> assert loss.shape == (2,)
  >>> loss.numpy()
  array([0.0513, 2.303], dtype=float32)

  Args:
    y_true: Ground truth values.
    y_pred: The predicted values.
    from_logits: Whether `y_pred` is expected to be a logits tensor. By default,
      we assume that `y_pred` encodes a probability distribution.
    axis: (Optional) Defaults to -1. The dimension along which the entropy is
      computed.

  Returns:
    Sparse categorical crossentropy loss value.
  """
  y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  return K.sparse_categorical_crossentropy(
      y_true, y_pred, from_logits=from_logits, axis=axis)


@keras_export('keras.metrics.binary_crossentropy',
              'keras.losses.binary_crossentropy')
@dispatch.add_dispatch_support
def binary_crossentropy(y_true, y_pred, from_logits=False, label_smoothing=0):
  """Computes the binary crossentropy loss.

  Standalone usage:

  >>> y_true = [[0, 1], [0, 0]]
  >>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
  >>> loss = tf.keras.losses.binary_crossentropy(y_true, y_pred)
  >>> assert loss.shape == (2,)
  >>> loss.numpy()
  array([0.916 , 0.714], dtype=float32)

  Args:
    y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
    y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
    from_logits: Whether `y_pred` is expected to be a logits tensor. By default,
      we assume that `y_pred` encodes a probability distribution.
    label_smoothing: Float in [0, 1]. If > `0` then smooth the labels by
      squeezing them towards 0.5 That is, using `1. - 0.5 * label_smoothing`
      for the target class and `0.5 * label_smoothing` for the non-target
      class.

  Returns:
    Binary crossentropy loss value. shape = `[batch_size, d0, .. dN-1]`.
  """
  y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  label_smoothing = ops.convert_to_tensor_v2_with_dispatch(
      label_smoothing, dtype=K.floatx())

  def _smooth_labels():
    # Binary smoothing pulls both targets towards 0.5 (two-class case).
    return y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing

  y_true = smart_cond.smart_cond(label_smoothing, _smooth_labels,
                                 lambda: y_true)
  return K.mean(
      K.binary_crossentropy(y_true, y_pred, from_logits=from_logits), axis=-1)


@dispatch.dispatch_for_types(binary_crossentropy, ragged_tensor.RaggedTensor)
def _ragged_tensor_binary_crossentropy(y_true,
                                       y_pred,
                                       from_logits=False,
                                       label_smoothing=0):
  """Implements support for handling RaggedTensors.

  Expected shape: (batch, sequence_len) with sequence_len being variable
  per batch.
  Return shape: (batch,); returns the per batch mean of the loss values.

  When used by BinaryCrossentropy() with the default reduction
  (SUM_OVER_BATCH_SIZE), the reduction averages the per batch losses over
  the number of batches.
  """
  fn = functools.partial(
      binary_crossentropy,
      from_logits=from_logits,
      label_smoothing=label_smoothing)
  return _ragged_tensor_apply_loss(fn, y_true, y_pred)


@keras_export('keras.metrics.kl_divergence',
              'keras.metrics.kullback_leibler_divergence', 'keras.metrics.kld',
              'keras.metrics.KLD', 'keras.losses.kl_divergence',
              'keras.losses.kullback_leibler_divergence', 'keras.losses.kld',
              'keras.losses.KLD')
@dispatch.add_dispatch_support
def kl_divergence(y_true, y_pred):
  """Computes Kullback-Leibler divergence loss between `y_true` and `y_pred`.

  `loss = y_true * log(y_true / y_pred)`

  See: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence

  Standalone usage:

  >>> y_true = np.random.randint(0, 2, size=(2, 3)).astype(np.float64)
  >>> y_pred = np.random.random(size=(2, 3))
  >>> loss = tf.keras.losses.kullback_leibler_divergence(y_true, y_pred)
  >>> assert loss.shape == (2,)
  >>> y_true = tf.keras.backend.clip(y_true, 1e-7, 1)
  >>> y_pred = tf.keras.backend.clip(y_pred, 1e-7, 1)
  >>> assert np.array_equal(
  ...     loss.numpy(), np.sum(y_true * np.log(y_true / y_pred), axis=-1))

  Args:
    y_true: Tensor of true targets.
    y_pred: Tensor of predicted targets.

  Returns:
    A `Tensor` with loss.

  Raises:
    TypeError: If `y_true` cannot be cast to the `y_pred.dtype`.
  """
  y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  # Clip both distributions away from 0 so the log and the ratio are finite.
  y_true = K.clip(y_true, K.epsilon(), 1)
  y_pred = K.clip(y_pred, K.epsilon(), 1)
  return math_ops.reduce_sum(y_true * math_ops.log(y_true / y_pred), axis=-1)


@keras_export('keras.metrics.poisson', 'keras.losses.poisson')
@dispatch.add_dispatch_support
def poisson(y_true, y_pred):
  """Computes the Poisson loss between y_true and y_pred.

  The Poisson loss is the mean of the elements of the `Tensor`
  `y_pred - y_true * log(y_pred)`.

  Standalone usage:

  >>> y_true = np.random.randint(0, 2, size=(2, 3))
  >>> y_pred = np.random.random(size=(2, 3))
  >>> loss = tf.keras.losses.poisson(y_true, y_pred)
  >>> assert loss.shape == (2,)
  >>> y_pred = y_pred + 1e-7
  >>> assert np.allclose(
  ...     loss.numpy(), np.mean(y_pred - y_true * np.log(y_pred), axis=-1),
  ...     atol=1e-5)

  Args:
    y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
    y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.

  Returns:
    Poisson loss value. shape = `[batch_size, d0, .. dN-1]`.

  Raises:
    InvalidArgumentError: If `y_true` and `y_pred` have incompatible shapes.
  """
  y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  # epsilon guards the log against y_pred == 0.
  return K.mean(y_pred - y_true * math_ops.log(y_pred + K.epsilon()), axis=-1)


@keras_export(
    'keras.losses.cosine_similarity',
    v1=[
        'keras.metrics.cosine_proximity',
        'keras.metrics.cosine',
        'keras.losses.cosine_proximity',
        'keras.losses.cosine',
        'keras.losses.cosine_similarity',
    ])
@dispatch.add_dispatch_support
def cosine_similarity(y_true, y_pred, axis=-1):
  """Computes the cosine similarity between labels and predictions.

  Note that it is a number between -1 and 1. When it is a negative number
  between -1 and 0, 0 indicates orthogonality and values closer to -1
  indicate greater similarity. The values closer to 1 indicate greater
  dissimilarity. This makes it usable as a loss function in a setting
  where you try to maximize the proximity between predictions and
  targets. If either `y_true` or `y_pred` is a zero vector, cosine
  similarity will be 0 regardless of the proximity between predictions
  and targets.

  `loss = -sum(l2_norm(y_true) * l2_norm(y_pred))`

  Standalone usage:

  >>> y_true = [[0., 1.], [1., 1.], [1., 1.]]
  >>> y_pred = [[1., 0.], [1., 1.], [-1., -1.]]
  >>> loss = tf.keras.losses.cosine_similarity(y_true, y_pred, axis=1)
  >>> loss.numpy()
  array([-0., -0.999, 0.999], dtype=float32)

  Args:
    y_true: Tensor of true targets.
    y_pred: Tensor of predicted targets.
    axis: Axis along which to determine similarity.

  Returns:
    Cosine similarity tensor.
  """
  y_true = nn.l2_normalize(y_true, axis=axis)
  y_pred = nn.l2_normalize(y_pred, axis=axis)
  # Negated so that minimizing the loss maximizes the similarity.
  return -math_ops.reduce_sum(y_true * y_pred, axis=axis)


@keras_export('keras.losses.CosineSimilarity')
class CosineSimilarity(LossFunctionWrapper):
  """Computes the cosine similarity between labels and predictions.

  Note that it is a number between -1 and 1. When it is a negative number
  between -1 and 0, 0 indicates orthogonality and values closer to -1
  indicate greater similarity. The values closer to 1 indicate greater
  dissimilarity. This makes it usable as a loss function in a setting
  where you try to maximize the proximity between predictions and targets.
  If either `y_true` or `y_pred` is a zero vector, cosine similarity will
  be 0 regardless of the proximity between predictions and targets.

  `loss = -sum(l2_norm(y_true) * l2_norm(y_pred))`

  Standalone usage:

  >>> y_true = [[0., 1.], [1., 1.]]
  >>> y_pred = [[1., 0.], [1., 1.]]
  >>> # Using 'auto'/'sum_over_batch_size' reduction type.
  >>> cosine_loss = tf.keras.losses.CosineSimilarity(axis=1)
  >>> # l2_norm(y_true) = [[0., 1.], [1./1.414, 1./1.414]]
  >>> # l2_norm(y_pred) = [[1., 0.], [1./1.414, 1./1.414]]
  >>> # l2_norm(y_true) . l2_norm(y_pred) = [[0., 0.], [0.5, 0.5]]
  >>> # loss = mean(sum(l2_norm(y_true) . l2_norm(y_pred), axis=1))
  >>> #       = -((0. + 0.) +  (0.5 + 0.5)) / 2
  >>> cosine_loss(y_true, y_pred).numpy()
  -0.5

  >>> # Calling with 'sample_weight'.
  >>> cosine_loss(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy()
  -0.0999

  >>> # Using 'sum' reduction type.
  >>> cosine_loss = tf.keras.losses.CosineSimilarity(axis=1,
  ...     reduction=tf.keras.losses.Reduction.SUM)
  >>> cosine_loss(y_true, y_pred).numpy()
  -0.999

  >>> # Using 'none' reduction type.
  >>> cosine_loss = tf.keras.losses.CosineSimilarity(axis=1,
  ...     reduction=tf.keras.losses.Reduction.NONE)
  >>> cosine_loss(y_true, y_pred).numpy()
  array([-0., -0.999], dtype=float32)

  Usage with the `compile()` API:

  ```python
  model.compile(optimizer='sgd',
                loss=tf.keras.losses.CosineSimilarity(axis=1))
  ```

  Args:
    axis: (Optional) Defaults to -1. The dimension along which the cosine
      similarity is computed.
    reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss.
      Default value is `AUTO`. `AUTO` indicates that the reduction option will
      be determined by the usage context. For almost all cases this defaults to
      `SUM_OVER_BATCH_SIZE`. When used with `tf.distribute.Strategy`, outside
      of built-in training loops such as `tf.keras` `compile` and `fit`, using
      `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this
      custom training [tutorial]
      (https://www.tensorflow.org/tutorials/distribute/custom_training) for
      more details.
    name: Optional name for the op.
  """

  def __init__(self,
               axis=-1,
               reduction=losses_utils.ReductionV2.AUTO,
               name='cosine_similarity'):
    super(CosineSimilarity, self).__init__(
        cosine_similarity, reduction=reduction, name=name, axis=axis)


# Aliases.

bce = BCE = binary_crossentropy
mse = MSE = mean_squared_error
mae = MAE = mean_absolute_error
mape = MAPE = mean_absolute_percentage_error
msle = MSLE = mean_squared_logarithmic_error
kld = KLD = kullback_leibler_divergence = kl_divergence
logcosh = log_cosh
huber_loss = huber


def is_categorical_crossentropy(loss):
  # Matches the loss in any of the ways it may be specified: a Loss instance,
  # a wrapped function, a bare function object, or its string name.
  result = ((isinstance(loss, CategoricalCrossentropy) or
             (isinstance(loss, LossFunctionWrapper) and
              loss.fn == categorical_crossentropy) or
             (hasattr(loss, '__name__') and
              loss.__name__ == 'categorical_crossentropy') or
             (loss == 'categorical_crossentropy')))
  return result


@keras_export('keras.losses.serialize')
def serialize(loss):
  """Serializes loss function or `Loss` instance.

  Args:
    loss: A Keras `Loss` instance or a loss function.

  Returns:
    Loss configuration dictionary.
  """
  return serialize_keras_object(loss)


@keras_export('keras.losses.deserialize')
def deserialize(name, custom_objects=None):
  """Deserializes a serialized loss class/function instance.

  Args:
    name: Loss configuration.
    custom_objects: Optional dictionary mapping names (strings) to custom
      objects (classes and functions) to be considered during deserialization.

  Returns:
    A Keras `Loss` instance or a loss function.
  """
  return deserialize_keras_object(
      name,
      module_objects=globals(),
      custom_objects=custom_objects,
      printable_module_name='loss function')


@keras_export('keras.losses.get')
def get(identifier):
  """Retrieves a Keras loss as a `function`/`Loss` class instance.

  The `identifier` may be the string name of a loss function or `Loss` class.

  >>> loss = tf.keras.losses.get("categorical_crossentropy")
  >>> type(loss)
  <class 'function'>
  >>> loss = tf.keras.losses.get("CategoricalCrossentropy")
  >>> type(loss)
  <class '...tensorflow.python.keras.losses.CategoricalCrossentropy'>

  You can also specify `config` of the loss to this function by passing dict
  containing `class_name` and `config` as an identifier. Also note that the
  `class_name` must map to a `Loss` class

  >>> identifier = {"class_name": "CategoricalCrossentropy",
  ...               "config": {"from_logits": True}}
  >>> loss = tf.keras.losses.get(identifier)
  >>> type(loss)
  <class '...tensorflow.python.keras.losses.CategoricalCrossentropy'>

  Args:
    identifier: A loss identifier. One of None or string name of a loss
      function/class or loss configuration dictionary or a loss function or a
      loss class instance

  Returns:
    A Keras loss as a `function`/ `Loss` class instance.

  Raises:
    ValueError: If `identifier` cannot be interpreted.
  """
  if identifier is None:
    return None
  if isinstance(identifier, six.string_types):
    identifier = str(identifier)
    return deserialize(identifier)
  if isinstance(identifier, dict):
    return deserialize(identifier)
  elif callable(identifier):
    return identifier
  else:
    raise ValueError(
        'Could not interpret loss function identifier: {}'.format(identifier))


# Losses whose label tensors must be cast to an integer dtype before use.
LABEL_DTYPES_FOR_LOSSES = {
    losses_impl.sparse_softmax_cross_entropy: 'int32',
    sparse_categorical_crossentropy: 'int32'
}
# --- codeparrot/github-code-clean (dataset separator artifact; not part of either source file) ---
# # Cython/Python language types # from __future__ import absolute_import import copy import re try: reduce except NameError: from functools import reduce from .Code import UtilityCode, LazyUtilityCode, TempitaUtilityCode from . import StringEncoding from . import Naming from .Errors import error class BaseType(object): # # Base class for all Cython types including pseudo-types. # List of attribute names of any subtypes subtypes = [] _empty_declaration = None def can_coerce_to_pyobject(self, env): return False def cast_code(self, expr_code): return "((%s)%s)" % (self.empty_declaration_code(), expr_code) def empty_declaration_code(self): if self._empty_declaration is None: self._empty_declaration = self.declaration_code('') return self._empty_declaration def specialization_name(self): # This is not entirely robust. safe = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_0123456789' all = [] for c in self.empty_declaration_code().replace("unsigned ", "unsigned_").replace("long long", "long_long").replace(" ", "__"): if c in safe: all.append(c) else: all.append('_%x_' % ord(c)) return ''.join(all) def base_declaration_code(self, base_code, entity_code): if entity_code: return "%s %s" % (base_code, entity_code) else: return base_code def __deepcopy__(self, memo): """ Types never need to be copied, if we do copy, Unfortunate Things Will Happen! 
""" return self def get_fused_types(self, result=None, seen=None, subtypes=None): subtypes = subtypes or self.subtypes if not subtypes: return None if result is None: result = [] seen = set() for attr in subtypes: list_or_subtype = getattr(self, attr) if list_or_subtype: if isinstance(list_or_subtype, BaseType): list_or_subtype.get_fused_types(result, seen) else: for subtype in list_or_subtype: subtype.get_fused_types(result, seen) return result def specialize_fused(self, env): if env.fused_to_specific: return self.specialize(env.fused_to_specific) return self @property def is_fused(self): """ Whether this type or any of its subtypes is a fused type """ # Add this indirection for the is_fused property to allow overriding # get_fused_types in subclasses. return self.get_fused_types() def deduce_template_params(self, actual): """ Deduce any template params in this (argument) type given the actual argument type. http://en.cppreference.com/w/cpp/language/function_template#Template_argument_deduction """ if self == actual: return {} else: return None def __lt__(self, other): """ For sorting. The sorting order should correspond to the preference of conversion from Python types. Override to provide something sensible. This is only implemented so that python 3 doesn't trip """ return id(type(self)) < id(type(other)) def py_type_name(self): """ Return the name of the Python type that can coerce to this type. """ def typeof_name(self): """ Return the string with which fused python functions can be indexed. """ if self.is_builtin_type or self.py_type_name() == 'object': index_name = self.py_type_name() else: index_name = str(self) return index_name def check_for_null_code(self, cname): """ Return the code for a NULL-check in case an UnboundLocalError should be raised if an entry of this type is referenced before assignment. Returns None if no check should be performed. 
""" return None def invalid_value(self): """ Returns the most invalid value an object of this type can assume as a C expression string. Returns None if no such value exists. """ class PyrexType(BaseType): # # Base class for all Cython types # # is_pyobject boolean Is a Python object type # is_extension_type boolean Is a Python extension type # is_final_type boolean Is a final extension type # is_numeric boolean Is a C numeric type # is_int boolean Is a C integer type # is_float boolean Is a C floating point type # is_complex boolean Is a C complex type # is_void boolean Is the C void type # is_array boolean Is a C array type # is_ptr boolean Is a C pointer type # is_null_ptr boolean Is the type of NULL # is_reference boolean Is a C reference type # is_const boolean Is a C const type. # is_cfunction boolean Is a C function type # is_struct_or_union boolean Is a C struct or union type # is_struct boolean Is a C struct type # is_enum boolean Is a C enum type # is_typedef boolean Is a typedef type # is_string boolean Is a C char * type # is_pyunicode_ptr boolean Is a C PyUNICODE * type # is_cpp_string boolean Is a C++ std::string type # is_unicode_char boolean Is either Py_UCS4 or Py_UNICODE # is_returncode boolean Is used only to signal exceptions # is_error boolean Is the dummy error type # is_buffer boolean Is buffer access type # has_attributes boolean Has C dot-selectable attributes # default_value string Initial value # entry Entry The Entry for this type # # declaration_code(entity_code, # for_display = 0, dll_linkage = None, pyrex = 0) # Returns a code fragment for the declaration of an entity # of this type, given a code fragment for the entity. # * If for_display, this is for reading by a human in an error # message; otherwise it must be valid C code. # * If dll_linkage is not None, it must be 'DL_EXPORT' or # 'DL_IMPORT', and will be added to the base type part of # the declaration. 
# * If pyrex = 1, this is for use in a 'cdef extern' # statement of a Cython include file. # # assignable_from(src_type) # Tests whether a variable of this type can be # assigned a value of type src_type. # # same_as(other_type) # Tests whether this type represents the same type # as other_type. # # as_argument_type(): # Coerces array and C function types into pointer type for use as # a formal argument type. # is_pyobject = 0 is_unspecified = 0 is_extension_type = 0 is_final_type = 0 is_builtin_type = 0 is_numeric = 0 is_int = 0 is_float = 0 is_complex = 0 is_void = 0 is_array = 0 is_ptr = 0 is_null_ptr = 0 is_reference = 0 is_const = 0 is_cfunction = 0 is_struct_or_union = 0 is_cpp_class = 0 is_cpp_string = 0 is_struct = 0 is_enum = 0 is_typedef = 0 is_string = 0 is_pyunicode_ptr = 0 is_unicode_char = 0 is_returncode = 0 is_error = 0 is_buffer = 0 is_ctuple = 0 is_memoryviewslice = 0 has_attributes = 0 default_value = "" def resolve(self): # If a typedef, returns the base type. return self def specialize(self, values): # TODO(danilo): Override wherever it makes sense. return self def literal_code(self, value): # Returns a C code fragment representing a literal # value of this type. 
return str(value) def __str__(self): return self.declaration_code("", for_display = 1).strip() def same_as(self, other_type, **kwds): return self.same_as_resolved_type(other_type.resolve(), **kwds) def same_as_resolved_type(self, other_type): return self == other_type or other_type is error_type def subtype_of(self, other_type): return self.subtype_of_resolved_type(other_type.resolve()) def subtype_of_resolved_type(self, other_type): return self.same_as(other_type) def assignable_from(self, src_type): return self.assignable_from_resolved_type(src_type.resolve()) def assignable_from_resolved_type(self, src_type): return self.same_as(src_type) def as_argument_type(self): return self def is_complete(self): # A type is incomplete if it is an unsized array, # a struct whose attributes are not defined, etc. return 1 def is_simple_buffer_dtype(self): return (self.is_int or self.is_float or self.is_complex or self.is_pyobject or self.is_extension_type or self.is_ptr) def struct_nesting_depth(self): # Returns the number levels of nested structs. This is # used for constructing a stack for walking the run-time # type information of the struct. return 1 def global_init_code(self, entry, code): # abstract pass def needs_nonecheck(self): return 0 def public_decl(base_code, dll_linkage): if dll_linkage: return "%s(%s)" % (dll_linkage, base_code) else: return base_code def create_typedef_type(name, base_type, cname, is_external=0): is_fused = base_type.is_fused if base_type.is_complex or is_fused: if is_external: if is_fused: msg = "Fused" else: msg = "Complex" raise ValueError("%s external typedefs not supported" % msg) return base_type else: return CTypedefType(name, base_type, cname, is_external) class CTypedefType(BaseType): # # Pseudo-type defined with a ctypedef statement in a # 'cdef extern from' block. # Delegates most attribute lookups to the base type. # (Anything not defined here or in the BaseType is delegated.) 
# # qualified_name string # typedef_name string # typedef_cname string # typedef_base_type PyrexType # typedef_is_external bool is_typedef = 1 typedef_is_external = 0 to_py_utility_code = None from_py_utility_code = None subtypes = ['typedef_base_type'] def __init__(self, name, base_type, cname, is_external=0): assert not base_type.is_complex self.typedef_name = name self.typedef_cname = cname self.typedef_base_type = base_type self.typedef_is_external = is_external def invalid_value(self): return self.typedef_base_type.invalid_value() def resolve(self): return self.typedef_base_type.resolve() def declaration_code(self, entity_code, for_display = 0, dll_linkage = None, pyrex = 0): if pyrex or for_display: base_code = self.typedef_name else: base_code = public_decl(self.typedef_cname, dll_linkage) return self.base_declaration_code(base_code, entity_code) def as_argument_type(self): return self def cast_code(self, expr_code): # If self is really an array (rather than pointer), we can't cast. # For example, the gmp mpz_t. 
if self.typedef_base_type.is_array: base_type = self.typedef_base_type.base_type return CPtrType(base_type).cast_code(expr_code) else: return BaseType.cast_code(self, expr_code) def __repr__(self): return "<CTypedefType %s>" % self.typedef_cname def __str__(self): return self.typedef_name def _create_utility_code(self, template_utility_code, template_function_name): type_name = type_identifier(self.typedef_cname) utility_code = template_utility_code.specialize( type = self.typedef_cname, TypeName = type_name) function_name = template_function_name % type_name return utility_code, function_name def create_to_py_utility_code(self, env): if self.typedef_is_external: if not self.to_py_utility_code: base_type = self.typedef_base_type if type(base_type) is CIntType: self.to_py_function = "__Pyx_PyInt_From_" + self.specialization_name() env.use_utility_code(TempitaUtilityCode.load_cached( "CIntToPy", "TypeConversion.c", context={"TYPE": self.empty_declaration_code(), "TO_PY_FUNCTION": self.to_py_function})) return True elif base_type.is_float: pass # XXX implement! elif base_type.is_complex: pass # XXX implement! pass if self.to_py_utility_code: env.use_utility_code(self.to_py_utility_code) return True # delegation return self.typedef_base_type.create_to_py_utility_code(env) def create_from_py_utility_code(self, env): if self.typedef_is_external: if not self.from_py_utility_code: base_type = self.typedef_base_type if type(base_type) is CIntType: self.from_py_function = "__Pyx_PyInt_As_" + self.specialization_name() env.use_utility_code(TempitaUtilityCode.load_cached( "CIntFromPy", "TypeConversion.c", context={"TYPE": self.empty_declaration_code(), "FROM_PY_FUNCTION": self.from_py_function})) return True elif base_type.is_float: pass # XXX implement! elif base_type.is_complex: pass # XXX implement! 
if self.from_py_utility_code: env.use_utility_code(self.from_py_utility_code) return True # delegation return self.typedef_base_type.create_from_py_utility_code(env) def to_py_call_code(self, source_code, result_code, result_type, to_py_function=None): if to_py_function is None: to_py_function = self.to_py_function return self.typedef_base_type.to_py_call_code( source_code, result_code, result_type, to_py_function) def from_py_call_code(self, source_code, result_code, error_pos, code, from_py_function=None, error_condition=None): if from_py_function is None: from_py_function = self.from_py_function if error_condition is None: error_condition = self.error_condition(result_code) return self.typedef_base_type.from_py_call_code( source_code, result_code, error_pos, code, from_py_function, error_condition) def overflow_check_binop(self, binop, env, const_rhs=False): env.use_utility_code(UtilityCode.load("Common", "Overflow.c")) type = self.empty_declaration_code() name = self.specialization_name() if binop == "lshift": env.use_utility_code(TempitaUtilityCode.load_cached( "LeftShift", "Overflow.c", context={'TYPE': type, 'NAME': name, 'SIGNED': self.signed})) else: if const_rhs: binop += "_const" _load_overflow_base(env) env.use_utility_code(TempitaUtilityCode.load_cached( "SizeCheck", "Overflow.c", context={'TYPE': type, 'NAME': name})) env.use_utility_code(TempitaUtilityCode.load_cached( "Binop", "Overflow.c", context={'TYPE': type, 'NAME': name, 'BINOP': binop})) return "__Pyx_%s_%s_checking_overflow" % (binop, name) def error_condition(self, result_code): if self.typedef_is_external: if self.exception_value: condition = "(%s == (%s)%s)" % ( result_code, self.typedef_cname, self.exception_value) if self.exception_check: condition += " && PyErr_Occurred()" return condition # delegation return self.typedef_base_type.error_condition(result_code) def __getattr__(self, name): return getattr(self.typedef_base_type, name) def py_type_name(self): return 
self.typedef_base_type.py_type_name() def can_coerce_to_pyobject(self, env): return self.typedef_base_type.can_coerce_to_pyobject(env) class MemoryViewSliceType(PyrexType): is_memoryviewslice = 1 has_attributes = 1 scope = None # These are special cased in Defnode from_py_function = None to_py_function = None exception_value = None exception_check = True subtypes = ['dtype'] def __init__(self, base_dtype, axes): """ MemoryViewSliceType(base, axes) Base is the C base type; axes is a list of (access, packing) strings, where access is one of 'full', 'direct' or 'ptr' and packing is one of 'contig', 'strided' or 'follow'. There is one (access, packing) tuple for each dimension. the access specifiers determine whether the array data contains pointers that need to be dereferenced along that axis when retrieving/setting: 'direct' -- No pointers stored in this dimension. 'ptr' -- Pointer stored in this dimension. 'full' -- Check along this dimension, don't assume either. the packing specifiers specify how the array elements are layed-out in memory. 'contig' -- The data are contiguous in memory along this dimension. At most one dimension may be specified as 'contig'. 'strided' -- The data aren't contiguous along this dimenison. 'follow' -- Used for C/Fortran contiguous arrays, a 'follow' dimension has its stride automatically computed from extents of the other dimensions to ensure C or Fortran memory layout. C-contiguous memory has 'direct' as the access spec, 'contig' as the *last* axis' packing spec and 'follow' for all other packing specs. Fortran-contiguous memory has 'direct' as the access spec, 'contig' as the *first* axis' packing spec and 'follow' for all other packing specs. """ from . 
import MemoryView self.dtype = base_dtype self.axes = axes self.ndim = len(axes) self.flags = MemoryView.get_buf_flags(self.axes) self.is_c_contig, self.is_f_contig = MemoryView.is_cf_contig(self.axes) assert not (self.is_c_contig and self.is_f_contig) self.mode = MemoryView.get_mode(axes) self.writable_needed = False if not self.dtype.is_fused: self.dtype_name = MemoryView.mangle_dtype_name(self.dtype) def __hash__(self): return hash(self.__class__) ^ hash(self.dtype) ^ hash(tuple(self.axes)) def __eq__(self, other): if isinstance(other, BaseType): return self.same_as_resolved_type(other) else: return False def same_as_resolved_type(self, other_type): return ((other_type.is_memoryviewslice and self.dtype.same_as(other_type.dtype) and self.axes == other_type.axes) or other_type is error_type) def needs_nonecheck(self): return True def is_complete(self): # incomplete since the underlying struct doesn't have a cython.memoryview object. return 0 def declaration_code(self, entity_code, for_display = 0, dll_linkage = None, pyrex = 0): # XXX: we put these guards in for now... assert not pyrex assert not dll_linkage from . import MemoryView return self.base_declaration_code( MemoryView.memviewslice_cname, entity_code) def attributes_known(self): if self.scope is None: from . import Symtab self.scope = scope = Symtab.CClassScope( 'mvs_class_'+self.specialization_suffix(), None, visibility='extern') scope.parent_type = self scope.directives = {} scope.declare_var('_data', c_char_ptr_type, None, cname='data', is_cdef=1) return True def declare_attribute(self, attribute, env, pos): from . 
import MemoryView, Options scope = self.scope if attribute == 'shape': scope.declare_var('shape', c_array_type(c_py_ssize_t_type, Options.buffer_max_dims), pos, cname='shape', is_cdef=1) elif attribute == 'strides': scope.declare_var('strides', c_array_type(c_py_ssize_t_type, Options.buffer_max_dims), pos, cname='strides', is_cdef=1) elif attribute == 'suboffsets': scope.declare_var('suboffsets', c_array_type(c_py_ssize_t_type, Options.buffer_max_dims), pos, cname='suboffsets', is_cdef=1) elif attribute in ("copy", "copy_fortran"): ndim = len(self.axes) to_axes_c = [('direct', 'contig')] to_axes_f = [('direct', 'contig')] if ndim - 1: to_axes_c = [('direct', 'follow')]*(ndim-1) + to_axes_c to_axes_f = to_axes_f + [('direct', 'follow')]*(ndim-1) to_memview_c = MemoryViewSliceType(self.dtype, to_axes_c) to_memview_f = MemoryViewSliceType(self.dtype, to_axes_f) for to_memview, cython_name in [(to_memview_c, "copy"), (to_memview_f, "copy_fortran")]: entry = scope.declare_cfunction(cython_name, CFuncType(self, [CFuncTypeArg("memviewslice", self, None)]), pos=pos, defining=1, cname=MemoryView.copy_c_or_fortran_cname(to_memview)) #entry.utility_code_definition = \ env.use_utility_code(MemoryView.get_copy_new_utility(pos, self, to_memview)) MemoryView.use_cython_array_utility_code(env) elif attribute in ("is_c_contig", "is_f_contig"): # is_c_contig and is_f_contig functions for (c_or_f, cython_name) in (('c', 'is_c_contig'), ('f', 'is_f_contig')): is_contig_name = \ MemoryView.get_is_contig_func_name(c_or_f, self.ndim) cfunctype = CFuncType( return_type=c_bint_type, args=[CFuncTypeArg("memviewslice", self, None)], exception_value="-1", ) entry = scope.declare_cfunction(cython_name, cfunctype, pos=pos, defining=1, cname=is_contig_name) entry.utility_code_definition = MemoryView.get_is_contig_utility( attribute == 'is_c_contig', self.ndim) return True def specialization_name(self): return super(MemoryViewSliceType,self).specialization_name() \ + '_' + 
self.specialization_suffix() def specialization_suffix(self): return "%s_%s" % (self.axes_to_name(), self.dtype_name) def can_coerce_to_pyobject(self, env): return True def check_for_null_code(self, cname): return cname + '.memview' def create_from_py_utility_code(self, env): from . import MemoryView, Buffer # We don't have 'code', so use a LazyUtilityCode with a callback. def lazy_utility_callback(code): context['dtype_typeinfo'] = Buffer.get_type_information_cname(code, self.dtype) return TempitaUtilityCode.load( "ObjectToMemviewSlice", "MemoryView_C.c", context=context) env.use_utility_code(Buffer.acquire_utility_code) env.use_utility_code(MemoryView.memviewslice_init_code) env.use_utility_code(LazyUtilityCode(lazy_utility_callback)) if self.is_c_contig: c_or_f_flag = "__Pyx_IS_C_CONTIG" elif self.is_f_contig: c_or_f_flag = "__Pyx_IS_F_CONTIG" else: c_or_f_flag = "0" suffix = self.specialization_suffix() funcname = "__Pyx_PyObject_to_MemoryviewSlice_" + suffix context = dict( MemoryView.context, buf_flag = self.flags, ndim = self.ndim, axes_specs = ', '.join(self.axes_to_code()), dtype_typedecl = self.dtype.empty_declaration_code(), struct_nesting_depth = self.dtype.struct_nesting_depth(), c_or_f_flag = c_or_f_flag, funcname = funcname, ) self.from_py_function = funcname return True def create_to_py_utility_code(self, env): self._dtype_to_py_func, self._dtype_from_py_func = self.dtype_object_conversion_funcs(env) return True def to_py_call_code(self, source_code, result_code, result_type, to_py_function=None): assert self._dtype_to_py_func assert self._dtype_from_py_func to_py_func = "(PyObject *(*)(char *)) " + self._dtype_to_py_func from_py_func = "(int (*)(char *, PyObject *)) " + self._dtype_from_py_func tup = (result_code, source_code, self.ndim, to_py_func, from_py_func, self.dtype.is_pyobject) return "%s = __pyx_memoryview_fromslice(%s, %s, %s, %s, %d);" % tup def dtype_object_conversion_funcs(self, env): get_function = "__pyx_memview_get_%s" % 
            self.dtype_name  # NOTE(review): continuation of the statement split onto the previous physical line
        set_function = "__pyx_memview_set_%s" % self.dtype_name

        context = dict(
            get_function = get_function,
            set_function = set_function,
        )

        if self.dtype.is_pyobject:
            # Object dtypes need no conversion helpers beyond INCREF/DECREF.
            utility_name = "MemviewObjectToObject"
        else:
            to_py = self.dtype.create_to_py_utility_code(env)
            from_py = self.dtype.create_from_py_utility_code(env)
            if not (to_py or from_py):
                # dtype cannot be converted in either direction
                return "NULL", "NULL"

            if not self.dtype.to_py_function:
                get_function = "NULL"

            if not self.dtype.from_py_function:
                set_function = "NULL"

            utility_name = "MemviewDtypeToObject"
            error_condition = (self.dtype.error_condition('value') or
                               'PyErr_Occurred()')
            context.update(
                to_py_function = self.dtype.to_py_function,
                from_py_function = self.dtype.from_py_function,
                dtype = self.dtype.empty_declaration_code(),
                error_condition = error_condition,
            )

        utility = TempitaUtilityCode.load_cached(
            utility_name, "MemoryView_C.c", context=context)
        env.use_utility_code(utility)
        return get_function, set_function

    def axes_to_code(self):
        """Return a list of code constants for each axis"""
        from . import MemoryView
        d = MemoryView._spec_to_const
        return ["(%s | %s)" % (d[a], d[p]) for a, p in self.axes]

    def axes_to_name(self):
        """Return an abbreviated name for our axes"""
        from . import MemoryView
        d = MemoryView._spec_to_abbrev
        return "".join(["%s%s" % (d[a], d[p]) for a, p in self.axes])

    def error_condition(self, result_code):
        # A NULL .memview pointer marks a failed conversion.
        return "!%s.memview" % result_code

    def __str__(self):
        from . import MemoryView

        axes_code_list = []
        for idx, (access, packing) in enumerate(self.axes):
            flag = MemoryView.get_memoryview_flag(access, packing)
            if flag == "strided":
                axes_code_list.append(":")
            else:
                if flag == 'contiguous':
                    # 'contiguous' prints as '1' only when no neighbouring
                    # axis follows it (or for 1D slices).
                    have_follow = [p for a, p in self.axes[idx - 1:idx + 2]
                                   if p == 'follow']
                    if have_follow or self.ndim == 1:
                        flag = '1'

                axes_code_list.append("::" + flag)

        if self.dtype.is_pyobject:
            dtype_name = self.dtype.name
        else:
            dtype_name = self.dtype

        return "%s[%s]" % (dtype_name, ", ".join(axes_code_list))

    def specialize(self, values):
        """This does not validate the base type!!"""
        dtype = self.dtype.specialize(values)
        if dtype is not self.dtype:
            return MemoryViewSliceType(dtype, self.axes)

        return self

    def cast_code(self, expr_code):
        # Memoryview slices are never cast at the C level.
        return expr_code


class BufferType(BaseType):
    #
    #  Delegates most attribute lookups to the base type.
    #  (Anything not defined here or in the BaseType is delegated.)
    #
    #  dtype            PyrexType
    #  ndim             int
    #  mode             str
    #  negative_indices bool
    #  cast             bool
    #  is_buffer        bool
    #  writable         bool

    is_buffer = 1
    writable = True

    subtypes = ['dtype']

    def __init__(self, base, dtype, ndim, mode, negative_indices, cast):
        self.base = base
        self.dtype = dtype
        self.ndim = ndim
        self.buffer_ptr_type = CPtrType(dtype)
        self.mode = mode
        self.negative_indices = negative_indices
        self.cast = cast

    def as_argument_type(self):
        return self

    def specialize(self, values):
        # Specialize the dtype only; all other buffer options are kept.
        dtype = self.dtype.specialize(values)
        if dtype is not self.dtype:
            return BufferType(self.base, dtype, self.ndim, self.mode,
                              self.negative_indices, self.cast)
        return self

    def __getattr__(self, name):
        # Anything not defined here is delegated to the base type.
        return getattr(self.base, name)

    def __repr__(self):
        return "<BufferType %r>" % self.base

    def __str__(self):
        # avoid ', ', as fused functions split the signature string on ', '
        cast_str = ''
        if self.cast:
            cast_str = ',cast=True'

        return "%s[%s,ndim=%d%s]" % (self.base, self.dtype, self.ndim,
                                     cast_str)

    def assignable_from(self, other_type):
        if other_type.is_buffer:
            return (self.same_as(other_type,
                                 compare_base=False) and
                    self.base.assignable_from(other_type.base))

        return self.base.assignable_from(other_type)

    def same_as(self, other_type, compare_base=True):
        if not other_type.is_buffer:
            return other_type.same_as(self.base)

        return (self.dtype.same_as(other_type.dtype) and
                self.ndim == other_type.ndim and
                self.mode == other_type.mode and
                self.cast == other_type.cast and
                (not compare_base or self.base.same_as(other_type.base)))


class PyObjectType(PyrexType):
    #
    #  Base class for all Python object types (reference-counted).
    #
    #  buffer_defaults  dict or None     Default options for buffer

    name = "object"
    is_pyobject = 1
    default_value = "0"
    buffer_defaults = None
    is_extern = False
    is_subclassed = False
    is_gc_simple = False

    def __str__(self):
        return "Python object"

    def __repr__(self):
        return "<PyObjectType>"

    def can_coerce_to_pyobject(self, env):
        return True

    def default_coerced_ctype(self):
        """The default C type that this Python type coerces to, or None."""
        return None

    def assignable_from(self, src_type):
        # except for pointers, conversion will be attempted
        return not src_type.is_ptr or src_type.is_string or src_type.is_pyunicode_ptr

    def declaration_code(self, entity_code,
            for_display = 0, dll_linkage = None, pyrex = 0):
        if pyrex or for_display:
            base_code = "object"
        else:
            base_code = public_decl("PyObject", dll_linkage)
            entity_code = "*%s" % entity_code
        return self.base_declaration_code(base_code, entity_code)

    def as_pyobject(self, cname):
        if (not self.is_complete()) or self.is_extension_type:
            return "(PyObject *)" + cname
        else:
            return cname

    def py_type_name(self):
        return "object"

    def __lt__(self, other):
        """
        Make sure we sort highest, as instance checking on py_type_name
        ('object') is always true
        """
        return False

    def global_init_code(self, entry, code):
        code.put_init_var_to_py_none(entry, nanny=False)

    def check_for_null_code(self, cname):
        # The object pointer itself is the null-check expression.
        return cname


# Builtin types whose instances can never participate in reference cycles,
# i.e. that the cycle GC can safely ignore.
builtin_types_that_cannot_create_refcycles = set([
    'bool', 'int', 'long', 'float', 'complex',
    'bytearray', 'bytes', 'unicode',
    'str', 'basestring'])


class BuiltinObjectType(PyObjectType):
    #  objstruct_cname  string           Name of PyObject struct

    is_builtin_type = 1
    has_attributes = 1
    base_type = None
    module_name = '__builtin__'

    # fields that let it look like an extension type
    vtabslot_cname = None
    vtabstruct_cname = None
    vtabptr_cname = None
    typedef_flag = True
    is_external = True
    decl_type = 'PyObject'

    def __init__(self, name, cname, objstruct_cname=None):
        self.name = name
        self.cname = cname
        self.typeptr_cname = "(&%s)" % cname
        self.objstruct_cname = objstruct_cname
        self.is_gc_simple = name in builtin_types_that_cannot_create_refcycles
        if name == 'type':
            # Special case the type type, as many C API calls (and other
            # libraries) actually expect a PyTypeObject* for type arguments.
            self.decl_type = objstruct_cname

    def set_scope(self, scope):
        self.scope = scope
        if scope:
            scope.parent_type = self

    def __str__(self):
        return "%s object" % self.name

    def __repr__(self):
        return "<%s>" % self.cname

    def default_coerced_ctype(self):
        # Maps a few builtins to their natural C coercion targets.
        if self.name in ('bytes', 'bytearray'):
            return c_char_ptr_type
        elif self.name == 'bool':
            return c_bint_type
        elif self.name == 'float':
            return c_double_type
        return None

    def assignable_from(self, src_type):
        if isinstance(src_type, BuiltinObjectType):
            if self.name == 'basestring':
                return src_type.name in ('str', 'unicode', 'basestring')
            else:
                return src_type.name == self.name
        elif src_type.is_extension_type:
            # FIXME: This is an ugly special case that we currently
            # keep supporting.  It allows users to specify builtin
            # types as external extension types, while keeping them
            # compatible with the real builtin types.  We already
            # generate a warning for it.  Big TODO: remove!
            return (src_type.module_name == '__builtin__' and
                    src_type.name == self.name)
        else:
            return True

    def typeobj_is_available(self):
        return True

    def attributes_known(self):
        return True

    def subtype_of(self, type):
        return type.is_pyobject and type.assignable_from(self)

    def type_check_function(self, exact=True):
        """Return the C type-check function name for this builtin type."""
        type_name = self.name
        if type_name == 'str':
            type_check = 'PyString_Check'
        elif type_name == 'basestring':
            type_check = '__Pyx_PyBaseString_Check'
        elif type_name == 'bytearray':
            type_check = 'PyByteArray_Check'
        elif type_name == 'frozenset':
            type_check = 'PyFrozenSet_Check'
        else:
            type_check = 'Py%s_Check' % type_name.capitalize()
        if exact and type_name not in ('bool', 'slice'):
            # bool and slice have no PyXxx_CheckExact variant.
            type_check += 'Exact'
        return type_check

    def isinstance_code(self, arg):
        return '%s(%s)' % (self.type_check_function(exact=False), arg)

    def type_test_code(self, arg, notnone=False, exact=True):
        """Return C code that checks the type of arg, raising TypeError on failure."""
        type_check = self.type_check_function(exact=exact)
        check = 'likely(%s(%s))' % (type_check, arg)
        if not notnone:
            # None is accepted unless the caller explicitly forbids it.
            check += '||((%s) == Py_None)' % arg
        if self.name == 'basestring':
            name = '(PY_MAJOR_VERSION < 3 ? "basestring" : "str")'
            space_for_name = 16
        else:
            name = '"%s"' % self.name
            # avoid wasting too much space but limit number of different format strings
            space_for_name = (len(self.name) // 16 + 1) * 16
        error = '(PyErr_Format(PyExc_TypeError, "Expected %%.%ds, got %%.200s", %s, Py_TYPE(%s)->tp_name), 0)' % (
            space_for_name, name, arg)
        return check + '||' + error

    def declaration_code(self, entity_code,
            for_display = 0, dll_linkage = None, pyrex = 0):
        if pyrex or for_display:
            base_code = self.name
        else:
            base_code = public_decl(self.decl_type, dll_linkage)
            entity_code = "*%s" % entity_code
        return self.base_declaration_code(base_code, entity_code)

    def as_pyobject(self, cname):
        if self.decl_type == 'PyObject':
            return cname
        else:
            return "(PyObject *)" + cname

    def cast_code(self, expr_code, to_object_struct = False):
        return "((%s*)%s)" % (
            to_object_struct and self.objstruct_cname or self.decl_type,  # self.objstruct_cname may be None
            expr_code)

    def py_type_name(self):
        return self.name


class PyExtensionType(PyObjectType):
    #
    #  A Python extension type.
    #
    #  name             string
    #  scope            CClassScope      Attribute namespace
    #  visibility       string
    #  typedef_flag     boolean
    #  base_type        PyExtensionType or None
    #  module_name      string or None   Qualified name of defining module
    #  objstruct_cname  string           Name of PyObject struct
    #  objtypedef_cname string           Name of PyObject struct typedef
    #  typeobj_cname    string or None   C code fragment referring to type object
    #  typeptr_cname    string or None   Name of pointer to external type object
    #  vtabslot_cname   string           Name of C method table member
    #  vtabstruct_cname string           Name of C method table struct
    #  vtabptr_cname    string           Name of pointer to C method table
    #  vtable_cname     string           Name of C method table definition
    #  defered_declarations [thunk]      Used to declare class hierarchies in order

    is_extension_type = 1
    has_attributes = 1

    objtypedef_cname = None

    def __init__(self, name, typedef_flag, base_type, is_external=0):
        self.name = name
        self.scope = None
        self.typedef_flag = typedef_flag
        if base_type is not None:
            base_type.is_subclassed = True
        self.base_type = base_type
        self.module_name = None
        self.objstruct_cname = None
        self.typeobj_cname = None
        self.typeptr_cname = None
        self.vtabslot_cname = None
        self.vtabstruct_cname = None
        self.vtabptr_cname = None
        self.vtable_cname = None
        self.is_external = is_external
        self.defered_declarations = []

    def set_scope(self, scope):
        self.scope = scope
        if scope:
            scope.parent_type = self

    def needs_nonecheck(self):
        return True

    def subtype_of_resolved_type(self, other_type):
        if other_type.is_extension_type or other_type.is_builtin_type:
            return self is other_type or (
                self.base_type and self.base_type.subtype_of(other_type))
        else:
            return other_type is py_object_type

    def typeobj_is_available(self):
        # Do we have a pointer to the type object?
        return self.typeptr_cname

    def typeobj_is_imported(self):
        # If we don't know the C name of the type object but we do
        # know which module it's defined in, it will be imported.
        return self.typeobj_cname is None and self.module_name is not None

    def assignable_from(self, src_type):
        # An extension type accepts itself, any subclass, and (legacy case)
        # a builtin type re-declared as an external extension type.
        if self == src_type:
            return True
        if isinstance(src_type, PyExtensionType):
            if src_type.base_type is not None:
                # walk up the source's base-class chain
                return self.assignable_from(src_type.base_type)
        if isinstance(src_type, BuiltinObjectType):
            # FIXME: This is an ugly special case that we currently
            # keep supporting.  It allows users to specify builtin
            # types as external extension types, while keeping them
            # compatible with the real builtin types.  We already
            # generate a warning for it.  Big TODO: remove!
            return (self.module_name == '__builtin__' and
                    self.name == src_type.name)
        return False

    def declaration_code(self, entity_code,
            for_display = 0, dll_linkage = None, pyrex = 0, deref = 0):
        if pyrex or for_display:
            base_code = self.name
        else:
            if self.typedef_flag:
                objstruct = self.objstruct_cname
            else:
                objstruct = "struct %s" % self.objstruct_cname
            base_code = public_decl(objstruct, dll_linkage)
            if deref:
                # caller wants the struct itself, not a pointer to it
                assert not entity_code
            else:
                entity_code = "*%s" % entity_code
        return self.base_declaration_code(base_code, entity_code)

    def type_test_code(self, py_arg, notnone=False):
        """Return C code testing py_arg against this extension type (None allowed unless notnone)."""
        none_check = "((%s) == Py_None)" % py_arg
        type_check = "likely(__Pyx_TypeTest(%s, %s))" % (
            py_arg, self.typeptr_cname)
        if notnone:
            return type_check
        else:
            return "likely(%s || %s)" % (none_check, type_check)

    def attributes_known(self):
        return self.scope is not None

    def __str__(self):
        return self.name

    def __repr__(self):
        return "<PyExtensionType %s%s>" % (self.scope.class_name,
            ("", " typedef")[self.typedef_flag])

    def py_type_name(self):
        if not self.module_name:
            return self.name

        return "__import__(%r, None, None, ['']).%s" % (self.module_name,
                                                        self.name)


class CType(PyrexType):
    #
    #  Base class for all C types (non-reference-counted).
    #
    #  to_py_function     string     C function for converting to Python object
    #  from_py_function   string     C function for constructing from Python object
    #

    to_py_function = None
    from_py_function = None
    exception_value = None
    exception_check = 1

    def create_to_py_utility_code(self, env):
        # Subclasses generate utility code lazily; the base just reports
        # whether a conversion function is already known.
        return self.to_py_function is not None

    def create_from_py_utility_code(self, env):
        return self.from_py_function is not None

    def can_coerce_to_pyobject(self, env):
        return self.create_to_py_utility_code(env)

    def error_condition(self, result_code):
        """Return a C expression testing whether a from-Python conversion failed, or 0 if none applies."""
        conds = []
        if self.is_string or self.is_pyunicode_ptr:
            conds.append("(!%s)" % result_code)
        elif self.exception_value is not None:
            conds.append("(%s == (%s)%s)" % (
                result_code, self.sign_and_name(), self.exception_value))
        if self.exception_check:
            conds.append("PyErr_Occurred()")
        if len(conds) > 0:
            return " && ".join(conds)
        else:
            return 0

    def to_py_call_code(self, source_code, result_code, result_type, to_py_function=None):
        func = self.to_py_function if to_py_function is None else to_py_function
        assert func
        if self.is_string or self.is_cpp_string:
            if result_type.is_builtin_type:
                result_type_name = result_type.name
                if result_type_name in ('bytes', 'str', 'unicode'):
                    # pick the conversion specialised for the target builtin type
                    func = func.replace("Object", result_type_name.title(), 1)
                elif result_type_name == 'bytearray':
                    func = func.replace("Object", "ByteArray", 1)
        return '%s = %s(%s)' % (
            result_code,
            func,
            source_code or 'NULL')

    def from_py_call_code(self, source_code, result_code, error_pos, code,
                          from_py_function=None, error_condition=None):
        return '%s = %s(%s); %s' % (
            result_code,
            from_py_function or self.from_py_function,
            source_code,
            code.error_goto_if(
                error_condition or self.error_condition(result_code),
                error_pos))


class CConstType(BaseType):
    # Wraps another C type as 'const'; most behaviour is delegated to
    # the wrapped type via __getattr__.

    is_const = 1

    def __init__(self, const_base_type):
        self.const_base_type = const_base_type
        if const_base_type.has_attributes and const_base_type.scope is not None:
            from . import Symtab
            self.scope = Symtab.CConstScope(const_base_type.scope)

    def __repr__(self):
        return "<CConstType %s>" % repr(self.const_base_type)

    def __str__(self):
        return self.declaration_code("", for_display=1)

    def declaration_code(self, entity_code,
            for_display = 0, dll_linkage = None, pyrex = 0):
        if for_display or pyrex:
            return "const " + self.const_base_type.declaration_code(
                entity_code, for_display, dll_linkage, pyrex)
        else:
            return self.const_base_type.declaration_code(
                "const %s" % entity_code, for_display, dll_linkage, pyrex)

    def specialize(self, values):
        base_type = self.const_base_type.specialize(values)
        if base_type == self.const_base_type:
            return self
        else:
            return CConstType(base_type)

    def deduce_template_params(self, actual):
        return self.const_base_type.deduce_template_params(actual)

    def can_coerce_to_pyobject(self, env):
        return self.const_base_type.can_coerce_to_pyobject(env)

    def create_to_py_utility_code(self, env):
        if self.const_base_type.create_to_py_utility_code(env):
            self.to_py_function = self.const_base_type.to_py_function
            return True

    def __getattr__(self, name):
        # Delegate everything not overridden to the wrapped type.
        return getattr(self.const_base_type, name)


class FusedType(CType):
    """
    Represents a Fused Type. All it needs to do is keep track of the types
    it aggregates, as it will be replaced with its specific version wherever
    needed.

    See http://wiki.cython.org/enhancements/fusedtypes

    types           [PyrexType]             is the list of types to be fused
    name            str                     the name of the ctypedef
    """

    is_fused = 1
    exception_check = 0

    def __init__(self, types, name=None):
        # Use list rather than set to preserve order (list should be short).
        flattened_types = []
        for t in types:
            if t.is_fused:
                # recursively merge in subtypes
                for subtype in t.types:
                    if subtype not in flattened_types:
                        flattened_types.append(subtype)
            elif t not in flattened_types:
                flattened_types.append(t)

        self.types = flattened_types
        self.name = name

    def declaration_code(self, entity_code, for_display = 0,
                         dll_linkage = None, pyrex = 0):
        if pyrex or for_display:
            return self.name

        # Fused types must be replaced by a specialization before C code
        # generation; reaching this point is a compiler bug.
        raise Exception("This may never happen, please report a bug")

    def __repr__(self):
        return 'FusedType(name=%r)' % self.name

    def specialize(self, values):
        return values[self]

    def get_fused_types(self, result=None, seen=None):
        if result is None:
            return [self]

        if self not in seen:
            result.append(self)
            seen.add(self)


class CVoidType(CType):
    #
    #   C "void" type
    #

    is_void = 1
    to_py_function = "__Pyx_void_to_None"

    def __repr__(self):
        return "<CVoidType>"

    def declaration_code(self, entity_code,
            for_display = 0, dll_linkage = None, pyrex = 0):
        if pyrex or for_display:
            base_code = "void"
        else:
            base_code = public_decl("void", dll_linkage)
        return self.base_declaration_code(base_code, entity_code)

    def is_complete(self):
        return 0


class InvisibleVoidType(CVoidType):
    #
    #   For use with C++ constructors and destructors return types.
    #   Acts like void, but does not print out a declaration.
    #
    def declaration_code(self, entity_code,
            for_display = 0, dll_linkage = None, pyrex = 0):
        if pyrex or for_display:
            base_code = "[void]"
        else:
            base_code = public_decl("", dll_linkage)
        return self.base_declaration_code(base_code, entity_code)


class CNumericType(CType):
    #
    #   Base class for all C numeric types.
    #
    #   rank      integer     Relative size
    #   signed    integer     0 = unsigned, 1 = unspecified, 2 = explicitly signed
    #

    is_numeric = 1
    default_value = "0"
    has_attributes = True
    scope = None

    sign_words = ("unsigned ", "", "signed ")

    def __init__(self, rank, signed = 1):
        self.rank = rank
        if rank > 0 and signed == SIGNED:
            # Signed is meaningless for anything but char, and complicates
            # type promotion.
            signed = 1
        self.signed = signed

    def sign_and_name(self):
        # e.g. "unsigned long" -- sign word plus the rank's C type name.
        s = self.sign_words[self.signed]
        n = rank_to_type_name[self.rank]
        return s + n

    def __repr__(self):
        return "<CNumericType %s>" % self.sign_and_name()

    def declaration_code(self, entity_code,
            for_display = 0, dll_linkage = None, pyrex = 0):
        type_name = self.sign_and_name()
        if pyrex or for_display:
            base_code = type_name.replace('PY_LONG_LONG', 'long long')
        else:
            base_code = public_decl(type_name, dll_linkage)
        return self.base_declaration_code(base_code, entity_code)

    def attributes_known(self):
        # Lazily create a scope exposing the 'conjugate' pseudo-method.
        if self.scope is None:
            from . import Symtab
            self.scope = scope = Symtab.CClassScope(
                    '',
                    None,
                    visibility="extern")
            scope.parent_type = self
            scope.directives = {}
            scope.declare_cfunction(
                    "conjugate",
                    CFuncType(self, [CFuncTypeArg("self", self, None)], nogil=True),
                    pos=None, defining=1, cname=" ")
        return True

    def __lt__(self, other):
        """Sort based on rank, preferring signed over unsigned"""
        if other.is_numeric:
            return self.rank > other.rank and self.signed >= other.signed

        # Prefer numeric types over others
        return True

    def py_type_name(self):
        if self.rank <= 4:
            return "(int, long)"
        return "float"


class ForbidUseClass:
    # Sentinel whose any use (repr/str) raises, to catch accidental usage.
    def __repr__(self):
        raise RuntimeError()
    def __str__(self):
        raise RuntimeError()
ForbidUse = ForbidUseClass()


class CIntType(CNumericType):

    is_int = 1
    typedef_flag = 0
    to_py_function = None
    from_py_function = None
    exception_value = -1

    def can_coerce_to_pyobject(self, env):
        return True

    def create_to_py_utility_code(self, env):
        # Only generate a conversion if no class-level function is fixed.
        if type(self).to_py_function is None:
            self.to_py_function = "__Pyx_PyInt_From_" + self.specialization_name()
            env.use_utility_code(TempitaUtilityCode.load_cached(
                "CIntToPy", "TypeConversion.c",
                context={"TYPE": self.empty_declaration_code(),
                         "TO_PY_FUNCTION": self.to_py_function}))
        return True

    def create_from_py_utility_code(self, env):
        if type(self).from_py_function is None:
            self.from_py_function = "__Pyx_PyInt_As_" + self.specialization_name()
            env.use_utility_code(TempitaUtilityCode.load_cached(
                "CIntFromPy", "TypeConversion.c",
                context={"TYPE": self.empty_declaration_code(),
                         "FROM_PY_FUNCTION": self.from_py_function}))
        return True

    def get_to_py_type_conversion(self):
        """Return the CPython C-API function name that converts this int type to a Python number."""
        if self.rank < list(rank_to_type_name).index('int'):
            # This assumes sizeof(short) < sizeof(int)
            return "PyInt_FromLong"
        else:
            # Py{Int|Long}_From[Unsigned]Long[Long]
            Prefix = "Int"
            SignWord = ""
            TypeName = "Long"
            if not self.signed:
                Prefix = "Long"
                SignWord = "Unsigned"
            if self.rank >= list(rank_to_type_name).index('PY_LONG_LONG'):
                Prefix = "Long"
                TypeName = "LongLong"
            return "Py%s_From%s%s" % (Prefix, SignWord, TypeName)

    def assignable_from_resolved_type(self, src_type):
        return src_type.is_int or src_type.is_enum or src_type is error_type

    def invalid_value(self):
        if rank_to_type_name[int(self.rank)] == 'char':
            return "'?'"
        else:
            # We do not really know the size of the type, so return
            # a 32-bit literal and rely on casting to final type. It will
            # be negative for signed ints, which is good.
            return "0xbad0bad0"

    def overflow_check_binop(self, binop, env, const_rhs=False):
        """Load the overflow-checking utility code for binop and return the checking function name."""
        env.use_utility_code(UtilityCode.load("Common", "Overflow.c"))
        type = self.empty_declaration_code()
        name = self.specialization_name()
        if binop == "lshift":
            env.use_utility_code(TempitaUtilityCode.load_cached(
                "LeftShift", "Overflow.c",
                context={'TYPE': type, 'NAME': name, 'SIGNED': self.signed}))
        else:
            if const_rhs:
                binop += "_const"
            if type in ('int', 'long', 'long long'):
                env.use_utility_code(TempitaUtilityCode.load_cached(
                    "BaseCaseSigned", "Overflow.c",
                    context={'INT': type, 'NAME': name}))
            elif type in ('unsigned int', 'unsigned long', 'unsigned long long'):
                env.use_utility_code(TempitaUtilityCode.load_cached(
                    "BaseCaseUnsigned", "Overflow.c",
                    context={'UINT': type, 'NAME': name}))
            elif self.rank <= 1:
                # sizeof(short) < sizeof(int)
                return "__Pyx_%s_%s_no_overflow" % (binop, name)
            else:
                _load_overflow_base(env)
                env.use_utility_code(TempitaUtilityCode.load_cached(
                    "SizeCheck", "Overflow.c",
                    context={'TYPE': type, 'NAME': name}))
                env.use_utility_code(TempitaUtilityCode.load_cached(
                    "Binop", "Overflow.c",
                    context={'TYPE': type, 'NAME': name, 'BINOP': binop}))
        return "__Pyx_%s_%s_checking_overflow" % (binop, name)


def _load_overflow_base(env):
    # Load the base-case overflow helpers for all standard int widths.
    env.use_utility_code(UtilityCode.load("Common", "Overflow.c"))
    for type in ('int', 'long', 'long long'):
        env.use_utility_code(TempitaUtilityCode.load_cached(
            "BaseCaseSigned", "Overflow.c",
            context={'INT': type, 'NAME': type.replace(' ', '_')}))
    for type in ('unsigned int', 'unsigned long', 'unsigned long long'):
        env.use_utility_code(TempitaUtilityCode.load_cached(
            "BaseCaseUnsigned", "Overflow.c",
            context={'UINT': type, 'NAME': type.replace(' ', '_')}))


class CAnonEnumType(CIntType):

    is_enum = 1

    def sign_and_name(self):
        # Anonymous enums are emitted as plain 'int'.
        return 'int'


class CReturnCodeType(CIntType):

    to_py_function = "__Pyx_Owned_Py_None"

    is_returncode = True
    exception_check = False


class CBIntType(CIntType):

    to_py_function = "__Pyx_PyBool_FromLong"
    from_py_function = "__Pyx_PyObject_IsTrue"
    exception_check = 1  # for C++ bool

    def declaration_code(self, entity_code,
            for_display = 0, dll_linkage = None, pyrex = 0):
        if for_display:
            base_code = 'bool'
        elif pyrex:
            base_code = 'bint'
        else:
            # a bint is a plain C int under the hood
            base_code = public_decl('int', dll_linkage)
        return self.base_declaration_code(base_code, entity_code)

    def __repr__(self):
        return "<CNumericType bint>"

    def __str__(self):
        return 'bint'

    def py_type_name(self):
        return "bool"


class CPyUCS4IntType(CIntType):
    # Py_UCS4

    is_unicode_char = True

    # Py_UCS4 coerces from and to single character unicode strings (or
    # at most two characters on 16bit Unicode builds), but we also
    # allow Python integers as input.  The value range for Py_UCS4
    # is 0..1114111, which is checked when converting from an integer
    # value.

    to_py_function = "PyUnicode_FromOrdinal"
    from_py_function = "__Pyx_PyObject_AsPy_UCS4"

    def create_from_py_utility_code(self, env):
        env.use_utility_code(UtilityCode.load_cached("ObjectAsUCS4", "TypeConversion.c"))
        return True

    def sign_and_name(self):
        return "Py_UCS4"


class CPyUnicodeIntType(CIntType):
    # Py_UNICODE

    is_unicode_char = True

    # Py_UNICODE coerces from and to single character unicode strings,
    # but we also allow Python integers as input.  The value range for
    # Py_UNICODE is 0..1114111, which is checked when converting from
    # an integer value.
    to_py_function = "PyUnicode_FromOrdinal"
    from_py_function = "__Pyx_PyObject_AsPy_UNICODE"

    def create_from_py_utility_code(self, env):
        env.use_utility_code(UtilityCode.load_cached("ObjectAsPyUnicode", "TypeConversion.c"))
        return True

    def sign_and_name(self):
        return "Py_UNICODE"


class CPyHashTType(CIntType):

    to_py_function = "__Pyx_PyInt_FromHash_t"
    from_py_function = "__Pyx_PyInt_AsHash_t"

    def sign_and_name(self):
        return "Py_hash_t"


class CPySSizeTType(CIntType):

    to_py_function = "PyInt_FromSsize_t"
    from_py_function = "__Pyx_PyIndex_AsSsize_t"

    def sign_and_name(self):
        return "Py_ssize_t"


class CSSizeTType(CIntType):

    to_py_function = "PyInt_FromSsize_t"
    from_py_function = "PyInt_AsSsize_t"

    def sign_and_name(self):
        return "Py_ssize_t"


class CSizeTType(CIntType):

    to_py_function = "__Pyx_PyInt_FromSize_t"

    def sign_and_name(self):
        return "size_t"


class CPtrdiffTType(CIntType):

    def sign_and_name(self):
        return "ptrdiff_t"


class CFloatType(CNumericType):

    is_float = 1
    to_py_function = "PyFloat_FromDouble"
    from_py_function = "__pyx_PyFloat_AsDouble"

    exception_value = -1

    def __init__(self, rank, math_h_modifier = ''):
        CNumericType.__init__(self, rank, 1)
        self.math_h_modifier = math_h_modifier
        if rank == RANK_FLOAT:
            # single-precision floats use a dedicated converter
            self.from_py_function = "__pyx_PyFloat_AsFloat"

    def assignable_from_resolved_type(self, src_type):
        return (src_type.is_numeric and not src_type.is_complex) or src_type is error_type

    def invalid_value(self):
        return Naming.PYX_NAN


class CComplexType(CNumericType):

    is_complex = 1
    to_py_function = "__pyx_PyComplex_FromComplex"

    has_attributes = 1
    scope = None

    def __init__(self, real_type):
        # Resolve internal typedefs down to the underlying real type.
        while real_type.is_typedef and not real_type.typedef_is_external:
            real_type = real_type.typedef_base_type
        if real_type.is_typedef and real_type.typedef_is_external:
            # The below is not actually used: Coercions are currently disabled
            # so that complex types of external types can not be created
            self.funcsuffix = "_%s" % real_type.specialization_name()
        elif hasattr(real_type,
                     'math_h_modifier'):
            self.funcsuffix = real_type.math_h_modifier
        else:
            self.funcsuffix = "_%s" % real_type.specialization_name()

        self.real_type = real_type
        # rank + 0.5 sorts each complex type just above its real counterpart
        CNumericType.__init__(self, real_type.rank + 0.5, real_type.signed)
        self.binops = {}
        self.from_parts = "%s_from_parts" % self.specialization_name()
        self.default_value = "%s(0, 0)" % self.from_parts

    def __eq__(self, other):
        if isinstance(self, CComplexType) and isinstance(other, CComplexType):
            return self.real_type == other.real_type
        else:
            return False

    def __ne__(self, other):
        if isinstance(self, CComplexType) and isinstance(other, CComplexType):
            return self.real_type != other.real_type
        else:
            return True

    def __lt__(self, other):
        if isinstance(self, CComplexType) and isinstance(other, CComplexType):
            return self.real_type < other.real_type
        else:
            # this is arbitrary, but it makes sure we always have
            # *some* kind of order
            return False

    def __hash__(self):
        return ~hash(self.real_type)

    def declaration_code(self, entity_code,
            for_display = 0, dll_linkage = None, pyrex = 0):
        if pyrex or for_display:
            real_code = self.real_type.declaration_code("", for_display, dll_linkage, pyrex)
            base_code = "%s complex" % real_code
        else:
            base_code = public_decl(self.sign_and_name(), dll_linkage)
        return self.base_declaration_code(base_code, entity_code)

    def sign_and_name(self):
        real_type_name = self.real_type.specialization_name()
        real_type_name = real_type_name.replace('long__double','long_double')
        real_type_name = real_type_name.replace('PY_LONG_LONG','long_long')
        return Naming.type_prefix + real_type_name + "_complex"

    def assignable_from(self, src_type):
        # Temporary hack/feature disabling, see #441
        if (not src_type.is_complex and src_type.is_numeric and src_type.is_typedef
                and src_type.typedef_is_external):
            return False
        else:
            return super(CComplexType, self).assignable_from(src_type)

    def assignable_from_resolved_type(self, src_type):
        return (src_type.is_complex and self.real_type.assignable_from_resolved_type(src_type.real_type)
                or src_type.is_numeric and self.real_type.assignable_from_resolved_type(src_type)
                or src_type is error_type)

    def attributes_known(self):
        # Lazily build a scope exposing .real, .imag and conjugate().
        if self.scope is None:
            from . import Symtab
            self.scope = scope = Symtab.CClassScope(
                    '',
                    None,
                    visibility="extern")
            scope.parent_type = self
            scope.directives = {}
            scope.declare_var("real", self.real_type, None, cname="real", is_cdef=True)
            scope.declare_var("imag", self.real_type, None, cname="imag", is_cdef=True)
            scope.declare_cfunction(
                    "conjugate",
                    CFuncType(self, [CFuncTypeArg("self", self, None)], nogil=True),
                    pos=None, defining=1, cname="__Pyx_c_conj%s" % self.funcsuffix)

        return True

    def create_declaration_utility_code(self, env):
        # This must always be run, because a single CComplexType instance can be shared
        # across multiple compilations (the one created in the module scope)
        env.use_utility_code(complex_header_utility_code)
        env.use_utility_code(complex_real_imag_utility_code)
        for utility_code in (complex_type_utility_code,
                             complex_from_parts_utility_code,
                             complex_arithmetic_utility_code):
            env.use_utility_code(
                utility_code.specialize(
                    self,
                    real_type = self.real_type.empty_declaration_code(),
                    m = self.funcsuffix,
                    is_float = self.real_type.is_float))
        return True

    def can_coerce_to_pyobject(self, env):
        return True

    def create_to_py_utility_code(self, env):
        env.use_utility_code(complex_real_imag_utility_code)
        env.use_utility_code(complex_to_py_utility_code)
        return True

    def create_from_py_utility_code(self, env):
        self.real_type.create_from_py_utility_code(env)

        for utility_code in (complex_from_parts_utility_code,
                             complex_from_py_utility_code):
            env.use_utility_code(
                utility_code.specialize(
                    self,
                    real_type = self.real_type.empty_declaration_code(),
                    m = self.funcsuffix,
                    is_float = self.real_type.is_float))
        self.from_py_function = "__Pyx_PyComplex_As_" + self.specialization_name()
        return True

    def lookup_op(self, nargs, op):
        """Return (and cache) the C helper name for the given complex operator, or None."""
        try:
            return self.binops[nargs, op]
        except KeyError:
            pass
        try:
            op_name = complex_ops[nargs, op]
            self.binops[nargs,
op] = func_name = "__Pyx_c_%s%s" % (op_name, self.funcsuffix)
            return func_name
        except KeyError:
            return None

    def unary_op(self, op):
        # Resolve the C helper name for a unary operator (e.g. '-' -> __Pyx_c_neg*).
        return self.lookup_op(1, op)

    def binary_op(self, op):
        # Resolve the C helper name for a binary operator (e.g. '+' -> __Pyx_c_sum*).
        return self.lookup_op(2, op)

    def py_type_name(self):
        return "complex"

    def cast_code(self, expr_code):
        # Complex values never need an explicit C-level cast.
        return expr_code

# Maps (arity, operator) to the name stem of the generated C helper;
# lookup_op() above combines the stem with the type's funcsuffix.
complex_ops = {
    (1, '-'): 'neg',
    (1, 'zero'): 'is_zero',
    (2, '+'): 'sum',
    (2, '-'): 'diff',
    (2, '*'): 'prod',
    (2, '/'): 'quot',
    (2, '=='): 'eq',
}

# Header-level feature detection: decides whether native C/C++ complex
# support (CYTHON_CCOMPLEX) is available and pulls in the right header.
complex_header_utility_code = UtilityCode(
    proto_block='h_code',
    proto="""
#if !defined(CYTHON_CCOMPLEX)
  #if defined(__cplusplus)
    #define CYTHON_CCOMPLEX 1
  #elif defined(_Complex_I)
    #define CYTHON_CCOMPLEX 1
  #else
    #define CYTHON_CCOMPLEX 0
  #endif
#endif

#if CYTHON_CCOMPLEX
  #ifdef __cplusplus
    #include <complex>
  #else
    #include <complex.h>
  #endif
#endif

#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__)
  #undef _Complex_I
  #define _Complex_I 1.0fj
#endif
""")

# Accessor macros abstracting over the three representations
# (C++ std::complex, C99 _Complex, and the struct fallback).
complex_real_imag_utility_code = UtilityCode(
    proto="""
#if CYTHON_CCOMPLEX
  #ifdef __cplusplus
    #define __Pyx_CREAL(z) ((z).real())
    #define __Pyx_CIMAG(z) ((z).imag())
  #else
    #define __Pyx_CREAL(z) (__real__(z))
    #define __Pyx_CIMAG(z) (__imag__(z))
  #endif
#else
    #define __Pyx_CREAL(z) ((z).real)
    #define __Pyx_CIMAG(z) ((z).imag)
#endif

#if (defined(_WIN32) || defined(__clang__)) && defined(__cplusplus) && CYTHON_CCOMPLEX
    #define __Pyx_SET_CREAL(z,x) ((z).real(x))
    #define __Pyx_SET_CIMAG(z,y) ((z).imag(y))
#else
    #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x)
    #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y)
#endif
""")

# Typedef for a concrete complex type; %(real_type)s / %(type_name)s are
# substituted per floating-point base type when the code is instantiated.
complex_type_utility_code = UtilityCode(
    proto_block='complex_type_declarations',
    proto="""
#if CYTHON_CCOMPLEX
  #ifdef __cplusplus
    typedef ::std::complex< %(real_type)s > %(type_name)s;
  #else
    typedef %(real_type)s _Complex %(type_name)s;
  #endif
#else
    typedef struct { %(real_type)s real, imag; } %(type_name)s;
#endif
""")

# Constructor helper building a complex value from (real, imag) parts.
complex_from_parts_utility_code = UtilityCode(
    proto_block='utility_code_proto',
    proto="""
static CYTHON_INLINE %(type)s %(type_name)s_from_parts(%(real_type)s, %(real_type)s);
""",
    impl="""
#if CYTHON_CCOMPLEX
  #ifdef __cplusplus
    static CYTHON_INLINE %(type)s %(type_name)s_from_parts(%(real_type)s x, %(real_type)s y) {
      return ::std::complex< %(real_type)s >(x, y);
    }
  #else
    static CYTHON_INLINE %(type)s %(type_name)s_from_parts(%(real_type)s x, %(real_type)s y) {
      return x + y*(%(type)s)_Complex_I;
    }
  #endif
#else
    static CYTHON_INLINE %(type)s %(type_name)s_from_parts(%(real_type)s x, %(real_type)s y) {
      %(type)s z;
      z.real = x;
      z.imag = y;
      return z;
    }
#endif
""")

# C complex -> Python complex conversion (always goes through double).
complex_to_py_utility_code = UtilityCode(
    proto="""
#define __pyx_PyComplex_FromComplex(z) \\
        PyComplex_FromDoubles((double)__Pyx_CREAL(z), \\
                              (double)__Pyx_CIMAG(z))
""")

# Python object -> C complex conversion, with a CPython fast path that
# reads the cval slot of exact complex objects directly.
complex_from_py_utility_code = UtilityCode(
    proto="""
static %(type)s __Pyx_PyComplex_As_%(type_name)s(PyObject*);
""",
    impl="""
static %(type)s __Pyx_PyComplex_As_%(type_name)s(PyObject* o) {
    Py_complex cval;
#if CYTHON_COMPILING_IN_CPYTHON
    if (PyComplex_CheckExact(o))
        cval = ((PyComplexObject *)o)->cval;
    else
#endif
        cval = PyComplex_AsCComplex(o);
    return %(type_name)s_from_parts(
               (%(real_type)s)cval.real,
               (%(real_type)s)cval.imag);
}
""")

# Arithmetic helpers: native-complex builds map onto compiler/library
# operations; the fallback branch implements them on the struct type.
complex_arithmetic_utility_code = UtilityCode(
    proto="""
#if CYTHON_CCOMPLEX
    #define __Pyx_c_eq%(m)s(a, b)   ((a)==(b))
    #define __Pyx_c_sum%(m)s(a, b)  ((a)+(b))
    #define __Pyx_c_diff%(m)s(a, b) ((a)-(b))
    #define __Pyx_c_prod%(m)s(a, b) ((a)*(b))
    #define __Pyx_c_quot%(m)s(a, b) ((a)/(b))
    #define __Pyx_c_neg%(m)s(a)     (-(a))
  #ifdef __cplusplus
    #define __Pyx_c_is_zero%(m)s(z) ((z)==(%(real_type)s)0)
    #define __Pyx_c_conj%(m)s(z)    (::std::conj(z))
    #if %(is_float)s
        #define __Pyx_c_abs%(m)s(z)     (::std::abs(z))
        #define __Pyx_c_pow%(m)s(a, b)  (::std::pow(a, b))
    #endif
  #else
    #define __Pyx_c_is_zero%(m)s(z) ((z)==0)
    #define __Pyx_c_conj%(m)s(z)    (conj%(m)s(z))
    #if %(is_float)s
        #define __Pyx_c_abs%(m)s(z)     (cabs%(m)s(z))
        #define __Pyx_c_pow%(m)s(a, b)  (cpow%(m)s(a, b))
    #endif
  #endif
#else
    static CYTHON_INLINE int __Pyx_c_eq%(m)s(%(type)s, %(type)s);
    static CYTHON_INLINE %(type)s __Pyx_c_sum%(m)s(%(type)s, %(type)s);
    static CYTHON_INLINE %(type)s __Pyx_c_diff%(m)s(%(type)s, %(type)s);
    static CYTHON_INLINE %(type)s __Pyx_c_prod%(m)s(%(type)s, %(type)s);
    static CYTHON_INLINE %(type)s __Pyx_c_quot%(m)s(%(type)s, %(type)s);
    static CYTHON_INLINE %(type)s __Pyx_c_neg%(m)s(%(type)s);
    static CYTHON_INLINE int __Pyx_c_is_zero%(m)s(%(type)s);
    static CYTHON_INLINE %(type)s __Pyx_c_conj%(m)s(%(type)s);
    #if %(is_float)s
        static CYTHON_INLINE %(real_type)s __Pyx_c_abs%(m)s(%(type)s);
        static CYTHON_INLINE %(type)s __Pyx_c_pow%(m)s(%(type)s, %(type)s);
    #endif
#endif
""",
    impl="""
#if CYTHON_CCOMPLEX
#else
    static CYTHON_INLINE int __Pyx_c_eq%(m)s(%(type)s a, %(type)s b) {
       return (a.real == b.real) && (a.imag == b.imag);
    }
    static CYTHON_INLINE %(type)s __Pyx_c_sum%(m)s(%(type)s a, %(type)s b) {
        %(type)s z;
        z.real = a.real + b.real;
        z.imag = a.imag + b.imag;
        return z;
    }
    static CYTHON_INLINE %(type)s __Pyx_c_diff%(m)s(%(type)s a, %(type)s b) {
        %(type)s z;
        z.real = a.real - b.real;
        z.imag = a.imag - b.imag;
        return z;
    }
    static CYTHON_INLINE %(type)s __Pyx_c_prod%(m)s(%(type)s a, %(type)s b) {
        %(type)s z;
        z.real = a.real * b.real - a.imag * b.imag;
        z.imag = a.real * b.imag + a.imag * b.real;
        return z;
    }
    static CYTHON_INLINE %(type)s __Pyx_c_quot%(m)s(%(type)s a, %(type)s b) {
        %(type)s z;
        %(real_type)s denom = b.real * b.real + b.imag * b.imag;
        z.real = (a.real * b.real + a.imag * b.imag) / denom;
        z.imag = (a.imag * b.real - a.real * b.imag) / denom;
        return z;
    }
    static CYTHON_INLINE %(type)s __Pyx_c_neg%(m)s(%(type)s a) {
        %(type)s z;
        z.real = -a.real;
        z.imag = -a.imag;
        return z;
    }
    static CYTHON_INLINE int __Pyx_c_is_zero%(m)s(%(type)s a) {
       return (a.real == 0) && (a.imag == 0);
    }
    static CYTHON_INLINE %(type)s __Pyx_c_conj%(m)s(%(type)s a) {
        %(type)s z;
        z.real =  a.real;
        z.imag = -a.imag;
        return z;
    }
    #if %(is_float)s
        static CYTHON_INLINE %(real_type)s __Pyx_c_abs%(m)s(%(type)s z) {
          #if !defined(HAVE_HYPOT) || defined(_MSC_VER)
            return sqrt%(m)s(z.real*z.real + z.imag*z.imag);
          #else
            return hypot%(m)s(z.real, z.imag);
          #endif
        }
        static CYTHON_INLINE %(type)s __Pyx_c_pow%(m)s(%(type)s a, %(type)s b) {
            %(type)s z;
            %(real_type)s r, lnr, theta, z_r, z_theta;
            if (b.imag == 0 && b.real == (int)b.real) {
                if (b.real < 0) {
                    %(real_type)s denom = a.real * a.real + a.imag * a.imag;
                    a.real = a.real / denom;
                    a.imag = -a.imag / denom;
                    b.real = -b.real;
                }
                switch ((int)b.real) {
                    case 0:
                        z.real = 1;
                        z.imag = 0;
                        return z;
                    case 1:
                        return a;
                    case 2:
                        z = __Pyx_c_prod%(m)s(a, a);
                        return __Pyx_c_prod%(m)s(a, a);
                    case 3:
                        z = __Pyx_c_prod%(m)s(a, a);
                        return __Pyx_c_prod%(m)s(z, a);
                    case 4:
                        z = __Pyx_c_prod%(m)s(a, a);
                        return __Pyx_c_prod%(m)s(z, z);
                }
            }
            if (a.imag == 0) {
                if (a.real == 0) {
                    return a;
                }
                r = a.real;
                theta = 0;
            } else {
                r = __Pyx_c_abs%(m)s(a);
                theta = atan2%(m)s(a.imag, a.real);
            }
            lnr = log%(m)s(r);
            z_r = exp%(m)s(lnr * b.real - theta * b.imag);
            z_theta = theta * b.real + lnr * b.imag;
            z.real = z_r * cos%(m)s(z_theta);
            z.imag = z_r * sin%(m)s(z_theta);
            return z;
        }
    #endif
#endif
""")


class CPointerBaseType(CType):
    # common base type for pointer/array types
    #
    #  base_type     CType     Reference type

    subtypes = ['base_type']

    def __init__(self, base_type):
        self.base_type = base_type
        # Char-like element type => this pointer/array acts as a C string.
        for char_type in (c_char_type, c_uchar_type, c_schar_type):
            if base_type.same_as(char_type):
                self.is_string = 1
                break
        else:
            # for/else: only reached when no char type matched above.
            if base_type.same_as(c_py_unicode_type):
                self.is_pyunicode_ptr = 1

        if self.is_string and not base_type.is_error:
            # Pick the conversion helpers matching the char signedness;
            # from_py conversion only exists for pointers, not arrays.
            if base_type.signed == 2:
                self.to_py_function = "__Pyx_PyObject_FromCString"
                if self.is_ptr:
                    self.from_py_function = "__Pyx_PyObject_AsSString"
            elif base_type.signed:
                self.to_py_function = "__Pyx_PyObject_FromString"
                if self.is_ptr:
                    self.from_py_function = "__Pyx_PyObject_AsString"
            else:
                self.to_py_function = "__Pyx_PyObject_FromCString"
                if self.is_ptr:
                    self.from_py_function = "__Pyx_PyObject_AsUString"
            self.exception_value = "NULL"
        elif self.is_pyunicode_ptr and not base_type.is_error:
            self.to_py_function = "__Pyx_PyUnicode_FromUnicode"
            if self.is_ptr:
                self.from_py_function = "__Pyx_PyUnicode_AsUnicode"
            self.exception_value = "NULL"

    def py_type_name(self):
        if self.is_string:
            return "bytes"
        elif self.is_pyunicode_ptr:
            return "unicode"
        else:
            return super(CPointerBaseType, self).py_type_name()

    def literal_code(self, value):
        # NOTE(review): implicitly returns None for non-string base types —
        # presumably only called for char arrays/pointers; confirm callers.
        if self.is_string:
            assert isinstance(value, str)
            return '"%s"' % StringEncoding.escape_byte_string(value)


class CArrayType(CPointerBaseType):
    #  base_type     CType              Element type
    #  size          integer or None    Number of elements

    is_array = 1
    to_tuple_function = None

    def __init__(self, base_type, size):
        super(CArrayType, self).__init__(base_type)
        self.size = size

    def __eq__(self, other):
        if isinstance(other, CType) and other.is_array and self.size == other.size:
            return self.base_type.same_as(other.base_type)
        return False

    # NOTE(review): hash ignores size (allowed, since equal arrays share a
    # base_type); unlike CPtrType below, no __ne__ is defined — under
    # Python 2 `!=` then falls back to identity; confirm this is intended.
    def __hash__(self):
        return hash(self.base_type) + 28  # arbitrarily chosen offset

    def __repr__(self):
        return "<CArrayType %s %s>" % (self.size, repr(self.base_type))

    def same_as_resolved_type(self, other_type):
        return ((other_type.is_array and
            self.base_type.same_as(other_type.base_type))
                or other_type is error_type)

    def assignable_from_resolved_type(self, src_type):
        # C arrays are assigned by value, either Python containers or C arrays/pointers
        if src_type.is_pyobject:
            return True
        if src_type.is_ptr or src_type.is_array:
            return self.base_type.assignable_from(src_type.base_type)
        return False

    def element_ptr_type(self):
        return c_ptr_type(self.base_type)

    def declaration_code(self, entity_code,
            for_display = 0, dll_linkage = None, pyrex = 0):
        if self.size is not None:
            dimension_code = self.size
        else:
            dimension_code = ""
        if entity_code.startswith("*"):
            # C precedence: pointer-to-array needs parentheses around the name.
            entity_code = "(%s)" % entity_code
        return self.base_type.declaration_code(
            "%s[%s]" % (entity_code, dimension_code),
            for_display, dll_linkage, pyrex)

    def as_argument_type(self):
        # Arrays decay to pointers when used as function arguments.
        return c_ptr_type(self.base_type)

    def is_complete(self):
        # An array type without a known size is incomplete.
        return self.size is not None

    def specialize(self, values):
        base_type = self.base_type.specialize(values)
        if base_type == self.base_type:
            return self
        else:
            return CArrayType(base_type, self.size)

    def deduce_template_params(self, actual):
        if isinstance(actual, CArrayType):
            return self.base_type.deduce_template_params(actual.base_type)
        else:
            return None

    def can_coerce_to_pyobject(self, env):
        return self.base_type.can_coerce_to_pyobject(env)

    def create_to_py_utility_code(self, env):
        # Generates (and caches) helpers converting a C array to a Python
        # list and to a tuple; requires element-level conversion to exist.
        if self.to_py_function is not None:
            return self.to_py_function
        if not self.base_type.create_to_py_utility_code(env):
            return False

        base_type = self.base_type.declaration_code("", pyrex=1)
        safe_typename = re.sub('[^a-zA-Z0-9]', '__', base_type)
        to_py_function = "__Pyx_carray_to_py_%s" % safe_typename
        to_tuple_function = "__Pyx_carray_to_tuple_%s" % safe_typename

        from .UtilityCode import CythonUtilityCode
        context = {
            'cname': to_py_function,
            'to_tuple_cname': to_tuple_function,
            'base_type': base_type,
        }
        env.use_utility_code(CythonUtilityCode.load(
            "carray.to_py", "CConvert.pyx",
            outer_module_scope=env.global_scope(),  # need access to types declared in module
            context=context, compiler_directives=dict(env.global_scope().directives)))
        self.to_tuple_function = to_tuple_function
        self.to_py_function = to_py_function
        return True

    def to_py_call_code(self, source_code, result_code, result_type, to_py_function=None):
        func = self.to_py_function if to_py_function is None else to_py_function
        if self.is_string or self.is_pyunicode_ptr:
            return '%s = %s(%s)' % (
                result_code,
                func,
                source_code)
        # Use the tuple converter when the target is known to be a tuple.
        target_is_tuple = result_type.is_builtin_type and result_type.name == 'tuple'
        return '%s = %s(%s, %s)' % (
            result_code,
            self.to_tuple_function if target_is_tuple else func,
            source_code,
            self.size)

    def create_from_py_utility_code(self, env):
        # Generates (and caches) the helper filling a fixed-size C array
        # from a Python iterable; mirrors create_to_py_utility_code above.
        if self.from_py_function is not None:
            return self.from_py_function
        if not self.base_type.create_from_py_utility_code(env):
            return False

        base_type = self.base_type.declaration_code("", pyrex=1)
        safe_typename = re.sub('[^a-zA-Z0-9]', '__', base_type)
        from_py_function = "__Pyx_carray_from_py_%s" % safe_typename

        from .UtilityCode import CythonUtilityCode
        context = {
            'cname': from_py_function,
            'base_type': base_type,
        }
        env.use_utility_code(CythonUtilityCode.load(
            "carray.from_py", "CConvert.pyx",
            outer_module_scope=env.global_scope(),  # need access to types declared in module
            context=context, compiler_directives=dict(env.global_scope().directives)))
        self.from_py_function = from_py_function
        return True

    def from_py_call_code(self, source_code, result_code, error_pos, code,
                          from_py_function=None, error_condition=None):
        call_code = "%s(%s, %s, %s)" % (
            from_py_function or self.from_py_function,
            source_code, result_code, self.size)
        return code.error_goto_if_neg(call_code, error_pos)


class CPtrType(CPointerBaseType):
    #  base_type     CType    Reference type

    is_ptr = 1
    default_value = "0"

    def __hash__(self):
        return hash(self.base_type) + 27  # arbitrarily chosen offset

    def __eq__(self, other):
        if isinstance(other, CType) and other.is_ptr:
            return self.base_type.same_as(other.base_type)
        return False

    def __ne__(self, other):
        return not (self == other)

    def __repr__(self):
        return "<CPtrType %s>" % repr(self.base_type)

    def same_as_resolved_type(self, other_type):
        return ((other_type.is_ptr and
            self.base_type.same_as(other_type.base_type))
                or other_type is error_type)

    def declaration_code(self, entity_code,
            for_display = 0, dll_linkage = None, pyrex = 0):
        #print "CPtrType.declaration_code: pointer to", self.base_type ###
        return self.base_type.declaration_code(
            "*%s" % entity_code,
            for_display, dll_linkage, pyrex)

    def assignable_from_resolved_type(self, other_type):
        if other_type is error_type:
            return 1
        if other_type.is_null_ptr:
            return 1
        if self.base_type.is_const:
            # Drop constness: a const-pointer may be assigned a non-const value.
            self = CPtrType(self.base_type.const_base_type)
        if self.base_type.is_cfunction:
            if other_type.is_ptr:
                other_type = other_type.base_type.resolve()
            if other_type.is_cfunction:
                return self.base_type.pointer_assignable_from_resolved_type(other_type)
            else:
                return 0
        if (self.base_type.is_cpp_class and other_type.is_ptr
                and other_type.base_type.is_cpp_class and other_type.base_type.is_subclass(self.base_type)):
            return 1
        if other_type.is_array or other_type.is_ptr:
            # void* accepts any pointer; otherwise element types must match.
            return self.base_type.is_void or self.base_type.same_as(other_type.base_type)
        return 0

    def specialize(self, values):
        base_type = self.base_type.specialize(values)
        if base_type == self.base_type:
            return self
        else:
            return CPtrType(base_type)

    def deduce_template_params(self, actual):
        if isinstance(actual, CPtrType):
            return self.base_type.deduce_template_params(actual.base_type)
        else:
            return None

    def invalid_value(self):
        return "1"

    def find_cpp_operation_type(self, operator, operand_type=None):
        if self.base_type.is_cpp_class:
            return self.base_type.find_cpp_operation_type(operator, operand_type)
        return None


class CNullPtrType(CPtrType):
    # Type of the NULL literal; assignable to any pointer type.

    is_null_ptr = 1


class CReferenceType(BaseType):
    # C++ reference type ("T &"); mostly delegates to the referenced type.

    is_reference = 1
    is_fake_reference = 0

    def __init__(self, base_type):
        self.ref_base_type = base_type

    def __repr__(self):
        return "<CReferenceType %s>" % repr(self.ref_base_type)

    def __str__(self):
        return "%s &" % self.ref_base_type

    def declaration_code(self, entity_code,
            for_display = 0, dll_linkage = None, pyrex = 0):
        #print "CReferenceType.declaration_code: pointer to", self.base_type ###
        return self.ref_base_type.declaration_code(
            "&%s" % entity_code,
            for_display, dll_linkage, pyrex)

    def specialize(self, values):
        base_type = self.ref_base_type.specialize(values)
        if base_type == self.ref_base_type:
            return self
        else:
            return type(self)(base_type)

    def deduce_template_params(self, actual):
        return self.ref_base_type.deduce_template_params(actual)

    def __getattr__(self, name):
        # All other attribute lookups are forwarded to the referenced type.
        return getattr(self.ref_base_type, name)


class CFakeReferenceType(CReferenceType):
    # Reference emulated via the __Pyx_FakeReference<T> wrapper template.

    is_fake_reference = 1

    def __repr__(self):
        return "<CFakeReferenceType %s>" % repr(self.ref_base_type)

    def __str__(self):
        return "%s [&]" % self.ref_base_type

    def declaration_code(self, entity_code,
            for_display = 0, dll_linkage = None, pyrex = 0):
        #print "CReferenceType.declaration_code: pointer to", self.base_type ###
        return "__Pyx_FakeReference<%s> %s" % (self.ref_base_type.empty_declaration_code(), entity_code)


class CFuncType(CType):
    #  return_type      CType
    #  args             [CFuncTypeArg]
    #  has_varargs      boolean
    #  exception_value  string
    #  exception_check  boolean    True if PyErr_Occurred check needed
    #  calling_convention  string  Function calling convention
    #  nogil            boolean    Can be called without gil
    #  with_gil         boolean    Acquire gil around function body
    #  templates        [string] or None
    #  cached_specialized_types [CFuncType]   cached specialized versions of the CFuncType if defined in a pxd
    #  from_fused       boolean    Indicates whether this is a specialized
    #                              C function
    #  is_strict_signature boolean  function refuses to accept coerced arguments
    #                               (used for optimisation overrides)
    #  is_const_method  boolean
    #  is_static_method boolean

    is_cfunction = 1
    original_sig = None
    cached_specialized_types = None
    from_fused = False
    is_const_method = False

    subtypes = ['return_type', 'args']

    def __init__(self, return_type, args, has_varargs = 0,
            exception_value = None, exception_check = 0, calling_convention = "",
            nogil = 0, with_gil = 0, is_overridable = 0, optional_arg_count = 0,
            is_const_method = False, is_static_method=False,
            templates = None, is_strict_signature = False):
        self.return_type = return_type
        self.args = args
        self.has_varargs = has_varargs
        self.optional_arg_count = optional_arg_count
        self.exception_value = exception_value
        self.exception_check = exception_check
        self.calling_convention = calling_convention
        self.nogil = nogil
        self.with_gil = with_gil
        self.is_overridable = is_overridable
        self.is_const_method = is_const_method
        self.is_static_method = is_static_method
        self.templates = templates
        self.is_strict_signature = is_strict_signature

    def __repr__(self):
        arg_reprs = list(map(repr, self.args))
        if self.has_varargs:
            arg_reprs.append("...")
        if self.exception_value:
            except_clause = " %r" % self.exception_value
        else:
            except_clause = ""
        if self.exception_check:
            except_clause += "?"
        return "<CFuncType %s %s[%s]%s>" % (
            repr(self.return_type),
            self.calling_convention_prefix(),
            ",".join(arg_reprs),
            except_clause)

    def calling_convention_prefix(self):
        cc = self.calling_convention
        if cc:
            return cc + " "
        else:
            return ""

    def as_argument_type(self):
        # Function types decay to function pointers in argument position.
        return c_ptr_type(self)

    def same_c_signature_as(self, other_type, as_cmethod = 0):
        return self.same_c_signature_as_resolved_type(
            other_type.resolve(), as_cmethod)

    def same_c_signature_as_resolved_type(self, other_type, as_cmethod = 0):
        # Strict signature equality: arg types, varargs/optional counts,
        # return type, calling convention and exception spec must all match.
        #print "CFuncType.same_c_signature_as_resolved_type:", \
        #    self, other_type, "as_cmethod =", as_cmethod ###
        if other_type is error_type:
            return 1
        if not other_type.is_cfunction:
            return 0
        if self.is_overridable != other_type.is_overridable:
            return 0
        nargs = len(self.args)
        if nargs != len(other_type.args):
            return 0
        # When comparing C method signatures, the first argument
        # is exempt from compatibility checking (the proper check
        # is performed elsewhere).
        for i in range(as_cmethod, nargs):
            if not self.args[i].type.same_as(other_type.args[i].type):
                return 0
        if self.has_varargs != other_type.has_varargs:
            return 0
        if self.optional_arg_count != other_type.optional_arg_count:
            return 0
        if not self.return_type.same_as(other_type.return_type):
            return 0
        if not self.same_calling_convention_as(other_type):
            return 0
        if self.exception_check != other_type.exception_check:
            return 0
        if not self._same_exception_value(other_type.exception_value):
            return 0
        return 1

    def _same_exception_value(self, other_exc_value):
        # Compare declared exception return values; for C++ '+' checks the
        # handler, for everything else the literal value must be identical.
        if self.exception_value == other_exc_value:
            return 1
        if self.exception_check != '+':
            return 0
        if not self.exception_value or not other_exc_value:
            return 0
        if self.exception_value.type != other_exc_value.type:
            return 0
        if self.exception_value.entry and other_exc_value.entry:
            if self.exception_value.entry.cname != other_exc_value.entry.cname:
                return 0
        if self.exception_value.name != other_exc_value.name:
            return 0
        return 1

    def compatible_signature_with(self, other_type, as_cmethod = 0):
        return self.compatible_signature_with_resolved_type(other_type.resolve(), as_cmethod)

    def compatible_signature_with_resolved_type(self, other_type, as_cmethod):
        # Looser check than same_c_signature_as: allows extra optional args
        # and a covariant (subtype) return type.
        #print "CFuncType.same_c_signature_as_resolved_type:", \
        #    self, other_type, "as_cmethod =", as_cmethod ###
        if other_type is error_type:
            return 1
        if not other_type.is_cfunction:
            return 0
        if not self.is_overridable and other_type.is_overridable:
            return 0
        nargs = len(self.args)
        if nargs - self.optional_arg_count != len(other_type.args) - other_type.optional_arg_count:
            return 0
        if self.optional_arg_count < other_type.optional_arg_count:
            return 0
        # When comparing C method signatures, the first argument
        # is exempt from compatibility checking (the proper check
        # is performed elsewhere).
        for i in range(as_cmethod, len(other_type.args)):
            if not self.args[i].type.same_as(
                other_type.args[i].type):
                    return 0
        if self.has_varargs != other_type.has_varargs:
            return 0
        if not self.return_type.subtype_of_resolved_type(other_type.return_type):
            return 0
        if not self.same_calling_convention_as(other_type):
            return 0
        if self.nogil != other_type.nogil:
            return 0
        if not self.exception_check and other_type.exception_check:
            # a redundant exception check doesn't make functions incompatible, but a missing one does
            return 0
        if not self._same_exception_value(other_type.exception_value):
            return 0
        self.original_sig = other_type.original_sig or other_type
        return 1

    def narrower_c_signature_than(self, other_type, as_cmethod = 0):
        return self.narrower_c_signature_than_resolved_type(other_type.resolve(), as_cmethod)

    def narrower_c_signature_than_resolved_type(self, other_type, as_cmethod):
        # True if self is at least as specific as other_type; also records
        # per-argument needs_type_test flags as a side effect.
        if other_type is error_type:
            return 1
        if not other_type.is_cfunction:
            return 0
        nargs = len(self.args)
        if nargs != len(other_type.args):
            return 0
        for i in range(as_cmethod, nargs):
            if not self.args[i].type.subtype_of_resolved_type(other_type.args[i].type):
                return 0
            else:
                self.args[i].needs_type_test = other_type.args[i].needs_type_test \
                    or not self.args[i].type.same_as(other_type.args[i].type)
        if self.has_varargs != other_type.has_varargs:
            return 0
        if self.optional_arg_count != other_type.optional_arg_count:
            return 0
        if not self.return_type.subtype_of_resolved_type(other_type.return_type):
            return 0
        if not self.exception_check and other_type.exception_check:
            # a redundant exception check doesn't make functions incompatible, but a missing one does
            return 0
        if not self._same_exception_value(other_type.exception_value):
            return 0
        return 1

    def same_calling_convention_as(self, other):
        ## XXX Under discussion ...
        ## callspec_words = ("__stdcall", "__cdecl", "__fastcall")
        ## cs1 = self.calling_convention
        ## cs2 = other.calling_convention
        ## if (cs1 in callspec_words or
        ##     cs2 in callspec_words):
        ##     return cs1 == cs2
        ## else:
        ##     return True
        sc1 = self.calling_convention == '__stdcall'
        sc2 = other.calling_convention == '__stdcall'
        return sc1 == sc2

    def same_as_resolved_type(self, other_type, as_cmethod = 0):
        return self.same_c_signature_as_resolved_type(other_type, as_cmethod) \
            and self.nogil == other_type.nogil

    def pointer_assignable_from_resolved_type(self, other_type):
        # A nogil function pointer may not be assigned a gil-requiring function.
        return self.same_c_signature_as_resolved_type(other_type) \
            and not (self.nogil and not other_type.nogil)

    def declaration_code(self, entity_code,
                         for_display = 0, dll_linkage = None, pyrex = 0,
                         with_calling_convention = 1):
        arg_decl_list = []
        # Optional arguments are packed into a struct, so exclude them here.
        for arg in self.args[:len(self.args)-self.optional_arg_count]:
            arg_decl_list.append(
                arg.type.declaration_code("", for_display, pyrex = pyrex))
        if self.is_overridable:
            arg_decl_list.append("int %s" % Naming.skip_dispatch_cname)
        if self.optional_arg_count:
            arg_decl_list.append(self.op_arg_struct.declaration_code(Naming.optional_args_cname))
        if self.has_varargs:
            arg_decl_list.append("...")
        arg_decl_code = ", ".join(arg_decl_list)
        if not arg_decl_code and not pyrex:
            arg_decl_code = "void"
        trailer = ""
        if (pyrex or for_display) and not self.return_type.is_pyobject:
            if self.exception_value and self.exception_check:
                trailer = " except? %s" % self.exception_value
            elif self.exception_value:
                trailer = " except %s" % self.exception_value
            elif self.exception_check == '+':
                trailer = " except +"
            elif self.exception_check and for_display:
                # not spelled out by default, unless for human eyes
                trailer = " except *"
            if self.nogil:
                trailer += " nogil"
        if not with_calling_convention:
            cc = ''
        else:
            cc = self.calling_convention_prefix()
            if (not entity_code and cc) or entity_code.startswith("*"):
                entity_code = "(%s%s)" % (cc, entity_code)
                cc = ""
        if self.is_const_method:
            trailer += " const"
        return self.return_type.declaration_code(
            "%s%s(%s)%s" % (cc, entity_code, arg_decl_code, trailer),
            for_display, dll_linkage, pyrex)

    def function_header_code(self, func_name, arg_code):
        if self.is_const_method:
            trailer = " const"
        else:
            trailer = ""
        return "%s%s(%s)%s" % (self.calling_convention_prefix(),
            func_name, arg_code, trailer)

    def signature_string(self):
        s = self.empty_declaration_code()
        return s

    def signature_cast_string(self):
        s = self.declaration_code("(*)", with_calling_convention=False)
        return '(%s)' % s

    def specialize(self, values):
        result = CFuncType(self.return_type.specialize(values),
                           [arg.specialize(values) for arg in self.args],
                           has_varargs = self.has_varargs,
                           exception_value = self.exception_value,
                           exception_check = self.exception_check,
                           calling_convention = self.calling_convention,
                           nogil = self.nogil,
                           with_gil = self.with_gil,
                           is_overridable = self.is_overridable,
                           optional_arg_count = self.optional_arg_count,
                           is_const_method = self.is_const_method,
                           is_static_method = self.is_static_method,
                           templates = self.templates)

        result.from_fused = self.is_fused
        return result

    def opt_arg_cname(self, arg_name):
        return self.op_arg_struct.base_type.scope.lookup(arg_name).cname

    # Methods that deal with Fused Types
    # All but map_with_specific_entries should be called only on functions
    # with fused types (and not on their corresponding specific versions).

    def get_all_specialized_permutations(self, fused_types=None):
        """
        Permute all the types. For every specific instance of a fused type, we
        want all other specific instances of all other fused types.

        It returns an iterable of two-tuples of the cname that should prefix
        the cname of the function, and a dict mapping any fused types to their
        respective specific types.
        """
        assert self.is_fused

        if fused_types is None:
            fused_types = self.get_fused_types()

        return get_all_specialized_permutations(fused_types)

    def get_all_specialized_function_types(self):
        """
        Get all the specific function types of this one.
        """
        assert self.is_fused

        if self.entry.fused_cfunction:
            return [n.type for n in self.entry.fused_cfunction.nodes]
        elif self.cached_specialized_types is not None:
            return self.cached_specialized_types

        # NOTE: mutates the scope's entry list — the fused entry is replaced
        # by one entry per specialization; results are cached afterwards.
        cfunc_entries = self.entry.scope.cfunc_entries
        cfunc_entries.remove(self.entry)

        result = []
        permutations = self.get_all_specialized_permutations()

        for cname, fused_to_specific in permutations:
            new_func_type = self.entry.type.specialize(fused_to_specific)

            if self.optional_arg_count:
                # Remember, this method is set by CFuncDeclaratorNode
                self.declare_opt_arg_struct(new_func_type, cname)

            new_entry = copy.deepcopy(self.entry)
            new_func_type.specialize_entry(new_entry, cname)

            new_entry.type = new_func_type
            new_func_type.entry = new_entry
            result.append(new_func_type)

            cfunc_entries.append(new_entry)

        self.cached_specialized_types = result

        return result

    def get_fused_types(self, result=None, seen=None, subtypes=None):
        """Return fused types in the order they appear as parameter types"""
        return super(CFuncType, self).get_fused_types(result, seen,
                                                      subtypes=['args'])

    def specialize_entry(self, entry, cname):
        assert not self.is_fused
        specialize_entry(entry, cname)

    def can_coerce_to_pyobject(self, env):
        # duplicating the decisions from create_to_py_utility_code() here avoids writing out unused code
        if self.has_varargs or self.optional_arg_count:
            return False
        if self.to_py_function is not None:
            return self.to_py_function
        for arg in self.args:
            if not arg.type.is_pyobject and not arg.type.can_coerce_to_pyobject(env):
                return False
        if not self.return_type.is_pyobject and not self.return_type.can_coerce_to_pyobject(env):
            return False
        return True

    def create_to_py_utility_code(self, env):
        # Builds a Python-callable wrapper for a C function pointer of this
        # type via the "cfunc.to_py" utility template.
        # FIXME: it seems we're trying to coerce in more cases than we should
        if self.to_py_function is not None:
            return self.to_py_function
        if not self.can_coerce_to_pyobject(env):
            return False
        from .UtilityCode import CythonUtilityCode
        safe_typename = re.sub('[^a-zA-Z0-9]', '__', self.declaration_code("", pyrex=1))
        to_py_function = "__Pyx_CFunc_%s_to_py" % safe_typename

        # Argument and return conversions must exist for the wrapper to work.
        for arg in self.args:
            if not arg.type.is_pyobject and not arg.type.create_from_py_utility_code(env):
                return False
        if not self.return_type.is_pyobject and not self.return_type.create_to_py_utility_code(env):
            return False

        def declared_type(ctype):
            # Map a Cython type to (Cython type name, C arg type, display name)
            # for use in the generated wrapper's signature and docs.
            type_displayname = str(ctype.declaration_code("", for_display=True))
            if ctype.is_pyobject:
                arg_ctype = type_name = type_displayname
                if ctype.is_builtin_type:
                    arg_ctype = ctype.name
                elif not ctype.is_extension_type:
                    type_name = 'object'
                    type_displayname = None
                else:
                    type_displayname = repr(type_displayname)
            elif ctype is c_bint_type:
                type_name = arg_ctype = 'bint'
            else:
                type_name = arg_ctype = type_displayname
                if ctype is c_double_type:
                    type_displayname = 'float'
                else:
                    type_displayname = repr(type_displayname)
            return type_name, arg_ctype, type_displayname

        class Arg(object):
            # Lightweight view of one argument for template substitution.
            def __init__(self, arg_name, arg_type):
                self.name = arg_name
                self.type = arg_type
                self.type_cname, self.ctype, self.type_displayname = declared_type(arg_type)

        if self.return_type.is_void:
            except_clause = 'except *'
        elif self.return_type.is_pyobject:
            except_clause = ''
        elif self.exception_value:
            except_clause = ('except? %s' if self.exception_check else 'except %s') % self.exception_value
        else:
            except_clause = 'except *'

        context = {
            'cname': to_py_function,
            'args': [Arg(arg.name or 'arg%s' % ix, arg.type) for ix, arg in enumerate(self.args)],
            'return_type': Arg('return', self.return_type),
            'except_clause': except_clause,
        }
        # FIXME: directives come from first defining environment and do not adapt for reuse
        env.use_utility_code(CythonUtilityCode.load(
            "cfunc.to_py", "CConvert.pyx",
            outer_module_scope=env.global_scope(),  # need access to types declared in module
            context=context, compiler_directives=dict(env.global_scope().directives)))
        self.to_py_function = to_py_function
        return True


def specialize_entry(entry, cname):
    """
    Specialize an entry of a copied fused function or method
    """
    entry.is_fused_specialized = True
    entry.name = get_fused_cname(cname, entry.name)

    if entry.is_cmethod:
        entry.cname = entry.name
        if entry.is_inherited:
            entry.cname = StringEncoding.EncodedString(
                    "%s.%s" % (Naming.obj_base_cname, entry.cname))
    else:
        entry.cname = get_fused_cname(cname, entry.cname)

    if entry.func_cname:
        entry.func_cname = get_fused_cname(cname, entry.func_cname)


def get_fused_cname(fused_cname, orig_cname):
    """
    Given the fused cname id and an original cname, return a specialized cname
    """
    assert fused_cname and orig_cname
    return StringEncoding.EncodedString('%s%s%s' % (Naming.fused_func_prefix,
                                                    fused_cname, orig_cname))


def unique(somelist):
    # Order-preserving deduplication of a list.
    seen = set()
    result = []
    for obj in somelist:
        if obj not in seen:
            result.append(obj)
            seen.add(obj)
    return result


def get_all_specialized_permutations(fused_types):
    return _get_all_specialized_permutations(unique(fused_types))


def _get_all_specialized_permutations(fused_types, id="", f2s=()):
    # Recursive cartesian product over the specific types of each fused
    # type, accumulating a cname suffix and a fused->specific mapping.
    fused_type, = fused_types[0].get_fused_types()
    result = []

    for newid, specific_type in enumerate(fused_type.types):
        # f2s = dict(f2s, **{ fused_type: specific_type })
        f2s = dict(f2s)
        f2s.update({ fused_type: specific_type })

        if id:
            cname = '%s_%s' % (id, newid)
        else:
            cname = str(newid)

        if len(fused_types) > 1:
            result.extend(_get_all_specialized_permutations(
                                            fused_types[1:], cname, f2s))
        else:
            result.append((cname, f2s))

    return result


def specialization_signature_string(fused_compound_type, fused_to_specific):
    """
    Return the signature for a specialization of a fused type. e.g.

        floating[:] ->
            'float' or 'double'

        cdef fused ft:
            float[:]
            double[:]

        ft ->
            'float[:]' or 'double[:]'

        integral func(floating) ->
            'int (*func)(float)' or ...
    """
    fused_types = fused_compound_type.get_fused_types()
    if len(fused_types) == 1:
        fused_type = fused_types[0]
    else:
        fused_type = fused_compound_type

    return fused_type.specialize(fused_to_specific).typeof_name()


def get_specialized_types(type):
    """
    Return a list of specialized types in their declared order.
    """
    assert type.is_fused

    if isinstance(type, FusedType):
        result = list(type.types)
        for specialized_type in result:
            specialized_type.specialization_string = specialized_type.typeof_name()
    else:
        result = []
        for cname, f2s in get_all_specialized_permutations(type.get_fused_types()):
            specialized_type = type.specialize(f2s)
            specialized_type.specialization_string = (
                specialization_signature_string(type, f2s))
            result.append(specialized_type)

    return result


class CFuncTypeArg(BaseType):
    #  name       string
    #  cname      string
    #  type       PyrexType
    #  pos        source file position

    # FIXME: is this the right setup? should None be allowed here?
    not_none = False
    or_none = False
    accept_none = True
    accept_builtin_subtypes = False

    subtypes = ['type']

    def __init__(self, name, type, pos, cname=None):
        self.name = name
        if cname is not None:
            self.cname = cname
        else:
            self.cname = Naming.var_prefix + name
        self.type = type
        self.pos = pos
        self.needs_type_test = False  # TODO: should these defaults be set in analyse_types()?

    def __repr__(self):
        return "%s:%s" % (self.name, repr(self.type))

    def declaration_code(self, for_display = 0):
        return self.type.declaration_code(self.cname, for_display)

    def specialize(self, values):
        return CFuncTypeArg(self.name, self.type.specialize(values), self.pos, self.cname)


class ToPyStructUtilityCode(object):
    # Emits the C helper converting a struct instance of `type` to a Python
    # dict (one entry per struct member). Equality/hash are based on the
    # generated header so duplicate helpers are deduplicated by the code writer.

    requires = None

    def __init__(self, type, forward_decl, env):
        self.type = type
        self.header = "static PyObject* %s(%s)" % (type.to_py_function,
                                                   type.declaration_code('s'))
        self.forward_decl = forward_decl
        self.env = env

    def __eq__(self, other):
        return isinstance(other, ToPyStructUtilityCode) and self.header == other.header

    def __hash__(self):
        return hash(self.header)

    def get_tree(self):
        pass

    def put_code(self, output):
        code = output['utility_code_def']
        proto = output['utility_code_proto']

        code.putln("%s {" % self.header)
        code.putln("PyObject* res;")
        code.putln("PyObject* member;")
        code.putln("res = PyDict_New(); if (res == NULL) return NULL;")
        for member in self.type.scope.var_entries:
            nameconst_cname = code.get_py_string_const(member.name, identifier=True)
            code.putln("%s; if (member == NULL) goto bad;" % (
                member.type.to_py_call_code('s.%s' % member.cname, 'member', member.type)))
            code.putln("if (PyDict_SetItem(res, %s, member) < 0) goto bad;" % nameconst_cname)
            code.putln("Py_DECREF(member);")
        code.putln("return res;")
        code.putln("bad:")
        code.putln("Py_XDECREF(member);")
        code.putln("Py_DECREF(res);")
        code.putln("return NULL;")
        code.putln("}")

        # This is a bit of a hack, we need a forward declaration
        # due to the way things are ordered in the module...
        if self.forward_decl:
            proto.putln(self.type.empty_declaration_code() + ';')
        proto.putln(self.header + ";")

    def inject_tree_and_scope_into(self, module_node):
        pass


class CStructOrUnionType(CType):
    #  name          string
    #  cname         string
    #  kind          string              "struct" or "union"
    #  scope         StructOrUnionScope, or None if incomplete
    #  typedef_flag  boolean
    #  packed        boolean
    #  entry         Entry

    is_struct_or_union = 1
    has_attributes = 1
    exception_check = True

    def __init__(self, name, kind, scope, typedef_flag, cname, packed=False):
        self.name = name
        self.cname = cname
        self.kind = kind
        self.scope = scope
        self.typedef_flag = typedef_flag
        self.is_struct = kind == 'struct'
        self.to_py_function = "%s_to_py_%s" % (
            Naming.convert_func_prefix, self.cname)
        self.from_py_function = "%s_from_py_%s" % (
            Naming.convert_func_prefix, self.cname)
        self.exception_check = True
        self._convert_to_py_code = None
        self._convert_from_py_code = None
        self.packed = packed

    def create_to_py_utility_code(self, env):
        # Lazily builds (and caches tri-state: None=unknown, False=impossible,
        # object=ready) the struct -> Python dict conversion helper.
        if env.outer_scope is None:
            return False

        if self._convert_to_py_code is False:
            return None  # tri-state-ish

        if self._convert_to_py_code is None:
            is_union = not self.is_struct
            unsafe_union_types = set()
            safe_union_types = set()
            for member in self.scope.var_entries:
                member_type = member.type
                if not member_type.create_to_py_utility_code(env):
                    self.to_py_function = None
                    self._convert_to_py_code = False
                    return False
                if is_union:
                    if member_type.is_ptr or member_type.is_cpp_class:
                        unsafe_union_types.add(member_type)
                    else:
                        safe_union_types.add(member_type)

            if unsafe_union_types and (safe_union_types or len(unsafe_union_types) > 1):
                # unsafe mix of safe and unsafe to convert types
                # NOTE(review): this resets the *from_py* conversion state
                # inside the *to_py* path — looks like a copy/paste slip
                # (compare the member loop above, which resets
                # to_py_function/_convert_to_py_code); confirm intent before
                # changing, as callers may rely on the current behaviour.
                self.from_py_function = None
                self._convert_from_py_code = False
                return False

            forward_decl = self.entry.visibility != 'extern' and not self.typedef_flag
            self._convert_to_py_code = ToPyStructUtilityCode(self, forward_decl, env)

        env.use_utility_code(self._convert_to_py_code)
        return True

    def create_from_py_utility_code(self, env):
        if env.outer_scope is None:
return False if self._convert_from_py_code is False: return None # tri-state-ish if self._convert_from_py_code is None: for member in self.scope.var_entries: if not member.type.create_from_py_utility_code(env): self.from_py_function = None self._convert_from_py_code = False return False context = dict( struct_name=self.name, var_entries=self.scope.var_entries, funcname=self.from_py_function, ) from .UtilityCode import CythonUtilityCode self._convert_from_py_code = CythonUtilityCode.load( "FromPyStructUtility" if self.is_struct else "FromPyUnionUtility", "CConvert.pyx", outer_module_scope=env.global_scope(), # need access to types declared in module context=context) env.use_utility_code(self._convert_from_py_code) return True def __repr__(self): return "<CStructOrUnionType %s %s%s>" % ( self.name, self.cname, ("", " typedef")[self.typedef_flag]) def declaration_code(self, entity_code, for_display=0, dll_linkage=None, pyrex=0): if pyrex or for_display: base_code = self.name else: if self.typedef_flag: base_code = self.cname else: base_code = "%s %s" % (self.kind, self.cname) base_code = public_decl(base_code, dll_linkage) return self.base_declaration_code(base_code, entity_code) def __eq__(self, other): try: return (isinstance(other, CStructOrUnionType) and self.name == other.name) except AttributeError: return False def __lt__(self, other): try: return self.name < other.name except AttributeError: # this is arbitrary, but it makes sure we always have # *some* kind of order return False def __hash__(self): return hash(self.cname) ^ hash(self.kind) def is_complete(self): return self.scope is not None def attributes_known(self): return self.is_complete() def can_be_complex(self): # Does the struct consist of exactly two identical floats? 
fields = self.scope.var_entries if len(fields) != 2: return False a, b = fields return (a.type.is_float and b.type.is_float and a.type.empty_declaration_code() == b.type.empty_declaration_code()) def struct_nesting_depth(self): child_depths = [x.type.struct_nesting_depth() for x in self.scope.var_entries] return max(child_depths) + 1 def cast_code(self, expr_code): if self.is_struct: return expr_code return super(CStructOrUnionType, self).cast_code(expr_code) cpp_string_conversions = ("std::string",) builtin_cpp_conversions = ("std::pair", "std::vector", "std::list", "std::set", "std::unordered_set", "std::map", "std::unordered_map") class CppClassType(CType): # name string # cname string # scope CppClassScope # templates [string] or None is_cpp_class = 1 has_attributes = 1 exception_check = True namespace = None # For struct-like declaration. kind = "struct" packed = False typedef_flag = False subtypes = ['templates'] def __init__(self, name, scope, cname, base_classes, templates=None, template_type=None): self.name = name self.cname = cname self.scope = scope self.base_classes = base_classes self.operators = [] self.templates = templates self.template_type = template_type self.specializations = {} self.is_cpp_string = cname in cpp_string_conversions def use_conversion_utility(self, from_or_to): pass def maybe_unordered(self): if 'unordered' in self.cname: return 'unordered_' else: return '' def create_from_py_utility_code(self, env): if self.from_py_function is not None: return True if self.cname in builtin_cpp_conversions or self.cname in cpp_string_conversions: X = "XYZABC" tags = [] declarations = ["cdef extern from *:"] for ix, T in enumerate(self.templates or []): if T.is_pyobject or not T.create_from_py_utility_code(env): return False tags.append(T.specialization_name()) if T.exception_value is not None: # This is a hack due to the except value clause # requiring a const (literal) value of the right # (visible) type. 
def guess_type(value): if not T.is_typedef and (T.is_numeric or T.is_ptr): return T try: int(value) return c_longlong_type except ValueError: pass try: float(value) return c_double_type except ValueError: pass return T except_type = guess_type(T.exception_value) except_clause = "%s " % T.exception_value if T.exception_check: except_clause = "? %s" % except_clause declarations.append( " ctypedef %s %s '%s'" % ( except_type.declaration_code("", for_display=True), X[ix], T.empty_declaration_code())) else: except_clause = "*" declarations.append( " ctypedef struct %s '%s':\n pass" % ( X[ix], T.empty_declaration_code())) declarations.append( " cdef %s %s_from_py '%s' (object) except %s" % ( X[ix], X[ix], T.from_py_function, except_clause)) if self.cname in cpp_string_conversions: cls = 'string' tags = type_identifier(self), else: cls = self.cname[5:] cname = '__pyx_convert_%s_from_py_%s' % (cls, '__and_'.join(tags)) context = { 'template_type_declarations': '\n'.join(declarations), 'cname': cname, 'maybe_unordered': self.maybe_unordered(), 'type': self.cname, } from .UtilityCode import CythonUtilityCode env.use_utility_code(CythonUtilityCode.load( cls.replace('unordered_', '') + ".from_py", "CppConvert.pyx", context=context)) self.from_py_function = cname return True def create_to_py_utility_code(self, env): if self.to_py_function is not None: return True if self.cname in builtin_cpp_conversions or self.cname in cpp_string_conversions: X = "XYZABC" tags = [] declarations = ["cdef extern from *:"] for ix, T in enumerate(self.templates or []): if not T.create_to_py_utility_code(env): return False tags.append(T.specialization_name()) declarations.append( " ctypedef struct %s '%s':\n pass" % ( X[ix], T.empty_declaration_code())) declarations.append( " cdef object %s_to_py '%s' (%s)" % ( X[ix], T.to_py_function, X[ix])) if self.cname in cpp_string_conversions: cls = 'string' prefix = 'PyObject_' # gets specialised by explicit type casts in CoerceToPyTypeNode tags = 
type_identifier(self), else: cls = self.cname[5:] prefix = '' cname = "__pyx_convert_%s%s_to_py_%s" % (prefix, cls, "____".join(tags)) context = { 'template_type_declarations': '\n'.join(declarations), 'cname': cname, 'maybe_unordered': self.maybe_unordered(), 'type': self.cname, } from .UtilityCode import CythonUtilityCode env.use_utility_code(CythonUtilityCode.load( cls.replace('unordered_', '') + ".to_py", "CppConvert.pyx", context=context)) self.to_py_function = cname return True def is_template_type(self): return self.templates is not None and self.template_type is None def get_fused_types(self, result=None, seen=None): if result is None: result = [] seen = set() if self.namespace: self.namespace.get_fused_types(result, seen) if self.templates: for T in self.templates: T.get_fused_types(result, seen) return result def specialize_here(self, pos, template_values=None): if not self.is_template_type(): error(pos, "'%s' type is not a template" % self) return error_type if len(self.templates) != len(template_values): error(pos, "%s templated type receives %d arguments, got %d" % (self.name, len(self.templates), len(template_values))) return error_type has_object_template_param = False for value in template_values: if value.is_pyobject: has_object_template_param = True error(pos, "Python object type '%s' cannot be used as a template argument" % value) if has_object_template_param: return error_type return self.specialize(dict(zip(self.templates, template_values))) def specialize(self, values): if not self.templates and not self.namespace: return self if self.templates is None: self.templates = [] key = tuple(values.items()) if key in self.specializations: return self.specializations[key] template_values = [t.specialize(values) for t in self.templates] specialized = self.specializations[key] = \ CppClassType(self.name, None, self.cname, [], template_values, template_type=self) # Need to do these *after* self.specializations[key] is set # to avoid infinite recursion on 
circular references. specialized.base_classes = [b.specialize(values) for b in self.base_classes] if self.namespace is not None: specialized.namespace = self.namespace.specialize(values) specialized.scope = self.scope.specialize(values, specialized) return specialized def deduce_template_params(self, actual): if self == actual: return {} # TODO(robertwb): Actual type equality. elif self.empty_declaration_code() == actual.template_type.empty_declaration_code(): return reduce( merge_template_deductions, [formal_param.deduce_template_params(actual_param) for (formal_param, actual_param) in zip(self.templates, actual.templates)], {}) else: return None def declaration_code(self, entity_code, for_display = 0, dll_linkage = None, pyrex = 0): if self.templates: template_strings = [param.declaration_code('', for_display, None, pyrex) for param in self.templates] if for_display: brackets = "[%s]" else: brackets = "<%s> " templates = brackets % ",".join(template_strings) else: templates = "" if pyrex or for_display: base_code = "%s%s" % (self.name, templates) else: base_code = "%s%s" % (self.cname, templates) if self.namespace is not None: base_code = "%s::%s" % (self.namespace.empty_declaration_code(), base_code) base_code = public_decl(base_code, dll_linkage) return self.base_declaration_code(base_code, entity_code) def is_subclass(self, other_type): if self.same_as_resolved_type(other_type): return 1 for base_class in self.base_classes: if base_class.is_subclass(other_type): return 1 return 0 def same_as_resolved_type(self, other_type): if other_type.is_cpp_class: if self == other_type: return 1 elif (self.cname == other_type.cname and self.template_type and other_type.template_type): if self.templates == other_type.templates: return 1 for t1, t2 in zip(self.templates, other_type.templates): if not t1.same_as_resolved_type(t2): return 0 return 1 return 0 def assignable_from_resolved_type(self, other_type): # TODO: handle operator=(...) here? 
if other_type is error_type: return True return other_type.is_cpp_class and other_type.is_subclass(self) def attributes_known(self): return self.scope is not None def find_cpp_operation_type(self, operator, operand_type=None): operands = [self] if operand_type is not None: operands.append(operand_type) # pos == None => no errors operator_entry = self.scope.lookup_operator_for_types(None, operator, operands) if not operator_entry: return None func_type = operator_entry.type if func_type.is_ptr: func_type = func_type.base_type return func_type.return_type def check_nullary_constructor(self, pos, msg="stack allocated"): constructor = self.scope.lookup(u'<init>') if constructor is not None and best_match([], constructor.all_alternatives()) is None: error(pos, "C++ class must have a nullary constructor to be %s" % msg) class TemplatePlaceholderType(CType): def __init__(self, name): self.name = name def declaration_code(self, entity_code, for_display = 0, dll_linkage = None, pyrex = 0): if entity_code: return self.name + " " + entity_code else: return self.name def specialize(self, values): if self in values: return values[self] else: return self def deduce_template_params(self, actual): return {self: actual} def same_as_resolved_type(self, other_type): if isinstance(other_type, TemplatePlaceholderType): return self.name == other_type.name else: return 0 def __hash__(self): return hash(self.name) def __cmp__(self, other): if isinstance(other, TemplatePlaceholderType): return cmp(self.name, other.name) else: return cmp(type(self), type(other)) def __eq__(self, other): if isinstance(other, TemplatePlaceholderType): return self.name == other.name else: return False class CEnumType(CType): # name string # cname string or None # typedef_flag boolean is_enum = 1 signed = 1 rank = -1 # Ranks below any integer type def __init__(self, name, cname, typedef_flag): self.name = name self.cname = cname self.values = [] self.typedef_flag = typedef_flag self.default_value = "(%s) 0" % 
self.empty_declaration_code() def __str__(self): return self.name def __repr__(self): return "<CEnumType %s %s%s>" % (self.name, self.cname, ("", " typedef")[self.typedef_flag]) def declaration_code(self, entity_code, for_display = 0, dll_linkage = None, pyrex = 0): if pyrex or for_display: base_code = self.name else: if self.typedef_flag: base_code = self.cname else: base_code = "enum %s" % self.cname base_code = public_decl(base_code, dll_linkage) return self.base_declaration_code(base_code, entity_code) def create_to_py_utility_code(self, env): self.to_py_function = "__Pyx_PyInt_From_" + self.specialization_name() env.use_utility_code(TempitaUtilityCode.load_cached( "CIntToPy", "TypeConversion.c", context={"TYPE": self.empty_declaration_code(), "TO_PY_FUNCTION": self.to_py_function})) return True def create_from_py_utility_code(self, env): self.from_py_function = "__Pyx_PyInt_As_" + self.specialization_name() env.use_utility_code(TempitaUtilityCode.load_cached( "CIntFromPy", "TypeConversion.c", context={"TYPE": self.empty_declaration_code(), "FROM_PY_FUNCTION": self.from_py_function})) return True def from_py_call_code(self, source_code, result_code, error_pos, code, from_py_function=None, error_condition=None): rhs = "%s(%s)" % ( from_py_function or self.from_py_function, source_code) return '%s = %s;%s' % ( result_code, typecast(self, c_long_type, rhs), ' %s' % code.error_goto_if(error_condition or self.error_condition(result_code), error_pos)) class CTupleType(CType): # components [PyrexType] is_ctuple = True def __init__(self, cname, components): self.cname = cname self.components = components self.size = len(components) self.to_py_function = "%s_to_py_%s" % (Naming.convert_func_prefix, self.cname) self.from_py_function = "%s_from_py_%s" % (Naming.convert_func_prefix, self.cname) self.exception_check = True self._convert_to_py_code = None self._convert_from_py_code = None def __str__(self): return "(%s)" % ", ".join(str(c) for c in self.components) def 
declaration_code(self, entity_code, for_display = 0, dll_linkage = None, pyrex = 0): if pyrex or for_display: return str(self) else: return self.base_declaration_code(self.cname, entity_code) def can_coerce_to_pyobject(self, env): for component in self.components: if not component.can_coerce_to_pyobject(env): return False return True def create_to_py_utility_code(self, env): if self._convert_to_py_code is False: return None # tri-state-ish if self._convert_to_py_code is None: for component in self.components: if not component.create_to_py_utility_code(env): self.to_py_function = None self._convert_to_py_code = False return False context = dict( struct_type_decl=self.empty_declaration_code(), components=self.components, funcname=self.to_py_function, size=len(self.components) ) self._convert_to_py_code = TempitaUtilityCode.load( "ToPyCTupleUtility", "TypeConversion.c", context=context) env.use_utility_code(self._convert_to_py_code) return True def create_from_py_utility_code(self, env): if self._convert_from_py_code is False: return None # tri-state-ish if self._convert_from_py_code is None: for component in self.components: if not component.create_from_py_utility_code(env): self.from_py_function = None self._convert_from_py_code = False return False context = dict( struct_type_decl=self.empty_declaration_code(), components=self.components, funcname=self.from_py_function, size=len(self.components) ) self._convert_from_py_code = TempitaUtilityCode.load( "FromPyCTupleUtility", "TypeConversion.c", context=context) env.use_utility_code(self._convert_from_py_code) return True def c_tuple_type(components): components = tuple(components) cname = Naming.ctuple_type_prefix + type_list_identifier(components) tuple_type = CTupleType(cname, components) return tuple_type class UnspecifiedType(PyrexType): # Used as a placeholder until the type can be determined. 
is_unspecified = 1 def declaration_code(self, entity_code, for_display = 0, dll_linkage = None, pyrex = 0): return "<unspecified>" def same_as_resolved_type(self, other_type): return False class ErrorType(PyrexType): # Used to prevent propagation of error messages. is_error = 1 exception_value = "0" exception_check = 0 to_py_function = "dummy" from_py_function = "dummy" def create_to_py_utility_code(self, env): return True def create_from_py_utility_code(self, env): return True def declaration_code(self, entity_code, for_display = 0, dll_linkage = None, pyrex = 0): return "<error>" def same_as_resolved_type(self, other_type): return 1 def error_condition(self, result_code): return "dummy" rank_to_type_name = ( "char", # 0 "short", # 1 "int", # 2 "long", # 3 "PY_LONG_LONG", # 4 "float", # 5 "double", # 6 "long double", # 7 ) _rank_to_type_name = list(rank_to_type_name) RANK_INT = _rank_to_type_name.index('int') RANK_LONG = _rank_to_type_name.index('long') RANK_FLOAT = _rank_to_type_name.index('float') UNSIGNED = 0 SIGNED = 2 error_type = ErrorType() unspecified_type = UnspecifiedType() py_object_type = PyObjectType() c_void_type = CVoidType() c_uchar_type = CIntType(0, UNSIGNED) c_ushort_type = CIntType(1, UNSIGNED) c_uint_type = CIntType(2, UNSIGNED) c_ulong_type = CIntType(3, UNSIGNED) c_ulonglong_type = CIntType(4, UNSIGNED) c_char_type = CIntType(0) c_short_type = CIntType(1) c_int_type = CIntType(2) c_long_type = CIntType(3) c_longlong_type = CIntType(4) c_schar_type = CIntType(0, SIGNED) c_sshort_type = CIntType(1, SIGNED) c_sint_type = CIntType(2, SIGNED) c_slong_type = CIntType(3, SIGNED) c_slonglong_type = CIntType(4, SIGNED) c_float_type = CFloatType(5, math_h_modifier='f') c_double_type = CFloatType(6) c_longdouble_type = CFloatType(7, math_h_modifier='l') c_float_complex_type = CComplexType(c_float_type) c_double_complex_type = CComplexType(c_double_type) c_longdouble_complex_type = CComplexType(c_longdouble_type) c_anon_enum_type = CAnonEnumType(-1) 
c_returncode_type = CReturnCodeType(RANK_INT) c_bint_type = CBIntType(RANK_INT) c_py_unicode_type = CPyUnicodeIntType(RANK_INT-0.5, UNSIGNED) c_py_ucs4_type = CPyUCS4IntType(RANK_LONG-0.5, UNSIGNED) c_py_hash_t_type = CPyHashTType(RANK_LONG+0.5, SIGNED) c_py_ssize_t_type = CPySSizeTType(RANK_LONG+0.5, SIGNED) c_ssize_t_type = CSSizeTType(RANK_LONG+0.5, SIGNED) c_size_t_type = CSizeTType(RANK_LONG+0.5, UNSIGNED) c_ptrdiff_t_type = CPtrdiffTType(RANK_LONG+0.75, SIGNED) c_null_ptr_type = CNullPtrType(c_void_type) c_void_ptr_type = CPtrType(c_void_type) c_void_ptr_ptr_type = CPtrType(c_void_ptr_type) c_char_ptr_type = CPtrType(c_char_type) c_uchar_ptr_type = CPtrType(c_uchar_type) c_char_ptr_ptr_type = CPtrType(c_char_ptr_type) c_int_ptr_type = CPtrType(c_int_type) c_py_unicode_ptr_type = CPtrType(c_py_unicode_type) c_py_ssize_t_ptr_type = CPtrType(c_py_ssize_t_type) c_ssize_t_ptr_type = CPtrType(c_ssize_t_type) c_size_t_ptr_type = CPtrType(c_size_t_type) # GIL state c_gilstate_type = CEnumType("PyGILState_STATE", "PyGILState_STATE", True) c_threadstate_type = CStructOrUnionType("PyThreadState", "struct", None, 1, "PyThreadState") c_threadstate_ptr_type = CPtrType(c_threadstate_type) # the Py_buffer type is defined in Builtin.py c_py_buffer_type = CStructOrUnionType("Py_buffer", "struct", None, 1, "Py_buffer") c_py_buffer_ptr_type = CPtrType(c_py_buffer_type) # Not sure whether the unsigned versions and 'long long' should be in there # long long requires C99 and might be slow, and would always get preferred # when specialization happens through calling and not indexing cy_integral_type = FusedType([c_short_type, c_int_type, c_long_type], name="integral") # Omitting long double as it might be slow cy_floating_type = FusedType([c_float_type, c_double_type], name="floating") cy_numeric_type = FusedType([c_short_type, c_int_type, c_long_type, c_float_type, c_double_type, c_float_complex_type, c_double_complex_type], name="numeric") # buffer-related structs 
c_buf_diminfo_type = CStructOrUnionType("__Pyx_Buf_DimInfo", "struct", None, 1, "__Pyx_Buf_DimInfo") c_pyx_buffer_type = CStructOrUnionType("__Pyx_Buffer", "struct", None, 1, "__Pyx_Buffer") c_pyx_buffer_ptr_type = CPtrType(c_pyx_buffer_type) c_pyx_buffer_nd_type = CStructOrUnionType("__Pyx_LocalBuf_ND", "struct", None, 1, "__Pyx_LocalBuf_ND") cython_memoryview_type = CStructOrUnionType("__pyx_memoryview_obj", "struct", None, 0, "__pyx_memoryview_obj") memoryviews
codeparrot/github-code-clean
from cStringIO import StringIO from datetime import date, datetime, timedelta import json import time from xml.sax.saxutils import escape import mock from nose.tools import eq_, ok_ from nose.plugins.attrib import attr from django.conf import settings from django.core.exceptions import ValidationError from django.test.utils import override_settings from constance import config from waffle.models import Switch from kuma.core.exceptions import ProgrammingError from kuma.core.tests import override_constance_settings, KumaTestCase from kuma.users.tests import UserTestCase, user from . import (document, revision, doc_rev, normalize_html, create_template_test_users, create_topical_parents_docs) from .. import tasks from ..constants import REDIRECT_CONTENT from ..exceptions import (PageMoveError, DocumentRenderedContentNotAvailable, DocumentRenderingInProgress) from ..jobs import DocumentZoneStackJob from ..models import (Document, Revision, RevisionIP, DocumentZone, TaggedDocument) def _objects_eq(manager, list_): """Assert that the objects contained by `manager` are those in `list_`.""" eq_(set(manager.all()), set(list_)) def redirect_rev(title, redirect_to): return revision( document=document(title=title, save=True), content='REDIRECT [[%s]]' % redirect_to, is_approved=True, save=True) class DocumentTests(UserTestCase): """Tests for the Document model""" @attr('bug875349') def test_json_data(self): # Set up a doc with tags doc, rev = doc_rev('Sample document') doc.save() expected_tags = sorted(['foo', 'bar', 'baz']) expected_review_tags = sorted(['tech', 'editorial']) doc.tags.set(*expected_tags) doc.current_revision.review_tags.set(*expected_review_tags) # Create a translation with some tags de_doc = document(parent=doc, locale='de', save=True) revision(document=de_doc, save=True) expected_l10n_tags = ['inprogress'] de_doc.current_revision.localization_tags.set(*expected_l10n_tags) de_doc.tags.set(*expected_tags) 
de_doc.current_revision.review_tags.set(*expected_review_tags) # Ensure the doc's json field is empty at first eq_(None, doc.json) # Get JSON data for the doc, and ensure the doc's json field is now # properly populated. data = doc.get_json_data() eq_(json.dumps(data), doc.json) # Load up another copy of the doc from the DB, and check json saved_doc = Document.objects.get(pk=doc.pk) eq_(json.dumps(data), saved_doc.json) # Check the fields stored in JSON of the English doc # (the fields are created in build_json_data in models.py) eq_(doc.title, data['title']) eq_(doc.title, data['label']) eq_(doc.get_absolute_url(), data['url']) eq_(doc.id, data['id']) eq_(doc.slug, data['slug']) result_tags = sorted([str(x) for x in data['tags']]) eq_(expected_tags, result_tags) result_review_tags = sorted([str(x) for x in data['review_tags']]) eq_(expected_review_tags, result_review_tags) eq_(doc.locale, data['locale']) eq_(doc.current_revision.summary, data['summary']) eq_(doc.modified.isoformat(), data['modified']) eq_(doc.current_revision.created.isoformat(), data['last_edit']) # Check fields of translated doc ok_('translations' in data) eq_(de_doc.locale, data['translations'][0]['locale']) result_l10n_tags = sorted([str(x) for x in data['translations'][0]['localization_tags']]) eq_(expected_l10n_tags, result_l10n_tags) result_tags = sorted([str(x) for x in data['translations'][0]['tags']]) eq_(expected_tags, result_tags) result_review_tags = sorted([str(x) for x in data['translations'][0]['review_tags']]) eq_(expected_review_tags, result_review_tags) eq_(de_doc.current_revision.summary, data['translations'][0]['summary']) eq_(de_doc.title, data['translations'][0]['title']) def test_document_is_template(self): """is_template stays in sync with the title""" d = document(title='test') d.save() assert not d.is_template d.slug = 'Template:test' d.save() assert d.is_template d.slug = 'Back-to-document' d.save() assert not d.is_template def test_error_on_delete(self): """Ensure 
error-on-delete is only thrown when waffle switch active""" switch = Switch.objects.create(name='wiki_error_on_delete') for active in (True, False): switch.active = active switch.save() d = document() d.save() try: d.delete() if active: self.fail('Exception on delete when active') except Exception: if not active: self.fail('No exception on delete when not active') def test_delete_tagged_document(self): """Make sure deleting a tagged doc deletes its tag relationships.""" # TODO: Move to wherever the tests for TaggableMixin are. # This works because Django's delete() sees the `tags` many-to-many # field (actually a manager) and follows the reference. d = document() d.save() d.tags.add('grape') eq_(1, TaggedDocument.objects.count()) d.delete() eq_(0, TaggedDocument.objects.count()) def _test_m2m_inheritance(self, enum_class, attr, direct_attr): """Test a descriptor's handling of parent delegation.""" parent = document() child = document(parent=parent, title='Some Other Title') e1 = enum_class(item_id=1) parent.save() # Make sure child sees stuff set on parent: getattr(parent, attr).add(e1) _objects_eq(getattr(child, attr), [e1]) # Make sure parent sees stuff set on child: child.save() e2 = enum_class(item_id=2) getattr(child, attr).add(e2) _objects_eq(getattr(parent, attr), [e1, e2]) # Assert the data are attached to the parent, not the child: _objects_eq(getattr(parent, direct_attr), [e1, e2]) _objects_eq(getattr(child, direct_attr), []) def test_category_inheritance(self): """A document's categories must always be those of its parent.""" some_category = Document.CATEGORIES[1][0] other_category = Document.CATEGORIES[0][0] # Notice if somebody ever changes the default on the category field, # which would invalidate our test: assert some_category != document().category parent = document(category=some_category) parent.save() child = document(parent=parent, locale='de') child.save() # Make sure child sees stuff set on parent: eq_(some_category, child.category) # Child'd 
category should revert to parent's on save: child.category = other_category child.save() eq_(some_category, child.category) # Changing the parent category should change the child's: parent.category = other_category parent.save() eq_(other_category, parent.translations.get(locale=child.locale).category) def _test_int_sets_and_descriptors(self, enum_class, attr): """Test our lightweight int sets & descriptors' getting and setting.""" d = document() d.save() _objects_eq(getattr(d, attr), []) i1 = enum_class(item_id=1) getattr(d, attr).add(i1) _objects_eq(getattr(d, attr), [i1]) i2 = enum_class(item_id=2) getattr(d, attr).add(i2) _objects_eq(getattr(d, attr), [i1, i2]) def test_only_localizable_allowed_children(self): """You can't have children for a non-localizable document.""" # Make English rev: en_doc = document(is_localizable=False) en_doc.save() # Make Deutsch translation: de_doc = document(parent=en_doc, locale='de') self.assertRaises(ValidationError, de_doc.save) def test_cannot_make_non_localizable_if_children(self): """You can't make a document non-localizable if it has children.""" # Make English rev: en_doc = document(is_localizable=True) en_doc.save() # Make Deutsch translation: de_doc = document(parent=en_doc, locale='de') de_doc.save() en_doc.is_localizable = False self.assertRaises(ValidationError, en_doc.save) def test_non_english_implies_nonlocalizable(self): d = document(is_localizable=True, locale='de') d.save() assert not d.is_localizable def test_validate_category_on_save(self): """Make sure invalid categories can't be saved. Invalid categories cause errors when viewing documents. 
""" d = document(category=9999) self.assertRaises(ValidationError, d.save) def test_new_doc_does_not_update_categories(self): """Make sure that creating a new document doesn't change the category of all the other documents.""" d1 = document(category=10) d1.save() assert d1.pk d2 = document(category=00) assert not d2.pk d2._clean_category() d1prime = Document.objects.get(pk=d1.pk) eq_(10, d1prime.category) @attr('doc_translations') def test_other_translations(self): """ parent doc should list all docs for which it is parent A child doc should list all its parent's docs, excluding itself, and including its parent """ parent = document(locale=settings.WIKI_DEFAULT_LANGUAGE, title='test', save=True) enfant = document(locale='fr', title='le test', parent=parent, save=True) bambino = document(locale='es', title='el test', parent=parent, save=True) children = Document.objects.filter(parent=parent).order_by('locale').values_list('pk', flat=True) eq_(list(children), list(parent.other_translations.values_list('pk', flat=True))) enfant_translation_pks = enfant.other_translations.values_list('pk', flat=True) ok_(parent.pk in enfant_translation_pks) ok_(bambino.pk in enfant_translation_pks) eq_(False, enfant.pk in enfant_translation_pks) def test_topical_parents(self): d1, d2 = create_topical_parents_docs() ok_(d2.parents == [d1]) d3 = document(title='Smell accessibility') d3.parent_topic = d2 d3.save() ok_(d3.parents == [d1, d2]) @attr('redirect') def test_redirect_url_allows_site_url(self): href = "%s/en-US/Mozilla" % settings.SITE_URL title = "Mozilla" html = REDIRECT_CONTENT % {'href': href, 'title': title} d = document(is_redirect=True, html=html) eq_(href, d.redirect_url()) @attr('redirect') def test_redirect_url_allows_domain_relative_url(self): href = "/en-US/Mozilla" title = "Mozilla" html = REDIRECT_CONTENT % {'href': href, 'title': title} d = document(is_redirect=True, html=html) eq_(href, d.redirect_url()) @attr('redirect') def 
test_redirect_url_rejects_protocol_relative_url(self): href = "//evilsite.com" title = "Mozilla" html = REDIRECT_CONTENT % {'href': href, 'title': title} d = document(is_redirect=True, html=html) eq_(None, d.redirect_url()) @attr('bug1082034') @attr('redirect') def test_redirect_url_works_for_home_path(self): href = "/" title = "Mozilla" html = REDIRECT_CONTENT % {'href': href, 'title': title} d = document(is_redirect=True, html=html) eq_(href, d.redirect_url()) class PermissionTests(KumaTestCase): def setUp(self): """Set up the permissions, groups, and users needed for the tests""" super(PermissionTests, self).setUp() (self.perms, self.groups, self.users, self.superuser) = ( create_template_test_users()) def test_template_permissions(self): msg = ('should not', 'should') for is_add in (True, False): slug_trials = ( ('test_for_%s', ( (True, self.superuser), (True, self.users['none']), (True, self.users['all']), (True, self.users['add']), (True, self.users['change']), )), ('Template:test_for_%s', ( (True, self.superuser), (False, self.users['none']), (True, self.users['all']), (is_add, self.users['add']), (not is_add, self.users['change']), )) ) for slug_tmpl, trials in slug_trials: for expected, trial_user in trials: slug = slug_tmpl % trial_user.username if is_add: eq_(expected, Document.objects.allows_add_by(trial_user, slug), 'User %s %s able to create %s' % ( trial_user, msg[expected], slug)) else: doc = document(slug=slug, title=slug) eq_(expected, doc.allows_revision_by(trial_user), 'User %s %s able to revise %s' % ( trial_user, msg[expected], slug)) eq_(expected, doc.allows_editing_by(trial_user), 'User %s %s able to edit %s' % ( trial_user, msg[expected], slug)) class DocumentTestsWithFixture(UserTestCase): """Document tests which need the users fixture""" def test_redirect_document_non_redirect(self): """Assert redirect_document on non-redirects returns None.""" eq_(None, document().redirect_document()) def test_redirect_document_external_redirect(self): 
"""Assert redirects to external pages return None.""" eq_(None, revision(content='REDIRECT [http://example.com]', is_approved=True, save=True).document.redirect_document()) def test_redirect_document_nonexistent(self): """Assert redirects to non-existent pages return None.""" eq_(None, revision(content='REDIRECT [[kersmoo]]', is_approved=True, save=True).document.redirect_document()) def test_default_topic_parents_for_translation(self): """A translated document with no topic parent should by default use the translation of its translation parent's topic parent.""" orig_pt = document(locale=settings.WIKI_DEFAULT_LANGUAGE, title='test section', save=True) orig = document(locale=settings.WIKI_DEFAULT_LANGUAGE, title='test', parent_topic=orig_pt, save=True) trans_pt = document(locale='fr', title='le test section', parent=orig_pt, save=True) trans = document(locale='fr', title='le test', parent=orig, save=True) ok_(trans.parent_topic) eq_(trans.parent_topic.pk, trans_pt.pk) def test_default_topic_with_stub_creation(self): orig_pt = document(locale=settings.WIKI_DEFAULT_LANGUAGE, title='test section', save=True) orig = document(locale=settings.WIKI_DEFAULT_LANGUAGE, title='test', parent_topic=orig_pt, save=True) trans = document(locale='fr', title='le test', parent=orig, save=True) # There should be a translation topic parent trans_pt = trans.parent_topic ok_(trans_pt) # The locale of the topic parent should match the new translation eq_(trans.locale, trans_pt.locale) # But, the translation's topic parent must *not* be the translation # parent's topic parent ok_(trans_pt.pk != orig_pt.pk) # Still, since the topic parent is an autocreated stub, it shares its # title with the original. 
eq_(trans_pt.title, orig_pt.title) # Oh, and it should point to the original parent topic as its # translation parent eq_(trans_pt.parent, orig_pt) def test_default_topic_with_path_gaps(self): # Build a path of docs in en-US orig_path = ('MDN', 'web', 'CSS', 'properties', 'banana', 'leaf') docs, doc = [], None for title in orig_path: doc = document(locale=settings.WIKI_DEFAULT_LANGUAGE, title=title, parent_topic=doc, save=True) revision(document=doc, title=title, save=True) docs.append(doc) # Translate, but leave gaps for stubs trans_0 = document(locale='fr', title='le MDN', parent=docs[0], save=True) revision(document=trans_0, title='le MDN', tags="LeTest!", save=True) trans_2 = document(locale='fr', title='le CSS', parent=docs[2], save=True) revision(document=trans_2, title='le CSS', tags="LeTest!", save=True) trans_5 = document(locale='fr', title='le leaf', parent=docs[5], save=True) revision(document=trans_5, title='le ;eaf', tags="LeTest!", save=True) # Make sure trans_2 got the right parent eq_(trans_2.parents[0].pk, trans_0.pk) # Ensure the translated parents and stubs appear properly in the path parents_5 = trans_5.parents eq_(parents_5[0].pk, trans_0.pk) eq_(parents_5[1].locale, trans_5.locale) eq_(parents_5[1].title, docs[1].title) ok_(parents_5[1].current_revision.pk != docs[1].current_revision.pk) eq_(parents_5[2].pk, trans_2.pk) eq_(parents_5[3].locale, trans_5.locale) eq_(parents_5[3].title, docs[3].title) ok_(parents_5[3].current_revision.pk != docs[3].current_revision.pk) eq_(parents_5[4].locale, trans_5.locale) eq_(parents_5[4].title, docs[4].title) ok_(parents_5[4].current_revision.pk != docs[4].current_revision.pk) for p in parents_5: ok_(p.current_revision) if p.pk not in (trans_0.pk, trans_2.pk, trans_5.pk): ok_('NeedsTranslation' in p.current_revision.tags) ok_('TopicStub' in p.current_revision.tags) ok_(p.current_revision.localization_in_progress) def test_repair_breadcrumbs(self): english_top = document(locale=settings.WIKI_DEFAULT_LANGUAGE, 
title='English top', save=True) english_mid = document(locale=settings.WIKI_DEFAULT_LANGUAGE, title='English mid', parent_topic=english_top, save=True) english_bottom = document(locale=settings.WIKI_DEFAULT_LANGUAGE, title='English bottom', parent_topic=english_mid, save=True) french_top = document(locale='fr', title='French top', parent=english_top, save=True) french_mid = document(locale='fr', parent=english_mid, parent_topic=english_mid, save=True) french_bottom = document(locale='fr', parent=english_bottom, parent_topic=english_bottom, save=True) french_bottom.repair_breadcrumbs() french_bottom_fixed = Document.objects.get(locale='fr', title=french_bottom.title) eq_(french_mid.id, french_bottom_fixed.parent_topic.id) eq_(french_top.id, french_bottom_fixed.parent_topic.parent_topic.id) def test_code_sample_extraction(self): """Make sure sample extraction works from the model. This is a smaller version of the test from test_content.py""" sample_html = u'<p class="foo">Hello world!</p>' sample_css = u'.foo p { color: red; }' sample_js = u'window.alert("Hi there!");' doc_src = u""" <p>This is a page. 
Deal with it.</p> <ul id="s2" class="code-sample"> <li><pre class="brush: html">%s</pre></li> <li><pre class="brush: css">%s</pre></li> <li><pre class="brush: js">%s</pre></li> </ul> <p>More content shows up here.</p> """ % (escape(sample_html), escape(sample_css), escape(sample_js)) d1, r1 = doc_rev(doc_src) result = d1.extract_code_sample('s2') eq_(sample_html.strip(), result['html'].strip()) eq_(sample_css.strip(), result['css'].strip()) eq_(sample_js.strip(), result['js'].strip()) class TaggedDocumentTests(UserTestCase): """Tests for tags in Documents and Revisions""" @attr('tags') def test_revision_tags(self): """Change tags on Document by creating Revisions""" d, _ = doc_rev('Sample document') eq_(0, Document.objects.filter(tags__name='foo').count()) eq_(0, Document.objects.filter(tags__name='alpha').count()) r = revision(document=d, content='Update to document', is_approved=True, tags="foo, bar, baz") r.save() eq_(1, Document.objects.filter(tags__name='foo').count()) eq_(0, Document.objects.filter(tags__name='alpha').count()) r = revision(document=d, content='Another update', is_approved=True, tags="alpha, beta, gamma") r.save() eq_(0, Document.objects.filter(tags__name='foo').count()) eq_(1, Document.objects.filter(tags__name='alpha').count()) class RevisionTests(UserTestCase): """Tests for the Revision model""" def test_approved_revision_updates_html(self): """Creating an approved revision updates document.html""" d, _ = doc_rev('Replace document html') assert 'Replace document html' in d.html, \ '"Replace document html" not in %s' % d.html # Creating another approved revision replaces it again r = revision(document=d, content='Replace html again', is_approved=True) r.save() assert 'Replace html again' in d.html, \ '"Replace html again" not in %s' % d.html def test_unapproved_revision_not_updates_html(self): """Creating an unapproved revision does not update document.html""" d, _ = doc_rev('Here to stay') assert 'Here to stay' in d.html, '"Here to stay" 
not in %s' % d.html # Creating another approved revision keeps initial content r = revision(document=d, content='Fail to replace html', is_approved=False) r.save() assert 'Here to stay' in d.html, '"Here to stay" not in %s' % d.html def test_revision_unicode(self): """Revision containing unicode characters is saved successfully.""" str = u'Firefox informa\xe7\xf5es \u30d8\u30eb' _, r = doc_rev(str) eq_(str, r.content) def test_save_bad_based_on(self): """Saving a Revision with a bad based_on value raises an error.""" r = revision() r.based_on = revision() # Revision of some other unrelated Document self.assertRaises(ProgrammingError, r.save) def test_correct_based_on_to_none(self): """Assure Revision.clean() changes a bad based_on value to None when there is no current_revision of the English document.""" r = revision() r.based_on = revision() # Revision of some other unrelated Document self.assertRaises(ValidationError, r.clean) eq_(None, r.based_on) def test_correct_based_on_to_current_revision(self): """Assure Revision.clean() defaults based_on value to the English doc's current_revision when there is one.""" # Make English rev: en_rev = revision(is_approved=True) en_rev.save() # Make Deutsch translation: de_doc = document(parent=en_rev.document, locale='de') de_doc.save() de_rev = revision(document=de_doc) # Set based_on to a de rev to simulate fixing broken translation source de_rev.based_on = de_rev de_rev.clean() eq_(en_rev.document.current_revision, de_rev.based_on) def test_previous(self): """Revision.previous should return this revision's document's most recent approved revision.""" rev = revision(is_approved=True, save=True) eq_(None, rev.previous) # wait a second so next revision is a different datetime time.sleep(1) next_rev = revision(document=rev.document, content="Updated", is_approved=True) next_rev.save() eq_(rev, next_rev.previous) time.sleep(1) last_rev = revision(document=rev.document, content="Finally", is_approved=True) last_rev.save() 
eq_(next_rev, last_rev.previous) @attr('toc') def test_show_toc(self): """Setting toc_depth appropriately affects the Document's show_toc property.""" d, r = doc_rev('Toggle table of contents.') assert (r.toc_depth != 0) assert d.show_toc r = revision(document=d, content=r.content, toc_depth=0, is_approved=True) r.save() assert not d.show_toc r = revision(document=d, content=r.content, toc_depth=1, is_approved=True) r.save() assert d.show_toc def test_revert(self): """Reverting to a specific revision.""" d, r = doc_rev('Test reverting') old_id = r.id time.sleep(1) revision(document=d, title='Test reverting', content='An edit to revert', comment='This edit gets reverted', is_approved=True) r.save() time.sleep(1) reverted = d.revert(r, r.creator) ok_('Revert to' in reverted.comment) ok_('Test reverting' == reverted.content) ok_(old_id != reverted.id) def test_revert_review_tags(self): d, r = doc_rev('Test reverting with review tags') r.review_tags.set('technical') time.sleep(1) r2 = revision(document=d, title='Test reverting with review tags', content='An edit to revert', comment='This edit gets reverted', is_approved=True) r2.save() r2.review_tags.set('editorial') reverted = d.revert(r, r.creator) reverted_tags = [t.name for t in reverted.review_tags.all()] ok_('technical' in reverted_tags) ok_('editorial' not in reverted_tags) class GetCurrentOrLatestRevisionTests(UserTestCase): """Tests for current_or_latest_revision.""" def test_single_approved(self): """Get approved revision.""" rev = revision(is_approved=True, save=True) eq_(rev, rev.document.current_or_latest_revision()) def test_multiple_approved(self): """When multiple approved revisions exist, return the most recent.""" r1 = revision(is_approved=True, save=True) r2 = revision(is_approved=True, save=True, document=r1.document) eq_(r2, r2.document.current_or_latest_revision()) def test_latest(self): """Return latest revision when no current exists.""" r1 = revision(is_approved=False, save=True, 
created=datetime.now() - timedelta(days=1)) r2 = revision(is_approved=False, save=True, document=r1.document) eq_(r2, r1.document.current_or_latest_revision()) class DumpAndLoadJsonTests(UserTestCase): def test_roundtrip(self): # Create some documents and revisions here, rather than use a fixture d1, r1 = doc_rev('Doc 1') d2, r2 = doc_rev('Doc 2') d3, r3 = doc_rev('Doc 3') d4, r4 = doc_rev('Doc 4') d5, r5 = doc_rev('Doc 5') # Since this happens in dev sometimes, break a doc by deleting its # current revision and leaving it with none. d5.current_revision = None d5.save() r5.delete() # The same creator will be used for all the revs, so let's also get a # non-creator user for the upload. creator = r1.creator uploader = self.user_model.objects.exclude(pk=creator.id).all()[0] # Count docs (with revisions) and revisions in DB doc_cnt_db = (Document.objects .filter(current_revision__isnull=False) .count()) rev_cnt_db = (Revision.objects.count()) # Do the dump, capture it, parse the JSON fin = StringIO() Document.objects.dump_json(Document.objects.all(), fin) data_json = fin.getvalue() data = json.loads(data_json) # No objects should come with non-null primary keys for x in data: ok_(not x['pk']) # Count the documents in JSON vs the DB doc_cnt_json = len([x for x in data if x['model'] == 'wiki.document']) eq_(doc_cnt_db, doc_cnt_json, "DB and JSON document counts should match") # Count the revisions in JSON vs the DB rev_cnt_json = len([x for x in data if x['model'] == 'wiki.revision']) eq_(rev_cnt_db, rev_cnt_json, "DB and JSON revision counts should match") # For good measure, ensure no documents missing revisions in the dump. 
doc_no_rev = (Document.objects .filter(current_revision__isnull=True))[0] no_rev_cnt = len([x for x in data if x['model'] == 'wiki.document' and x['fields']['slug'] == doc_no_rev.slug and x['fields']['locale'] == doc_no_rev.locale]) eq_(0, no_rev_cnt, "There should be no document exported without revision") # Upload the data as JSON, assert that all objects were loaded loaded_cnt = Document.objects.load_json(uploader, StringIO(data_json)) eq_(len(data), loaded_cnt) # Ensure the current revisions of the documents have changed, and that # the creator matches the uploader. for d_orig in (d1, d2, d3, d4): d_curr = Document.objects.get(pk=d_orig.pk) eq_(2, d_curr.revisions.count()) ok_(d_orig.current_revision.id != d_curr.current_revision.id) ok_(d_orig.current_revision.creator_id != d_curr.current_revision.creator_id) eq_(uploader.id, d_curr.current_revision.creator_id) # Everyone out of the pool! Document.objects.all().delete() Revision.objects.all().delete() # Try reloading the data on an empty DB loaded_cnt = Document.objects.load_json(uploader, StringIO(data_json)) eq_(len(data), loaded_cnt) # Count docs (with revisions) and revisions in DB. The imported objects # should have beeen doc/rev pairs. eq_(loaded_cnt / 2, Document.objects.count()) eq_(loaded_cnt / 2, Revision.objects.count()) # The originals should be gone, now. for d_orig in (d1, d2, d3, d4): # The original primary key should have gone away. 
try: d_curr = Document.objects.get(pk=d_orig.pk) self.fail("This should have been an error") except Document.DoesNotExist: pass # Should be able to fetch document with the original natural key key = d_orig.natural_key() d_curr = Document.objects.get_by_natural_key(*key) eq_(1, d_curr.revisions.count()) eq_(uploader.id, d_curr.current_revision.creator_id) class DeferredRenderingTests(UserTestCase): def setUp(self): super(DeferredRenderingTests, self).setUp() self.rendered_content = 'THIS IS RENDERED' self.raw_content = 'THIS IS NOT RENDERED CONTENT' self.d1, self.r1 = doc_rev('Doc 1') config.KUMA_DOCUMENT_RENDER_TIMEOUT = 600.0 config.KUMA_DOCUMENT_FORCE_DEFERRED_TIMEOUT = 7.0 def tearDown(self): super(DeferredRenderingTests, self).tearDown() self.d1.delete() def test_rendering_fields(self): """Defaults for model fields related to rendering should work as expected""" ok_(not self.d1.rendered_html) ok_(not self.d1.defer_rendering) ok_(not self.d1.is_rendering_scheduled) ok_(not self.d1.is_rendering_in_progress) @override_constance_settings(KUMASCRIPT_TIMEOUT=1.0) @mock.patch('kuma.wiki.kumascript.get') def test_get_rendered(self, mock_kumascript_get): """get_rendered() should return rendered content when available, attempt a render() when it's not""" mock_kumascript_get.return_value = (self.rendered_content, None) # First, try getting the rendered version of a document. It should # trigger a call to kumascript. ok_(not self.d1.rendered_html) ok_(not self.d1.render_started_at) ok_(not self.d1.last_rendered_at) result_rendered, _ = self.d1.get_rendered(None, 'http://testserver/') ok_(mock_kumascript_get.called) eq_(self.rendered_content, result_rendered) eq_(self.rendered_content, self.d1.rendered_html) # Next, get a fresh copy of the document and try getting a rendering. # It should *not* call out to kumascript, because the rendered content # should be in the DB. 
d1_fresh = Document.objects.get(pk=self.d1.pk) eq_(self.rendered_content, d1_fresh.rendered_html) ok_(d1_fresh.render_started_at) ok_(d1_fresh.last_rendered_at) mock_kumascript_get.called = False result_rendered, _ = d1_fresh.get_rendered(None, 'http://testserver/') ok_(not mock_kumascript_get.called) eq_(self.rendered_content, result_rendered) @attr('bug875349') @override_constance_settings(KUMASCRIPT_TIMEOUT=1.0) @override_settings(CELERY_ALWAYS_EAGER=True) @mock.patch('kuma.wiki.kumascript.get') def test_build_json_on_render(self, mock_kumascript_get): """ A document's json field is refreshed on render(), but not on save() """ mock_kumascript_get.return_value = (self.rendered_content, None) # Initially empty json field should be filled in after render() eq_(self.d1.json, None) self.d1.render() # reloading from db to get the updates done in the celery task self.d1 = Document.objects.get(pk=self.d1.pk) ok_(self.d1.json is not None) time.sleep(1.0) # Small clock-tick to age the results. # Change the doc title, saving does not actually change the json field. self.d1.title = "New title" self.d1.save() ok_(self.d1.title != self.d1.get_json_data()['title']) self.d1 = Document.objects.get(pk=self.d1.pk) # However, rendering refreshes the json field. 
self.d1.render() self.d1 = Document.objects.get(pk=self.d1.pk) eq_(self.d1.title, self.d1.get_json_data()['title']) # In case we logically delete a document with a changed title # we don't update the json blob deleted_title = 'Deleted title' self.d1.title = deleted_title self.d1.save() self.d1.delete() self.d1.render() self.d1 = Document.objects.get(pk=self.d1.pk) ok_(deleted_title != self.d1.get_json_data()['title']) @mock.patch('kuma.wiki.kumascript.get') @override_settings(CELERY_ALWAYS_EAGER=True) def test_get_summary(self, mock_kumascript_get): """ get_summary() should attempt to use rendered """ config.KUMASCRIPT_TIMEOUT = 1.0 mock_kumascript_get.return_value = ('<p>summary!</p>', None) ok_(not self.d1.rendered_html) result_summary = self.d1.get_summary() ok_(not mock_kumascript_get.called) ok_(not self.d1.rendered_html) self.d1.render() ok_(self.d1.rendered_html) ok_(mock_kumascript_get.called) result_summary = self.d1.get_summary() eq_("summary!", result_summary) config.KUMASCRIPT_TIMEOUT = 0.0 @mock.patch('kuma.wiki.kumascript.get') def test_one_render_at_a_time(self, mock_kumascript_get): """Only one in-progress rendering should be allowed for a Document""" mock_kumascript_get.return_value = (self.rendered_content, None) self.d1.render_started_at = datetime.now() self.d1.save() try: self.d1.render('', 'http://testserver/') self.fail("An attempt to render while another appears to be in " "progress should be disallowed") except DocumentRenderingInProgress: pass @mock.patch('kuma.wiki.kumascript.get') def test_render_timeout(self, mock_kumascript_get): """ A rendering that has taken too long is no longer considered in progress """ mock_kumascript_get.return_value = (self.rendered_content, None) timeout = 5.0 config.KUMA_DOCUMENT_RENDER_TIMEOUT = timeout self.d1.render_started_at = (datetime.now() - timedelta(seconds=timeout + 1)) self.d1.save() try: self.d1.render('', 'http://testserver/') except DocumentRenderingInProgress: self.fail("A timed-out rendering 
should not be considered as " "still in progress") @mock.patch('kuma.wiki.kumascript.get') def test_long_render_sets_deferred(self, mock_kumascript_get): """A rendering that takes more than a desired response time marks the document as in need of deferred rendering in the future.""" config.KUMASCRIPT_TIMEOUT = 1.0 rendered_content = self.rendered_content def my_kumascript_get(self, cache_control, base_url, timeout): time.sleep(1.0) return (rendered_content, None) mock_kumascript_get.side_effect = my_kumascript_get config.KUMA_DOCUMENT_FORCE_DEFERRED_TIMEOUT = 2.0 self.d1.render('', 'http://testserver/') ok_(not self.d1.defer_rendering) config.KUMA_DOCUMENT_FORCE_DEFERRED_TIMEOUT = 0.5 self.d1.render('', 'http://testserver/') ok_(self.d1.defer_rendering) config.KUMASCRIPT_TIMEOUT = 0.0 @mock.patch('kuma.wiki.kumascript.get') @mock.patch.object(tasks.render_document, 'delay') def test_schedule_rendering(self, mock_render_document_delay, mock_kumascript_get): mock_kumascript_get.return_value = (self.rendered_content, None) # Scheduling for a non-deferred render should happen on the spot. self.d1.defer_rendering = False self.d1.save() ok_(not self.d1.render_scheduled_at) ok_(not self.d1.last_rendered_at) self.d1.schedule_rendering(None, 'http://testserver/') ok_(self.d1.render_scheduled_at) ok_(self.d1.last_rendered_at) ok_(not mock_render_document_delay.called) ok_(not self.d1.is_rendering_scheduled) # Reset the significant fields and try a deferred render. self.d1.last_rendered_at = None self.d1.render_started_at = None self.d1.render_scheduled_at = None self.d1.defer_rendering = True self.d1.save() # Scheduling for a deferred render should result in a queued task. 
self.d1.schedule_rendering(None, 'http://testserver/') ok_(self.d1.render_scheduled_at) ok_(not self.d1.last_rendered_at) ok_(mock_render_document_delay.called) # And, since our mock delay() doesn't actually queue a task, this # document should appear to be scheduled for a pending render not yet # in progress. ok_(self.d1.is_rendering_scheduled) ok_(not self.d1.is_rendering_in_progress) @mock.patch('kuma.wiki.kumascript.get') @mock.patch.object(tasks.render_document, 'delay') def test_deferred_vs_immediate_rendering(self, mock_render_document_delay, mock_kumascript_get): mock_kumascript_get.return_value = (self.rendered_content, None) switch = Switch.objects.create(name='wiki_force_immediate_rendering') # When defer_rendering == False, the rendering should be immediate. switch.active = False switch.save() self.d1.rendered_html = '' self.d1.defer_rendering = False self.d1.save() result_rendered, _ = self.d1.get_rendered(None, 'http://testserver/') ok_(not mock_render_document_delay.called) # When defer_rendering == True but the waffle switch forces immediate, # the rendering should be immediate. switch.active = True switch.save() self.d1.rendered_html = '' self.d1.defer_rendering = True self.d1.save() result_rendered, _ = self.d1.get_rendered(None, 'http://testserver/') ok_(not mock_render_document_delay.called) # When defer_rendering == True, the rendering should be deferred and an # exception raised if the content is blank. 
switch.active = False switch.save() self.d1.rendered_html = '' self.d1.defer_rendering = True self.d1.save() try: result_rendered, _ = self.d1.get_rendered(None, 'http://testserver/') self.fail("We should have gotten a " "DocumentRenderedContentNotAvailable exception") except DocumentRenderedContentNotAvailable: pass ok_(mock_render_document_delay.called) @mock.patch('kuma.wiki.kumascript.get') def test_errors_stored_correctly(self, mock_kumascript_get): errors = [ {'level': 'error', 'message': 'This is a fake error', 'args': ['FakeError']}, ] mock_kumascript_get.return_value = (self.rendered_content, errors) r_rendered, r_errors = self.d1.get_rendered(None, 'http://testserver/') ok_(errors, r_errors) class RenderExpiresTests(UserTestCase): """Tests for max-age and automatic document rebuild""" def test_find_stale_documents(self): now = datetime.now() # Fresh d1 = document(title='Aged 1') d1.render_expires = now + timedelta(seconds=100) d1.save() # Stale, exactly now d2 = document(title='Aged 2') d2.render_expires = now d2.save() # Stale, a little while ago d3 = document(title='Aged 3') d3.render_expires = now - timedelta(seconds=100) d3.save() stale_docs = Document.objects.get_by_stale_rendering() eq_(sorted([d2.pk, d3.pk]), sorted([x.pk for x in stale_docs])) @override_constance_settings(KUMASCRIPT_TIMEOUT=1.0) @mock.patch('kuma.wiki.kumascript.get') def test_update_expires_with_max_age(self, mock_kumascript_get): mock_kumascript_get.return_value = ('MOCK CONTENT', None) max_age = 1000 now = datetime.now() d1 = document(title='Aged 1') d1.render_max_age = max_age d1.save() d1.render() # HACK: Exact time comparisons suck, because execution time. 
later = now + timedelta(seconds=max_age) ok_(d1.render_expires > later - timedelta(seconds=1)) ok_(d1.render_expires < later + timedelta(seconds=1)) @override_constance_settings(KUMASCRIPT_TIMEOUT=1.0) @mock.patch('kuma.wiki.kumascript.get') def test_update_expires_without_max_age(self, mock_kumascript_get): mock_kumascript_get.return_value = ('MOCK CONTENT', None) now = datetime.now() d1 = document(title='Aged 1') d1.render_expires = now - timedelta(seconds=100) d1.save() d1.render() ok_(not d1.render_expires) @override_constance_settings(KUMASCRIPT_TIMEOUT=1.0) @mock.patch('kuma.wiki.kumascript.get') @mock.patch.object(tasks.render_document, 'delay') def test_render_stale(self, mock_render_document_delay, mock_kumascript_get): mock_kumascript_get.return_value = ('MOCK CONTENT', None) now = datetime.now() earlier = now - timedelta(seconds=1000) d1 = document(title='Aged 3') d1.last_rendered_at = earlier d1.render_expires = now - timedelta(seconds=100) d1.save() tasks.render_stale_documents() d1_fresh = Document.objects.get(pk=d1.pk) ok_(not mock_render_document_delay.called) ok_(d1_fresh.last_rendered_at > earlier) class PageMoveTests(UserTestCase): """Tests for page-moving and associated functionality.""" @attr('move') def test_children_simple(self): """A basic tree with two direct children and no sub-trees on either.""" d1 = document(title='Parent', save=True) d2 = document(title='Child', save=True) d2.parent_topic = d1 d2.save() d3 = document(title='Another child', save=True) d3.parent_topic = d1 d3.save() eq_([d2, d3], d1.get_descendants()) def test_get_descendants_limited(self): """Tests limiting of descendant levels""" def _make_doc(title, parent=None): doc = document(title=title, save=True) if parent: doc.parent_topic = parent doc.save() return doc parent = _make_doc('Parent') child1 = _make_doc('Child 1', parent) child2 = _make_doc('Child 2', parent) grandchild = _make_doc('GrandChild 1', child1) _make_doc('Great GrandChild 1', grandchild) # Test 
descendant counts eq_(len(parent.get_descendants()), 4) # All eq_(len(parent.get_descendants(1)), 2) eq_(len(parent.get_descendants(2)), 3) eq_(len(parent.get_descendants(0)), 0) eq_(len(child2.get_descendants(10)), 0) eq_(len(grandchild.get_descendants(4)), 1) @attr('move') def test_children_complex(self): """A slightly more complex tree, with multiple children, some of which do/don't have their own children.""" top = document(title='Parent', save=True) c1 = document(title='Child 1', save=True) c1.parent_topic = top c1.save() gc1 = document(title='Child of child 1', save=True) gc1.parent_topic = c1 gc1.save() c2 = document(title='Child 2', save=True) c2.parent_topic = top c2.save() gc2 = document(title='Child of child 2', save=True) gc2.parent_topic = c2 gc2.save() gc3 = document(title='Another child of child 2', save=True) gc3.parent_topic = c2 gc3.save() ggc1 = document(title='Child of the second child of child 2', save=True) ggc1.parent_topic = gc3 ggc1.save() ok_([c1, gc1, c2, gc2, gc3, ggc1] == top.get_descendants()) @attr('move') def test_circular_dependency(self): """Make sure we can detect potential circular dependencies in parent/child relationships.""" # Test detection at one level removed. parent = document(title='Parent of circular-dependency document') child = document(title='Document with circular dependency') child.parent_topic = parent child.save() ok_(child.is_child_of(parent)) # And at two levels removed. 
grandparent = document(title='Grandparent of ' 'circular-dependency document') parent.parent_topic = grandparent child.save() ok_(child.is_child_of(grandparent)) @attr('move') def test_move_tree(self): """Moving a tree of documents does the correct thing""" # Simple multi-level tree: # # - top # - child1 # - child2 # - grandchild top = revision(title='Top-level parent for tree moves', slug='first-level/parent', is_approved=True, save=True) old_top_id = top.id top_doc = top.document child1 = revision(title='First child of tree-move parent', slug='first-level/second-level/child1', is_approved=True, save=True) old_child1_id = child1.id child1_doc = child1.document child1_doc.parent_topic = top_doc child1_doc.save() child2 = revision(title='Second child of tree-move parent', slug='first-level/second-level/child2', is_approved=True, save=True) old_child2_id = child2.id child2_doc = child2.document child2_doc.parent_topic = top_doc child2.save() grandchild = revision(title='Child of second child of tree-move parent', slug='first-level/second-level/third-level/grandchild', is_approved=True, save=True) old_grandchild_id = grandchild.id grandchild_doc = grandchild.document grandchild_doc.parent_topic = child2_doc grandchild_doc.save() revision(title='New Top-level bucket for tree moves', slug='new-prefix', is_approved=True, save=True) revision(title='New first-level parent for tree moves', slug='new-prefix/first-level', is_approved=True, save=True) # Now we do a simple move: inserting a prefix that needs to be # inherited by the whole tree. top_doc._move_tree('new-prefix/first-level/parent') # And for each document verify three things: # # 1. The new slug is correct. # 2. A new revision was created when the page moved. # 3. A redirect was created. 
moved_top = Document.objects.get(pk=top_doc.id) eq_('new-prefix/first-level/parent', moved_top.current_revision.slug) ok_(old_top_id != moved_top.current_revision.id) ok_(moved_top.current_revision.slug in Document.objects.get(slug='first-level/parent').redirect_url()) moved_child1 = Document.objects.get(pk=child1_doc.id) eq_('new-prefix/first-level/parent/child1', moved_child1.current_revision.slug) ok_(old_child1_id != moved_child1.current_revision.id) ok_(moved_child1.current_revision.slug in Document.objects.get( slug='first-level/second-level/child1' ).redirect_url()) moved_child2 = Document.objects.get(pk=child2_doc.id) eq_('new-prefix/first-level/parent/child2', moved_child2.current_revision.slug) ok_(old_child2_id != moved_child2.current_revision.id) ok_(moved_child2.current_revision.slug in Document.objects.get( slug='first-level/second-level/child2' ).redirect_url()) moved_grandchild = Document.objects.get(pk=grandchild_doc.id) eq_('new-prefix/first-level/parent/child2/grandchild', moved_grandchild.current_revision.slug) ok_(old_grandchild_id != moved_grandchild.current_revision.id) ok_(moved_grandchild.current_revision.slug in Document.objects.get( slug='first-level/second-level/third-level/grandchild' ).redirect_url()) @attr('move') def test_conflicts(self): top = revision(title='Test page-move conflict detection', slug='test-move-conflict-detection', is_approved=True, save=True) top_doc = top.document child = revision(title='Child of conflict detection test', slug='move-tests/conflict-child', is_approved=True, save=True) child_doc = child.document child_doc.parent_topic = top_doc child_doc.save() # We should find the conflict if it's at the slug the document # will move to. 
        top_conflict = revision(title='Conflicting document for move conflict detection',
                                slug='moved/test-move-conflict-detection',
                                is_approved=True,
                                save=True)
        eq_([top_conflict.document],
            top_doc._tree_conflicts('moved/test-move-conflict-detection'))

        # Or if it will involve a child document.
        child_conflict = revision(title='Conflicting child for move conflict detection',
                                  slug='moved/test-move-conflict-detection/conflict-child',
                                  is_approved=True,
                                  save=True)
        eq_([top_conflict.document, child_conflict.document],
            top_doc._tree_conflicts('moved/test-move-conflict-detection'))

        # But a redirect should not trigger a conflict.
        # Turning the top conflict into a redirect removes it from the
        # conflict list, leaving only the child conflict.
        revision(title='Conflicting document for move conflict detection',
                 slug='moved/test-move-conflict-detection',
                 content='REDIRECT <a class="redirect" href="/foo">Foo</a>',
                 document=top_conflict.document,
                 is_approved=True,
                 save=True)
        eq_([child_conflict.document],
            top_doc._tree_conflicts('moved/test-move-conflict-detection'))

    @attr('move')
    def test_additional_conflicts(self):
        # A destination slug that merely shares a prefix-like name with an
        # existing tree ('NativeRTC' vs 'WebRTC') must report no conflicts.
        top = revision(title='WebRTC',
                       slug='WebRTC',
                       content='WebRTC',
                       is_approved=True,
                       save=True)
        top_doc = top.document
        child1 = revision(title='WebRTC Introduction',
                          slug='WebRTC/WebRTC_Introduction',
                          content='WebRTC Introduction',
                          is_approved=True,
                          save=True)
        child1_doc = child1.document
        child1_doc.parent_topic = top_doc
        child1_doc.save()
        child2 = revision(title='Taking webcam photos',
                          slug='WebRTC/Taking_webcam_photos',
                          is_approved=True,
                          save=True)
        child2_doc = child2.document
        child2_doc.parent_topic = top_doc
        child2_doc.save()
        eq_([], top_doc._tree_conflicts('NativeRTC'))

    @attr('move')
    def test_preserve_tags(self):
        # Moving a page must carry both its tags and its review tags over
        # to the revision created by the move.
        tags = "'moving', 'tests'"
        rev = revision(title='Test page-move tag preservation',
                       slug='page-move-tags',
                       tags=tags,
                       is_approved=True,
                       save=True)
        rev.review_tags.set('technical')
        # Refetch so the revision reflects persisted state before the move.
        rev = Revision.objects.get(pk=rev.id)

        # Destination parent for the move.
        revision(title='New Top-level parent for tree moves',
                 slug='new-top',
                 is_approved=True,
                 save=True)

        doc = rev.document
        doc._move_tree('new-top/page-move-tags')
        # The move-created revision keeps both tags and review tags.
        moved_doc = Document.objects.get(pk=doc.id)
        new_rev = moved_doc.current_revision
        eq_(tags, new_rev.tags)
        eq_(['technical'],
            [str(tag) for tag in new_rev.review_tags.all()])

    @attr('move')
    def test_move_tree_breadcrumbs(self):
        """Moving a tree of documents under an existing doc updates breadcrumbs"""

        # Build two independent three-level trees:
        # grandpa/dad/son and grandma/mom/daughter.
        grandpa = revision(title='Top-level parent for breadcrumb move',
                           slug='grandpa', is_approved=True, save=True)
        grandpa_doc = grandpa.document

        dad = revision(title='Mid-level parent for breadcrumb move',
                       slug='grandpa/dad', is_approved=True, save=True)
        dad_doc = dad.document
        dad_doc.parent_topic = grandpa_doc
        dad_doc.save()

        son = revision(title='Bottom-level child for breadcrumb move',
                       slug='grandpa/dad/son', is_approved=True, save=True)
        son_doc = son.document
        son_doc.parent_topic = dad_doc
        son_doc.save()

        grandma = revision(title='Top-level parent for breadcrumb move',
                           slug='grandma', is_approved=True, save=True)
        grandma_doc = grandma.document

        mom = revision(title='Mid-level parent for breadcrumb move',
                       slug='grandma/mom', is_approved=True, save=True)
        mom_doc = mom.document
        mom_doc.parent_topic = grandma_doc
        mom_doc.save()

        daughter = revision(title='Bottom-level child for breadcrumb move',
                            slug='grandma/mom/daughter',
                            is_approved=True, save=True)
        daughter_doc = daughter.document
        daughter_doc.parent_topic = mom_doc
        daughter_doc.save()

        # move grandma under grandpa
        grandma_doc._move_tree('grandpa/grandma')

        # assert the parent_topics are correctly rooted at grandpa
        # note we have to refetch these to see any DB changes.
grandma_moved = Document.objects.get(locale=grandma_doc.locale, slug='grandpa/grandma') ok_(grandma_moved.parent_topic == grandpa_doc) mom_moved = Document.objects.get(locale=mom_doc.locale, slug='grandpa/grandma/mom') ok_(mom_moved.parent_topic == grandma_moved) @attr('move') def test_move_tree_no_new_parent(self): """Moving a tree to a slug that doesn't exist throws error.""" rev = revision(title='doc to move', slug='doc1', is_approved=True, save=True) doc = rev.document try: doc._move_tree('slug-that-doesnt-exist/doc1') ok_(False, "Moving page under non-existing doc should error.") except: pass @attr('move') @attr('top') def test_move_top_level_docs(self): """Moving a top document to a new slug location""" page_to_move_title = 'Page Move Root' page_to_move_slug = 'Page_Move_Root' page_child_slug = 'Page_Move_Root/Page_Move_Child' page_moved_slug = 'Page_Move_Root_Moved' page_child_moved_slug = 'Page_Move_Root_Moved/Page_Move_Child' page_to_move_doc = document(title=page_to_move_title, slug=page_to_move_slug, save=True) rev = revision(document=page_to_move_doc, title=page_to_move_title, slug=page_to_move_slug, save=True) page_to_move_doc.current_revision = rev page_to_move_doc.save() page_child = revision(title='child', slug=page_child_slug, is_approved=True, save=True) page_child_doc = page_child.document page_child_doc.parent_topic = page_to_move_doc page_child_doc.save() # move page to new slug new_title = page_to_move_title + ' Moved' page_to_move_doc._move_tree(page_moved_slug, user=None, title=new_title) page_to_move_doc = Document.objects.get(slug=page_to_move_slug) page_moved_doc = Document.objects.get(slug=page_moved_slug) page_child_doc = Document.objects.get(slug=page_child_slug) page_child_moved_doc = Document.objects.get(slug=page_child_moved_slug) ok_('REDIRECT' in page_to_move_doc.html) ok_(page_moved_slug in page_to_move_doc.html) ok_(new_title in page_to_move_doc.html) ok_(page_moved_doc) ok_('REDIRECT' in page_child_doc.html) ok_(page_moved_slug 
            in page_child_doc.html)
        ok_(page_child_moved_doc)

        # TODO: Fix this assertion?
        # eq_('admin', page_moved_doc.current_revision.creator.username)

    @attr('move')
    def test_mid_move(self):
        # Moving a mid-level node ('Root/Child') to a new top-level slug
        # should redirect the old child and grandchild slugs to the new
        # locations.
        root_title = 'Root'
        root_slug = 'Root'
        child_title = 'Child'
        child_slug = 'Root/Child'
        moved_child_slug = 'DiffChild'
        grandchild_title = 'Grandchild'
        grandchild_slug = 'Root/Child/Grandchild'
        moved_grandchild_slug = 'DiffChild/Grandchild'

        root_doc = document(title=root_title,
                            slug=root_slug,
                            save=True)
        rev = revision(document=root_doc,
                       title=root_title,
                       slug=root_slug,
                       save=True)
        root_doc.current_revision = rev
        root_doc.save()

        child = revision(title=child_title, slug=child_slug,
                         is_approved=True, save=True)
        child_doc = child.document
        child_doc.parent_topic = root_doc
        child_doc.save()

        grandchild = revision(title=grandchild_title,
                              slug=grandchild_slug,
                              is_approved=True, save=True)
        grandchild_doc = grandchild.document
        grandchild_doc.parent_topic = child_doc
        grandchild_doc.save()

        child_doc._move_tree(moved_child_slug)

        # Old child slug redirects to the new one; the bare .get() calls
        # double as existence checks for the moved documents.
        redirected_child = Document.objects.get(slug=child_slug)
        Document.objects.get(slug=moved_child_slug)
        ok_('REDIRECT' in redirected_child.html)
        ok_(moved_child_slug in redirected_child.html)

        redirected_grandchild = Document.objects.get(slug=grandchild_doc.slug)
        Document.objects.get(slug=moved_grandchild_slug)
        ok_('REDIRECT' in redirected_grandchild.html)
        ok_(moved_grandchild_slug in redirected_grandchild.html)

    @attr('move')
    def test_move_special(self):
        # Round-trip move of a 'User:' tree: move away and back, checking
        # that redirects are created each time, document IDs are preserved,
        # and stale redirect documents are deleted on the return move.
        root_slug = 'User:foo'
        child_slug = '%s/child' % root_slug
        new_root_slug = 'User:foobar'

        special_root = document(title='User:foo',
                                slug=root_slug,
                                save=True)
        revision(document=special_root, title=special_root.title,
                 slug=root_slug, save=True)

        special_child = document(title='User:foo child',
                                 slug=child_slug,
                                 save=True)
        revision(document=special_child, title=special_child.title,
                 slug=child_slug, save=True)

        special_child.parent_topic = special_root
        special_child.save()

        original_root_id = special_root.id
        original_child_id = special_child.id

        # First move, to new slug.
        special_root._move_tree(new_root_slug)

        # Appropriate redirects were left behind.
        root_redirect = Document.objects.get(locale=special_root.locale,
                                             slug=root_slug)
        ok_(root_redirect.is_redirect)
        root_redirect_id = root_redirect.id

        child_redirect = Document.objects.get(locale=special_child.locale,
                                              slug=child_slug)
        ok_(child_redirect.is_redirect)
        child_redirect_id = child_redirect.id

        # Moved documents still have the same IDs.
        moved_root = Document.objects.get(locale=special_root.locale,
                                          slug=new_root_slug)
        eq_(original_root_id, moved_root.id)

        moved_child = Document.objects.get(locale=special_child.locale,
                                           slug='%s/child' % new_root_slug)
        eq_(original_child_id, moved_child.id)

        # Second move, back to original slug.
        moved_root._move_tree(root_slug)

        # Once again we left redirects behind.
        root_second_redirect = Document.objects.get(locale=special_root.locale,
                                                    slug=new_root_slug)
        ok_(root_second_redirect.is_redirect)

        child_second_redirect = Document.objects.get(locale=special_child.locale,
                                                     slug='%s/child' % new_root_slug)
        ok_(child_second_redirect.is_redirect)

        # The documents at the original URLs aren't redirects anymore.
        rerooted_root = Document.objects.get(locale=special_root.locale,
                                             slug=root_slug)
        ok_(not rerooted_root.is_redirect)

        rerooted_child = Document.objects.get(locale=special_child.locale,
                                              slug=child_slug)
        ok_(not rerooted_child.is_redirect)

        # The redirects created in the first move no longer exist in the DB.
        self.assertRaises(Document.DoesNotExist,
                          Document.objects.get,
                          id=root_redirect_id)
        self.assertRaises(Document.DoesNotExist,
                          Document.objects.get,
                          id=child_redirect_id)

    def test_fail_message(self):
        """
        When page move fails in moving one of the children, it
        generates an informative exception message explaining which
        child document failed.
""" top = revision(title='Test page-move error messaging', slug='test-move-error-messaging', is_approved=True, save=True) top_doc = top.document child = revision(title='Child to test page-move error messaging', slug='test-move-error-messaging/child', is_approved=True, save=True) child_doc = child.document child_doc.parent_topic = top_doc child_doc.save() grandchild = revision(title='Grandchild to test page-move error handling', slug='test-move-error-messaging/child/grandchild', is_approved=True, save=True) grandchild_doc = grandchild.document grandchild_doc.parent_topic = child_doc grandchild_doc.save() revision(title='Conflict page for page-move error handling', slug='test-move-error-messaging/moved/grandchild', is_approved=True, save=True) # TODO: Someday when we're on Python 2.7, we can use # assertRaisesRegexp. Until then, we have to manually catch # and inspect the exception. try: child_doc._move_tree('test-move-error-messaging/moved') except PageMoveError as e: err_strings = [ 'with id %s' % grandchild_doc.id, 'https://developer.mozilla.org/%s/docs/%s' % (grandchild_doc.locale, grandchild_doc.slug), "Exception type: <type 'exceptions.Exception'>", 'Exception message: Requested move would overwrite a non-redirect page.', 'in _move_tree', 'in _move_conflicts', 'raise Exception("Requested move would overwrite a non-redirect page.")', ] for s in err_strings: ok_(s in e.args[0]) class DocumentZoneTests(UserTestCase): """Tests for content zones in topic hierarchies""" def test_find_roots(self): """Ensure sub pages can find the content zone root""" root_rev = revision(title='ZoneRoot', slug='ZoneRoot', content='This is the Zone Root', is_approved=True, save=True) root_doc = root_rev.document middle_rev = revision(title='Zonemiddle', slug='Zonemiddle', content='This is the Zone middle', is_approved=True, save=True) middle_doc = middle_rev.document middle_doc.parent_topic = root_doc middle_doc.save() sub_rev = revision(title='SubPage', slug='SubPage', content='This is 
a subpage', is_approved=True, save=True) sub_doc = sub_rev.document sub_doc.parent_topic = middle_doc sub_doc.save() sub_sub_rev = revision(title='SubSubPage', slug='SubSubPage', content='This is a subsubpage', is_approved=True, save=True) sub_sub_doc = sub_sub_rev.document sub_sub_doc.parent_topic = sub_doc sub_sub_doc.save() other_rev = revision(title='otherPage', slug='otherPage', content='This is an otherpage', is_approved=True, save=True) other_doc = other_rev.document root_zone = DocumentZone(document=root_doc) root_zone.save() middle_zone = DocumentZone(document=middle_doc) middle_zone.save() eq_(self.get_zone_stack(root_doc)[0], root_zone) eq_(self.get_zone_stack(middle_doc)[0], middle_zone) eq_(self.get_zone_stack(sub_doc)[0], middle_zone) eq_(0, len(self.get_zone_stack(other_doc))) zone_stack = self.get_zone_stack(sub_sub_doc) eq_(zone_stack[0], middle_zone) eq_(zone_stack[1], root_zone) def get_zone_stack(self, doc): return DocumentZoneStackJob().get(doc.pk) class DocumentContributorsTests(UserTestCase): def test_get_contributors(self): contrib_1 = user(save=True) revision_1 = revision(creator=contrib_1, save=True) self.assertIn(contrib_1, revision_1.document.get_contributors()) def test_get_contributors_inactive_or_banned(self): contrib_2 = user(save=True) contrib_3 = user(is_active=False, save=True) contrib_4 = user(save=True) contrib_4.bans.create(by=contrib_3, reason='because reasons') revision_2 = revision(creator=contrib_2, save=True) revision(creator=contrib_3, document=revision_2.document, save=True) revision(creator=contrib_4, document=revision_2.document, save=True) contributors = revision_2.document.get_contributors() self.assertIn(contrib_2, contributors) self.assertNotIn(contrib_3, contributors) self.assertNotIn(contrib_4, contributors) class DocumentParsingTests(UserTestCase): """Tests exercising content parsing methods""" def test_get_section_content(self): src = """ <h2>Foo</h2> <p>Bar</p> <h3 id="Quick_Links">Quick Links</h3> <p>Foo, 
yay</p> <h2>Baz</h2> <p>Baz</p> """ expected = """ <p>Foo, yay</p> """ r = revision(title='Document with sections', slug='document-with-sections', content=src, is_approved=True, save=True) d = r.document result = d.get_section_content('Quick_Links') eq_(normalize_html(expected), normalize_html(result)) def test_cached_content_fields(self): src = """ <h2>First</h2> <p>This is a document</p> <h3 id="Quick_Links">Quick Links</h3> <p>Foo, yay</p> <h3 id="Subnav">Subnav</h3> <p>Bar, yay</p> <h2>Second</h2> <p>Another section</p> <a href="/en-US/docs/document-with-sections">Existing link</a> <a href="/en-US/docs/does-not-exist">New link</a> """ body = """ <h2 id="First">First</h2> <p>This is a document</p> <!-- --> <!-- --> <h2 id="Second">Second</h2> <p>Another section</p> <a href="/en-US/docs/document-with-sections">Existing link</a> <a class="new" href="/en-US/docs/does-not-exist">New link</a> """ quick_links = """ <p>Foo, yay</p> """ subnav = """ <p>Bar, yay</p> """ r = revision(title='Document with sections', slug='document-with-sections', content=src, is_approved=True, save=True) d = r.document eq_(normalize_html(body), normalize_html(d.get_body_html())) eq_(normalize_html(quick_links), normalize_html(d.get_quick_links_html())) eq_(normalize_html(subnav), normalize_html(d.get_zone_subnav_local_html())) def test_bug_982174(self): """Ensure build_json_data uses rendered HTML when available to extract sections generated by KumaScript (bug 982174)""" r = revision(title='Document with sections', slug='document-with-sections', is_approved=True, save=True) d = r.document # Save document with un-rendered content d.html = """ <h2>Section 1</h2> <p>Foo</p> {{ h2_macro('Section 2') }} <p>Bar</p> <h2>Section 3</h2> <p>Foo</p> """ d.save() json_data = d.build_json_data() expected_sections = [ {'id': 'Section_1', 'title': 'Section 1'}, {'id': 'Section_3', 'title': 'Section 3'} ] eq_(expected_sections, json_data['sections']) # Simulate kumascript rendering by updating 
rendered_html d.rendered_html = """ <h2>Section 1</h2> <p>Foo</p> <h2>Section 2</h2> <p>Bar</p> <h2>Section 3</h2> <p>Foo</p> """ d.save() json_data = d.build_json_data() expected_sections = [ {'id': 'Section_1', 'title': 'Section 1'}, {'id': 'Section_2', 'title': 'Section 2'}, {'id': 'Section_3', 'title': 'Section 3'} ] eq_(expected_sections, json_data['sections']) class RevisionIPTests(UserTestCase): def test_delete_older_than_default_30_days(self): old_date = date.today() - timedelta(days=31) r = revision(created=old_date, save=True) RevisionIP.objects.create(revision=r, ip='127.0.0.1').save() eq_(1, RevisionIP.objects.all().count()) RevisionIP.objects.delete_old() eq_(0, RevisionIP.objects.all().count()) def test_delete_older_than_days_argument(self): rev_date = date.today() - timedelta(days=5) r = revision(created=rev_date, save=True) RevisionIP.objects.create(revision=r, ip='127.0.0.1').save() eq_(1, RevisionIP.objects.all().count()) RevisionIP.objects.delete_old(days=4) eq_(0, RevisionIP.objects.all().count()) def test_delete_older_than_only_deletes_older_than(self): oldest_date = date.today() - timedelta(days=31) r1 = revision(created=oldest_date, save=True) RevisionIP.objects.create(revision=r1, ip='127.0.0.1').save() old_date = date.today() - timedelta(days=29) r1 = revision(created=old_date, save=True) RevisionIP.objects.create(revision=r1, ip='127.0.0.1').save() now_date = date.today() r2 = revision(created=now_date, save=True) RevisionIP.objects.create(revision=r2, ip='127.0.0.1').save() eq_(3, RevisionIP.objects.all().count()) RevisionIP.objects.delete_old() eq_(2, RevisionIP.objects.all().count())
codeparrot/github-code-clean
from typing import Any, Dict EMOJI_NAME_MAPS: Dict[str, Dict[str, Any]] = { # seems like best emoji for happy "1f600": {"canonical_name": "grinning", "aliases": ["happy"]}, "1f603": {"canonical_name": "smiley", "aliases": []}, # the Google emoji for this is not great, so made People/9 'smile' and # renamed this one "1f604": {"canonical_name": "big_smile", "aliases": []}, # from gemoji/Unicode "1f601": {"canonical_name": "grinning_face_with_smiling_eyes", "aliases": []}, # satisfied doesn't seem like a good description of these images "1f606": {"canonical_name": "laughing", "aliases": ["lol"]}, "1f605": {"canonical_name": "sweat_smile", "aliases": []}, # laughter_tears from https://beebom.com/emoji-meanings/ "1f602": {"canonical_name": "joy", "aliases": ["tears", "laughter_tears"]}, "1f923": {"canonical_name": "rolling_on_the_floor_laughing", "aliases": ["rofl"]}, # not sure how the glyphs match relaxed, but both iamcal and gemoji have it "263a": {"canonical_name": "smiling_face", "aliases": ["relaxed"]}, "1f60a": {"canonical_name": "blush", "aliases": []}, # halo comes from gemoji/Unicode "1f607": {"canonical_name": "innocent", "aliases": ["halo"]}, "1f642": {"canonical_name": "smile", "aliases": []}, "1f643": {"canonical_name": "upside_down", "aliases": ["oops"]}, "1f609": {"canonical_name": "wink", "aliases": []}, "1f60c": {"canonical_name": "relieved", "aliases": []}, # in_love from https://beebom.com/emoji-meanings/ "1f60d": {"canonical_name": "heart_eyes", "aliases": ["in_love"]}, # blow_a_kiss from https://beebom.com/emoji-meanings/ "1f618": {"canonical_name": "heart_kiss", "aliases": ["blow_a_kiss"]}, "1f617": {"canonical_name": "kiss", "aliases": []}, "1f619": {"canonical_name": "kiss_smiling_eyes", "aliases": []}, "1f61a": {"canonical_name": "kiss_with_blush", "aliases": []}, "1f60b": {"canonical_name": "yum", "aliases": []}, # crazy from https://beebom.com/emoji-meanings/, seems like best emoji for # joking "1f61b": {"canonical_name": "stuck_out_tongue", 
"aliases": ["mischievous"]}, "1f61c": {"canonical_name": "stuck_out_tongue_wink", "aliases": ["joking", "crazy"]}, "1f61d": {"canonical_name": "stuck_out_tongue_closed_eyes", "aliases": []}, # kaching suggested by user "1f911": {"canonical_name": "money_face", "aliases": ["kaching"]}, # arms_open seems like a natural addition "1f917": {"canonical_name": "hug", "aliases": ["arms_open"]}, "1f913": {"canonical_name": "nerd", "aliases": ["geek"]}, # several sites suggested this was used for "cool", but cool is taken by # Symbols/137 "1f60e": {"canonical_name": "sunglasses", "aliases": []}, "1f921": {"canonical_name": "clown", "aliases": []}, "1f920": {"canonical_name": "cowboy", "aliases": []}, # https://emojipedia.org/smirking-face/ "1f60f": {"canonical_name": "smirk", "aliases": ["smug"]}, "1f612": {"canonical_name": "unamused", "aliases": []}, "1f61e": {"canonical_name": "disappointed", "aliases": []}, # see People/41 "1f614": {"canonical_name": "pensive", "aliases": ["tired"]}, "1f61f": {"canonical_name": "worried", "aliases": []}, # these seem to better capture the glyphs. 
This is also what :/ turns into # in Google Hangouts "1f615": {"canonical_name": "oh_no", "aliases": ["half_frown", "concerned", "confused"]}, "1f641": {"canonical_name": "frown", "aliases": ["slight_frown"]}, # sad seemed better than putting another frown as the primary name (see # People/37) "2639": {"canonical_name": "sad", "aliases": ["big_frown"]}, # helpless from https://emojipedia.org/persevering-face/ "1f623": {"canonical_name": "persevere", "aliases": ["helpless"]}, # agony seemed like a good addition "1f616": {"canonical_name": "confounded", "aliases": ["agony"]}, # tired doesn't really match any of the 4 images, put it on People/34 "1f62b": {"canonical_name": "anguish", "aliases": []}, # distraught from https://beebom.com/emoji-meanings/ "1f629": {"canonical_name": "weary", "aliases": ["distraught"]}, "1f624": {"canonical_name": "triumph", "aliases": []}, "1f620": {"canonical_name": "angry", "aliases": []}, # mad and grumpy from https://beebom.com/emoji-meanings/, very_angry to # parallel People/44 and show up in typeahead for "ang.." "1f621": {"canonical_name": "rage", "aliases": ["mad", "grumpy", "very_angry"]}, # blank from https://beebom.com/emoji-meanings/, speechless and poker_face # seemed like good ideas for this "1f636": {"canonical_name": "speechless", "aliases": ["no_mouth", "blank", "poker_face"]}, "1f610": {"canonical_name": "neutral", "aliases": []}, "1f611": {"canonical_name": "expressionless", "aliases": []}, "1f62f": {"canonical_name": "hushed", "aliases": []}, "1f626": {"canonical_name": "frowning", "aliases": []}, # pained from https://beebom.com/emoji-meanings/ "1f627": {"canonical_name": "anguished", "aliases": ["pained"]}, # surprise from https://emojipedia.org/face-with-open-mouth/ "1f62e": {"canonical_name": "open_mouth", "aliases": ["surprise"]}, "1f632": {"canonical_name": "astonished", "aliases": []}, "1f635": {"canonical_name": "dizzy", "aliases": []}, # the alternates are from https://emojipedia.org/flushed-face/. 
shame # doesn't work with the Google emoji "1f633": {"canonical_name": "flushed", "aliases": ["embarrassed", "blushing"]}, "1f631": {"canonical_name": "scream", "aliases": []}, # scared from https://emojipedia.org/fearful-face/, shock seemed like a # nice addition "1f628": {"canonical_name": "fear", "aliases": ["scared", "shock"]}, "1f630": {"canonical_name": "cold_sweat", "aliases": []}, "1f622": {"canonical_name": "cry", "aliases": []}, # stressed from https://beebom.com/emoji-meanings/. The internet generally # didn't seem to know what to make of the dissapointed_relieved name, and I # got the sense it wasn't an emotion that was often used. Hence replaced it # with exhausted. "1f625": {"canonical_name": "exhausted", "aliases": ["disappointed_relieved", "stressed"]}, "1f924": {"canonical_name": "drooling", "aliases": []}, "1f62d": {"canonical_name": "sob", "aliases": []}, "1f613": {"canonical_name": "sweat", "aliases": []}, "1f62a": {"canonical_name": "sleepy", "aliases": []}, "1f634": {"canonical_name": "sleeping", "aliases": []}, "1f644": {"canonical_name": "rolling_eyes", "aliases": []}, "1f914": {"canonical_name": "thinking", "aliases": []}, "1f925": {"canonical_name": "lying", "aliases": []}, # seems like best emoji for nervous/anxious "1f62c": {"canonical_name": "grimacing", "aliases": ["nervous", "anxious"]}, # zip_it from https://mashable.com/2015/10/23/ios-9-1-emoji-guide/, # lips_sealed from https://emojipedia.org/zipper-mouth-face/, rest seemed # like reasonable additions "1f910": { "canonical_name": "silence", "aliases": ["quiet", "hush", "zip_it", "lips_are_sealed"], }, # queasy seemed like a natural addition "1f922": {"canonical_name": "nauseated", "aliases": ["queasy"]}, "1f927": {"canonical_name": "sneezing", "aliases": []}, "1f637": {"canonical_name": "mask", "aliases": []}, # flu from https://mashable.com/2015/10/23/ios-9-1-emoji-guide/, sick from # https://emojipedia.org/face-with-thermometer/, face_with_thermometer so # it shows up in 
typeahead (thermometer taken by Objects/82) "1f912": { "canonical_name": "sick", "aliases": ["flu", "face_with_thermometer", "ill", "fever"], }, # hurt and injured from https://beebom.com/emoji-meanings/. Chose hurt as # primary since I think it can cover a wider set of things (e.g. emotional # hurt) "1f915": {"canonical_name": "hurt", "aliases": ["head_bandage", "injured"]}, # devil from https://emojipedia.org/smiling-face-with-horns/, # smiling_face_with_horns from gemoji/Unicode "1f608": { "canonical_name": "smiling_devil", "aliases": ["smiling_imp", "smiling_face_with_horns"], }, # angry_devil from https://beebom.com/emoji-meanings/ "1f47f": {"canonical_name": "devil", "aliases": ["imp", "angry_devil"]}, "1f479": {"canonical_name": "ogre", "aliases": []}, "1f47a": {"canonical_name": "goblin", "aliases": []}, # pile_of_poo from gemoji/Unicode "1f4a9": {"canonical_name": "poop", "aliases": ["pile_of_poo"]}, # alternates seemed like reasonable additions "1f47b": {"canonical_name": "ghost", "aliases": ["boo", "spooky", "haunted"]}, "1f480": {"canonical_name": "skull", "aliases": []}, # alternates seemed like reasonable additions "2620": { "canonical_name": "skull_and_crossbones", "aliases": ["pirate", "death", "hazard", "toxic", "poison"], }, # ufo seemed like a natural addition "1f47d": {"canonical_name": "alien", "aliases": ["ufo"]}, "1f47e": {"canonical_name": "space_invader", "aliases": []}, "1f916": {"canonical_name": "robot", "aliases": []}, # pumpkin seemed like a natural addition "1f383": {"canonical_name": "jack-o-lantern", "aliases": ["pumpkin"]}, "1f63a": {"canonical_name": "smiley_cat", "aliases": []}, "1f638": {"canonical_name": "smile_cat", "aliases": []}, "1f639": {"canonical_name": "joy_cat", "aliases": []}, "1f63b": {"canonical_name": "heart_eyes_cat", "aliases": []}, # smug_cat to parallel People/31 "1f63c": {"canonical_name": "smirk_cat", "aliases": ["smug_cat"]}, "1f63d": {"canonical_name": "kissing_cat", "aliases": []}, # weary_cat from 
Unicode/gemoji "1f640": {"canonical_name": "scream_cat", "aliases": ["weary_cat"]}, "1f63f": {"canonical_name": "crying_cat", "aliases": []}, # angry_cat to better parallel People/45 "1f63e": {"canonical_name": "angry_cat", "aliases": ["pouting_cat"]}, "1f450": {"canonical_name": "open_hands", "aliases": []}, # praise from # https://emojipedia.org/person-raising-both-hands-in-celebration/ "1f64c": {"canonical_name": "raised_hands", "aliases": ["praise"]}, # applause from https://emojipedia.org/clapping-hands-sign/ "1f44f": {"canonical_name": "clap", "aliases": ["applause"]}, # welcome and thank_you from # https://emojipedia.org/person-with-folded-hands/, namaste from indian # culture "1f64f": {"canonical_name": "pray", "aliases": ["welcome", "thank_you", "namaste"]}, # done_deal seems like a natural addition "1f91d": {"canonical_name": "handshake", "aliases": ["done_deal"]}, "1f44d": {"canonical_name": "+1", "aliases": ["thumbs_up", "like"]}, "1f44e": {"canonical_name": "-1", "aliases": ["thumbs_down"]}, # fist_bump from https://beebom.com/emoji-meanings/ "1f44a": {"canonical_name": "fist_bump", "aliases": ["punch"]}, # used as power in social justice movements "270a": {"canonical_name": "fist", "aliases": ["power"]}, "1f91b": {"canonical_name": "left_fist", "aliases": []}, "1f91c": {"canonical_name": "right_fist", "aliases": []}, "1f91e": {"canonical_name": "fingers_crossed", "aliases": []}, # seems to be mostly used as peace on twitter "270c": {"canonical_name": "peace_sign", "aliases": ["victory"]}, # https://emojipedia.org/sign-of-the-horns/ "1f918": {"canonical_name": "rock_on", "aliases": ["sign_of_the_horns"]}, # got_it seems like a natural addition "1f44c": {"canonical_name": "ok", "aliases": ["got_it"]}, "1f448": {"canonical_name": "point_left", "aliases": []}, "1f449": {"canonical_name": "point_right", "aliases": []}, # :this: is a way of emphasizing the previous message. 
point_up instead of # point_up_2 so that point_up better matches the other point_*s "1f446": {"canonical_name": "point_up", "aliases": ["this"]}, "1f447": {"canonical_name": "point_down", "aliases": []}, # People/114 is point_up. These seemed better than naming it point_up_2, # and point_of_information means it will come up in typeahead for 'point' "261d": { "canonical_name": "wait_one_second", "aliases": ["point_of_information", "asking_a_question"], }, "270b": {"canonical_name": "hand", "aliases": ["raised_hand"]}, # seems like best emoji for stop, raised_back_of_hand doesn't seem that # useful "1f91a": {"canonical_name": "stop", "aliases": []}, # seems like best emoji for high_five, raised_hand_with_fingers_splayed # doesn't seem that useful "1f590": {"canonical_name": "high_five", "aliases": ["palm"]}, # https://mashable.com/2015/10/23/ios-9-1-emoji-guide/ "1f596": {"canonical_name": "spock", "aliases": ["live_long_and_prosper"]}, # People/119 is a better 'hi', but 'hi' will never show up in the typeahead # due to 'high_five' "1f44b": {"canonical_name": "wave", "aliases": ["hello", "hi"]}, "1f919": {"canonical_name": "call_me", "aliases": []}, # flexed_biceps from gemoji/Unicode, strong seemed like a good addition "1f4aa": {"canonical_name": "muscle", "aliases": []}, "1f595": {"canonical_name": "middle_finger", "aliases": []}, "270d": {"canonical_name": "writing", "aliases": []}, "1f933": {"canonical_name": "selfie", "aliases": []}, # Couldn't figure out why iamcal chose nail_care. 
Unicode uses nail_polish, # gemoji uses both "1f485": {"canonical_name": "nail_polish", "aliases": ["nail_care"]}, "1f48d": {"canonical_name": "ring", "aliases": []}, "1f484": {"canonical_name": "lipstick", "aliases": []}, # People/18 seems like a better kiss for most circumstances "1f48b": {"canonical_name": "lipstick_kiss", "aliases": []}, # mouth from gemoji/Unicode "1f444": {"canonical_name": "lips", "aliases": ["mouth"]}, "1f445": {"canonical_name": "tongue", "aliases": []}, "1f442": {"canonical_name": "ear", "aliases": []}, "1f443": {"canonical_name": "nose", "aliases": []}, # seems a better feet than Nature/86 (paw_prints) "1f463": {"canonical_name": "footprints", "aliases": ["feet"]}, "1f441": {"canonical_name": "eye", "aliases": []}, # seemed the best emoji for looking "1f440": {"canonical_name": "eyes", "aliases": ["looking"]}, "1f5e3": {"canonical_name": "speaking_head", "aliases": []}, # shadow seems like a good addition "1f464": {"canonical_name": "silhouette", "aliases": ["shadow"]}, # to parallel People/139 "1f465": {"canonical_name": "silhouettes", "aliases": ["shadows"]}, "1f476": {"canonical_name": "baby", "aliases": []}, "1f466": {"canonical_name": "boy", "aliases": []}, "1f467": {"canonical_name": "girl", "aliases": []}, "1f468": {"canonical_name": "man", "aliases": []}, "1f469": {"canonical_name": "woman", "aliases": []}, # It's used on twitter a bunch, either when showing off hair, or in a way # where People/144 would substitute. It'd be nice if there were another # emoji one could use for "good hair", but I think not a big loss to not # have one for Zulip, and not worth the eurocentrism. 
# '1f471': {'canonical_name': 'X', 'aliases': ['person_with_blond_hair']}, # Added elderly since I think some people prefer that term "1f474": {"canonical_name": "older_man", "aliases": ["elderly_man"]}, # Added elderly since I think some people prefer that term "1f475": {"canonical_name": "older_woman", "aliases": ["elderly_woman"]}, "1f472": {"canonical_name": "gua_pi_mao", "aliases": []}, "1f473": {"canonical_name": "turban", "aliases": []}, # police seems like a more polite term, and matches the Unicode "1f46e": {"canonical_name": "police", "aliases": ["cop"]}, "1f477": {"canonical_name": "construction_worker", "aliases": []}, "1f482": {"canonical_name": "guard", "aliases": []}, # detective from gemoji, sneaky from # https://mashable.com/2015/10/23/ios-9-1-emoji-guide/, agent seems a # reasonable addition "1f575": {"canonical_name": "detective", "aliases": ["spy", "sleuth", "agent", "sneaky"]}, # mrs_claus from https://emojipedia.org/mother-christmas/ "1f936": {"canonical_name": "mother_christmas", "aliases": ["mrs_claus"]}, "1f385": {"canonical_name": "santa", "aliases": []}, "1f478": {"canonical_name": "princess", "aliases": []}, "1f934": {"canonical_name": "prince", "aliases": []}, "1f470": {"canonical_name": "bride", "aliases": []}, "1f935": {"canonical_name": "tuxedo", "aliases": []}, "1f47c": {"canonical_name": "angel", "aliases": []}, # expecting seems like a good addition "1f930": {"canonical_name": "pregnant", "aliases": ["expecting"]}, "1f647": {"canonical_name": "bow", "aliases": []}, # mostly used sassily. person_tipping_hand from # https://emojipedia.org/information-desk-person/ "1f481": {"canonical_name": "information_desk_person", "aliases": ["person_tipping_hand"]}, # no_signal to parallel People/207. 
Nope seems like a reasonable addition "1f645": {"canonical_name": "no_signal", "aliases": ["nope"]}, "1f646": {"canonical_name": "ok_signal", "aliases": []}, # pick_me seems like a good addition "1f64b": {"canonical_name": "raising_hand", "aliases": ["pick_me"]}, "1f926": {"canonical_name": "face_palm", "aliases": []}, "1f937": {"canonical_name": "shrug", "aliases": []}, "1f64e": {"canonical_name": "person_pouting", "aliases": []}, "1f64d": {"canonical_name": "person_frowning", "aliases": []}, "1f487": {"canonical_name": "haircut", "aliases": []}, "1f486": {"canonical_name": "massage", "aliases": []}, # hover seems like a reasonable addition "1f574": {"canonical_name": "levitating", "aliases": ["hover"]}, "1f483": {"canonical_name": "dancer", "aliases": []}, "1f57a": {"canonical_name": "dancing", "aliases": ["disco"]}, "1f46f": {"canonical_name": "dancers", "aliases": []}, # pedestrian seems like reasonable addition "1f6b6": {"canonical_name": "walking", "aliases": ["pedestrian"]}, "1f3c3": {"canonical_name": "running", "aliases": ["runner"]}, "1f46b": {"canonical_name": "man_and_woman_holding_hands", "aliases": ["man_and_woman_couple"]}, # to parallel People/234 "1f46d": {"canonical_name": "two_women_holding_hands", "aliases": ["women_couple"]}, # to parallel People/234 "1f46c": {"canonical_name": "two_men_holding_hands", "aliases": ["men_couple"]}, # no need for man-woman-boy, since we aren't including the other family # combos "1f46a": {"canonical_name": "family", "aliases": []}, "1f45a": {"canonical_name": "clothing", "aliases": []}, "1f455": {"canonical_name": "shirt", "aliases": ["tshirt"]}, # denim seems like a good addition "1f456": {"canonical_name": "jeans", "aliases": ["denim"]}, # tie is shorter, and a bit more general "1f454": {"canonical_name": "tie", "aliases": []}, "1f457": {"canonical_name": "dress", "aliases": []}, "1f459": {"canonical_name": "bikini", "aliases": []}, "1f458": {"canonical_name": "kimono", "aliases": []}, # I feel like this is 
always used in the plural "1f460": {"canonical_name": "high_heels", "aliases": []}, # flip_flops seems like a reasonable addition "1f461": {"canonical_name": "sandal", "aliases": ["flip_flops"]}, "1f462": {"canonical_name": "boot", "aliases": []}, "1f45e": {"canonical_name": "shoe", "aliases": []}, # running_shoe is from gemoji, sneaker seems like a reasonable addition "1f45f": {"canonical_name": "athletic_shoe", "aliases": ["sneaker", "running_shoe"]}, "1f452": {"canonical_name": "hat", "aliases": []}, "1f3a9": {"canonical_name": "top_hat", "aliases": []}, # graduate seems like a better word for this "1f393": {"canonical_name": "graduate", "aliases": ["mortar_board"]}, # king and queen seem like good additions "1f451": {"canonical_name": "crown", "aliases": ["queen", "king"]}, # safety and invincibility inspired by # https://mashable.com/2015/10/23/ios-9-1-emoji-guide/. hard_hat and # rescue_worker seem like good additions "26d1": { "canonical_name": "helmet", "aliases": ["hard_hat", "rescue_worker", "safety_first", "invincible"], }, # backpack from gemoji, dominates satchel on Google Trends "1f392": {"canonical_name": "backpack", "aliases": ["satchel"]}, "1f45d": {"canonical_name": "pouch", "aliases": []}, "1f45b": {"canonical_name": "purse", "aliases": []}, "1f45c": {"canonical_name": "handbag", "aliases": []}, "1f4bc": {"canonical_name": "briefcase", "aliases": []}, # glasses seems a more common term than eyeglasses, spectacles seems like a # reasonable synonym to add "1f453": {"canonical_name": "glasses", "aliases": ["spectacles"]}, "1f576": {"canonical_name": "dark_sunglasses", "aliases": []}, "1f302": {"canonical_name": "closed_umbrella", "aliases": []}, "2602": {"canonical_name": "umbrella", "aliases": []}, # Some animals have a Unicode codepoint "<animal>", some have a codepoint # "<animal> face", and some have both. If an animal has just a single # codepoint, we call it <animal>, regardless of what the codepoint is. 
If # an animal has both, we call the "<animal>" codepoint <animal>, and come # up with something else useful-seeming for the "<animal> face" codepoint. # The reason we chose "<animal> face" for the non-standard name (instead of # giving "<animal>" the non-standard name, as iamcal does) is because the # apple emoji for the "<animal>"s are too realistic. E.g. Apple's Nature/76 # is less plausibly a puppy than this one. "1f436": {"canonical_name": "puppy", "aliases": []}, "1f431": {"canonical_name": "kitten", "aliases": []}, "1f42d": {"canonical_name": "dormouse", "aliases": []}, "1f439": {"canonical_name": "hamster", "aliases": []}, "1f430": {"canonical_name": "bunny", "aliases": []}, "1f98a": {"canonical_name": "fox", "aliases": []}, "1f43b": {"canonical_name": "bear", "aliases": []}, "1f43c": {"canonical_name": "panda", "aliases": []}, "1f428": {"canonical_name": "koala", "aliases": []}, "1f42f": {"canonical_name": "tiger_cub", "aliases": []}, "1f981": {"canonical_name": "lion", "aliases": []}, "1f42e": {"canonical_name": "calf", "aliases": []}, "1f437": {"canonical_name": "piglet", "aliases": []}, "1f43d": {"canonical_name": "pig_nose", "aliases": []}, "1f438": {"canonical_name": "frog", "aliases": []}, "1f435": {"canonical_name": "monkey_face", "aliases": []}, "1f648": {"canonical_name": "see_no_evil", "aliases": []}, "1f649": {"canonical_name": "hear_no_evil", "aliases": []}, "1f64a": {"canonical_name": "speak_no_evil", "aliases": []}, "1f412": {"canonical_name": "monkey", "aliases": []}, # cluck seemed like a good addition "1f414": {"canonical_name": "chicken", "aliases": ["cluck"]}, "1f427": {"canonical_name": "penguin", "aliases": []}, "1f426": {"canonical_name": "bird", "aliases": []}, "1f424": {"canonical_name": "chick", "aliases": ["baby_chick"]}, "1f423": {"canonical_name": "hatching", "aliases": ["hatching_chick"]}, # https://www.iemoji.com/view/emoji/668/animals-nature/front-facing-baby-chick "1f425": {"canonical_name": "new_baby", "aliases": []}, 
"1f986": {"canonical_name": "duck", "aliases": []}, "1f985": {"canonical_name": "eagle", "aliases": []}, "1f989": {"canonical_name": "owl", "aliases": []}, "1f987": {"canonical_name": "bat", "aliases": []}, "1f43a": {"canonical_name": "wolf", "aliases": []}, "1f417": {"canonical_name": "boar", "aliases": []}, "1f434": {"canonical_name": "pony", "aliases": []}, "1f984": {"canonical_name": "unicorn", "aliases": []}, # buzz seemed like a reasonable addition "1f41d": {"canonical_name": "bee", "aliases": ["buzz", "honeybee"]}, # caterpillar seemed like a reasonable addition "1f41b": {"canonical_name": "bug", "aliases": ["caterpillar"]}, "1f98b": {"canonical_name": "butterfly", "aliases": []}, "1f40c": {"canonical_name": "snail", "aliases": []}, # spiral_shell from Unicode/gemoji, the others seemed like reasonable # additions "1f41a": {"canonical_name": "shell", "aliases": ["seashell", "conch", "spiral_shell"]}, # Unicode/gemoji have lady_beetle; hopefully with ladybug we get both the # people that prefer lady_beetle (with beetle) and ladybug. There is also # ladybird, but seems a bit much for this to complete for bird. 
"1f41e": {"canonical_name": "beetle", "aliases": ["ladybug"]}, "1f41c": {"canonical_name": "ant", "aliases": []}, "1f577": {"canonical_name": "spider", "aliases": []}, "1f578": {"canonical_name": "web", "aliases": ["spider_web"]}, # tortoise seemed like a reasonable addition "1f422": {"canonical_name": "turtle", "aliases": ["tortoise"]}, # put in a few animal sounds, including this one "1f40d": {"canonical_name": "snake", "aliases": ["hiss"]}, "1f98e": {"canonical_name": "lizard", "aliases": ["gecko"]}, "1f982": {"canonical_name": "scorpion", "aliases": []}, "1f980": {"canonical_name": "crab", "aliases": []}, "1f991": {"canonical_name": "squid", "aliases": []}, "1f419": {"canonical_name": "octopus", "aliases": []}, "1f990": {"canonical_name": "shrimp", "aliases": []}, "1f420": {"canonical_name": "tropical_fish", "aliases": []}, "1f41f": {"canonical_name": "fish", "aliases": []}, "1f421": {"canonical_name": "blowfish", "aliases": []}, "1f42c": {"canonical_name": "dolphin", "aliases": ["flipper"]}, "1f988": {"canonical_name": "shark", "aliases": []}, "1f433": {"canonical_name": "whale", "aliases": []}, # https://emojipedia.org/whale/ "1f40b": {"canonical_name": "humpback_whale", "aliases": []}, "1f40a": {"canonical_name": "crocodile", "aliases": []}, "1f406": {"canonical_name": "leopard", "aliases": []}, "1f405": {"canonical_name": "tiger", "aliases": []}, "1f403": {"canonical_name": "water_buffalo", "aliases": []}, "1f402": {"canonical_name": "ox", "aliases": ["bull"]}, "1f404": {"canonical_name": "cow", "aliases": []}, "1f98c": {"canonical_name": "deer", "aliases": []}, # https://emojipedia.org/dromedary-camel/ "1f42a": {"canonical_name": "arabian_camel", "aliases": []}, "1f42b": {"canonical_name": "camel", "aliases": []}, "1f418": {"canonical_name": "elephant", "aliases": []}, "1f98f": {"canonical_name": "rhinoceros", "aliases": []}, "1f98d": {"canonical_name": "gorilla", "aliases": []}, "1f40e": {"canonical_name": "horse", "aliases": []}, "1f416": 
{"canonical_name": "pig", "aliases": ["oink"]}, "1f410": {"canonical_name": "goat", "aliases": []}, "1f40f": {"canonical_name": "ram", "aliases": []}, "1f411": {"canonical_name": "sheep", "aliases": ["baa"]}, "1f415": {"canonical_name": "dog", "aliases": ["woof"]}, "1f429": {"canonical_name": "poodle", "aliases": []}, "1f408": {"canonical_name": "cat", "aliases": ["meow"]}, # alarm seemed like a fun addition "1f413": {"canonical_name": "rooster", "aliases": ["alarm", "cock-a-doodle-doo"]}, "1f983": {"canonical_name": "turkey", "aliases": []}, "1f54a": {"canonical_name": "dove", "aliases": ["dove_of_peace"]}, "1f407": {"canonical_name": "rabbit", "aliases": []}, "1f401": {"canonical_name": "mouse", "aliases": []}, "1f400": {"canonical_name": "rat", "aliases": []}, "1f43f": {"canonical_name": "chipmunk", "aliases": []}, # paws seemed like reasonable addition. Put feet at People/135 "1f43e": {"canonical_name": "paw_prints", "aliases": ["paws"]}, "1f409": {"canonical_name": "dragon", "aliases": []}, "1f432": {"canonical_name": "dragon_face", "aliases": []}, "1f335": {"canonical_name": "cactus", "aliases": []}, "1f384": {"canonical_name": "holiday_tree", "aliases": []}, "1f332": {"canonical_name": "evergreen_tree", "aliases": []}, "1f333": {"canonical_name": "tree", "aliases": ["deciduous_tree"]}, "1f334": {"canonical_name": "palm_tree", "aliases": []}, # sprout seemed like a reasonable addition "1f331": {"canonical_name": "seedling", "aliases": ["sprout"]}, # seemed like the best emoji for plant "1f33f": {"canonical_name": "herb", "aliases": ["plant"]}, # clover seemed like a reasonable addition "2618": {"canonical_name": "shamrock", "aliases": ["clover"]}, # lucky seems more useful "1f340": {"canonical_name": "lucky", "aliases": ["four_leaf_clover"]}, "1f38d": {"canonical_name": "bamboo", "aliases": []}, # https://emojipedia.org/tanabata-tree/ "1f38b": {"canonical_name": "wish_tree", "aliases": ["tanabata_tree"]}, # seemed like good additions. 
Used fall instead of autumn, since don't have # the rest of the seasons, and could imagine someone using both meanings of # fall. "1f343": {"canonical_name": "leaves", "aliases": ["wind", "fall"]}, "1f342": {"canonical_name": "fallen_leaf", "aliases": []}, "1f341": {"canonical_name": "maple_leaf", "aliases": []}, "1f344": {"canonical_name": "mushroom", "aliases": []}, # harvest seems more useful "1f33e": {"canonical_name": "harvest", "aliases": ["ear_of_rice"]}, "1f490": {"canonical_name": "bouquet", "aliases": []}, # seems like the best emoji for flower "1f337": {"canonical_name": "tulip", "aliases": ["flower"]}, "1f339": {"canonical_name": "rose", "aliases": []}, # crushed suggest by a user "1f940": {"canonical_name": "wilted_flower", "aliases": ["crushed"]}, "1f33b": {"canonical_name": "sunflower", "aliases": []}, "1f33c": {"canonical_name": "blossom", "aliases": []}, "1f338": {"canonical_name": "cherry_blossom", "aliases": []}, "1f33a": {"canonical_name": "hibiscus", "aliases": []}, "1f30e": {"canonical_name": "earth_americas", "aliases": []}, "1f30d": {"canonical_name": "earth_africa", "aliases": []}, "1f30f": {"canonical_name": "earth_asia", "aliases": []}, "1f315": {"canonical_name": "full_moon", "aliases": []}, # too many useless moons. Don't seem to get much use on twitter, and clog # up typeahead for moon. 
# '1f316': {'canonical_name': 'X', 'aliases': ['waning_crescent_moon']}, # '1f317': {'canonical_name': 'X', 'aliases': ['last_quarter_moon']}, # '1f318': {'canonical_name': 'X', 'aliases': ['waning_crescent_moon']}, "1f311": {"canonical_name": "new_moon", "aliases": []}, # '1f312': {'canonical_name': 'X', 'aliases': ['waxing_crescent_moon']}, # '1f313': {'canonical_name': 'X', 'aliases': ['first_quarter_moon']}, "1f314": {"canonical_name": "waxing_moon", "aliases": []}, "1f31a": {"canonical_name": "new_moon_face", "aliases": []}, "1f31d": {"canonical_name": "moon_face", "aliases": []}, "1f31e": {"canonical_name": "sun_face", "aliases": []}, # goodnight seems way more useful "1f31b": {"canonical_name": "goodnight", "aliases": []}, # '1f31c': {'canonical_name': 'X', 'aliases': ['last_quarter_moon_with_face']}, # seems like the best emoji for moon "1f319": {"canonical_name": "moon", "aliases": []}, # dizzy taken by People/54, had to come up with something else "1f4ab": {"canonical_name": "seeing_stars", "aliases": []}, "2b50": {"canonical_name": "star", "aliases": []}, # glowing_star from gemoji/Unicode "1f31f": {"canonical_name": "glowing_star", "aliases": []}, # glamour seems like a reasonable addition "2728": {"canonical_name": "sparkles", "aliases": ["glamour"]}, # high_voltage from gemoji/Unicode "26a1": {"canonical_name": "high_voltage", "aliases": ["zap"]}, # https://emojipedia.org/fire/ "1f525": {"canonical_name": "fire", "aliases": ["lit", "hot", "flame"]}, # explosion and crash seem like reasonable additions "1f4a5": {"canonical_name": "boom", "aliases": ["explosion", "crash", "collision"]}, # meteor seems like a reasonable addition "2604": {"canonical_name": "comet", "aliases": ["meteor"]}, "2600": {"canonical_name": "sunny", "aliases": []}, "1f324": {"canonical_name": "mostly_sunny", "aliases": []}, # partly_cloudy for the glass half empty people "26c5": {"canonical_name": "partly_sunny", "aliases": ["partly_cloudy"]}, "1f325": {"canonical_name": "cloudy", 
"aliases": []}, # sunshowers seems like a more fun term "1f326": { "canonical_name": "sunshowers", "aliases": ["sun_and_rain", "partly_sunny_with_rain"], }, # pride and lgbtq seem like reasonable additions "1f308": {"canonical_name": "rainbow", "aliases": ["pride", "lgbtq"]}, # overcast seems like a good addition "2601": {"canonical_name": "cloud", "aliases": ["overcast"]}, # suggested by user typing these into their typeahead. "1f327": {"canonical_name": "rainy", "aliases": ["soaked", "drenched"]}, # thunderstorm seems better for this emoji, and thunder_and_rain more # evocative than thunder_cloud_and_rain "26c8": {"canonical_name": "thunderstorm", "aliases": ["thunder_and_rain"]}, # lightning_storm seemed better than lightning_cloud "1f329": {"canonical_name": "lightning", "aliases": ["lightning_storm"]}, # snowy to parallel sunny, cloudy, etc; snowstorm seems like a good # addition "1f328": {"canonical_name": "snowy", "aliases": ["snowstorm"]}, "2603": {"canonical_name": "snowman", "aliases": []}, # don't need two snowmen. frosty is nice because it's a weather (primary # benefit) and also a snowman (one that suffered from not having snow, in # fact) "26c4": {"canonical_name": "frosty", "aliases": []}, "2744": {"canonical_name": "snowflake", "aliases": []}, # the internet didn't seem to have a good use for this emoji. windy is a # good weather that is otherwise not represented. mother_nature from # https://emojipedia.org/wind-blowing-face/ "1f32c": {"canonical_name": "windy", "aliases": ["mother_nature"]}, "1f4a8": {"canonical_name": "dash", "aliases": []}, # tornado_cloud comes from the Unicode, but e.g. gemoji drops the cloud "1f32a": {"canonical_name": "tornado", "aliases": []}, # hazy seemed like a good addition "1f32b": {"canonical_name": "fog", "aliases": ["hazy"]}, "1f30a": {"canonical_name": "ocean", "aliases": []}, # drop seems better than droplet, since could be used for its other # meanings. 
water drop partly so that it shows up in typeahead for water "1f4a7": {"canonical_name": "drop", "aliases": ["water_drop"]}, "1f4a6": {"canonical_name": "sweat_drops", "aliases": []}, "2614": {"canonical_name": "umbrella_with_rain", "aliases": []}, "1f34f": {"canonical_name": "green_apple", "aliases": []}, "1f34e": {"canonical_name": "apple", "aliases": []}, "1f350": {"canonical_name": "pear", "aliases": []}, # An argument for not calling this orange is to save the color for a color # swatch, but we can deal with that when it happens. Mandarin is from # https://emojipedia.org/tangerine/, also like that it has a second meaning "1f34a": {"canonical_name": "orange", "aliases": ["tangerine", "mandarin"]}, "1f34b": {"canonical_name": "lemon", "aliases": []}, "1f34c": {"canonical_name": "banana", "aliases": []}, "1f349": {"canonical_name": "watermelon", "aliases": []}, "1f347": {"canonical_name": "grapes", "aliases": []}, "1f353": {"canonical_name": "strawberry", "aliases": []}, "1f348": {"canonical_name": "melon", "aliases": []}, "1f352": {"canonical_name": "cherries", "aliases": []}, "1f351": {"canonical_name": "peach", "aliases": []}, "1f34d": {"canonical_name": "pineapple", "aliases": []}, "1f95d": {"canonical_name": "kiwi", "aliases": []}, "1f951": {"canonical_name": "avocado", "aliases": []}, "1f345": {"canonical_name": "tomato", "aliases": []}, "1f346": {"canonical_name": "eggplant", "aliases": []}, "1f952": {"canonical_name": "cucumber", "aliases": []}, "1f955": {"canonical_name": "carrot", "aliases": []}, # maize is from Unicode "1f33d": {"canonical_name": "corn", "aliases": ["maize"]}, # chili_pepper seems like a reasonable addition "1f336": {"canonical_name": "hot_pepper", "aliases": ["chili_pepper"]}, "1f954": {"canonical_name": "potato", "aliases": []}, # yam seems better than sweet_potato, since we already have a potato (not a # strong argument, but is better on the typeahead not to have emoji that # share long prefixes) "1f360": {"canonical_name": "yam", 
"aliases": ["sweet_potato"]}, "1f330": {"canonical_name": "chestnut", "aliases": []}, "1f95c": {"canonical_name": "peanuts", "aliases": []}, "1f36f": {"canonical_name": "honey", "aliases": []}, "1f950": {"canonical_name": "croissant", "aliases": []}, "1f35e": {"canonical_name": "bread", "aliases": []}, "1f956": {"canonical_name": "baguette", "aliases": []}, "1f9c0": {"canonical_name": "cheese", "aliases": []}, "1f95a": {"canonical_name": "egg", "aliases": []}, # already have an egg in Foods/31, though I guess wouldn't be a big deal to # add it here. "1f373": {"canonical_name": "cooking", "aliases": []}, "1f953": {"canonical_name": "bacon", "aliases": []}, # there's no lunch and dinner, which is a small negative against adding # breakfast "1f95e": {"canonical_name": "pancakes", "aliases": ["breakfast"]}, # There is already shrimp in Nature/51, and tempura seems like a better # description "1f364": {"canonical_name": "tempura", "aliases": []}, # drumstick seems like a better description "1f357": {"canonical_name": "drumstick", "aliases": ["poultry"]}, "1f356": {"canonical_name": "meat", "aliases": []}, "1f355": {"canonical_name": "pizza", "aliases": []}, "1f32d": {"canonical_name": "hotdog", "aliases": []}, "1f354": {"canonical_name": "hamburger", "aliases": []}, "1f35f": {"canonical_name": "fries", "aliases": []}, # https://emojipedia.org/stuffed-flatbread/ "1f959": { "canonical_name": "doner_kebab", "aliases": ["shawarma", "souvlaki", "stuffed_flatbread"], }, "1f32e": {"canonical_name": "taco", "aliases": []}, "1f32f": {"canonical_name": "burrito", "aliases": []}, "1f957": {"canonical_name": "salad", "aliases": []}, # I think Foods/49 is a better :food: "1f958": {"canonical_name": "paella", "aliases": []}, "1f35d": {"canonical_name": "spaghetti", "aliases": []}, # seems like the best noodles? maybe this should be Foods/47? Noodles seem # like a bigger thing in east asia than in europe, so going with that. 
"1f35c": {"canonical_name": "ramen", "aliases": ["noodles"]}, # seems like the best :food:. Also a reasonable :soup:, though the Google # one is indeed more a pot of food (the Unicode) than a soup "1f372": {"canonical_name": "food", "aliases": ["soup", "stew"]}, # naruto is actual name, and I think don't need this to autocomplete for # "fish" "1f365": {"canonical_name": "naruto", "aliases": []}, "1f363": {"canonical_name": "sushi", "aliases": []}, "1f371": {"canonical_name": "bento", "aliases": []}, "1f35b": {"canonical_name": "curry", "aliases": []}, "1f35a": {"canonical_name": "rice", "aliases": []}, # onigiri is actual name, and I think don't need this to typeahead complete # for "rice" "1f359": {"canonical_name": "onigiri", "aliases": []}, # leaving rice_cracker in, so that we have something for cracker "1f358": {"canonical_name": "senbei", "aliases": ["rice_cracker"]}, "1f362": {"canonical_name": "oden", "aliases": []}, "1f361": {"canonical_name": "dango", "aliases": []}, "1f367": {"canonical_name": "shaved_ice", "aliases": []}, # seemed like the best emoji for gelato "1f368": {"canonical_name": "ice_cream", "aliases": ["gelato"]}, # already have ice_cream in Foods/60, and soft_serve seems like a # potentially fun emoji to have in conjunction with ice_cream. Put in # soft_ice_cream so it typeahead completes on ice_cream as well. 
"1f366": {"canonical_name": "soft_serve", "aliases": ["soft_ice_cream"]}, "1f370": {"canonical_name": "cake", "aliases": []}, "1f382": {"canonical_name": "birthday", "aliases": []}, # flan seems like a reasonable addition "1f36e": {"canonical_name": "custard", "aliases": ["flan"]}, "1f36d": {"canonical_name": "lollipop", "aliases": []}, "1f36c": {"canonical_name": "candy", "aliases": []}, "1f36b": {"canonical_name": "chocolate", "aliases": []}, "1f37f": {"canonical_name": "popcorn", "aliases": []}, # donut dominates doughnut on # https://trends.google.com/trends/explore?q=doughnut,donut "1f369": {"canonical_name": "donut", "aliases": ["doughnut"]}, "1f36a": {"canonical_name": "cookie", "aliases": []}, "1f95b": {"canonical_name": "milk", "aliases": ["glass_of_milk"]}, "1f37c": {"canonical_name": "baby_bottle", "aliases": []}, "2615": {"canonical_name": "coffee", "aliases": []}, "1f375": {"canonical_name": "tea", "aliases": []}, "1f376": {"canonical_name": "sake", "aliases": []}, "1f37a": {"canonical_name": "beer", "aliases": []}, "1f37b": {"canonical_name": "beers", "aliases": []}, "1f942": {"canonical_name": "clink", "aliases": ["toast"]}, "1f377": {"canonical_name": "wine", "aliases": []}, # tumbler means something different in india, and don't want to use # shot_glass given our policy of using school-age-appropriate terms "1f943": {"canonical_name": "small_glass", "aliases": []}, "1f378": {"canonical_name": "cocktail", "aliases": []}, "1f379": {"canonical_name": "tropical_drink", "aliases": []}, "1f37e": {"canonical_name": "champagne", "aliases": []}, "1f944": {"canonical_name": "spoon", "aliases": []}, # Added eating_utensils so this would show up in typeahead for eat. "1f374": {"canonical_name": "fork_and_knife", "aliases": ["eating_utensils"]}, # Seems like the best emoji for hungry and meal. fork_and_knife_and_plate # is from gemoji/Unicode, and I think is better than the shorter iamcal # version in this case. The rest just seemed like good additions. 
"1f37d": { "canonical_name": "hungry", "aliases": ["meal", "table_setting", "fork_and_knife_with_plate", "lets_eat"], }, # most people interested in this sport call it football "26bd": {"canonical_name": "football", "aliases": ["soccer"]}, "1f3c0": {"canonical_name": "basketball", "aliases": []}, # to distinguish from Activity/1, but is also the Unicode name "1f3c8": {"canonical_name": "american_football", "aliases": []}, "26be": {"canonical_name": "baseball", "aliases": []}, "1f3be": {"canonical_name": "tennis", "aliases": []}, "1f3d0": {"canonical_name": "volleyball", "aliases": []}, "1f3c9": {"canonical_name": "rugby", "aliases": []}, # https://emojipedia.org/billiards/ suggests this is actually used for # billiards, not for "unlucky" or "losing" or some other connotation of # 8ball. The Unicode name is billiards. "1f3b1": {"canonical_name": "billiards", "aliases": ["pool", "8_ball"]}, # ping pong is the Unicode name, and seems slightly more popular on # https://trends.google.com/trends/explore?q=table%20tennis,ping%20pong "1f3d3": {"canonical_name": "ping_pong", "aliases": ["table_tennis"]}, "1f3f8": {"canonical_name": "badminton", "aliases": []}, # gooooooooal seems more useful of a name, though arguably this isn't the # best emoji for it "1f945": {"canonical_name": "gooooooooal", "aliases": ["goal"]}, "1f3d2": {"canonical_name": "ice_hockey", "aliases": []}, "1f3d1": {"canonical_name": "field_hockey", "aliases": []}, # would say bat, but taken by Nature/30 "1f3cf": {"canonical_name": "cricket", "aliases": ["cricket_bat"]}, # hole_in_one seems like a more useful name to have. Sent golf to # Activity/39 "26f3": {"canonical_name": "hole_in_one", "aliases": []}, # archery seems like a reasonable addition "1f3f9": {"canonical_name": "bow_and_arrow", "aliases": ["archery"]}, "1f3a3": {"canonical_name": "fishing", "aliases": []}, "1f94a": {"canonical_name": "boxing_glove", "aliases": []}, # keikogi and dogi are the actual names for this, I believe. 
black_belt is # I think a more useful name here "1f94b": {"canonical_name": "black_belt", "aliases": ["keikogi", "dogi", "martial_arts"]}, "26f8": {"canonical_name": "ice_skate", "aliases": []}, "1f3bf": {"canonical_name": "ski", "aliases": []}, "26f7": {"canonical_name": "skier", "aliases": []}, "1f3c2": {"canonical_name": "snowboarder", "aliases": []}, # lift is both what lifters call it, and potentially can be used more # generally than weight_lift. The others seemed like good additions. "1f3cb": {"canonical_name": "lift", "aliases": ["work_out", "weight_lift", "gym"]}, # The decisions on tenses here and in the rest of the sports section are # mostly from gut feel. The Unicode itself is all over the place. "1f93a": {"canonical_name": "fencing", "aliases": []}, "1f93c": {"canonical_name": "wrestling", "aliases": []}, # seemed like reasonable additions "1f938": {"canonical_name": "cartwheel", "aliases": ["acrobatics", "gymnastics", "tumbling"]}, # seemed the best emoji for sports "26f9": {"canonical_name": "ball", "aliases": ["sports"]}, "1f93e": {"canonical_name": "handball", "aliases": []}, "1f3cc": {"canonical_name": "golf", "aliases": []}, "1f3c4": {"canonical_name": "surf", "aliases": []}, "1f3ca": {"canonical_name": "swim", "aliases": []}, "1f93d": {"canonical_name": "water_polo", "aliases": []}, # rest seem like reasonable additions "1f6a3": {"canonical_name": "rowboat", "aliases": ["crew", "sculling", "rowing"]}, # horse_riding seems like a reasonable addition "1f3c7": {"canonical_name": "horse_racing", "aliases": ["horse_riding"]}, # at least in the US: this = cyclist, Activity/53 = mountain biker, and # motorcyclist = biker. Mainly from googling around and personal # experience. E.g. 
https://grammarist.com/usage/cyclist-biker/ for cyclist # and biker, # https://www.theguardian.com/lifeandstyle/2010/oct/24/bike-snobs-guide-cycling-tribes # for mountain biker (I've never heard the term "mountain cyclist", and # they are the only group on that page that gets "biker" instead of # "cyclist") "1f6b4": {"canonical_name": "cyclist", "aliases": []}, # see Activity/51 "1f6b5": {"canonical_name": "mountain_biker", "aliases": []}, "1f3bd": {"canonical_name": "running_shirt", "aliases": []}, # I feel like people call sports medals "medals", and military medals # "military medals". Also see Activity/56 "1f3c5": {"canonical_name": "medal", "aliases": []}, # See Activity/55. military_medal is the gemoji/Unicode "1f396": {"canonical_name": "military_medal", "aliases": []}, # gold and number_one seem like good additions "1f947": {"canonical_name": "first_place", "aliases": ["gold", "number_one"]}, # to parallel Activity/57 "1f948": {"canonical_name": "second_place", "aliases": ["silver"]}, # to parallel Activity/57 "1f949": {"canonical_name": "third_place", "aliases": ["bronze"]}, # seemed the best emoji for winner "1f3c6": {"canonical_name": "trophy", "aliases": ["winner"]}, "1f3f5": {"canonical_name": "rosette", "aliases": []}, "1f397": {"canonical_name": "reminder_ribbon", "aliases": []}, # don't need ticket and admission_ticket (see Activity/64), so made one of # them :pass:. 
"1f3ab": {"canonical_name": "pass", "aliases": []}, # see Activity/63 "1f39f": {"canonical_name": "ticket", "aliases": []}, "1f3aa": {"canonical_name": "circus", "aliases": []}, "1f939": {"canonical_name": "juggling", "aliases": []}, # rest seem like good additions "1f3ad": {"canonical_name": "performing_arts", "aliases": ["drama", "theater"]}, # rest seem like good additions "1f3a8": {"canonical_name": "art", "aliases": ["palette", "painting"]}, # action seems more useful than clapper, and clapper doesn't seem like that # common of a term "1f3ac": {"canonical_name": "action", "aliases": []}, # seem like good additions "1f3a4": {"canonical_name": "microphone", "aliases": ["mike", "mic"]}, "1f3a7": {"canonical_name": "headphones", "aliases": []}, "1f3bc": {"canonical_name": "musical_score", "aliases": []}, # piano seems more useful than musical_keyboard "1f3b9": {"canonical_name": "piano", "aliases": ["musical_keyboard"]}, "1f941": {"canonical_name": "drum", "aliases": []}, "1f3b7": {"canonical_name": "saxophone", "aliases": []}, "1f3ba": {"canonical_name": "trumpet", "aliases": []}, "1f3b8": {"canonical_name": "guitar", "aliases": []}, "1f3bb": {"canonical_name": "violin", "aliases": []}, # dice seems more useful "1f3b2": {"canonical_name": "dice", "aliases": ["die"]}, # direct_hit from gemoji/Unicode, and seems more useful. bulls_eye seemed # like a reasonable addition "1f3af": {"canonical_name": "direct_hit", "aliases": ["darts", "bulls_eye"]}, # strike seemed more useful than bowling "1f3b3": {"canonical_name": "strike", "aliases": ["bowling"]}, "1f3ae": {"canonical_name": "video_game", "aliases": []}, # gambling seemed more useful than slot_machine "1f3b0": {"canonical_name": "slot_machine", "aliases": []}, # the Google emoji for this is not red "1f697": {"canonical_name": "car", "aliases": []}, # rideshare seems like a reasonable addition "1f695": {"canonical_name": "taxi", "aliases": ["rideshare"]}, # the Google emoji for this is not blue. 
recreational_vehicle is from # gemoji/Unicode, jeep seemed like a good addition "1f699": {"canonical_name": "recreational_vehicle", "aliases": ["jeep"]}, # school_bus seemed like a reasonable addition, even though the twitter # glyph for this doesn't really look like a school bus "1f68c": {"canonical_name": "bus", "aliases": ["school_bus"]}, "1f68e": {"canonical_name": "trolley", "aliases": []}, "1f3ce": {"canonical_name": "racecar", "aliases": []}, "1f693": {"canonical_name": "police_car", "aliases": []}, "1f691": {"canonical_name": "ambulance", "aliases": []}, # https://trends.google.com/trends/explore?q=fire%20truck,fire%20engine "1f692": {"canonical_name": "fire_truck", "aliases": ["fire_engine"]}, "1f690": {"canonical_name": "minibus", "aliases": []}, # moving_truck and truck for Places/11 and Places/12 seem much better than # the iamcal names "1f69a": {"canonical_name": "moving_truck", "aliases": []}, # see Places/11 for truck. Rest seem reasonable additions. "1f69b": { "canonical_name": "truck", "aliases": ["tractor-trailer", "big_rig", "semi_truck", "transport_truck"], }, "1f69c": {"canonical_name": "tractor", "aliases": []}, # kick_scooter and scooter seem better for Places/14 and Places /16 than # scooter and motor_scooter. "1f6f4": {"canonical_name": "kick_scooter", "aliases": []}, "1f6b2": {"canonical_name": "bike", "aliases": ["bicycle"]}, # see Places/14. Called motor_bike (or bike) in India "1f6f5": {"canonical_name": "scooter", "aliases": ["motor_bike"]}, "1f3cd": {"canonical_name": "motorcycle", "aliases": []}, # siren seems more useful. alert seems like a reasonable addition "1f6a8": {"canonical_name": "siren", "aliases": ["rotating_light", "alert"]}, "1f694": {"canonical_name": "oncoming_police_car", "aliases": []}, "1f68d": {"canonical_name": "oncoming_bus", "aliases": []}, # car to parallel e.g. 
Places/1 "1f698": {"canonical_name": "oncoming_car", "aliases": ["oncoming_automobile"]}, "1f696": {"canonical_name": "oncoming_taxi", "aliases": []}, # ski_lift seems like a good addition "1f6a1": {"canonical_name": "aerial_tramway", "aliases": ["ski_lift"]}, # gondola seems more useful "1f6a0": {"canonical_name": "gondola", "aliases": ["mountain_cableway"]}, "1f69f": {"canonical_name": "suspension_railway", "aliases": []}, # train_car seems like a reasonable addition "1f683": {"canonical_name": "railway_car", "aliases": ["train_car"]}, # this does not seem like a good emoji for train, especially compared to # Places/33. streetcar seems like a good addition. "1f68b": {"canonical_name": "tram", "aliases": ["streetcar"]}, "1f69e": {"canonical_name": "mountain_railway", "aliases": []}, # elevated_train seems like a reasonable addition "1f69d": {"canonical_name": "monorail", "aliases": ["elevated_train"]}, # from gemoji/Unicode. Also, don't think we need two bullettrains "1f684": {"canonical_name": "high_speed_train", "aliases": []}, # Google, Wikipedia, etc. prefer bullet train to bullettrain "1f685": {"canonical_name": "bullet_train", "aliases": []}, "1f688": {"canonical_name": "light_rail", "aliases": []}, "1f682": {"canonical_name": "train", "aliases": ["steam_locomotive"]}, # oncoming_train seems better than train2 "1f686": {"canonical_name": "oncoming_train", "aliases": []}, # saving metro for Symbols/108. The tunnel makes subway more appropriate # anyway. "1f687": {"canonical_name": "subway", "aliases": []}, # all the glyphs of oncoming vehicles have names like oncoming_*. The # alternate names are to parallel the alternates to Places/27. 
"1f68a": { "canonical_name": "oncoming_tram", "aliases": ["oncoming_streetcar", "oncoming_trolley"], }, "1f689": {"canonical_name": "station", "aliases": []}, "1f681": {"canonical_name": "helicopter", "aliases": []}, "1f6e9": {"canonical_name": "small_airplane", "aliases": []}, "2708": {"canonical_name": "airplane", "aliases": []}, # take_off seems more useful than airplane_departure. departure also seems # more useful than airplane_departure. Arguably departure should be the # primary, since arrival is probably more useful than landing in Places/42, # but going with this for now. "1f6eb": {"canonical_name": "take_off", "aliases": ["departure", "airplane_departure"]}, # parallel to Places/41 "1f6ec": {"canonical_name": "landing", "aliases": ["arrival", "airplane_arrival"]}, "1f680": {"canonical_name": "rocket", "aliases": []}, "1f6f0": {"canonical_name": "satellite", "aliases": []}, "1f4ba": {"canonical_name": "seat", "aliases": []}, "1f6f6": {"canonical_name": "canoe", "aliases": []}, "26f5": {"canonical_name": "boat", "aliases": ["sailboat"]}, "1f6e5": {"canonical_name": "motor_boat", "aliases": []}, "1f6a4": {"canonical_name": "speedboat", "aliases": []}, # yacht and cruise seem like reasonable additions "1f6f3": {"canonical_name": "passenger_ship", "aliases": ["yacht", "cruise"]}, "26f4": {"canonical_name": "ferry", "aliases": []}, "1f6a2": {"canonical_name": "ship", "aliases": []}, "2693": {"canonical_name": "anchor", "aliases": []}, # there already is a construction in Places/82, and work_in_progress seems # like a useful thing to have. Construction_zone seems better than the # Unicode construction_sign, and is there partly so this autocompletes for # construction. "1f6a7": {"canonical_name": "work_in_progress", "aliases": ["construction_zone"]}, # alternates from https://emojipedia.org/fuel-pump/. 
Unicode is fuel_pump, # not fuelpump "26fd": {"canonical_name": "fuel_pump", "aliases": ["gas_pump", "petrol_pump"]}, # not sure why iamcal removed the space "1f68f": {"canonical_name": "bus_stop", "aliases": []}, # https://emojipedia.org/vertical-traffic-light/ thinks this is the more # common of the two traffic lights, so putting traffic_light on this one "1f6a6": {"canonical_name": "traffic_light", "aliases": ["vertical_traffic_light"]}, # see Places/57 "1f6a5": {"canonical_name": "horizontal_traffic_light", "aliases": []}, # road_trip from https://mashable.com/2015/10/23/ios-9-1-emoji-guide/ "1f5fa": {"canonical_name": "map", "aliases": ["world_map", "road_trip"]}, # rock_carving, statue, and tower seem more general and less culturally # specific, for Places/60, 61, and 63. "1f5ff": {"canonical_name": "rock_carving", "aliases": ["moyai"]}, # new_york from https://emojipedia.org/statue-of-liberty/. see Places/60 # for statue "1f5fd": {"canonical_name": "statue", "aliases": ["new_york", "statue_of_liberty"]}, "26f2": {"canonical_name": "fountain", "aliases": []}, # see Places/60 "1f5fc": {"canonical_name": "tower", "aliases": ["tokyo_tower"]}, # choosing this as the castle since castles are a way bigger thing in # europe than japan, and shiro is a pretty reasonable name for Places/65 "1f3f0": {"canonical_name": "castle", "aliases": []}, # see Places/64 "1f3ef": {"canonical_name": "shiro", "aliases": []}, "1f3df": {"canonical_name": "stadium", "aliases": []}, "1f3a1": {"canonical_name": "ferris_wheel", "aliases": []}, "1f3a2": {"canonical_name": "roller_coaster", "aliases": []}, # merry_go_round seems like a good addition "1f3a0": {"canonical_name": "carousel", "aliases": ["merry_go_round"]}, # beach_umbrella seems more useful "26f1": {"canonical_name": "beach_umbrella", "aliases": []}, "1f3d6": {"canonical_name": "beach", "aliases": []}, "1f3dd": {"canonical_name": "island", "aliases": []}, "26f0": {"canonical_name": "mountain", "aliases": []}, "1f3d4": 
{"canonical_name": "snowy_mountain", "aliases": []}, # already lots of other mountains, otherwise would rename this like # Places/60 "1f5fb": {"canonical_name": "mount_fuji", "aliases": []}, "1f30b": {"canonical_name": "volcano", "aliases": []}, "1f3dc": {"canonical_name": "desert", "aliases": []}, # campsite from https://emojipedia.org/camping/, I think Places/79 is a # better camping "1f3d5": {"canonical_name": "campsite", "aliases": []}, "26fa": {"canonical_name": "tent", "aliases": ["camping"]}, "1f6e4": {"canonical_name": "railway_track", "aliases": ["train_tracks"]}, # road is used much more frequently at # https://trends.google.com/trends/explore?q=road,motorway "1f6e3": {"canonical_name": "road", "aliases": ["motorway"]}, "1f3d7": {"canonical_name": "construction", "aliases": []}, "1f3ed": {"canonical_name": "factory", "aliases": []}, "1f3e0": {"canonical_name": "house", "aliases": []}, # suburb seems more useful "1f3e1": {"canonical_name": "suburb", "aliases": []}, "1f3d8": {"canonical_name": "houses", "aliases": []}, # condemned seemed like a good addition "1f3da": {"canonical_name": "derelict_house", "aliases": ["condemned"]}, "1f3e2": {"canonical_name": "office", "aliases": []}, "1f3ec": {"canonical_name": "department_store", "aliases": []}, "1f3e3": {"canonical_name": "japan_post", "aliases": []}, "1f3e4": {"canonical_name": "post_office", "aliases": []}, "1f3e5": {"canonical_name": "hospital", "aliases": []}, "1f3e6": {"canonical_name": "bank", "aliases": []}, "1f3e8": {"canonical_name": "hotel", "aliases": []}, "1f3ea": {"canonical_name": "convenience_store", "aliases": []}, "1f3eb": {"canonical_name": "school", "aliases": []}, "1f3e9": {"canonical_name": "love_hotel", "aliases": []}, "1f492": {"canonical_name": "wedding", "aliases": []}, "1f3db": {"canonical_name": "classical_building", "aliases": []}, "26ea": {"canonical_name": "church", "aliases": []}, "1f54c": {"canonical_name": "mosque", "aliases": []}, "1f54d": {"canonical_name": "synagogue", 
"aliases": []}, "1f54b": {"canonical_name": "kaaba", "aliases": []}, "26e9": {"canonical_name": "shinto_shrine", "aliases": []}, "1f5fe": {"canonical_name": "japan", "aliases": []}, # rice_scene seems like a strange name to have. gemoji alternate is # moon_ceremony "1f391": {"canonical_name": "moon_ceremony", "aliases": []}, "1f3de": {"canonical_name": "national_park", "aliases": []}, # ocean_sunrise to parallel Places/109 "1f305": {"canonical_name": "sunrise", "aliases": ["ocean_sunrise"]}, "1f304": {"canonical_name": "mountain_sunrise", "aliases": []}, # shooting_star and wish seem like way better descriptions. gemoji/Unicode # is shooting_star "1f320": {"canonical_name": "shooting_star", "aliases": ["wish"]}, "1f387": {"canonical_name": "sparkler", "aliases": []}, "1f386": {"canonical_name": "fireworks", "aliases": []}, "1f307": {"canonical_name": "city_sunrise", "aliases": []}, "1f306": {"canonical_name": "sunset", "aliases": []}, # city and skyline seem more useful than cityscape "1f3d9": {"canonical_name": "city", "aliases": ["skyline"]}, "1f303": {"canonical_name": "night", "aliases": []}, # night_sky seems like a good addition "1f30c": {"canonical_name": "milky_way", "aliases": ["night_sky"]}, "1f309": {"canonical_name": "bridge", "aliases": []}, "1f301": {"canonical_name": "foggy", "aliases": []}, "231a": {"canonical_name": "watch", "aliases": []}, # Unicode/gemoji is mobile_phone. 
The rest seem like good additions "1f4f1": {"canonical_name": "mobile_phone", "aliases": ["smartphone", "iphone", "android"]}, "1f4f2": {"canonical_name": "calling", "aliases": []}, # gemoji has laptop, even though the Google emoji for this does not look # like a laptop "1f4bb": {"canonical_name": "computer", "aliases": ["laptop"]}, "2328": {"canonical_name": "keyboard", "aliases": []}, "1f5a5": {"canonical_name": "desktop_computer", "aliases": []}, "1f5a8": {"canonical_name": "printer", "aliases": []}, # gemoji/Unicode is computer_mouse "1f5b1": {"canonical_name": "computer_mouse", "aliases": []}, "1f5b2": {"canonical_name": "trackball", "aliases": []}, # arcade seems like a reasonable addition "1f579": {"canonical_name": "joystick", "aliases": ["arcade"]}, # vise seems like a reasonable addition "1f5dc": {"canonical_name": "compression", "aliases": ["vise"]}, # gold record seems more useful, idea came from # https://11points.com/11-emoji-different-meanings-think/ "1f4bd": {"canonical_name": "gold_record", "aliases": ["minidisc"]}, "1f4be": {"canonical_name": "floppy_disk", "aliases": []}, "1f4bf": {"canonical_name": "cd", "aliases": []}, "1f4c0": {"canonical_name": "dvd", "aliases": []}, # videocassette from gemoji/Unicode "1f4fc": {"canonical_name": "vhs", "aliases": ["videocassette"]}, "1f4f7": {"canonical_name": "camera", "aliases": []}, # both of these seem more useful than camera_with_flash "1f4f8": {"canonical_name": "taking_a_picture", "aliases": ["say_cheese"]}, # video_recorder seems like a reasonable addition "1f4f9": {"canonical_name": "video_camera", "aliases": ["video_recorder"]}, "1f3a5": {"canonical_name": "movie_camera", "aliases": []}, # seems like the best emoji for movie "1f4fd": {"canonical_name": "projector", "aliases": ["movie"]}, "1f39e": {"canonical_name": "film", "aliases": []}, # both of these seem more useful than telephone_receiver "1f4de": {"canonical_name": "landline", "aliases": ["home_phone"]}, "260e": {"canonical_name": "phone", 
"aliases": ["telephone"]}, "1f4df": {"canonical_name": "pager", "aliases": []}, "1f4e0": {"canonical_name": "fax", "aliases": []}, "1f4fa": {"canonical_name": "tv", "aliases": ["television"]}, "1f4fb": {"canonical_name": "radio", "aliases": []}, "1f399": {"canonical_name": "studio_microphone", "aliases": []}, # volume seems more useful "1f39a": {"canonical_name": "volume", "aliases": ["level_slider"]}, "1f39b": {"canonical_name": "control_knobs", "aliases": []}, "23f1": {"canonical_name": "stopwatch", "aliases": []}, "23f2": {"canonical_name": "timer", "aliases": []}, "23f0": {"canonical_name": "alarm_clock", "aliases": []}, "1f570": {"canonical_name": "mantelpiece_clock", "aliases": []}, # times_up and time_ticking seem more useful than the hourglass names "231b": {"canonical_name": "times_up", "aliases": ["hourglass_done"]}, # seems like the better hourglass. Also see Objects/36 "23f3": {"canonical_name": "time_ticking", "aliases": ["hourglass"]}, "1f4e1": {"canonical_name": "satellite_antenna", "aliases": []}, # seems like a reasonable addition "1f50b": {"canonical_name": "battery", "aliases": ["full_battery"]}, "1f50c": {"canonical_name": "electric_plug", "aliases": []}, # light_bulb seems better and from Unicode/gemoji. 
idea seems like a good # addition "1f4a1": {"canonical_name": "light_bulb", "aliases": ["bulb", "idea"]}, "1f526": {"canonical_name": "flashlight", "aliases": []}, "1f56f": {"canonical_name": "candle", "aliases": []}, # seems like a reasonable addition "1f5d1": {"canonical_name": "wastebasket", "aliases": ["trash_can"]}, # https://www.iemoji.com/view/emoji/1173/objects/oil-drum "1f6e2": {"canonical_name": "oil_drum", "aliases": ["commodities"]}, # losing money from https://emojipedia.org/money-with-wings/, # easy_come_easy_go seems like a reasonable addition "1f4b8": { "canonical_name": "losing_money", "aliases": ["easy_come_easy_go", "money_with_wings"], }, # I think the _bills, _banknotes etc versions of these are arguably more # fun to use in chat, and certainly match the glyphs better "1f4b5": {"canonical_name": "dollar_bills", "aliases": []}, "1f4b4": {"canonical_name": "yen_banknotes", "aliases": []}, "1f4b6": {"canonical_name": "euro_banknotes", "aliases": []}, "1f4b7": {"canonical_name": "pound_notes", "aliases": []}, "1f4b0": {"canonical_name": "money", "aliases": []}, "1f4b3": {"canonical_name": "credit_card", "aliases": ["debit_card"]}, "1f48e": {"canonical_name": "gem", "aliases": ["crystal"]}, # justice seems more useful "2696": {"canonical_name": "justice", "aliases": ["scales", "balance"]}, # fixing, at_work, and working_on_it seem like useful concepts for # workplace chat "1f527": {"canonical_name": "fixing", "aliases": ["wrench"]}, "1f528": {"canonical_name": "hammer", "aliases": ["maintenance", "handyman", "handywoman"]}, "2692": {"canonical_name": "at_work", "aliases": ["hammer_and_pick"]}, # something that might be useful for chat.zulip.org, even "1f6e0": {"canonical_name": "working_on_it", "aliases": ["hammer_and_wrench", "tools"]}, "26cf": {"canonical_name": "mine", "aliases": ["pick"]}, # screw is somewhat inappropriate, but not openly so, so leaving it in "1f529": {"canonical_name": "nut_and_bolt", "aliases": ["screw"]}, "2699": 
{"canonical_name": "gear", "aliases": ["settings", "mechanical", "engineer"]}, "26d3": {"canonical_name": "chains", "aliases": []}, "1f52b": {"canonical_name": "gun", "aliases": []}, "1f4a3": {"canonical_name": "bomb", "aliases": []}, # betrayed from https://www.iemoji.com/view/emoji/786/objects/kitchen-knife "1f52a": {"canonical_name": "knife", "aliases": ["hocho", "betrayed"]}, # rated_for_violence from # https://www.iemoji.com/view/emoji/1085/objects/dagger. hate (also # suggested there) seems too strong, as does just "violence". "1f5e1": {"canonical_name": "dagger", "aliases": ["rated_for_violence"]}, "2694": {"canonical_name": "duel", "aliases": ["swords"]}, "1f6e1": {"canonical_name": "shield", "aliases": []}, "1f6ac": {"canonical_name": "smoking", "aliases": []}, "26b0": {"canonical_name": "coffin", "aliases": ["burial", "grave"]}, "26b1": {"canonical_name": "funeral_urn", "aliases": ["cremation"]}, # amphora is too obscure, I think "1f3fa": {"canonical_name": "vase", "aliases": ["amphora"]}, "1f52e": {"canonical_name": "crystal_ball", "aliases": ["oracle", "future", "fortune_telling"]}, "1f4ff": {"canonical_name": "prayer_beads", "aliases": []}, "1f488": {"canonical_name": "barber", "aliases": ["striped_pole"]}, # alchemy seems more useful and less obscure "2697": {"canonical_name": "alchemy", "aliases": ["alembic"]}, "1f52d": {"canonical_name": "telescope", "aliases": []}, # science seems useful to have. 
scientist inspired by # https://www.iemoji.com/view/emoji/787/objects/microscope "1f52c": {"canonical_name": "science", "aliases": ["microscope", "scientist"]}, "1f573": {"canonical_name": "hole", "aliases": []}, "1f48a": {"canonical_name": "medicine", "aliases": ["pill"]}, "1f489": {"canonical_name": "injection", "aliases": ["syringe"]}, "1f321": {"canonical_name": "temperature", "aliases": ["thermometer", "warm"]}, "1f6bd": {"canonical_name": "toilet", "aliases": []}, "1f6b0": {"canonical_name": "potable_water", "aliases": ["tap_water", "drinking_water"]}, "1f6bf": {"canonical_name": "shower", "aliases": []}, "1f6c1": {"canonical_name": "bathtub", "aliases": []}, "1f6c0": {"canonical_name": "bath", "aliases": []}, # reception and services from # https://www.iemoji.com/view/emoji/1169/objects/bellhop-bell "1f6ce": {"canonical_name": "bellhop_bell", "aliases": ["reception", "services", "ding"]}, "1f511": {"canonical_name": "key", "aliases": []}, # encrypted from https://www.iemoji.com/view/emoji/1081/objects/old-key, # secret from https://mashable.com/2015/10/23/ios-9-1-emoji-guide/ "1f5dd": { "canonical_name": "secret", "aliases": ["dungeon", "old_key", "encrypted", "clue", "hint"], }, "1f6aa": {"canonical_name": "door", "aliases": []}, "1f6cb": { "canonical_name": "living_room", "aliases": ["furniture", "couch_and_lamp", "lifestyles"], }, "1f6cf": {"canonical_name": "bed", "aliases": ["bedroom"]}, # guestrooms from iemoji, would add hotel but taken by Places/94 "1f6cc": {"canonical_name": "in_bed", "aliases": ["accommodations", "guestrooms"]}, "1f5bc": {"canonical_name": "picture", "aliases": ["framed_picture"]}, "1f6cd": {"canonical_name": "shopping_bags", "aliases": []}, # https://trends.google.com/trends/explore?q=shopping%20cart,shopping%20trolley "1f6d2": {"canonical_name": "shopping_cart", "aliases": ["shopping_trolley"]}, "1f381": {"canonical_name": "gift", "aliases": ["present"]}, # seemed like the best celebration "1f388": {"canonical_name": "balloon", 
"aliases": ["celebration"]}, # from gemoji/Unicode "1f38f": {"canonical_name": "carp_streamer", "aliases": ["flags"]}, "1f380": {"canonical_name": "ribbon", "aliases": ["decoration"]}, "1f38a": {"canonical_name": "confetti", "aliases": ["party_ball"]}, # seemed like the best congratulations "1f389": {"canonical_name": "tada", "aliases": ["congratulations"]}, "1f38e": {"canonical_name": "dolls", "aliases": []}, "1f3ee": {"canonical_name": "lantern", "aliases": ["izakaya_lantern"]}, "1f390": {"canonical_name": "wind_chime", "aliases": []}, "2709": {"canonical_name": "email", "aliases": ["envelope", "mail"]}, # seems useful for chat? "1f4e9": {"canonical_name": "mail_sent", "aliases": ["sealed"]}, "1f4e8": {"canonical_name": "mail_received", "aliases": []}, "1f4e7": {"canonical_name": "e-mail", "aliases": []}, "1f48c": {"canonical_name": "love_letter", "aliases": []}, "1f4e5": {"canonical_name": "inbox", "aliases": []}, "1f4e4": {"canonical_name": "outbox", "aliases": []}, "1f4e6": {"canonical_name": "package", "aliases": []}, # price_tag from iemoji "1f3f7": {"canonical_name": "label", "aliases": ["tag", "price_tag"]}, "1f4ea": {"canonical_name": "closed_mailbox", "aliases": []}, "1f4eb": {"canonical_name": "mailbox", "aliases": []}, "1f4ec": {"canonical_name": "unread_mail", "aliases": []}, "1f4ed": {"canonical_name": "inbox_zero", "aliases": ["empty_mailbox", "no_mail"]}, "1f4ee": {"canonical_name": "mail_dropoff", "aliases": []}, "1f4ef": {"canonical_name": "horn", "aliases": []}, "1f4dc": {"canonical_name": "scroll", "aliases": []}, # receipt seems more useful? 
"1f4c3": {"canonical_name": "receipt", "aliases": []}, "1f4c4": {"canonical_name": "document", "aliases": ["paper", "file", "page"]}, "1f4d1": {"canonical_name": "place_holder", "aliases": []}, "1f4ca": {"canonical_name": "bar_chart", "aliases": []}, # seems like the best chart "1f4c8": {"canonical_name": "chart", "aliases": ["upwards_trend", "growing", "increasing"]}, "1f4c9": {"canonical_name": "downwards_trend", "aliases": ["shrinking", "decreasing"]}, "1f5d2": {"canonical_name": "spiral_notepad", "aliases": []}, # '1f5d3': {'canonical_name': 'X', 'aliases': ['spiral_calendar_pad']}, # swapped the following two largely due to the emojione glyphs "1f4c6": {"canonical_name": "date", "aliases": []}, "1f4c5": {"canonical_name": "calendar", "aliases": []}, "1f4c7": {"canonical_name": "rolodex", "aliases": ["card_index"]}, "1f5c3": {"canonical_name": "archive", "aliases": []}, "1f5f3": {"canonical_name": "ballot_box", "aliases": []}, "1f5c4": {"canonical_name": "file_cabinet", "aliases": []}, "1f4cb": {"canonical_name": "clipboard", "aliases": []}, # don't need two file_folders, so made this organize "1f4c1": {"canonical_name": "organize", "aliases": ["file_folder"]}, "1f4c2": {"canonical_name": "folder", "aliases": []}, "1f5c2": {"canonical_name": "sort", "aliases": []}, "1f5de": {"canonical_name": "newspaper", "aliases": ["swat"]}, "1f4f0": {"canonical_name": "headlines", "aliases": []}, "1f4d3": {"canonical_name": "notebook", "aliases": ["composition_book"]}, "1f4d4": {"canonical_name": "decorative_notebook", "aliases": []}, "1f4d2": {"canonical_name": "ledger", "aliases": ["spiral_notebook"]}, # the glyphs here are the same as Objects/147-149 (with a different color), # for all but Google "1f4d5": {"canonical_name": "red_book", "aliases": ["closed_book"]}, "1f4d7": {"canonical_name": "green_book", "aliases": []}, "1f4d8": {"canonical_name": "blue_book", "aliases": []}, "1f4d9": {"canonical_name": "orange_book", "aliases": []}, "1f4da": {"canonical_name": "books", 
"aliases": []}, "1f4d6": {"canonical_name": "book", "aliases": ["open_book"]}, "1f516": {"canonical_name": "bookmark", "aliases": []}, "1f517": {"canonical_name": "link", "aliases": []}, "1f4ce": {"canonical_name": "paperclip", "aliases": ["attachment"]}, # office_supplies from https://mashable.com/2015/10/23/ios-9-1-emoji-guide/ "1f587": {"canonical_name": "office_supplies", "aliases": ["paperclip_chain", "linked"]}, "1f4d0": {"canonical_name": "carpenter_square", "aliases": ["triangular_ruler"]}, "1f4cf": {"canonical_name": "ruler", "aliases": ["straightedge"]}, "1f4cc": {"canonical_name": "push_pin", "aliases": ["thumb_tack"]}, "1f4cd": {"canonical_name": "pin", "aliases": ["sewing_pin"]}, "2702": {"canonical_name": "scissors", "aliases": []}, "1f58a": {"canonical_name": "pen", "aliases": ["ballpoint_pen"]}, "1f58b": {"canonical_name": "fountain_pen", "aliases": []}, # three of the four emoji sets just have a rightwards-facing objects/162 # '2712': {'canonical_name': 'X', 'aliases': ['black_nib']}, "1f58c": {"canonical_name": "paintbrush", "aliases": []}, "1f58d": {"canonical_name": "crayon", "aliases": []}, "1f4dd": {"canonical_name": "memo", "aliases": ["note"]}, "270f": {"canonical_name": "pencil", "aliases": []}, "1f50d": {"canonical_name": "search", "aliases": ["find", "magnifying_glass"]}, # '1f50e': {'canonical_name': 'X', 'aliases': ['mag_right']}, # https://emojipedia.org/lock-with-ink-pen/ "1f50f": { "canonical_name": "privacy", "aliases": ["key_signing", "digital_security", "protected"], }, "1f510": { "canonical_name": "secure", "aliases": ["lock_with_key", "safe", "commitment", "loyalty"], }, "1f512": {"canonical_name": "locked", "aliases": []}, "1f513": {"canonical_name": "unlocked", "aliases": []}, # seems the best glyph for love and love_you "2764": {"canonical_name": "heart", "aliases": ["love", "love_you"]}, "1f49b": {"canonical_name": "yellow_heart", "aliases": ["heart_of_gold"]}, "1f49a": {"canonical_name": "green_heart", "aliases": ["envy"]}, 
"1f499": {"canonical_name": "blue_heart", "aliases": []}, "1f49c": {"canonical_name": "purple_heart", "aliases": ["bravery"]}, "1f5a4": {"canonical_name": "black_heart", "aliases": []}, "1f494": {"canonical_name": "broken_heart", "aliases": ["heartache"]}, "2763": {"canonical_name": "heart_exclamation", "aliases": []}, "1f495": {"canonical_name": "two_hearts", "aliases": []}, "1f49e": {"canonical_name": "revolving_hearts", "aliases": []}, "1f493": {"canonical_name": "heartbeat", "aliases": []}, "1f497": {"canonical_name": "heart_pulse", "aliases": ["growing_heart"]}, "1f496": {"canonical_name": "sparkling_heart", "aliases": []}, "1f498": {"canonical_name": "cupid", "aliases": ["smitten", "heart_arrow"]}, "1f49d": {"canonical_name": "gift_heart", "aliases": []}, "1f49f": {"canonical_name": "heart_box", "aliases": []}, "262e": {"canonical_name": "peace", "aliases": []}, "271d": {"canonical_name": "cross", "aliases": ["christianity"]}, "262a": {"canonical_name": "star_and_crescent", "aliases": ["islam"]}, "1f549": {"canonical_name": "om", "aliases": ["hinduism"]}, "2638": {"canonical_name": "wheel_of_dharma", "aliases": ["buddhism"]}, "2721": {"canonical_name": "star_of_david", "aliases": ["judaism"]}, # can't find any explanation of this at all. Is an alternate star of david? 
# '1f52f': {'canonical_name': 'X', 'aliases': ['six_pointed_star']}, "1f54e": {"canonical_name": "menorah", "aliases": []}, "262f": {"canonical_name": "yin_yang", "aliases": []}, "2626": {"canonical_name": "orthodox_cross", "aliases": []}, "1f6d0": {"canonical_name": "place_of_worship", "aliases": []}, "26ce": {"canonical_name": "ophiuchus", "aliases": []}, "2648": {"canonical_name": "aries", "aliases": []}, "2649": {"canonical_name": "taurus", "aliases": []}, "264a": {"canonical_name": "gemini", "aliases": []}, "264b": {"canonical_name": "cancer", "aliases": []}, "264c": {"canonical_name": "leo", "aliases": []}, "264d": {"canonical_name": "virgo", "aliases": []}, "264e": {"canonical_name": "libra", "aliases": []}, "264f": {"canonical_name": "scorpius", "aliases": []}, "2650": {"canonical_name": "sagittarius", "aliases": []}, "2651": {"canonical_name": "capricorn", "aliases": []}, "2652": {"canonical_name": "aquarius", "aliases": []}, "2653": {"canonical_name": "pisces", "aliases": []}, "1f194": {"canonical_name": "id", "aliases": []}, "269b": {"canonical_name": "atom", "aliases": ["physics"]}, # japanese symbol # '1f251': {'canonical_name': 'X', 'aliases': ['accept']}, "2622": {"canonical_name": "radioactive", "aliases": ["nuclear"]}, "2623": {"canonical_name": "biohazard", "aliases": []}, "1f4f4": {"canonical_name": "phone_off", "aliases": []}, "1f4f3": {"canonical_name": "vibration_mode", "aliases": []}, # '1f236': {'canonical_name': 'X', 'aliases': ['u6709']}, # '1f21a': {'canonical_name': 'X', 'aliases': ['u7121']}, # '1f238': {'canonical_name': 'X', 'aliases': ['u7533']}, # '1f23a': {'canonical_name': 'X', 'aliases': ['u55b6']}, # '1f237': {'canonical_name': 'X', 'aliases': ['u6708']}, "2734": {"canonical_name": "eight_pointed_star", "aliases": []}, "1f19a": {"canonical_name": "vs", "aliases": []}, "1f4ae": {"canonical_name": "white_flower", "aliases": []}, # '1f250': {'canonical_name': 'X', 'aliases': ['ideograph_advantage']}, # japanese character # '3299': 
{'canonical_name': 'X', 'aliases': ['secret']}, # '3297': {'canonical_name': 'X', 'aliases': ['congratulations']}, # '1f234': {'canonical_name': 'X', 'aliases': ['u5408']}, # '1f235': {'canonical_name': 'X', 'aliases': ['u6e80']}, # '1f239': {'canonical_name': 'X', 'aliases': ['u5272']}, # '1f232': {'canonical_name': 'X', 'aliases': ['u7981']}, "1f170": {"canonical_name": "a", "aliases": []}, "1f171": {"canonical_name": "b", "aliases": []}, "1f18e": {"canonical_name": "ab", "aliases": []}, "1f191": {"canonical_name": "cl", "aliases": []}, "1f17e": {"canonical_name": "o", "aliases": []}, "1f198": {"canonical_name": "sos", "aliases": []}, # Symbols/105 seems like a better x, and looks more like the other letters "274c": {"canonical_name": "cross_mark", "aliases": ["incorrect", "wrong"]}, "2b55": {"canonical_name": "circle", "aliases": []}, "1f6d1": {"canonical_name": "stop_sign", "aliases": ["octagonal_sign"]}, "26d4": {"canonical_name": "no_entry", "aliases": ["wrong_way"]}, "1f4db": {"canonical_name": "name_badge", "aliases": []}, "1f6ab": {"canonical_name": "prohibited", "aliases": ["not_allowed"]}, "1f4af": {"canonical_name": "100", "aliases": ["hundred"]}, "1f4a2": {"canonical_name": "anger", "aliases": ["bam", "pow"]}, "2668": {"canonical_name": "hot_springs", "aliases": []}, "1f6b7": {"canonical_name": "no_pedestrians", "aliases": []}, "1f6af": {"canonical_name": "do_not_litter", "aliases": []}, "1f6b3": {"canonical_name": "no_bicycles", "aliases": []}, "1f6b1": {"canonical_name": "non-potable_water", "aliases": []}, "1f51e": {"canonical_name": "underage", "aliases": ["nc17"]}, "1f4f5": {"canonical_name": "no_phones", "aliases": []}, "1f6ad": {"canonical_name": "no_smoking", "aliases": []}, "2757": {"canonical_name": "exclamation", "aliases": []}, "2755": {"canonical_name": "grey_exclamation", "aliases": []}, "2753": {"canonical_name": "question", "aliases": []}, "2754": {"canonical_name": "grey_question", "aliases": []}, "203c": {"canonical_name": "bangbang", 
"aliases": ["double_exclamation"]}, "2049": {"canonical_name": "interrobang", "aliases": []}, "1f505": {"canonical_name": "low_brightness", "aliases": ["dim"]}, "1f506": {"canonical_name": "brightness", "aliases": ["high_brightness"]}, "303d": {"canonical_name": "part_alternation", "aliases": []}, "26a0": {"canonical_name": "warning", "aliases": ["caution", "danger"]}, "1f6b8": { "canonical_name": "children_crossing", "aliases": ["school_crossing", "drive_with_care"], }, "1f531": {"canonical_name": "trident", "aliases": []}, "269c": {"canonical_name": "fleur_de_lis", "aliases": []}, "1f530": {"canonical_name": "beginner", "aliases": []}, "267b": {"canonical_name": "recycle", "aliases": []}, # seems like the best check "2705": {"canonical_name": "check", "aliases": ["all_good", "approved"]}, # '1f22f': {'canonical_name': 'X', 'aliases': ['u6307']}, # stock_market seemed more useful "1f4b9": {"canonical_name": "stock_market", "aliases": []}, "2747": {"canonical_name": "sparkle", "aliases": []}, "2733": {"canonical_name": "eight_spoked_asterisk", "aliases": []}, "274e": {"canonical_name": "x", "aliases": []}, "1f310": {"canonical_name": "www", "aliases": ["globe"]}, "1f4a0": {"canonical_name": "cute", "aliases": ["kawaii", "diamond_with_a_dot"]}, "24c2": {"canonical_name": "metro", "aliases": ["m"]}, "1f300": {"canonical_name": "cyclone", "aliases": ["hurricane", "typhoon"]}, "1f4a4": {"canonical_name": "zzz", "aliases": []}, "1f3e7": {"canonical_name": "atm", "aliases": []}, "1f6be": {"canonical_name": "wc", "aliases": ["water_closet"]}, "267f": {"canonical_name": "accessible", "aliases": ["wheelchair", "disabled"]}, "1f17f": {"canonical_name": "parking", "aliases": ["p"]}, # '1f233': {'canonical_name': 'X', 'aliases': ['u7a7a']}, # '1f202': {'canonical_name': 'X', 'aliases': ['sa']}, "1f6c2": {"canonical_name": "passport_control", "aliases": ["immigration"]}, "1f6c3": {"canonical_name": "customs", "aliases": []}, "1f6c4": {"canonical_name": "baggage_claim", 
"aliases": []}, "1f6c5": {"canonical_name": "locker", "aliases": ["locked_bag"]}, "1f6b9": {"canonical_name": "mens", "aliases": []}, "1f6ba": {"canonical_name": "womens", "aliases": []}, # seems more in line with the surrounding bathroom symbols "1f6bc": {"canonical_name": "baby_change_station", "aliases": ["nursery"]}, "1f6bb": {"canonical_name": "restroom", "aliases": []}, "1f6ae": {"canonical_name": "put_litter_in_its_place", "aliases": []}, "1f3a6": {"canonical_name": "cinema", "aliases": ["movie_theater"]}, "1f4f6": {"canonical_name": "cell_reception", "aliases": ["signal_strength", "signal_bars"]}, # '1f201': {'canonical_name': 'X', 'aliases': ['koko']}, "1f523": {"canonical_name": "symbols", "aliases": []}, "2139": {"canonical_name": "info", "aliases": []}, "1f524": {"canonical_name": "abc", "aliases": []}, "1f521": {"canonical_name": "abcd", "aliases": ["alphabet"]}, "1f520": {"canonical_name": "capital_abcd", "aliases": ["capital_letters"]}, "1f196": {"canonical_name": "ng", "aliases": []}, # from Unicode/gemoji. Saving ok for People/111 "1f197": {"canonical_name": "squared_ok", "aliases": []}, # from Unicode, and to parallel Symbols/135. 
Saving up for Symbols/171 "1f199": {"canonical_name": "squared_up", "aliases": []}, "1f192": {"canonical_name": "cool", "aliases": []}, "1f195": {"canonical_name": "new", "aliases": []}, "1f193": {"canonical_name": "free", "aliases": []}, "0030-20e3": {"canonical_name": "zero", "aliases": []}, "0031-20e3": {"canonical_name": "one", "aliases": []}, "0032-20e3": {"canonical_name": "two", "aliases": []}, "0033-20e3": {"canonical_name": "three", "aliases": []}, "0034-20e3": {"canonical_name": "four", "aliases": []}, "0035-20e3": {"canonical_name": "five", "aliases": []}, "0036-20e3": {"canonical_name": "six", "aliases": []}, "0037-20e3": {"canonical_name": "seven", "aliases": []}, "0038-20e3": {"canonical_name": "eight", "aliases": []}, "0039-20e3": {"canonical_name": "nine", "aliases": []}, "1f51f": {"canonical_name": "ten", "aliases": []}, "1f522": {"canonical_name": "1234", "aliases": ["numbers"]}, "0023-20e3": {"canonical_name": "hash", "aliases": []}, "002a-20e3": {"canonical_name": "asterisk", "aliases": []}, "25b6": {"canonical_name": "play", "aliases": []}, "23f8": {"canonical_name": "pause", "aliases": []}, "23ef": {"canonical_name": "play_pause", "aliases": []}, # stop taken by People/118 "23f9": {"canonical_name": "stop_button", "aliases": []}, "23fa": {"canonical_name": "record", "aliases": []}, "23ed": {"canonical_name": "next_track", "aliases": ["skip_forward"]}, "23ee": {"canonical_name": "previous_track", "aliases": ["skip_back"]}, "23e9": {"canonical_name": "fast_forward", "aliases": []}, "23ea": {"canonical_name": "rewind", "aliases": ["fast_reverse"]}, "23eb": {"canonical_name": "double_up", "aliases": ["fast_up"]}, "23ec": {"canonical_name": "double_down", "aliases": ["fast_down"]}, "25c0": {"canonical_name": "play_reverse", "aliases": []}, "1f53c": {"canonical_name": "upvote", "aliases": ["up_button", "increase"]}, "1f53d": {"canonical_name": "downvote", "aliases": ["down_button", "decrease"]}, "27a1": {"canonical_name": "right", "aliases": 
["east"]}, "2b05": {"canonical_name": "left", "aliases": ["west"]}, "2b06": {"canonical_name": "up", "aliases": ["north"]}, "2b07": {"canonical_name": "down", "aliases": ["south"]}, "2197": {"canonical_name": "upper_right", "aliases": ["north_east"]}, "2198": {"canonical_name": "lower_right", "aliases": ["south_east"]}, "2199": {"canonical_name": "lower_left", "aliases": ["south_west"]}, "2196": {"canonical_name": "upper_left", "aliases": ["north_west"]}, "2195": {"canonical_name": "up_down", "aliases": []}, "2194": {"canonical_name": "left_right", "aliases": ["swap"]}, "21aa": {"canonical_name": "forward", "aliases": ["right_hook"]}, "21a9": {"canonical_name": "reply", "aliases": ["left_hook"]}, "2934": {"canonical_name": "heading_up", "aliases": []}, "2935": {"canonical_name": "heading_down", "aliases": []}, "1f500": {"canonical_name": "shuffle", "aliases": []}, "1f501": {"canonical_name": "repeat", "aliases": []}, "1f502": {"canonical_name": "repeat_one", "aliases": []}, "1f504": {"canonical_name": "counterclockwise", "aliases": ["return"]}, "1f503": {"canonical_name": "clockwise", "aliases": []}, "1f3b5": {"canonical_name": "music", "aliases": []}, "1f3b6": {"canonical_name": "musical_notes", "aliases": []}, "2795": {"canonical_name": "plus", "aliases": ["add"]}, "2796": {"canonical_name": "minus", "aliases": ["subtract"]}, "2797": {"canonical_name": "division", "aliases": ["divide"]}, "2716": {"canonical_name": "multiplication", "aliases": ["multiply"]}, "1f4b2": {"canonical_name": "dollars", "aliases": []}, # There is no other exchange, so might as well generalize this "1f4b1": {"canonical_name": "exchange", "aliases": []}, "2122": {"canonical_name": "tm", "aliases": ["trademark"]}, "3030": {"canonical_name": "wavy_dash", "aliases": []}, "27b0": {"canonical_name": "loop", "aliases": []}, # https://emojipedia.org/double-curly-loop/ "27bf": {"canonical_name": "double_loop", "aliases": ["voicemail"]}, "1f51a": {"canonical_name": "end", "aliases": []}, "1f519": 
{"canonical_name": "back", "aliases": []}, "1f51b": {"canonical_name": "on", "aliases": []}, "1f51d": {"canonical_name": "top", "aliases": []}, "1f51c": {"canonical_name": "soon", "aliases": []}, "2714": {"canonical_name": "check_mark", "aliases": []}, "2611": {"canonical_name": "checkbox", "aliases": []}, "1f518": {"canonical_name": "radio_button", "aliases": []}, "26aa": {"canonical_name": "white_circle", "aliases": []}, "26ab": {"canonical_name": "black_circle", "aliases": []}, "1f534": {"canonical_name": "red_circle", "aliases": []}, "1f535": {"canonical_name": "blue_circle", "aliases": []}, "1f53a": {"canonical_name": "red_triangle_up", "aliases": []}, "1f53b": {"canonical_name": "red_triangle_down", "aliases": []}, "1f538": {"canonical_name": "small_orange_diamond", "aliases": []}, "1f539": {"canonical_name": "small_blue_diamond", "aliases": []}, "1f536": {"canonical_name": "large_orange_diamond", "aliases": []}, "1f537": {"canonical_name": "large_blue_diamond", "aliases": []}, "1f533": {"canonical_name": "black_and_white_square", "aliases": []}, "1f532": {"canonical_name": "white_and_black_square", "aliases": []}, "25aa": {"canonical_name": "black_small_square", "aliases": []}, "25ab": {"canonical_name": "white_small_square", "aliases": []}, "25fe": {"canonical_name": "black_medium_small_square", "aliases": []}, "25fd": {"canonical_name": "white_medium_small_square", "aliases": []}, "25fc": {"canonical_name": "black_medium_square", "aliases": []}, "25fb": {"canonical_name": "white_medium_square", "aliases": []}, "2b1b": {"canonical_name": "black_large_square", "aliases": []}, "2b1c": {"canonical_name": "white_large_square", "aliases": []}, "1f508": {"canonical_name": "speaker", "aliases": []}, "1f507": {"canonical_name": "mute", "aliases": ["no_sound"]}, "1f509": {"canonical_name": "softer", "aliases": []}, "1f50a": {"canonical_name": "louder", "aliases": ["sound"]}, "1f514": {"canonical_name": "notifications", "aliases": ["bell"]}, "1f515": 
{"canonical_name": "mute_notifications", "aliases": []}, "1f4e3": {"canonical_name": "megaphone", "aliases": ["shout"]}, "1f4e2": {"canonical_name": "loudspeaker", "aliases": ["bullhorn"]}, "1f4ac": {"canonical_name": "umm", "aliases": ["speech_balloon"]}, "1f5e8": {"canonical_name": "speech_bubble", "aliases": []}, "1f4ad": {"canonical_name": "thought", "aliases": ["dream"]}, "1f5ef": {"canonical_name": "anger_bubble", "aliases": []}, "2660": {"canonical_name": "spades", "aliases": []}, "2663": {"canonical_name": "clubs", "aliases": []}, "2665": {"canonical_name": "hearts", "aliases": []}, "2666": {"canonical_name": "diamonds", "aliases": []}, "1f0cf": {"canonical_name": "joker", "aliases": []}, "1f3b4": {"canonical_name": "playing_cards", "aliases": []}, "1f004": {"canonical_name": "mahjong", "aliases": []}, # The only use I can think of for so many clocks is to be able to use them # to vote on times and such in emoji reactions. But a) the experience is # not that great (the images are too small), b) there are issues with # 24-hour time (used in many countries), like what is 00:30 or 01:00 # called, c) it's hard to make the compose typeahead experience great, and # d) we should have a dedicated time voting widget that takes care of # timezone and locale issues, and uses a digital representation. 
# '1f550': {'canonical_name': 'X', 'aliases': ['clock1']}, # '1f551': {'canonical_name': 'X', 'aliases': ['clock2']}, # '1f552': {'canonical_name': 'X', 'aliases': ['clock3']}, # '1f553': {'canonical_name': 'X', 'aliases': ['clock4']}, # '1f554': {'canonical_name': 'X', 'aliases': ['clock5']}, # '1f555': {'canonical_name': 'X', 'aliases': ['clock6']}, # '1f556': {'canonical_name': 'X', 'aliases': ['clock7']}, # seems like the best choice for time "1f557": {"canonical_name": "time", "aliases": ["clock"]}, # '1f558': {'canonical_name': 'X', 'aliases': ['clock9']}, # '1f559': {'canonical_name': 'X', 'aliases': ['clock10']}, # '1f55a': {'canonical_name': 'X', 'aliases': ['clock11']}, # '1f55b': {'canonical_name': 'X', 'aliases': ['clock12']}, # '1f55c': {'canonical_name': 'X', 'aliases': ['clock130']}, # '1f55d': {'canonical_name': 'X', 'aliases': ['clock230']}, # '1f55e': {'canonical_name': 'X', 'aliases': ['clock330']}, # '1f55f': {'canonical_name': 'X', 'aliases': ['clock430']}, # '1f560': {'canonical_name': 'X', 'aliases': ['clock530']}, # '1f561': {'canonical_name': 'X', 'aliases': ['clock630']}, # '1f562': {'canonical_name': 'X', 'aliases': ['clock730']}, # '1f563': {'canonical_name': 'X', 'aliases': ['clock830']}, # '1f564': {'canonical_name': 'X', 'aliases': ['clock930']}, # '1f565': {'canonical_name': 'X', 'aliases': ['clock1030']}, # '1f566': {'canonical_name': 'X', 'aliases': ['clock1130']}, # '1f567': {'canonical_name': 'X', 'aliases': ['clock1230']}, "1f3f3": {"canonical_name": "white_flag", "aliases": ["surrender"]}, "1f3f4": {"canonical_name": "black_flag", "aliases": []}, "1f3c1": {"canonical_name": "checkered_flag", "aliases": ["race", "go", "start"]}, "1f6a9": {"canonical_name": "triangular_flag", "aliases": []}, # solidarity from iemoji "1f38c": {"canonical_name": "crossed_flags", "aliases": ["solidarity"]}, }
codeparrot/github-code-clean
import fractions import operator import os import random import sys import struct import time import unittest from test import support from test.test_grammar import (VALID_UNDERSCORE_LITERALS, INVALID_UNDERSCORE_LITERALS) from math import isinf, isnan, copysign, ldexp INF = float("inf") NAN = float("nan") have_getformat = hasattr(float, "__getformat__") requires_getformat = unittest.skipUnless(have_getformat, "requires __getformat__") requires_setformat = unittest.skipUnless(hasattr(float, "__setformat__"), "requires __setformat__") #locate file with float format test values test_dir = os.path.dirname(__file__) or os.curdir format_testfile = os.path.join(test_dir, 'formatfloat_testcases.txt') class FloatSubclass(float): pass class OtherFloatSubclass(float): pass class GeneralFloatCases(unittest.TestCase): def test_float(self): self.assertEqual(float(3.14), 3.14) self.assertEqual(float(314), 314.0) self.assertEqual(float(" 3.14 "), 3.14) self.assertRaises(ValueError, float, " 0x3.1 ") self.assertRaises(ValueError, float, " -0x3.p-1 ") self.assertRaises(ValueError, float, " +0x3.p-1 ") self.assertRaises(ValueError, float, "++3.14") self.assertRaises(ValueError, float, "+-3.14") self.assertRaises(ValueError, float, "-+3.14") self.assertRaises(ValueError, float, "--3.14") self.assertRaises(ValueError, float, ".nan") self.assertRaises(ValueError, float, "+.inf") self.assertRaises(ValueError, float, ".") self.assertRaises(ValueError, float, "-.") self.assertRaises(TypeError, float, {}) self.assertRaisesRegex(TypeError, "not 'dict'", float, {}) # Lone surrogate self.assertRaises((UnicodeEncodeError, ValueError), float, '\uD8F0') # check that we don't accept alternate exponent markers self.assertRaises(ValueError, float, "-1.7d29") self.assertRaises(ValueError, float, "3D-14") self.assertEqual(float(" \u0663.\u0661\u0664 "), 3.14) self.assertEqual(float("\N{EM SPACE}3.14\N{EN SPACE}"), 3.14) # extra long strings should not be a problem float(b'.' + b'1'*1000) float('.' 
+ '1'*1000) def test_underscores(self): for lit in VALID_UNDERSCORE_LITERALS: if not any(ch in lit for ch in 'jJxXoObB'): self.assertEqual(float(lit), eval(lit)) self.assertEqual(float(lit), float(lit.replace('_', ''))) for lit in INVALID_UNDERSCORE_LITERALS: if lit in ('0_7', '09_99'): # octals are not recognized here continue if not any(ch in lit for ch in 'jJxXoObB'): self.assertRaises(ValueError, float, lit) # Additional test cases; nan and inf are never valid as literals, # only in the float() constructor, but we don't allow underscores # in or around them. self.assertRaises(ValueError, float, '_NaN') self.assertRaises(ValueError, float, 'Na_N') self.assertRaises(ValueError, float, 'IN_F') self.assertRaises(ValueError, float, '-_INF') self.assertRaises(ValueError, float, '-INF_') # Check that we handle bytes values correctly. self.assertRaises(ValueError, float, b'0_.\xff9') def test_non_numeric_input_types(self): # Test possible non-numeric types for the argument x, including # subclasses of the explicitly documented accepted types. 
class CustomStr(str): pass class CustomBytes(bytes): pass class CustomByteArray(bytearray): pass factories = [ bytes, bytearray, lambda b: CustomStr(b.decode()), CustomBytes, CustomByteArray, memoryview, ] try: from array import array except ImportError: pass else: factories.append(lambda b: array('B', b)) for f in factories: x = f(b" 3.14 ") with self.subTest(type(x)): self.assertEqual(float(x), 3.14) with self.assertRaisesRegex(ValueError, "could not convert"): float(f(b'A' * 0x10)) def test_float_memoryview(self): self.assertEqual(float(memoryview(b'12.3')[1:4]), 2.3) self.assertEqual(float(memoryview(b'12.3\x00')[1:4]), 2.3) self.assertEqual(float(memoryview(b'12.3 ')[1:4]), 2.3) self.assertEqual(float(memoryview(b'12.3A')[1:4]), 2.3) self.assertEqual(float(memoryview(b'12.34')[1:4]), 2.3) def test_error_message(self): testlist = ('\xbd', '123\xbd', ' 123 456 ') for s in testlist: try: float(s) except ValueError as e: self.assertIn(s.strip(), e.args[0]) else: self.fail("Expected int(%r) to raise a ValueError", s) @support.run_with_locale('LC_NUMERIC', 'fr_FR', 'de_DE') def test_float_with_comma(self): # set locale to something that doesn't use '.' for the decimal point # float must not accept the locale specific decimal point but # it still has to accept the normal python syntax import locale if not locale.localeconv()['decimal_point'] == ',': self.skipTest('decimal_point is not ","') self.assertEqual(float(" 3.14 "), 3.14) self.assertEqual(float("+3.14 "), 3.14) self.assertEqual(float("-3.14 "), -3.14) self.assertEqual(float(".14 "), .14) self.assertEqual(float("3. 
"), 3.0) self.assertEqual(float("3.e3 "), 3000.0) self.assertEqual(float("3.2e3 "), 3200.0) self.assertEqual(float("2.5e-1 "), 0.25) self.assertEqual(float("5e-1"), 0.5) self.assertRaises(ValueError, float, " 3,14 ") self.assertRaises(ValueError, float, " +3,14 ") self.assertRaises(ValueError, float, " -3,14 ") self.assertRaises(ValueError, float, " 0x3.1 ") self.assertRaises(ValueError, float, " -0x3.p-1 ") self.assertRaises(ValueError, float, " +0x3.p-1 ") self.assertEqual(float(" 25.e-1 "), 2.5) self.assertAlmostEqual(float(" .25e-1 "), .025) def test_floatconversion(self): # Make sure that calls to __float__() work properly class Foo1(object): def __float__(self): return 42. class Foo2(float): def __float__(self): return 42. class Foo3(float): def __new__(cls, value=0.): return float.__new__(cls, 2*value) def __float__(self): return self class Foo4(float): def __float__(self): return 42 # Issue 5759: __float__ not called on str subclasses (though it is on # unicode subclasses). class FooStr(str): def __float__(self): return float(str(self)) + 1 self.assertEqual(float(Foo1()), 42.) self.assertEqual(float(Foo2()), 42.) with self.assertWarns(DeprecationWarning): self.assertEqual(float(Foo3(21)), 42.) self.assertRaises(TypeError, float, Foo4(42)) self.assertEqual(float(FooStr('8')), 9.) class Foo5: def __float__(self): return "" self.assertRaises(TypeError, time.sleep, Foo5()) # Issue #24731 class F: def __float__(self): return OtherFloatSubclass(42.) with self.assertWarns(DeprecationWarning): self.assertEqual(float(F()), 42.) with self.assertWarns(DeprecationWarning): self.assertIs(type(float(F())), float) with self.assertWarns(DeprecationWarning): self.assertEqual(FloatSubclass(F()), 42.) 
with self.assertWarns(DeprecationWarning): self.assertIs(type(FloatSubclass(F())), FloatSubclass) def test_is_integer(self): self.assertFalse((1.1).is_integer()) self.assertTrue((1.).is_integer()) self.assertFalse(float("nan").is_integer()) self.assertFalse(float("inf").is_integer()) def test_floatasratio(self): for f, ratio in [ (0.875, (7, 8)), (-0.875, (-7, 8)), (0.0, (0, 1)), (11.5, (23, 2)), ]: self.assertEqual(f.as_integer_ratio(), ratio) for i in range(10000): f = random.random() f *= 10 ** random.randint(-100, 100) n, d = f.as_integer_ratio() self.assertEqual(float(n).__truediv__(d), f) R = fractions.Fraction self.assertEqual(R(0, 1), R(*float(0.0).as_integer_ratio())) self.assertEqual(R(5, 2), R(*float(2.5).as_integer_ratio())) self.assertEqual(R(1, 2), R(*float(0.5).as_integer_ratio())) self.assertEqual(R(4728779608739021, 2251799813685248), R(*float(2.1).as_integer_ratio())) self.assertEqual(R(-4728779608739021, 2251799813685248), R(*float(-2.1).as_integer_ratio())) self.assertEqual(R(-2100, 1), R(*float(-2100.0).as_integer_ratio())) self.assertRaises(OverflowError, float('inf').as_integer_ratio) self.assertRaises(OverflowError, float('-inf').as_integer_ratio) self.assertRaises(ValueError, float('nan').as_integer_ratio) def test_float_containment(self): floats = (INF, -INF, 0.0, 1.0, NAN) for f in floats: if f is NAN and support.check_impl_detail(pypy=False): self.assertIn(f, [f]) self.assertIn(f, (f,)) self.assertIn(f, {f}) self.assertIn(f, {f: None}) self.assertEqual([f].count(f), 1, "[].count('%r') != 1" % f) self.assertIn(f, floats) for f in floats: # nonidentical containers, same type, same contents self.assertTrue([f] == [f], "[%r] != [%r]" % (f, f)) self.assertTrue((f,) == (f,), "(%r,) != (%r,)" % (f, f)) self.assertTrue({f} == {f}, "{%r} != {%r}" % (f, f)) self.assertTrue({f : None} == {f: None}, "{%r : None} != " "{%r : None}" % (f, f)) # identical containers l, t, s, d = [f], (f,), {f}, {f: None} self.assertTrue(l == l, "[%r] not equal to 
itself" % f) self.assertTrue(t == t, "(%r,) not equal to itself" % f) self.assertTrue(s == s, "{%r} not equal to itself" % f) self.assertTrue(d == d, "{%r : None} not equal to itself" % f) def assertEqualAndEqualSign(self, a, b): # fail unless a == b and a and b have the same sign bit; # the only difference from assertEqual is that this test # distinguishes -0.0 and 0.0. self.assertEqual((a, copysign(1.0, a)), (b, copysign(1.0, b))) @support.requires_IEEE_754 def test_float_mod(self): # Check behaviour of % operator for IEEE 754 special cases. # In particular, check signs of zeros. mod = operator.mod self.assertEqualAndEqualSign(mod(-1.0, 1.0), 0.0) self.assertEqualAndEqualSign(mod(-1e-100, 1.0), 1.0) self.assertEqualAndEqualSign(mod(-0.0, 1.0), 0.0) self.assertEqualAndEqualSign(mod(0.0, 1.0), 0.0) self.assertEqualAndEqualSign(mod(1e-100, 1.0), 1e-100) self.assertEqualAndEqualSign(mod(1.0, 1.0), 0.0) self.assertEqualAndEqualSign(mod(-1.0, -1.0), -0.0) self.assertEqualAndEqualSign(mod(-1e-100, -1.0), -1e-100) self.assertEqualAndEqualSign(mod(-0.0, -1.0), -0.0) self.assertEqualAndEqualSign(mod(0.0, -1.0), -0.0) self.assertEqualAndEqualSign(mod(1e-100, -1.0), -1.0) self.assertEqualAndEqualSign(mod(1.0, -1.0), -0.0) @support.requires_IEEE_754 def test_float_pow(self): # test builtin pow and ** operator for IEEE 754 special cases. 
# Special cases taken from section F.9.4.4 of the C99 specification for pow_op in pow, operator.pow: # x**NAN is NAN for any x except 1 self.assertTrue(isnan(pow_op(-INF, NAN))) self.assertTrue(isnan(pow_op(-2.0, NAN))) self.assertTrue(isnan(pow_op(-1.0, NAN))) self.assertTrue(isnan(pow_op(-0.5, NAN))) self.assertTrue(isnan(pow_op(-0.0, NAN))) self.assertTrue(isnan(pow_op(0.0, NAN))) self.assertTrue(isnan(pow_op(0.5, NAN))) self.assertTrue(isnan(pow_op(2.0, NAN))) self.assertTrue(isnan(pow_op(INF, NAN))) self.assertTrue(isnan(pow_op(NAN, NAN))) # NAN**y is NAN for any y except +-0 self.assertTrue(isnan(pow_op(NAN, -INF))) self.assertTrue(isnan(pow_op(NAN, -2.0))) self.assertTrue(isnan(pow_op(NAN, -1.0))) self.assertTrue(isnan(pow_op(NAN, -0.5))) self.assertTrue(isnan(pow_op(NAN, 0.5))) self.assertTrue(isnan(pow_op(NAN, 1.0))) self.assertTrue(isnan(pow_op(NAN, 2.0))) self.assertTrue(isnan(pow_op(NAN, INF))) # (+-0)**y raises ZeroDivisionError for y a negative odd integer self.assertRaises(ZeroDivisionError, pow_op, -0.0, -1.0) self.assertRaises(ZeroDivisionError, pow_op, 0.0, -1.0) # (+-0)**y raises ZeroDivisionError for y finite and negative # but not an odd integer self.assertRaises(ZeroDivisionError, pow_op, -0.0, -2.0) self.assertRaises(ZeroDivisionError, pow_op, -0.0, -0.5) self.assertRaises(ZeroDivisionError, pow_op, 0.0, -2.0) self.assertRaises(ZeroDivisionError, pow_op, 0.0, -0.5) # (+-0)**y is +-0 for y a positive odd integer self.assertEqualAndEqualSign(pow_op(-0.0, 1.0), -0.0) self.assertEqualAndEqualSign(pow_op(0.0, 1.0), 0.0) # (+-0)**y is 0 for y finite and positive but not an odd integer self.assertEqualAndEqualSign(pow_op(-0.0, 0.5), 0.0) self.assertEqualAndEqualSign(pow_op(-0.0, 2.0), 0.0) self.assertEqualAndEqualSign(pow_op(0.0, 0.5), 0.0) self.assertEqualAndEqualSign(pow_op(0.0, 2.0), 0.0) # (-1)**+-inf is 1 self.assertEqualAndEqualSign(pow_op(-1.0, -INF), 1.0) self.assertEqualAndEqualSign(pow_op(-1.0, INF), 1.0) # 1**y is 1 for any y, even if y 
is an infinity or nan self.assertEqualAndEqualSign(pow_op(1.0, -INF), 1.0) self.assertEqualAndEqualSign(pow_op(1.0, -2.0), 1.0) self.assertEqualAndEqualSign(pow_op(1.0, -1.0), 1.0) self.assertEqualAndEqualSign(pow_op(1.0, -0.5), 1.0) self.assertEqualAndEqualSign(pow_op(1.0, -0.0), 1.0) self.assertEqualAndEqualSign(pow_op(1.0, 0.0), 1.0) self.assertEqualAndEqualSign(pow_op(1.0, 0.5), 1.0) self.assertEqualAndEqualSign(pow_op(1.0, 1.0), 1.0) self.assertEqualAndEqualSign(pow_op(1.0, 2.0), 1.0) self.assertEqualAndEqualSign(pow_op(1.0, INF), 1.0) self.assertEqualAndEqualSign(pow_op(1.0, NAN), 1.0) # x**+-0 is 1 for any x, even if x is a zero, infinity, or nan self.assertEqualAndEqualSign(pow_op(-INF, 0.0), 1.0) self.assertEqualAndEqualSign(pow_op(-2.0, 0.0), 1.0) self.assertEqualAndEqualSign(pow_op(-1.0, 0.0), 1.0) self.assertEqualAndEqualSign(pow_op(-0.5, 0.0), 1.0) self.assertEqualAndEqualSign(pow_op(-0.0, 0.0), 1.0) self.assertEqualAndEqualSign(pow_op(0.0, 0.0), 1.0) self.assertEqualAndEqualSign(pow_op(0.5, 0.0), 1.0) self.assertEqualAndEqualSign(pow_op(1.0, 0.0), 1.0) self.assertEqualAndEqualSign(pow_op(2.0, 0.0), 1.0) self.assertEqualAndEqualSign(pow_op(INF, 0.0), 1.0) self.assertEqualAndEqualSign(pow_op(NAN, 0.0), 1.0) self.assertEqualAndEqualSign(pow_op(-INF, -0.0), 1.0) self.assertEqualAndEqualSign(pow_op(-2.0, -0.0), 1.0) self.assertEqualAndEqualSign(pow_op(-1.0, -0.0), 1.0) self.assertEqualAndEqualSign(pow_op(-0.5, -0.0), 1.0) self.assertEqualAndEqualSign(pow_op(-0.0, -0.0), 1.0) self.assertEqualAndEqualSign(pow_op(0.0, -0.0), 1.0) self.assertEqualAndEqualSign(pow_op(0.5, -0.0), 1.0) self.assertEqualAndEqualSign(pow_op(1.0, -0.0), 1.0) self.assertEqualAndEqualSign(pow_op(2.0, -0.0), 1.0) self.assertEqualAndEqualSign(pow_op(INF, -0.0), 1.0) self.assertEqualAndEqualSign(pow_op(NAN, -0.0), 1.0) # x**y defers to complex pow for finite negative x and # non-integral y. 
self.assertEqual(type(pow_op(-2.0, -0.5)), complex) self.assertEqual(type(pow_op(-2.0, 0.5)), complex) self.assertEqual(type(pow_op(-1.0, -0.5)), complex) self.assertEqual(type(pow_op(-1.0, 0.5)), complex) self.assertEqual(type(pow_op(-0.5, -0.5)), complex) self.assertEqual(type(pow_op(-0.5, 0.5)), complex) # x**-INF is INF for abs(x) < 1 self.assertEqualAndEqualSign(pow_op(-0.5, -INF), INF) self.assertEqualAndEqualSign(pow_op(-0.0, -INF), INF) self.assertEqualAndEqualSign(pow_op(0.0, -INF), INF) self.assertEqualAndEqualSign(pow_op(0.5, -INF), INF) # x**-INF is 0 for abs(x) > 1 self.assertEqualAndEqualSign(pow_op(-INF, -INF), 0.0) self.assertEqualAndEqualSign(pow_op(-2.0, -INF), 0.0) self.assertEqualAndEqualSign(pow_op(2.0, -INF), 0.0) self.assertEqualAndEqualSign(pow_op(INF, -INF), 0.0) # x**INF is 0 for abs(x) < 1 self.assertEqualAndEqualSign(pow_op(-0.5, INF), 0.0) self.assertEqualAndEqualSign(pow_op(-0.0, INF), 0.0) self.assertEqualAndEqualSign(pow_op(0.0, INF), 0.0) self.assertEqualAndEqualSign(pow_op(0.5, INF), 0.0) # x**INF is INF for abs(x) > 1 self.assertEqualAndEqualSign(pow_op(-INF, INF), INF) self.assertEqualAndEqualSign(pow_op(-2.0, INF), INF) self.assertEqualAndEqualSign(pow_op(2.0, INF), INF) self.assertEqualAndEqualSign(pow_op(INF, INF), INF) # (-INF)**y is -0.0 for y a negative odd integer self.assertEqualAndEqualSign(pow_op(-INF, -1.0), -0.0) # (-INF)**y is 0.0 for y negative but not an odd integer self.assertEqualAndEqualSign(pow_op(-INF, -0.5), 0.0) self.assertEqualAndEqualSign(pow_op(-INF, -2.0), 0.0) # (-INF)**y is -INF for y a positive odd integer self.assertEqualAndEqualSign(pow_op(-INF, 1.0), -INF) # (-INF)**y is INF for y positive but not an odd integer self.assertEqualAndEqualSign(pow_op(-INF, 0.5), INF) self.assertEqualAndEqualSign(pow_op(-INF, 2.0), INF) # INF**y is INF for y positive self.assertEqualAndEqualSign(pow_op(INF, 0.5), INF) self.assertEqualAndEqualSign(pow_op(INF, 1.0), INF) self.assertEqualAndEqualSign(pow_op(INF, 2.0), 
INF) # INF**y is 0.0 for y negative self.assertEqualAndEqualSign(pow_op(INF, -2.0), 0.0) self.assertEqualAndEqualSign(pow_op(INF, -1.0), 0.0) self.assertEqualAndEqualSign(pow_op(INF, -0.5), 0.0) # basic checks not covered by the special cases above self.assertEqualAndEqualSign(pow_op(-2.0, -2.0), 0.25) self.assertEqualAndEqualSign(pow_op(-2.0, -1.0), -0.5) self.assertEqualAndEqualSign(pow_op(-2.0, -0.0), 1.0) self.assertEqualAndEqualSign(pow_op(-2.0, 0.0), 1.0) self.assertEqualAndEqualSign(pow_op(-2.0, 1.0), -2.0) self.assertEqualAndEqualSign(pow_op(-2.0, 2.0), 4.0) self.assertEqualAndEqualSign(pow_op(-1.0, -2.0), 1.0) self.assertEqualAndEqualSign(pow_op(-1.0, -1.0), -1.0) self.assertEqualAndEqualSign(pow_op(-1.0, -0.0), 1.0) self.assertEqualAndEqualSign(pow_op(-1.0, 0.0), 1.0) self.assertEqualAndEqualSign(pow_op(-1.0, 1.0), -1.0) self.assertEqualAndEqualSign(pow_op(-1.0, 2.0), 1.0) self.assertEqualAndEqualSign(pow_op(2.0, -2.0), 0.25) self.assertEqualAndEqualSign(pow_op(2.0, -1.0), 0.5) self.assertEqualAndEqualSign(pow_op(2.0, -0.0), 1.0) self.assertEqualAndEqualSign(pow_op(2.0, 0.0), 1.0) self.assertEqualAndEqualSign(pow_op(2.0, 1.0), 2.0) self.assertEqualAndEqualSign(pow_op(2.0, 2.0), 4.0) # 1 ** large and -1 ** large; some libms apparently # have problems with these self.assertEqualAndEqualSign(pow_op(1.0, -1e100), 1.0) self.assertEqualAndEqualSign(pow_op(1.0, 1e100), 1.0) self.assertEqualAndEqualSign(pow_op(-1.0, -1e100), 1.0) self.assertEqualAndEqualSign(pow_op(-1.0, 1e100), 1.0) # check sign for results that underflow to 0 self.assertEqualAndEqualSign(pow_op(-2.0, -2000.0), 0.0) self.assertEqual(type(pow_op(-2.0, -2000.5)), complex) self.assertEqualAndEqualSign(pow_op(-2.0, -2001.0), -0.0) self.assertEqualAndEqualSign(pow_op(2.0, -2000.0), 0.0) self.assertEqualAndEqualSign(pow_op(2.0, -2000.5), 0.0) self.assertEqualAndEqualSign(pow_op(2.0, -2001.0), 0.0) self.assertEqualAndEqualSign(pow_op(-0.5, 2000.0), 0.0) self.assertEqual(type(pow_op(-0.5, 2000.5)), 
complex) self.assertEqualAndEqualSign(pow_op(-0.5, 2001.0), -0.0) self.assertEqualAndEqualSign(pow_op(0.5, 2000.0), 0.0) self.assertEqualAndEqualSign(pow_op(0.5, 2000.5), 0.0) self.assertEqualAndEqualSign(pow_op(0.5, 2001.0), 0.0) # check we don't raise an exception for subnormal results, # and validate signs. Tests currently disabled, since # they fail on systems where a subnormal result from pow # is flushed to zero (e.g. Debian/ia64.) #self.assertTrue(0.0 < pow_op(0.5, 1048) < 1e-315) #self.assertTrue(0.0 < pow_op(-0.5, 1048) < 1e-315) #self.assertTrue(0.0 < pow_op(0.5, 1047) < 1e-315) #self.assertTrue(0.0 > pow_op(-0.5, 1047) > -1e-315) #self.assertTrue(0.0 < pow_op(2.0, -1048) < 1e-315) #self.assertTrue(0.0 < pow_op(-2.0, -1048) < 1e-315) #self.assertTrue(0.0 < pow_op(2.0, -1047) < 1e-315) #self.assertTrue(0.0 > pow_op(-2.0, -1047) > -1e-315) @requires_setformat class FormatFunctionsTestCase(unittest.TestCase): def setUp(self): self.save_formats = {'double':float.__getformat__('double'), 'float':float.__getformat__('float')} def tearDown(self): float.__setformat__('double', self.save_formats['double']) float.__setformat__('float', self.save_formats['float']) def test_getformat(self): self.assertIn(float.__getformat__('double'), ['unknown', 'IEEE, big-endian', 'IEEE, little-endian']) self.assertIn(float.__getformat__('float'), ['unknown', 'IEEE, big-endian', 'IEEE, little-endian']) self.assertRaises(ValueError, float.__getformat__, 'chicken') self.assertRaises(TypeError, float.__getformat__, 1) def test_setformat(self): for t in 'double', 'float': float.__setformat__(t, 'unknown') if self.save_formats[t] == 'IEEE, big-endian': self.assertRaises(ValueError, float.__setformat__, t, 'IEEE, little-endian') elif self.save_formats[t] == 'IEEE, little-endian': self.assertRaises(ValueError, float.__setformat__, t, 'IEEE, big-endian') else: self.assertRaises(ValueError, float.__setformat__, t, 'IEEE, big-endian') self.assertRaises(ValueError, float.__setformat__, t, 
'IEEE, little-endian') self.assertRaises(ValueError, float.__setformat__, t, 'chicken') self.assertRaises(ValueError, float.__setformat__, 'chicken', 'unknown') BE_DOUBLE_INF = b'\x7f\xf0\x00\x00\x00\x00\x00\x00' LE_DOUBLE_INF = bytes(reversed(BE_DOUBLE_INF)) BE_DOUBLE_NAN = b'\x7f\xf8\x00\x00\x00\x00\x00\x00' LE_DOUBLE_NAN = bytes(reversed(BE_DOUBLE_NAN)) BE_FLOAT_INF = b'\x7f\x80\x00\x00' LE_FLOAT_INF = bytes(reversed(BE_FLOAT_INF)) BE_FLOAT_NAN = b'\x7f\xc0\x00\x00' LE_FLOAT_NAN = bytes(reversed(BE_FLOAT_NAN)) # on non-IEEE platforms, attempting to unpack a bit pattern # representing an infinity or a NaN should raise an exception. @requires_setformat class UnknownFormatTestCase(unittest.TestCase): def setUp(self): self.save_formats = {'double':float.__getformat__('double'), 'float':float.__getformat__('float')} float.__setformat__('double', 'unknown') float.__setformat__('float', 'unknown') def tearDown(self): float.__setformat__('double', self.save_formats['double']) float.__setformat__('float', self.save_formats['float']) def test_double_specials_dont_unpack(self): for fmt, data in [('>d', BE_DOUBLE_INF), ('>d', BE_DOUBLE_NAN), ('<d', LE_DOUBLE_INF), ('<d', LE_DOUBLE_NAN)]: self.assertRaises(ValueError, struct.unpack, fmt, data) def test_float_specials_dont_unpack(self): for fmt, data in [('>f', BE_FLOAT_INF), ('>f', BE_FLOAT_NAN), ('<f', LE_FLOAT_INF), ('<f', LE_FLOAT_NAN)]: self.assertRaises(ValueError, struct.unpack, fmt, data) # on an IEEE platform, all we guarantee is that bit patterns # representing infinities or NaNs do not raise an exception; all else # is accident (today). # let's also try to guarantee that -0.0 and 0.0 don't get confused. 
class IEEEFormatTestCase(unittest.TestCase):
    # On IEEE platforms the inf/nan bit patterns must unpack without raising.

    @support.requires_IEEE_754
    def test_double_specials_do_unpack(self):
        for fmt, data in [('>d', BE_DOUBLE_INF),
                          ('>d', BE_DOUBLE_NAN),
                          ('<d', LE_DOUBLE_INF),
                          ('<d', LE_DOUBLE_NAN)]:
            struct.unpack(fmt, data)

    @support.requires_IEEE_754
    def test_float_specials_do_unpack(self):
        for fmt, data in [('>f', BE_FLOAT_INF),
                          ('>f', BE_FLOAT_NAN),
                          ('<f', LE_FLOAT_INF),
                          ('<f', LE_FLOAT_NAN)]:
            struct.unpack(fmt, data)


class FormatTestCase(unittest.TestCase):

    def test_format(self):
        # these should be rewritten to use both format(x, spec) and
        # x.__format__(spec)

        self.assertEqual(format(0.0, 'f'), '0.000000')

        # the default is 'g', except for empty format spec
        self.assertEqual(format(0.0, ''), '0.0')
        self.assertEqual(format(0.01, ''), '0.01')
        self.assertEqual(format(0.01, 'g'), '0.01')

        # empty presentation type should format in the same way as str
        # (issue 5920)
        x = 100/7.
        self.assertEqual(format(x, ''), str(x))
        self.assertEqual(format(x, '-'), str(x))
        self.assertEqual(format(x, '>'), str(x))
        self.assertEqual(format(x, '2'), str(x))

        self.assertEqual(format(1.0, 'f'), '1.000000')

        self.assertEqual(format(-1.0, 'f'), '-1.000000')

        self.assertEqual(format( 1.0, ' f'), ' 1.000000')
        self.assertEqual(format(-1.0, ' f'), '-1.000000')
        self.assertEqual(format( 1.0, '+f'), '+1.000000')
        self.assertEqual(format(-1.0, '+f'), '-1.000000')

        # % formatting
        self.assertEqual(format(-1.0, '%'), '-100.000000%')

        # conversion to string should fail
        self.assertRaises(ValueError, format, 3.0, "s")

        # other format specifiers shouldn't work on floats,
        # in particular int specifiers
        for format_spec in ([chr(x) for x in range(ord('a'), ord('z')+1)] +
                            [chr(x) for x in range(ord('A'), ord('Z')+1)]):
            if not format_spec in 'eEfFgGn%':
                self.assertRaises(ValueError, format, 0.0, format_spec)
                self.assertRaises(ValueError, format, 1.0, format_spec)
                self.assertRaises(ValueError, format, -1.0, format_spec)
                self.assertRaises(ValueError, format, 1e100, format_spec)
                self.assertRaises(ValueError, format, -1e100, format_spec)
                self.assertRaises(ValueError, format, 1e-100, format_spec)
                self.assertRaises(ValueError, format, -1e-100, format_spec)

        # issue 3382
        self.assertEqual(format(NAN, 'f'), 'nan')
        self.assertEqual(format(NAN, 'F'), 'NAN')
        self.assertEqual(format(INF, 'f'), 'inf')
        self.assertEqual(format(INF, 'F'), 'INF')

    @support.requires_IEEE_754
    def test_format_testfile(self):
        # Each non-comment line of the data file is "fmt arg -> expected".
        with open(format_testfile) as testfile:
            for line in testfile:
                if line.startswith('--'):
                    continue
                line = line.strip()
                if not line:
                    continue

                lhs, rhs = map(str.strip, line.split('->'))
                fmt, arg = lhs.split()
                self.assertEqual(fmt % float(arg), rhs)
                self.assertEqual(fmt % -float(arg), '-' + rhs)

    def test_issue5864(self):
        self.assertEqual(format(123.456, '.4'), '123.5')
        self.assertEqual(format(1234.56, '.4'), '1.235e+03')
        self.assertEqual(format(12345.6, '.4'), '1.235e+04')


class ReprTestCase(unittest.TestCase):
    def test_repr(self):
        # Round-trip repr() for a large table of known floating-point values.
        floats_file = open(os.path.join(os.path.split(__file__)[0],
                           'floating_points.txt'))
        for line in floats_file:
            line = line.strip()
            if not line or line.startswith('#'):
                continue
            v = eval(line)
            self.assertEqual(v, eval(repr(v)))
        floats_file.close()

    @unittest.skipUnless(getattr(sys, 'float_repr_style', '') == 'short',
                         "applies only when using short float repr style")
    def test_short_repr(self):
        # test short float repr introduced in Python 3.1.  One aspect
        # of this repr is that we get some degree of str -> float ->
        # str roundtripping.  In particular, for any numeric string
        # containing 15 or fewer significant digits, those exact same
        # digits (modulo trailing zeros) should appear in the output.
        # No more repr(0.03) -> "0.029999999999999999"!

        test_strings = [
            # output always includes *either* a decimal point and at
            # least one digit after that point, or an exponent.
            '0.0',
            '1.0',
            '0.01',
            '0.02',
            '0.03',
            '0.04',
            '0.05',
            '1.23456789',
            '10.0',
            '100.0',
            # values >= 1e16 get an exponent...
            '1000000000000000.0',
            '9999999999999990.0',
            '1e+16',
            '1e+17',
            # ... and so do values < 1e-4
            '0.001',
            '0.001001',
            '0.00010000000000001',
            '0.0001',
            '9.999999999999e-05',
            '1e-05',
            # values designed to provoke failure if the FPU rounding
            # precision isn't set correctly
            '8.72293771110361e+25',
            '7.47005307342313e+26',
            '2.86438000439698e+28',
            '8.89142905246179e+28',
            '3.08578087079232e+35',
            ]

        for s in test_strings:
            negs = '-'+s
            self.assertEqual(s, repr(float(s)))
            self.assertEqual(negs, repr(float(negs)))
            # Since Python 3.2, repr and str are identical
            self.assertEqual(repr(float(s)), str(float(s)))
            self.assertEqual(repr(float(negs)), str(float(negs)))


@support.requires_IEEE_754
class RoundTestCase(unittest.TestCase):
    # Behavior of round() on special values, extreme ndigits, and
    # round-half-even cases.

    def test_inf_nan(self):
        self.assertRaises(OverflowError, round, INF)
        self.assertRaises(OverflowError, round, -INF)
        self.assertRaises(ValueError, round, NAN)
        self.assertRaises(TypeError, round, INF, 0.0)
        self.assertRaises(TypeError, round, -INF, 1.0)
        self.assertRaises(TypeError, round, NAN, "ceci n'est pas un integer")
        self.assertRaises(TypeError, round, -0.0, 1j)

    def test_large_n(self):
        # Huge positive ndigits: round is the identity.
        for n in [324, 325, 400, 2**31-1, 2**31, 2**32, 2**100]:
            self.assertEqual(round(123.456, n), 123.456)
            self.assertEqual(round(-123.456, n), -123.456)
            self.assertEqual(round(1e300, n), 1e300)
            self.assertEqual(round(1e-320, n), 1e-320)
        self.assertEqual(round(1e150, 300), 1e150)
        self.assertEqual(round(1e300, 307), 1e300)
        self.assertEqual(round(-3.1415, 308), -3.1415)
        self.assertEqual(round(1e150, 309), 1e150)
        self.assertEqual(round(1.4e-315, 315), 1e-315)

    def test_small_n(self):
        # Huge negative ndigits: everything rounds to (signed) zero.
        for n in [-308, -309, -400, 1-2**31, -2**31, -2**31-1, -2**100]:
            self.assertEqual(round(123.456, n), 0.0)
            self.assertEqual(round(-123.456, n), -0.0)
            self.assertEqual(round(1e300, n), 0.0)
            self.assertEqual(round(1e-320, n), 0.0)

    def test_overflow(self):
        self.assertRaises(OverflowError, round, 1.6e308, -308)
        self.assertRaises(OverflowError, round, -1.7e308, -308)

    @unittest.skipUnless(getattr(sys,
        'float_repr_style', '') == 'short',
        "applies only when using short float repr style")
    def test_previous_round_bugs(self):
        # particular cases that have occurred in bug reports
        self.assertEqual(round(562949953421312.5, 1),
                         562949953421312.5)
        self.assertEqual(round(56294995342131.5, 3),
                         56294995342131.5)
        # round-half-even
        self.assertEqual(round(25.0, -1), 20.0)
        self.assertEqual(round(35.0, -1), 40.0)
        self.assertEqual(round(45.0, -1), 40.0)
        self.assertEqual(round(55.0, -1), 60.0)
        self.assertEqual(round(65.0, -1), 60.0)
        self.assertEqual(round(75.0, -1), 80.0)
        self.assertEqual(round(85.0, -1), 80.0)
        self.assertEqual(round(95.0, -1), 100.0)

    @unittest.skipUnless(getattr(sys,
        'float_repr_style', '') == 'short',
        "applies only when using short float repr style")
    def test_matches_float_format(self):
        # round should give the same results as float formatting
        for i in range(500):
            x = i/1000.
            self.assertEqual(float(format(x, '.0f')), round(x, 0))
            self.assertEqual(float(format(x, '.1f')), round(x, 1))
            self.assertEqual(float(format(x, '.2f')), round(x, 2))
            self.assertEqual(float(format(x, '.3f')), round(x, 3))

        for i in range(5, 5000, 10):
            x = i/1000.
            self.assertEqual(float(format(x, '.0f')), round(x, 0))
            self.assertEqual(float(format(x, '.1f')), round(x, 1))
            self.assertEqual(float(format(x, '.2f')), round(x, 2))
            self.assertEqual(float(format(x, '.3f')), round(x, 3))

        for i in range(500):
            x = random.random()
            self.assertEqual(float(format(x, '.0f')), round(x, 0))
            self.assertEqual(float(format(x, '.1f')), round(x, 1))
            self.assertEqual(float(format(x, '.2f')), round(x, 2))
            self.assertEqual(float(format(x, '.3f')), round(x, 3))

    def test_format_specials(self):
        # Test formatting of nans and infs.

        def test(fmt, value, expected):
            # Test with both % and format().
            self.assertEqual(fmt % value, expected, fmt)
            fmt = fmt[1:] # strip off the %
            self.assertEqual(format(value, fmt), expected, fmt)

        for fmt in ['%e', '%f', '%g', '%.0e', '%.6f', '%.20g',
                    '%#e', '%#f', '%#g', '%#.20e', '%#.15f', '%#.3g']:
            pfmt = '%+' + fmt[1:]
            sfmt = '% ' + fmt[1:]
            test(fmt, INF, 'inf')
            test(fmt, -INF, '-inf')
            test(fmt, NAN, 'nan')
            test(fmt, -NAN, 'nan')
            # When asking for a sign, it's always provided. nans are
            # always positive.
            test(pfmt, INF, '+inf')
            test(pfmt, -INF, '-inf')
            test(pfmt, NAN, '+nan')
            test(pfmt, -NAN, '+nan')
            # When using ' ' for a sign code, only infs can be negative.
            # Others have a space.
            test(sfmt, INF, ' inf')
            test(sfmt, -INF, '-inf')
            test(sfmt, NAN, ' nan')
            test(sfmt, -NAN, ' nan')

    def test_None_ndigits(self):
        for x in round(1.23), round(1.23, None), round(1.23, ndigits=None):
            self.assertEqual(x, 1)
            self.assertIsInstance(x, int)
        for x in round(1.78), round(1.78, None), round(1.78, ndigits=None):
            self.assertEqual(x, 2)
            self.assertIsInstance(x, int)


# Beginning with Python 2.6 float has cross platform compatible
# ways to create and represent inf and nan
class InfNanTest(unittest.TestCase):
    def test_inf_from_str(self):
        self.assertTrue(isinf(float("inf")))
        self.assertTrue(isinf(float("+inf")))
        self.assertTrue(isinf(float("-inf")))
        self.assertTrue(isinf(float("infinity")))
        self.assertTrue(isinf(float("+infinity")))
        self.assertTrue(isinf(float("-infinity")))

        self.assertEqual(repr(float("inf")), "inf")
        self.assertEqual(repr(float("+inf")), "inf")
        self.assertEqual(repr(float("-inf")), "-inf")
        self.assertEqual(repr(float("infinity")), "inf")
        self.assertEqual(repr(float("+infinity")), "inf")
        self.assertEqual(repr(float("-infinity")), "-inf")

        self.assertEqual(repr(float("INF")), "inf")
        self.assertEqual(repr(float("+Inf")), "inf")
        self.assertEqual(repr(float("-iNF")), "-inf")
        self.assertEqual(repr(float("Infinity")), "inf")
        self.assertEqual(repr(float("+iNfInItY")), "inf")
        self.assertEqual(repr(float("-INFINITY")), "-inf")

        self.assertEqual(str(float("inf")), "inf")
        self.assertEqual(str(float("+inf")), "inf")
        self.assertEqual(str(float("-inf")), "-inf")
        self.assertEqual(str(float("infinity")), "inf")
        self.assertEqual(str(float("+infinity")), "inf")
        self.assertEqual(str(float("-infinity")), "-inf")

        self.assertRaises(ValueError, float, "info")
        self.assertRaises(ValueError, float, "+info")
        self.assertRaises(ValueError, float, "-info")
        self.assertRaises(ValueError, float, "in")
        self.assertRaises(ValueError, float, "+in")
        self.assertRaises(ValueError, float, "-in")
        self.assertRaises(ValueError, float, "infinit")
        self.assertRaises(ValueError, float, "+Infin")
        self.assertRaises(ValueError, float, "-INFI")
        self.assertRaises(ValueError, float, "infinitys")

        self.assertRaises(ValueError, float, "++Inf")
        self.assertRaises(ValueError, float, "-+inf")
        self.assertRaises(ValueError, float, "+-infinity")
        self.assertRaises(ValueError, float, "--Infinity")

    def test_inf_as_str(self):
        self.assertEqual(repr(1e300 * 1e300), "inf")
        self.assertEqual(repr(-1e300 * 1e300), "-inf")

        self.assertEqual(str(1e300 * 1e300), "inf")
        self.assertEqual(str(-1e300 * 1e300), "-inf")

    def test_nan_from_str(self):
        self.assertTrue(isnan(float("nan")))
        self.assertTrue(isnan(float("+nan")))
        self.assertTrue(isnan(float("-nan")))

        self.assertEqual(repr(float("nan")), "nan")
        self.assertEqual(repr(float("+nan")), "nan")
        self.assertEqual(repr(float("-nan")), "nan")

        self.assertEqual(repr(float("NAN")), "nan")
        self.assertEqual(repr(float("+NAn")), "nan")
        self.assertEqual(repr(float("-NaN")), "nan")

        self.assertEqual(str(float("nan")), "nan")
        self.assertEqual(str(float("+nan")), "nan")
        self.assertEqual(str(float("-nan")), "nan")

        self.assertRaises(ValueError, float, "nana")
        self.assertRaises(ValueError, float, "+nana")
        self.assertRaises(ValueError, float, "-nana")
        self.assertRaises(ValueError, float, "na")
        self.assertRaises(ValueError, float, "+na")
        self.assertRaises(ValueError, float, "-na")

        self.assertRaises(ValueError, float, "++nan")
        self.assertRaises(ValueError, float, "-+NAN")
        self.assertRaises(ValueError, float, "+-NaN")
        self.assertRaises(ValueError, float, "--nAn")

    def test_nan_as_str(self):
        self.assertEqual(repr(1e300 * 1e300 * 0), "nan")
        self.assertEqual(repr(-1e300 * 1e300 * 0), "nan")

        self.assertEqual(str(1e300 * 1e300 * 0), "nan")
        self.assertEqual(str(-1e300 * 1e300 * 0), "nan")

    def test_inf_signs(self):
        self.assertEqual(copysign(1.0, float('inf')), 1.0)
        self.assertEqual(copysign(1.0, float('-inf')), -1.0)

    @unittest.skipUnless(getattr(sys, 'float_repr_style', '') == 'short',
                         "applies only when using short float repr style")
    def test_nan_signs(self):
        # When using the dtoa.c code, the sign of float('nan') should
        # be predictable.
        self.assertEqual(copysign(1.0, float('nan')), 1.0)
        self.assertEqual(copysign(1.0, float('-nan')), -1.0)


fromHex = float.fromhex
toHex = float.hex
class HexFloatTestCase(unittest.TestCase):
    MAX = fromHex('0x.fffffffffffff8p+1024')  # max normal
    MIN = fromHex('0x1p-1022')                # min normal
    TINY = fromHex('0x0.0000000000001p-1022') # min subnormal
    EPS = fromHex('0x0.0000000000001p0') # diff between 1.0 and next float up

    def identical(self, x, y):
        # check that floats x and y are identical, or that both
        # are NaNs
        if isnan(x) or isnan(y):
            if isnan(x) == isnan(y):
                return
        elif x == y and (x != 0.0 or copysign(1.0, x) == copysign(1.0, y)):
            return
        self.fail('%r not identical to %r' % (x, y))

    def test_ends(self):
        self.identical(self.MIN, ldexp(1.0, -1022))
        self.identical(self.TINY, ldexp(1.0, -1074))
        self.identical(self.EPS, ldexp(1.0, -52))
        self.identical(self.MAX, 2.*(ldexp(1.0, 1023) - ldexp(1.0, 970)))

    def test_invalid_inputs(self):
        invalid_inputs = [
            'infi',   # misspelt infinities and nans
            '-Infinit',
            '++inf',
            '-+Inf',
            '--nan',
            '+-NaN',
            'snan',
            'NaNs',
            'nna',
            'an',
            'nf',
            'nfinity',
            'inity',
            'iinity',
            '0xnan',
            '',
            ' ',
            'x1.0p0',
            '0xX1.0p0',
            '+ 0x1.0p0',  # internal whitespace
            '- 0x1.0p0',
            '0 x1.0p0',
            '0x 1.0p0',
            '0x1 2.0p0',
            '+0x1 .0p0',
            '0x1. 0p0',
            '-0x1.0 1p0',
            '-0x1.0 p0',
            '+0x1.0p +0',
            '0x1.0p -0',
            '0x1.0p 0',
            '+0x1.0p+ 0',
            '-0x1.0p- 0',
            '++0x1.0p-0',  # double signs
            '--0x1.0p0',
            '+-0x1.0p+0',
            '-+0x1.0p0',
            '0x1.0p++0',
            '+0x1.0p+-0',
            '-0x1.0p-+0',
            '0x1.0p--0',
            '0x1.0.p0',
            '0x.p0', # no hex digits before or after point
            '0x1,p0', # wrong decimal point character
            '0x1pa',
            '0x1p\uff10',  # fullwidth Unicode digits
            '\uff10x1p0',
            '0x\uff11p0',
            '0x1.\uff10p0',
            '0x1p0 \n 0x2p0',
            '0x1p0\0 0x1p0',  # embedded null byte is not end of string
            ]
        for x in invalid_inputs:
            try:
                result = fromHex(x)
            except ValueError:
                pass
            else:
                self.fail('Expected float.fromhex(%r) to raise ValueError; '
                          'got %r instead' % (x, result))

    def test_whitespace(self):
        # Leading and trailing whitespace (any mix) is accepted.
        value_pairs = [
            ('inf', INF),
            ('-Infinity', -INF),
            ('nan', NAN),
            ('1.0', 1.0),
            ('-0x.2', -0.125),
            ('-0.0', -0.0)
            ]
        whitespace = [
            '',
            ' ',
            '\t',
            '\n',
            '\n \t',
            '\f',
            '\v',
            '\r'
            ]
        for inp, expected in value_pairs:
            for lead in whitespace:
                for trail in whitespace:
                    got = fromHex(lead + inp + trail)
                    self.identical(got, expected)

    def test_from_hex(self):
        MIN = self.MIN;
        MAX = self.MAX;
        TINY = self.TINY;
        EPS = self.EPS;

        # two spellings of infinity, with optional signs; case-insensitive
        self.identical(fromHex('inf'), INF)
        self.identical(fromHex('+Inf'), INF)
        self.identical(fromHex('-INF'), -INF)
        self.identical(fromHex('iNf'), INF)
        self.identical(fromHex('Infinity'), INF)
        self.identical(fromHex('+INFINITY'), INF)
        self.identical(fromHex('-infinity'), -INF)
        self.identical(fromHex('-iNFiNitY'), -INF)

        # nans with optional sign; case insensitive
        self.identical(fromHex('nan'), NAN)
        self.identical(fromHex('+NaN'), NAN)
        self.identical(fromHex('-NaN'), NAN)
        self.identical(fromHex('-nAN'), NAN)

        # variations in input format
        self.identical(fromHex('1'), 1.0)
        self.identical(fromHex('+1'), 1.0)
        self.identical(fromHex('1.'), 1.0)
        self.identical(fromHex('1.0'), 1.0)
        self.identical(fromHex('1.0p0'), 1.0)
        self.identical(fromHex('01'), 1.0)
        self.identical(fromHex('01.'), 1.0)
        self.identical(fromHex('0x1'), 1.0)
        self.identical(fromHex('0x1.'), 1.0)
        self.identical(fromHex('0x1.0'), 1.0)
        self.identical(fromHex('+0x1.0'), 1.0)
        self.identical(fromHex('0x1p0'), 1.0)
        self.identical(fromHex('0X1p0'), 1.0)
        self.identical(fromHex('0X1P0'), 1.0)
        self.identical(fromHex('0x1P0'), 1.0)
        self.identical(fromHex('0x1.p0'), 1.0)
        self.identical(fromHex('0x1.0p0'), 1.0)
        self.identical(fromHex('0x.1p4'), 1.0)
        self.identical(fromHex('0x.1p04'), 1.0)
        self.identical(fromHex('0x.1p004'), 1.0)
        self.identical(fromHex('0x1p+0'), 1.0)
        self.identical(fromHex('0x1P-0'), 1.0)
        self.identical(fromHex('+0x1p0'), 1.0)
        self.identical(fromHex('0x01p0'), 1.0)
        self.identical(fromHex('0x1p00'), 1.0)
        self.identical(fromHex(' 0x1p0 '), 1.0)
        self.identical(fromHex('\n 0x1p0'), 1.0)
        self.identical(fromHex('0x1p0 \t'), 1.0)
        self.identical(fromHex('0xap0'), 10.0)
        self.identical(fromHex('0xAp0'), 10.0)
        self.identical(fromHex('0xaP0'), 10.0)
        self.identical(fromHex('0xAP0'), 10.0)
        self.identical(fromHex('0xbep0'), 190.0)
        self.identical(fromHex('0xBep0'), 190.0)
        self.identical(fromHex('0xbEp0'), 190.0)
        self.identical(fromHex('0XBE0P-4'), 190.0)
        self.identical(fromHex('0xBEp0'), 190.0)
        self.identical(fromHex('0xB.Ep4'), 190.0)
        self.identical(fromHex('0x.BEp8'), 190.0)
        self.identical(fromHex('0x.0BEp12'), 190.0)

        # moving the point around
        pi = fromHex('0x1.921fb54442d18p1')
        self.identical(fromHex('0x.006487ed5110b46p11'), pi)
        self.identical(fromHex('0x.00c90fdaa22168cp10'), pi)
        self.identical(fromHex('0x.01921fb54442d18p9'), pi)
        self.identical(fromHex('0x.03243f6a8885a3p8'), pi)
        self.identical(fromHex('0x.06487ed5110b46p7'), pi)
        self.identical(fromHex('0x.0c90fdaa22168cp6'), pi)
        self.identical(fromHex('0x.1921fb54442d18p5'), pi)
        self.identical(fromHex('0x.3243f6a8885a3p4'), pi)
        self.identical(fromHex('0x.6487ed5110b46p3'), pi)
        self.identical(fromHex('0x.c90fdaa22168cp2'), pi)
        self.identical(fromHex('0x1.921fb54442d18p1'), pi)
        self.identical(fromHex('0x3.243f6a8885a3p0'), pi)
        self.identical(fromHex('0x6.487ed5110b46p-1'), pi)
        self.identical(fromHex('0xc.90fdaa22168cp-2'), pi)
        self.identical(fromHex('0x19.21fb54442d18p-3'), pi)
        self.identical(fromHex('0x32.43f6a8885a3p-4'), pi)
        self.identical(fromHex('0x64.87ed5110b46p-5'), pi)
        self.identical(fromHex('0xc9.0fdaa22168cp-6'), pi)
        self.identical(fromHex('0x192.1fb54442d18p-7'), pi)
        self.identical(fromHex('0x324.3f6a8885a3p-8'), pi)
        self.identical(fromHex('0x648.7ed5110b46p-9'), pi)
        self.identical(fromHex('0xc90.fdaa22168cp-10'), pi)
        self.identical(fromHex('0x1921.fb54442d18p-11'), pi)
        # ...
        self.identical(fromHex('0x1921fb54442d1.8p-47'), pi)
        self.identical(fromHex('0x3243f6a8885a3p-48'), pi)
        self.identical(fromHex('0x6487ed5110b46p-49'), pi)
        self.identical(fromHex('0xc90fdaa22168cp-50'), pi)
        self.identical(fromHex('0x1921fb54442d18p-51'), pi)
        self.identical(fromHex('0x3243f6a8885a30p-52'), pi)
        self.identical(fromHex('0x6487ed5110b460p-53'), pi)
        self.identical(fromHex('0xc90fdaa22168c0p-54'), pi)
        self.identical(fromHex('0x1921fb54442d180p-55'), pi)

        # results that should overflow...
        self.assertRaises(OverflowError, fromHex, '-0x1p1024')
        self.assertRaises(OverflowError, fromHex, '0x1p+1025')
        self.assertRaises(OverflowError, fromHex, '+0X1p1030')
        self.assertRaises(OverflowError, fromHex, '-0x1p+1100')
        self.assertRaises(OverflowError, fromHex, '0X1p123456789123456789')
        self.assertRaises(OverflowError, fromHex, '+0X.8p+1025')
        self.assertRaises(OverflowError, fromHex, '+0x0.8p1025')
        self.assertRaises(OverflowError, fromHex, '-0x0.4p1026')
        self.assertRaises(OverflowError, fromHex, '0X2p+1023')
        self.assertRaises(OverflowError, fromHex, '0x2.p1023')
        self.assertRaises(OverflowError, fromHex, '-0x2.0p+1023')
        self.assertRaises(OverflowError, fromHex, '+0X4p+1022')
        self.assertRaises(OverflowError, fromHex, '0x1.ffffffffffffffp+1023')
        self.assertRaises(OverflowError, fromHex, '-0X1.fffffffffffff9p1023')
        self.assertRaises(OverflowError, fromHex, '0X1.fffffffffffff8p1023')
        self.assertRaises(OverflowError, fromHex, '+0x3.fffffffffffffp1022')
        self.assertRaises(OverflowError, fromHex, '0x3fffffffffffffp+970')
        self.assertRaises(OverflowError, fromHex, '0x10000000000000000p960')
        self.assertRaises(OverflowError, fromHex, '-0Xffffffffffffffffp960')

        # ...and those that round to +-max float
        self.identical(fromHex('+0x1.fffffffffffffp+1023'), MAX)
        self.identical(fromHex('-0X1.fffffffffffff7p1023'), -MAX)
        self.identical(fromHex('0X1.fffffffffffff7fffffffffffffp1023'), MAX)

        # zeros
        self.identical(fromHex('0x0p0'), 0.0)
        self.identical(fromHex('0x0p1000'), 0.0)
        self.identical(fromHex('-0x0p1023'), -0.0)
        self.identical(fromHex('0X0p1024'), 0.0)
        self.identical(fromHex('-0x0p1025'), -0.0)
        self.identical(fromHex('0X0p2000'), 0.0)
        self.identical(fromHex('0x0p123456789123456789'), 0.0)
        self.identical(fromHex('-0X0p-0'), -0.0)
        self.identical(fromHex('-0X0p-1000'), -0.0)
        self.identical(fromHex('0x0p-1023'), 0.0)
        self.identical(fromHex('-0X0p-1024'), -0.0)
        self.identical(fromHex('-0x0p-1025'), -0.0)
        self.identical(fromHex('-0x0p-1072'), -0.0)
        self.identical(fromHex('0X0p-1073'), 0.0)
        self.identical(fromHex('-0x0p-1074'), -0.0)
        self.identical(fromHex('0x0p-1075'), 0.0)
        self.identical(fromHex('0X0p-1076'), 0.0)
        self.identical(fromHex('-0X0p-2000'), -0.0)
        self.identical(fromHex('-0x0p-123456789123456789'), -0.0)

        # values that should underflow to 0
        self.identical(fromHex('0X1p-1075'), 0.0)
        self.identical(fromHex('-0X1p-1075'), -0.0)
        self.identical(fromHex('-0x1p-123456789123456789'), -0.0)
        self.identical(fromHex('0x1.00000000000000001p-1075'), TINY)
        self.identical(fromHex('-0x1.1p-1075'), -TINY)
        self.identical(fromHex('0x1.fffffffffffffffffp-1075'), TINY)

        # check round-half-even is working correctly near 0 ...
        self.identical(fromHex('0x1p-1076'), 0.0)
        self.identical(fromHex('0X2p-1076'), 0.0)
        self.identical(fromHex('0X3p-1076'), TINY)
        self.identical(fromHex('0x4p-1076'), TINY)
        self.identical(fromHex('0X5p-1076'), TINY)
        self.identical(fromHex('0X6p-1076'), 2*TINY)
        self.identical(fromHex('0x7p-1076'), 2*TINY)
        self.identical(fromHex('0X8p-1076'), 2*TINY)
        self.identical(fromHex('0X9p-1076'), 2*TINY)
        self.identical(fromHex('0xap-1076'), 2*TINY)
        self.identical(fromHex('0Xbp-1076'), 3*TINY)
        self.identical(fromHex('0xcp-1076'), 3*TINY)
        self.identical(fromHex('0Xdp-1076'), 3*TINY)
        self.identical(fromHex('0Xep-1076'), 4*TINY)
        self.identical(fromHex('0xfp-1076'), 4*TINY)
        self.identical(fromHex('0x10p-1076'), 4*TINY)
        self.identical(fromHex('-0x1p-1076'), -0.0)
        self.identical(fromHex('-0X2p-1076'), -0.0)
        self.identical(fromHex('-0x3p-1076'), -TINY)
        self.identical(fromHex('-0X4p-1076'), -TINY)
        self.identical(fromHex('-0x5p-1076'), -TINY)
        self.identical(fromHex('-0x6p-1076'), -2*TINY)
        self.identical(fromHex('-0X7p-1076'), -2*TINY)
        self.identical(fromHex('-0X8p-1076'), -2*TINY)
        self.identical(fromHex('-0X9p-1076'), -2*TINY)
        self.identical(fromHex('-0Xap-1076'), -2*TINY)
        self.identical(fromHex('-0xbp-1076'), -3*TINY)
        self.identical(fromHex('-0xcp-1076'), -3*TINY)
        self.identical(fromHex('-0Xdp-1076'), -3*TINY)
        self.identical(fromHex('-0xep-1076'), -4*TINY)
        self.identical(fromHex('-0Xfp-1076'), -4*TINY)
        self.identical(fromHex('-0X10p-1076'), -4*TINY)

        # ... and near MIN ...
        self.identical(fromHex('0x0.ffffffffffffd6p-1022'), MIN-3*TINY)
        self.identical(fromHex('0x0.ffffffffffffd8p-1022'), MIN-2*TINY)
        self.identical(fromHex('0x0.ffffffffffffdap-1022'), MIN-2*TINY)
        self.identical(fromHex('0x0.ffffffffffffdcp-1022'), MIN-2*TINY)
        self.identical(fromHex('0x0.ffffffffffffdep-1022'), MIN-2*TINY)
        self.identical(fromHex('0x0.ffffffffffffe0p-1022'), MIN-2*TINY)
        self.identical(fromHex('0x0.ffffffffffffe2p-1022'), MIN-2*TINY)
        self.identical(fromHex('0x0.ffffffffffffe4p-1022'), MIN-2*TINY)
        self.identical(fromHex('0x0.ffffffffffffe6p-1022'), MIN-2*TINY)
        self.identical(fromHex('0x0.ffffffffffffe8p-1022'), MIN-2*TINY)
        self.identical(fromHex('0x0.ffffffffffffeap-1022'), MIN-TINY)
        self.identical(fromHex('0x0.ffffffffffffecp-1022'), MIN-TINY)
        self.identical(fromHex('0x0.ffffffffffffeep-1022'), MIN-TINY)
        self.identical(fromHex('0x0.fffffffffffff0p-1022'), MIN-TINY)
        self.identical(fromHex('0x0.fffffffffffff2p-1022'), MIN-TINY)
        self.identical(fromHex('0x0.fffffffffffff4p-1022'), MIN-TINY)
        self.identical(fromHex('0x0.fffffffffffff6p-1022'), MIN-TINY)
        self.identical(fromHex('0x0.fffffffffffff8p-1022'), MIN)
        self.identical(fromHex('0x0.fffffffffffffap-1022'), MIN)
        self.identical(fromHex('0x0.fffffffffffffcp-1022'), MIN)
        self.identical(fromHex('0x0.fffffffffffffep-1022'), MIN)
        self.identical(fromHex('0x1.00000000000000p-1022'), MIN)
        self.identical(fromHex('0x1.00000000000002p-1022'), MIN)
        self.identical(fromHex('0x1.00000000000004p-1022'), MIN)
        self.identical(fromHex('0x1.00000000000006p-1022'), MIN)
        self.identical(fromHex('0x1.00000000000008p-1022'), MIN)
        self.identical(fromHex('0x1.0000000000000ap-1022'), MIN+TINY)
        self.identical(fromHex('0x1.0000000000000cp-1022'), MIN+TINY)
        self.identical(fromHex('0x1.0000000000000ep-1022'), MIN+TINY)
        self.identical(fromHex('0x1.00000000000010p-1022'), MIN+TINY)
        self.identical(fromHex('0x1.00000000000012p-1022'), MIN+TINY)
        self.identical(fromHex('0x1.00000000000014p-1022'), MIN+TINY)
        self.identical(fromHex('0x1.00000000000016p-1022'), MIN+TINY)
        self.identical(fromHex('0x1.00000000000018p-1022'), MIN+2*TINY)

        # ... and near 1.0.
        self.identical(fromHex('0x0.fffffffffffff0p0'), 1.0-EPS)
        self.identical(fromHex('0x0.fffffffffffff1p0'), 1.0-EPS)
        self.identical(fromHex('0X0.fffffffffffff2p0'), 1.0-EPS)
        self.identical(fromHex('0x0.fffffffffffff3p0'), 1.0-EPS)
        self.identical(fromHex('0X0.fffffffffffff4p0'), 1.0-EPS)
        self.identical(fromHex('0X0.fffffffffffff5p0'), 1.0-EPS/2)
        self.identical(fromHex('0X0.fffffffffffff6p0'), 1.0-EPS/2)
        self.identical(fromHex('0x0.fffffffffffff7p0'), 1.0-EPS/2)
        self.identical(fromHex('0x0.fffffffffffff8p0'), 1.0-EPS/2)
        self.identical(fromHex('0X0.fffffffffffff9p0'), 1.0-EPS/2)
        self.identical(fromHex('0X0.fffffffffffffap0'), 1.0-EPS/2)
        self.identical(fromHex('0x0.fffffffffffffbp0'), 1.0-EPS/2)
        self.identical(fromHex('0X0.fffffffffffffcp0'), 1.0)
        self.identical(fromHex('0x0.fffffffffffffdp0'), 1.0)
        self.identical(fromHex('0X0.fffffffffffffep0'), 1.0)
        self.identical(fromHex('0x0.ffffffffffffffp0'), 1.0)
        self.identical(fromHex('0X1.00000000000000p0'), 1.0)
        self.identical(fromHex('0X1.00000000000001p0'), 1.0)
        self.identical(fromHex('0x1.00000000000002p0'), 1.0)
        self.identical(fromHex('0X1.00000000000003p0'), 1.0)
        self.identical(fromHex('0x1.00000000000004p0'), 1.0)
        self.identical(fromHex('0X1.00000000000005p0'), 1.0)
        self.identical(fromHex('0X1.00000000000006p0'), 1.0)
        self.identical(fromHex('0X1.00000000000007p0'), 1.0)
        self.identical(fromHex('0x1.00000000000007ffffffffffffffffffffp0'),
                       1.0)
        self.identical(fromHex('0x1.00000000000008p0'), 1.0)
        self.identical(fromHex('0x1.00000000000008000000000000000001p0'),
                       1+EPS)
        self.identical(fromHex('0X1.00000000000009p0'), 1.0+EPS)
        self.identical(fromHex('0x1.0000000000000ap0'), 1.0+EPS)
        self.identical(fromHex('0x1.0000000000000bp0'), 1.0+EPS)
        self.identical(fromHex('0X1.0000000000000cp0'), 1.0+EPS)
        self.identical(fromHex('0x1.0000000000000dp0'), 1.0+EPS)
        self.identical(fromHex('0x1.0000000000000ep0'), 1.0+EPS)
        self.identical(fromHex('0X1.0000000000000fp0'), 1.0+EPS)
        self.identical(fromHex('0x1.00000000000010p0'), 1.0+EPS)
        self.identical(fromHex('0X1.00000000000011p0'), 1.0+EPS)
        self.identical(fromHex('0x1.00000000000012p0'), 1.0+EPS)
        self.identical(fromHex('0X1.00000000000013p0'), 1.0+EPS)
        self.identical(fromHex('0X1.00000000000014p0'), 1.0+EPS)
        self.identical(fromHex('0x1.00000000000015p0'), 1.0+EPS)
        self.identical(fromHex('0x1.00000000000016p0'), 1.0+EPS)
        self.identical(fromHex('0X1.00000000000017p0'), 1.0+EPS)
        self.identical(fromHex('0x1.00000000000017ffffffffffffffffffffp0'),
                       1.0+EPS)
        self.identical(fromHex('0x1.00000000000018p0'), 1.0+2*EPS)
        self.identical(fromHex('0X1.00000000000018000000000000000001p0'),
                       1.0+2*EPS)
        self.identical(fromHex('0x1.00000000000019p0'), 1.0+2*EPS)
        self.identical(fromHex('0X1.0000000000001ap0'), 1.0+2*EPS)
        self.identical(fromHex('0X1.0000000000001bp0'), 1.0+2*EPS)
        self.identical(fromHex('0x1.0000000000001cp0'), 1.0+2*EPS)
        self.identical(fromHex('0x1.0000000000001dp0'), 1.0+2*EPS)
        self.identical(fromHex('0x1.0000000000001ep0'), 1.0+2*EPS)
        self.identical(fromHex('0X1.0000000000001fp0'), 1.0+2*EPS)
        self.identical(fromHex('0x1.00000000000020p0'), 1.0+2*EPS)

    def test_roundtrip(self):
        def roundtrip(x):
            return fromHex(toHex(x))

        for x in [NAN, INF, self.MAX, self.MIN, self.MIN-self.TINY,
                  self.TINY, 0.0]:
            self.identical(x, roundtrip(x))
            self.identical(-x, roundtrip(-x))

        # fromHex(toHex(x)) should exactly recover x, for any non-NaN float x.
        import random
        for i in range(10000):
            e = random.randrange(-1200, 1200)
            m = random.random()
            s = random.choice([1.0, -1.0])
            try:
                x = s*ldexp(m, e)
            except OverflowError:
                pass
            else:
                self.identical(x, fromHex(toHex(x)))

    def test_subclass(self):
        # fromhex on a float subclass must return an instance of the subclass.
        class F(float):
            def __new__(cls, value):
                return float.__new__(cls, value + 1)

        f = F.fromhex((1.5).hex())
        self.assertIs(type(f), F)
        self.assertEqual(f, 2.5)

        class F2(float):
            def __init__(self, value):
                self.foo = 'bar'

        f = F2.fromhex((1.5).hex())
        self.assertIs(type(f), F2)
        self.assertEqual(f, 1.5)
        self.assertEqual(getattr(f, 'foo', 'none'), 'bar')


if __name__ == '__main__':
    unittest.main()
# --- dataset artifact: "codeparrot/github-code-clean" marks a file boundary.
# The content above is the tail of CPython's Lib/test/test_float.py; the
# content below is the head of TensorFlow's tensorflow/python/framework/ops.py.
# codeparrot/github-code-clean
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Classes and functions used to construct graphs.""" # pylint: disable=g-bad-name from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import copy import functools import linecache import os import re import sys import threading import numpy as np import six from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.core.framework import attr_value_pb2 from tensorflow.core.framework import function_pb2 from tensorflow.core.framework import graph_pb2 from tensorflow.core.framework import node_def_pb2 from tensorflow.core.framework import op_def_pb2 from tensorflow.core.framework import versions_pb2 from tensorflow.core.protobuf import config_pb2 from tensorflow.python import pywrap_tensorflow as c_api from tensorflow.python.eager import context from tensorflow.python.eager import core from tensorflow.python.eager import tape from tensorflow.python.framework import c_api_util from tensorflow.python.framework import cpp_shape_inference_pb2 from tensorflow.python.framework import device as pydev from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import op_def_registry from tensorflow.python.framework import registry from 
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import versions
from tensorflow.python.ops import control_flow_util
from tensorflow.python.platform import app
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import decorator_utils
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export


# Temporary global switch determining if we should enable the work-in-progress
# calls to the C API. Currently disabled by default but can be manually enabled
# in code or via the environment variable. This will be removed once all
# functionality is supported and there's no performance penalty with it enabled.
#
# Each flag is True unless its environment variable is exactly the string "0".
# NOTE: `!=` (value equality) is the correct comparison here.  The previous
# `is not "0"` compared string *identity*, which only happened to work because
# CPython interns short string literals, and it emits a SyntaxWarning on
# Python >= 3.8.
_USE_C_API = os.getenv("TF_C_API_GRAPH_CONSTRUCTION", "1") != "0"
_USE_C_SHAPES = os.getenv("TF_C_API_GRAPH_CONSTRUCTION_SHAPES", "0") != "0"


def tensor_id(tensor):
  """Returns a unique identifier for this Tensor."""
  return tensor._id  # pylint: disable=protected-access


class _NullContextmanager(object):
  """A context manager that does nothing and never suppresses exceptions."""

  def __enter__(self):
    pass

  def __exit__(self, type_arg, value_arg, traceback_arg):
    return False  # False values do not suppress exceptions


def _override_helper(clazz_object, operator, func):
  """Overrides (string) operator on Tensors to call func.

  Args:
    clazz_object: the class to override for; either Tensor or SparseTensor.
    operator: the string name of the operator to override.
    func: the function that replaces the overridden operator.

  Raises:
    ValueError: If operator has already been overwritten,
      or if operator is not allowed to be overwritten.
  """
  existing = getattr(clazz_object, operator, None)
  if existing is not None:
    # Check to see if this is a default method-wrapper or slot wrapper which
    # will be true for the comparison operators.
    if not isinstance(existing, type(object.__lt__)):
      raise ValueError("operator %s cannot be overwritten again on class %s." %
                       (operator, clazz_object))
  if operator not in Tensor.OVERLOADABLE_OPERATORS:
    raise ValueError("Overriding %s is disallowed" % operator)
  setattr(clazz_object, operator, func)


def _as_graph_element(obj):
  """Convert `obj` to a graph element if possible, otherwise return `None`.

  Args:
    obj: Object to convert.

  Returns:
    The result of `obj._as_graph_element()` if that method is available;
        otherwise `None`.
  """
  conv_fn = getattr(obj, "_as_graph_element", None)
  if conv_fn and callable(conv_fn):
    return conv_fn()
  return None


# Tuple (not list) of registered "tensor-like" types, so `isinstance` checks
# in is_dense_tensor_like() are efficient.
_TENSOR_LIKE_TYPES = tuple()


def is_dense_tensor_like(t):
  """EXPERIMENTAL: Returns true if `t` implements the tensor interface.

  See `register_dense_tensor_like_type()` for the current definition of a
  "tensor-like type".

  Args:
    t: An object.

  Returns:
    True iff `t` is an instance of one of the registered "tensor-like" types.
  """
  return isinstance(t, _TENSOR_LIKE_TYPES)


def register_dense_tensor_like_type(tensor_type):
  """EXPERIMENTAL: Registers `tensor_type` as implementing the tensor interface.

  A "tensor-like type" can represent a single dense tensor, and implements
  the `name` and `dtype` properties.

  Args:
    tensor_type: A type implementing the tensor interface.

  Raises:
    TypeError: If `tensor_type` does not implement the tensor interface.
  """
  try:
    if not isinstance(tensor_type.name, property):
      raise TypeError("Type %s does not define a `name` property" %
                      tensor_type.__name__)
  except AttributeError:
    raise TypeError("Type %s does not define a `name` property" %
                    tensor_type.__name__)
  try:
    if not isinstance(tensor_type.dtype, property):
      raise TypeError("Type %s does not define a `dtype` property" %
                      tensor_type.__name__)
  except AttributeError:
    raise TypeError("Type %s does not define a `dtype` property" %
                    tensor_type.__name__)
  # We expect this list to be small, so choose quadratic complexity
  # for registration, so that we have a tuple that can be used for
  # more efficient `isinstance` checks later.
  global _TENSOR_LIKE_TYPES
  _TENSOR_LIKE_TYPES = tuple(list(_TENSOR_LIKE_TYPES) + [tensor_type])


def uid():
  """A unique (within this program execution) integer."""
  return c_api.TFE_Py_UID()


def numpy_text(tensor, is_repr=False):
  """Human readable representation of a tensor's numpy value."""
  if tensor.dtype.is_numpy_compatible:
    text = repr(tensor.numpy()) if is_repr else str(tensor.numpy())
  else:
    text = "<unprintable>"
  if "\n" in text:
    # Multi-line values start on a fresh line so they align visually.
    text = "\n" + text
  return text


# NOTE(ebrevdo): Do not subclass this.  If you do, I will break you on purpose.
class _TensorLike(object):
  """Internal cls for grouping Tensor, SparseTensor, ..., for is_instance."""
  pass


@tf_export("Tensor")
class Tensor(_TensorLike):
  """Represents one of the outputs of an `Operation`.

  A `Tensor` is a symbolic handle to one of the outputs of an
  `Operation`. It does not hold the values of that operation's output,
  but instead provides a means of computing those values in a
  TensorFlow @{tf.Session}.

  This class has two primary purposes:

  1. A `Tensor` can be passed as an input to another `Operation`.
     This builds a dataflow connection between operations, which
     enables TensorFlow to execute an entire `Graph` that represents a
     large, multi-step computation.

  2. After the graph has been launched in a session, the value of the
     `Tensor` can be computed by passing it to
     @{tf.Session.run}.
     `t.eval()` is a shortcut for calling
     `tf.get_default_session().run(t)`.

  In the following example, `c`, `d`, and `e` are symbolic `Tensor`
  objects, whereas `result` is a numpy array that stores a concrete
  value:

  ```python
  # Build a dataflow graph.
  c = tf.constant([[1.0, 2.0], [3.0, 4.0]])
  d = tf.constant([[1.0, 1.0], [0.0, 1.0]])
  e = tf.matmul(c, d)

  # Construct a `Session` to execute the graph.
  sess = tf.Session()

  # Execute the graph and store the value that `e` represents in `result`.
  result = sess.run(e)
  ```
  """

  # List of Python operators that we allow to override.
  OVERLOADABLE_OPERATORS = {
      # Binary.
      "__add__",
      "__radd__",
      "__sub__",
      "__rsub__",
      "__mul__",
      "__rmul__",
      "__div__",
      "__rdiv__",
      "__truediv__",
      "__rtruediv__",
      "__floordiv__",
      "__rfloordiv__",
      "__mod__",
      "__rmod__",
      "__lt__",
      "__le__",
      "__gt__",
      "__ge__",
      "__and__",
      "__rand__",
      "__or__",
      "__ror__",
      "__xor__",
      "__rxor__",
      "__getitem__",
      "__pow__",
      "__rpow__",
      # Unary.
      "__invert__",
      "__neg__",
      "__abs__",
      "__matmul__",
      "__rmatmul__"
  }

  def __init__(self, op, value_index, dtype):
    """Creates a new `Tensor`.

    Args:
      op: An `Operation`. `Operation` that computes this tensor.
      value_index: An `int`. Index of the operation's endpoint that produces
        this tensor.
      dtype: A `DType`. Type of elements stored in this tensor.

    Raises:
      TypeError: If the op is not an `Operation`.
    """
    if not isinstance(op, Operation):
      raise TypeError("op needs to be an Operation: %s" % op)
    self._op = op
    self._value_index = value_index
    self._dtype = dtypes.as_dtype(dtype)

    if _USE_C_API:
      # This will be set by set_shape_and_handle_data_for_outputs.
      self._shape_val = None
    else:
      # The Python code requires all tensors start with a shape to support shape
      # inference on imported while loops. This isn't necessary with the C API
      # enabled because the C API provides the shapes for imported nodes.
      # TODO(skyewm): remove when _USE_C_API is removed.
      self._shape_val = tensor_shape.unknown_shape()

    # List of operations that use this Tensor as input.  We maintain this list
    # to easily navigate a computation graph.
    self._consumers = []

    if not _USE_C_SHAPES:
      # Attributes used for C++ shape inference. Not inspected, only forwarded.
      # If set, will be a HandleData object from cpp_shape_inference.proto.
      self._handle_data = None
    self._id = uid()

  @property
  def op(self):
    """The `Operation` that produces this tensor as an output."""
    return self._op

  @property
  def dtype(self):
    """The `DType` of elements in this tensor."""
    return self._dtype

  @property
  def graph(self):
    """The `Graph` that contains this tensor."""
    return self._op.graph

  @property
  def name(self):
    """The string name of this tensor."""
    if not self._op.name:
      raise ValueError("Operation was not named: %s" % self._op)
    return "%s:%d" % (self._op.name, self._value_index)

  @property
  def device(self):
    """The name of the device on which this tensor will be produced, or None."""
    return self._op.device

  @property
  def shape(self):
    """Returns the `TensorShape` that represents the shape of this tensor.

    The shape is computed using shape inference functions that are
    registered in the Op for each `Operation`.  See
    @{tf.TensorShape}
    for more details of what a shape represents.

    The inferred shape of a tensor is used to provide shape
    information without having to launch the graph in a session. This
    can be used for debugging, and providing early error messages. For
    example:

    ```python
    c = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])

    print(c.shape)
    ==> TensorShape([Dimension(2), Dimension(3)])

    d = tf.constant([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]])

    print(d.shape)
    ==> TensorShape([Dimension(4), Dimension(2)])

    # Raises a ValueError, because `c` and `d` do not have compatible
    # inner dimensions.
    e = tf.matmul(c, d)

    f = tf.matmul(c, d, transpose_a=True, transpose_b=True)

    print(f.shape)
    ==> TensorShape([Dimension(3), Dimension(4)])
    ```

    In some cases, the inferred shape may have unknown dimensions. If
    the caller has additional information about the values of these
    dimensions, `Tensor.set_shape()` can be used to augment the
    inferred shape.

    Returns:
      A `TensorShape` representing the shape of this tensor.
    """
    if self._shape_val is None:
      if _USE_C_SHAPES:
        self._shape_val = self._c_api_shape()
      else:
        assert _USE_C_API
        # Call set_shape_and_handle_data_for_outputs in topological order on all
        # ops that are needed to compute self.op's shape. We do this instead of
        # having set_shape_and_handle_data_for_outputs recursively call
        # Operation.shape on self.op.inputs to avoid overflowing the call stack.
        need_shapes = self._get_input_ops_without_shapes(self.op)
        need_shapes.sort(key=lambda op: op._id)
        for op in need_shapes:
          set_shape_and_handle_data_for_outputs(op)
    return self._shape_val

  def _get_input_ops_without_shapes(self, target_op):
    """Returns ops needing shape inference to compute target_op's shape."""
    # Iterative DFS over the input graph; avoids recursion so deep graphs
    # don't blow the Python call stack.
    result = []
    stack = [self._op]
    visited = set()
    while stack:
      op = stack.pop()
      if op in visited:
        continue
      result.append(op)
      stack.extend(t.op for t in op.inputs if t._shape_val is None)
      visited.add(op)
    return result

  def _c_api_shape(self):
    """Returns the TensorShape of this tensor according to the C API."""
    c_graph = self._op._graph._c_graph  # pylint: disable=protected-access
    shape_vector, unknown_shape = c_api.TF_GraphGetTensorShapeHelper(
        c_graph, self._as_tf_output())
    if unknown_shape:
      return tensor_shape.unknown_shape()
    else:
      # The C API encodes unknown dimensions as -1; Python uses None.
      shape_vector = [None if d == -1 else d for d in shape_vector]
      return tensor_shape.TensorShape(shape_vector)

  @property
  def _shape(self):
    logging.warning("Tensor._shape is private, use Tensor.shape "
                    "instead. Tensor._shape will eventually be removed.")
    return self.shape

  @_shape.setter
  def _shape(self, value):
    raise ValueError(
        "Tensor._shape cannot be assigned, use Tensor.set_shape instead.")

  def __iter__(self):
    # Only eager tensors support iteration (over the first dimension).
    if not context.executing_eagerly():
      raise TypeError(
          "Tensor objects are not iterable when eager execution is not "
          "enabled. To iterate over this tensor use tf.map_fn.")
    shape = self._shape_tuple()
    if shape is None:
      raise TypeError("Cannot iterate over a tensor with unknown shape.")
    if not shape:
      raise TypeError("Cannot iterate over a scalar tensor.")
    if shape[0] is None:
      raise TypeError(
          "Cannot iterate over a tensor with unknown first dimension.")
    for i in xrange(shape[0]):
      yield self[i]

  def _shape_as_list(self):
    """Shape as a Python list of ints/None, or None if rank is unknown."""
    if self.shape.ndims is not None:
      return [dim.value for dim in self.shape.dims]
    else:
      return None

  def _shape_tuple(self):
    """Shape as a tuple of ints/None, or None if rank is unknown."""
    shape = self._shape_as_list()
    if shape is None:
      return None
    return tuple(shape)

  def _rank(self):
    """Integer rank of this Tensor, if known, else None.

    Returns:
      Integer rank or None
    """
    return self.shape.ndims

  def get_shape(self):
    """Alias of Tensor.shape."""
    return self.shape

  def set_shape(self, shape):
    """Updates the shape of this tensor.

    This method can be called multiple times, and will merge the given
    `shape` with the current shape of this tensor. It can be used to
    provide additional information about the shape of this tensor that
    cannot be inferred from the graph alone. For example, this can be used
    to provide additional information about the shapes of images:

    ```python
    _, image_data = tf.TFRecordReader(...).read(...)
    image = tf.image.decode_png(image_data, channels=3)

    # The height and width dimensions of `image` are data dependent, and
    # cannot be computed without executing the op.
    print(image.shape)
    ==> TensorShape([Dimension(None), Dimension(None), Dimension(3)])

    # We know that each image in this dataset is 28 x 28 pixels.
    image.set_shape([28, 28, 3])
    print(image.shape)
    ==> TensorShape([Dimension(28), Dimension(28), Dimension(3)])
    ```

    Args:
      shape: A `TensorShape` representing the shape of this tensor, a
        `TensorShapeProto`, a list, a tuple, or None.

    Raises:
      ValueError: If `shape` is not compatible with the current shape of
        this tensor.
    """
    if _USE_C_SHAPES:  # pylint: disable=protected-access
      # Reset cached shape.
      self._shape_val = None
    else:
      self._shape_val = self.shape.merge_with(shape)
    if not self._op._graph._c_graph:
      return

    # Update C shape even if _USE_C_SHAPES = False, since we still want
    # set_shape to be reflected in the C API graph for when we run it.
    if not isinstance(shape, tensor_shape.TensorShape):
      shape = tensor_shape.TensorShape(shape)
    dim_list = []
    if shape.dims is None:
      unknown_shape = True
    else:
      unknown_shape = False
      for dim in shape.dims:
        if dim.value is None:
          dim_list.append(-1)  # -1 encodes "unknown" for the C API.
        else:
          dim_list.append(dim.value)
    try:
      c_api.TF_GraphSetTensorShape_wrapper(
          self._op._graph._c_graph,  # pylint: disable=protected-access
          self._as_tf_output(),
          dim_list,
          unknown_shape)
    except errors.InvalidArgumentError as e:
      # Convert to ValueError for backwards compatibility.
      raise ValueError(str(e))

  @property
  def value_index(self):
    """The index of this tensor in the outputs of its `Operation`."""
    return self._value_index

  def consumers(self):
    """Returns a list of `Operation`s that consume this tensor.

    Returns:
      A list of `Operation`s.
    """
    if self._op._c_op:  # pylint: disable=protected-access
      consumer_names = c_api.TF_OperationOutputConsumers_wrapper(
          self._as_tf_output())
      # pylint: disable=protected-access
      return [
          self.graph._get_operation_by_name_unsafe(name)
          for name in consumer_names
      ]
      # pylint: enable=protected-access
    else:
      return self._consumers

  def _add_consumer(self, consumer):
    """Add a consumer to this tensor.

    Args:
      consumer: an Operation.

    Raises:
      TypeError: if the consumer is not an Operation.
    """
    # pylint: disable=protected-access
    assert not self._op._c_op, "Tensor._add_consumer doesn't work with C API"
    # pylint: enable=protected-access
    if not isinstance(consumer, Operation):
      raise TypeError("Consumer must be an Operation: %s" % consumer)
    self._consumers.append(consumer)

  def _as_node_def_input(self):
    """Return a value to use for the NodeDef "input" attribute.

    The returned string can be used in a NodeDef "input" attribute
    to indicate that the NodeDef uses this Tensor as input.

    Raises:
      ValueError: if this Tensor's Operation does not have a name.

    Returns:
      a string.
    """
    if not self._op.name:
      raise ValueError("Operation was not named: %s" % self._op)
    if self._value_index == 0:
      return self._op.name
    else:
      return "%s:%d" % (self._op.name, self._value_index)

  def _as_tf_output(self):
    """Returns this tensor as a TF_Output struct for the C API."""
    # pylint: disable=protected-access
    assert self.op._c_op
    return c_api_util.tf_output(self.op._c_op, self.value_index)
    # pylint: enable=protected-access

  def __str__(self):
    return "Tensor(\"%s\"%s%s%s)" % (
        self.name,
        (", shape=%s" % self.get_shape())
        if self.get_shape().ndims is not None else "",
        (", dtype=%s" % self._dtype.name) if self._dtype else "",
        (", device=%s" % self.device) if self.device else "")

  def __repr__(self):
    return "<tf.Tensor '%s' shape=%s dtype=%s>" % (self.name, self.get_shape(),
                                                   self._dtype.name)

  def __hash__(self):
    # Necessary to support Python's collection membership operators
    return id(self)

  def __eq__(self, other):
    # Necessary to support Python's collection membership operators
    return id(self) == id(other)

  def __copy__(self):
    # Make sure _shape_val is computed before we copy.
    # TODO(b/77597810): get rid of Tensor copies.
    if self._shape_val is None:
      set_shape_and_handle_data_for_outputs(self.op)
    cls = self.__class__
    result = cls.__new__(cls)
    result.__dict__.update(self.__dict__)
    return result

  # NOTE(mrry): This enables the Tensor's overloaded "right" binary
  # operators to run when the left operand is an ndarray, because it
  # accords the Tensor class higher priority than an ndarray, or a
  # numpy matrix.
  # TODO(mrry): Convert this to using numpy's __numpy_ufunc__
  # mechanism, which allows more control over how Tensors interact
  # with ndarrays.
  __array_priority__ = 100

  @staticmethod
  def _override_operator(operator, func):
    _override_helper(Tensor, operator, func)

  def __bool__(self):
    """Dummy method to prevent a tensor from being used as a Python `bool`.

    This overload raises a `TypeError` when the user inadvertently
    treats a `Tensor` as a boolean (e.g. in an `if` statement). For
    example:

    ```python
    if tf.constant(True):  # Will raise.
      # ...

    if tf.constant(5) < tf.constant(7):  # Will raise.
      # ...
    ```

    This disallows ambiguities between testing the Python value vs testing the
    dynamic condition of the `Tensor`.

    Raises:
      `TypeError`.
    """
    raise TypeError("Using a `tf.Tensor` as a Python `bool` is not allowed. "
                    "Use `if t is not None:` instead of `if t:` to test if a "
                    "tensor is defined, and use TensorFlow ops such as "
                    "tf.cond to execute subgraphs conditioned on the value of "
                    "a tensor.")

  def __nonzero__(self):
    """Dummy method to prevent a tensor from being used as a Python `bool`.

    This is the Python 2.x counterpart to `__bool__()` above.

    Raises:
      `TypeError`.
    """
    raise TypeError("Using a `tf.Tensor` as a Python `bool` is not allowed. "
                    "Use `if t is not None:` instead of `if t:` to test if a "
                    "tensor is defined, and use TensorFlow ops such as "
                    "tf.cond to execute subgraphs conditioned on the value of "
                    "a tensor.")

  def eval(self, feed_dict=None, session=None):
    """Evaluates this tensor in a `Session`.

    Calling this method will execute all preceding operations that
    produce the inputs needed for the operation that produces this
    tensor.

    *N.B.* Before invoking `Tensor.eval()`, its graph must have been
    launched in a session, and either a default session must be
    available, or `session` must be specified explicitly.

    Args:
      feed_dict: A dictionary that maps `Tensor` objects to feed values.
        See @{tf.Session.run} for a description of the valid feed values.
      session: (Optional.) The `Session` to be used to evaluate this tensor.
        If none, the default session will be used.
    Returns:
      A numpy array corresponding to the value of this tensor.

    """
    return _eval_using_default_session(self, feed_dict, self.graph, session)


# TODO(agarwal): consider getting rid of this.
class _EagerTensorBase(Tensor):
  """Base class for EagerTensor."""

  @property
  def dtype(self):
    # Note: using the intern table directly here as this is
    # performance-sensitive in some models.
    return dtypes._INTERN_TABLE[self._datatype_enum()]  # pylint: disable=protected-access

  def numpy(self):
    """Returns a numpy array or a scalar with the same contents as the Tensor.

    TODO(ashankar,agarwal): Perhaps this should NOT reference the underlying
    buffer but instead always explicitly copy? Note that currently it may or may
    not copy based on whether the numpy data is properly aligned or not.

    Returns:
      A numpy array or a scalar. Numpy array may share memory with the
      Tensor object. Any changes to one may be reflected in the other. A scalar
      value is returned when self has rank 0.

    Raises:
      ValueError: if the type of this Tensor is not representable in numpy.
    """
    if self.dtype == dtypes.resource:
      raise ValueError("Resource handles are not convertible to numpy.")
    return self.cpu()._numpy()  # pylint: disable=protected-access

  # __int__ and __float__ may copy the tensor to CPU and
  # only work for scalars; values are cast as per numpy.
  def __int__(self):
    return int(self.numpy())

  def __float__(self):
    return float(self.numpy())

  def __array__(self, dtype=None):
    return np.array(self.numpy(), dtype=dtype)

  def __format__(self, format_spec):
    return self.numpy().__format__(format_spec)

  def _numpy(self):
    # Implemented by the C-defined EagerTensor subclass.
    raise NotImplementedError()

  def __copy__(self):
    # Eager Tensors are immutable so it's safe to return themselves as a copy.
    return self

  def __deepcopy__(self, memo):
    # Eager Tensors are immutable so it's safe to return themselves as a copy.
    del memo
    return self

  def _datatype_enum(self):
    # Implemented by the C-defined EagerTensor subclass.
    raise NotImplementedError()

  def _shape_tuple(self):
    """The shape of this Tensor, as a tuple.

    This is more performant than tuple(shape().as_list()) as it avoids
    two list and one object creation. Marked private for now as from an API
    perspective, it would be better to have a single performant way of
    getting a shape rather than exposing shape() and shape_tuple()
    (and heaven forbid, shape_list() etc. as well!). Punting on that for now,
    but ideally one would work things out and remove the need for this method.

    Returns:
      tuple with the shape.
    """
    raise NotImplementedError()

  def _rank(self):
    """Integer rank of this Tensor.

    Unlike regular Tensors, the rank is always known for EagerTensors.

    This is more performant than len(self._shape_tuple())

    Returns:
      Integer rank
    """
    raise NotImplementedError()

  def _copy_to_device(self, context, device):  # pylint: disable=redefined-outer-name
    # Implemented by the C-defined EagerTensor subclass.
    raise NotImplementedError()

  def __str__(self):
    return "tf.Tensor(%s, shape=%s, dtype=%s)" % (numpy_text(self), self.shape,
                                                  self.dtype.name)

  def __repr__(self):
    return "<tf.Tensor: id=%s, shape=%s, dtype=%s, numpy=%s>" % (
        self._id, self.shape, self.dtype.name, numpy_text(self, is_repr=True))

  @staticmethod
  def _override_operator(name, func):
    setattr(_EagerTensorBase, name, func)

  def _copy(self, ctx=None, device_name=None):
    """Copies tensor to dest device."""
    # pylint: disable=protected-access
    # Creates a new tensor on the dest device.
    if ctx is None:
      ctx = context.context()
    if device_name is None:
      device_name = ctx.device_name
    # pylint: disable=protected-access
    try:
      new_tensor = self._copy_to_device(context=ctx._handle, device=device_name)
    except core._NotOkStatusException as e:
      six.raise_from(core._status_to_exception(e.code, e.message), None)

    # Record the copy on tape and define backprop copy as well.
    if context.executing_eagerly():
      self_device = self.device

      def grad_fun(dresult):
        # Gradient of a copy is a copy back to the source device.
        return [dresult._copy(device_name=self_device)]

      tape.record_operation("_copy", [new_tensor], [self], grad_fun)
    return new_tensor
    # pylint: enable=protected-access

  @property
  def shape(self):
    if self._tensor_shape is None:  # pylint: disable=access-member-before-definition
      # `_tensor_shape` is declared and defined in the definition of
      # `EagerTensor`, in C.
      self._tensor_shape = tensor_shape.TensorShape(self._shape_tuple())
    return self._tensor_shape

  def get_shape(self):
    """Alias of Tensor.shape."""
    return self.shape

  def _shape_as_list(self):
    """The shape of the tensor as a list."""
    return list(self._shape_tuple())

  @property
  def ndim(self):
    """Returns the number of Tensor dimensions."""
    return self.shape.ndims

  def cpu(self):
    """A copy of this Tensor with contents backed by host memory."""
    return self._copy(context.context(), "CPU:0")

  def gpu(self, gpu_index=0):
    """A copy of this Tensor with contents backed by memory on the GPU.

    Arguments:
      gpu_index: Identifies which GPU to place the contents on the returned
        Tensor in.

    Returns:
      A GPU-memory backed Tensor object initialized with the same contents
      as this Tensor.
    """
    return self._copy(context.context(), "GPU:" + str(gpu_index))

  def __bool__(self):
    # Unlike graph Tensors, a scalar boolean EagerTensor has a concrete value
    # and may be used as a Python bool.
    if self._shape_tuple() != ():  # pylint: disable=g-explicit-bool-comparison
      raise ValueError(
          "Non-scalar tensor %s cannot be converted to boolean." % repr(self))
    if self.dtype != dtypes.bool:
      raise ValueError(
          "Non-boolean tensor %s cannot be converted to boolean." % repr(self))
    return bool(self.cpu().numpy())

  def __nonzero__(self):
    return self.__bool__()

  def set_shape(self, shape):
    if not self.shape.is_compatible_with(shape):
      raise ValueError(
          "Tensor's shape %s is not compatible with supplied shape %s" %
          (self.shape, shape))

  # Methods not supported / implemented for Eager Tensors.
  @property
  def op(self):
    raise AttributeError(
        "Tensor.op is meaningless when eager execution is enabled.")

  @property
  def graph(self):
    raise AttributeError(
        "Tensor.graph is meaningless when eager execution is enabled.")

  @property
  def name(self):
    raise AttributeError(
        "Tensor.name is meaningless when eager execution is enabled.")

  @property
  def value_index(self):
    raise AttributeError(
        "Tensor.value_index is meaningless when eager execution is enabled.")

  def consumers(self):
    raise NotImplementedError(
        "Tensor.consumers is meaningless when eager execution is enabled.")

  def _add_consumer(self, consumer):
    raise NotImplementedError(
        "_add_consumer not supported when eager execution is enabled.")

  def _as_node_def_input(self):
    raise NotImplementedError(
        "_as_node_def_input not supported when eager execution is enabled.")

  def _as_tf_output(self):
    raise NotImplementedError(
        "_as_tf_output not supported when eager execution is enabled.")

  def eval(self, feed_dict=None, session=None):
    raise NotImplementedError(
        "eval is not supported when eager execution is enabled, "
        "is .numpy() what you're looking for?")


# This call creates an EagerTensor class, as a subclass of _EagerTensorBase, and
# registers it with the current module.
EagerTensor = c_api.TFE_Py_InitEagerTensor(_EagerTensorBase)


def _TensorTensorConversionFunction(t, dtype=None, name=None, as_ref=False):
  """Identity conversion function registered for `Tensor` inputs."""
  _ = name, as_ref
  if dtype and not dtype.is_compatible_with(t.dtype):
    raise ValueError(
        "Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
        (dtype.name, t.dtype.name, str(t)))
  return t


# Maps priority (int) -> list of (base_type, conversion_func) pairs; see
# register_tensor_conversion_function.
_tensor_conversion_func_registry = {
    0: [(Tensor, _TensorTensorConversionFunction)]
}
# Cache mapping a concrete value type -> its applicable conversion functions,
# filled lazily by internal_convert_to_tensor.
_tensor_conversion_func_cache = {}
# Guards writes to the cache/registry.
_tensor_conversion_func_lock = threading.Lock()
register_dense_tensor_like_type(Tensor)


@tf_export("convert_to_tensor")
def convert_to_tensor(value, dtype=None, name=None, preferred_dtype=None):
  """Converts the given `value` to a `Tensor`.

  This function converts Python objects of various types to `Tensor`
  objects. It accepts `Tensor` objects, numpy arrays, Python lists,
  and Python scalars. For example:

  ```python
  import numpy as np

  def my_func(arg):
    arg = tf.convert_to_tensor(arg, dtype=tf.float32)
    return tf.matmul(arg, arg) + arg

  # The following calls are equivalent.
  value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))
  value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])
  value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))
  ```

  This function can be useful when composing a new operation in Python
  (such as `my_func` in the example above). All standard Python op
  constructors apply this function to each of their Tensor-valued
  inputs, which allows those ops to accept numpy arrays, Python lists,
  and scalars in addition to `Tensor` objects.

  Note: This function diverges from default Numpy behavior for `float` and
    `string` types when `None` is present in a Python list or scalar. Rather
    than silently converting `None` values, an error will be thrown.

  Args:
    value: An object whose type has a registered `Tensor` conversion function.
    dtype: Optional element type for the returned tensor. If missing, the
      type is inferred from the type of `value`.
    name: Optional name to use if a new `Tensor` is created.
    preferred_dtype: Optional element type for the returned tensor,
      used when dtype is None. In some cases, a caller may not have a
      dtype in mind when converting to a tensor, so preferred_dtype
      can be used as a soft preference.  If the conversion to
      `preferred_dtype` is not possible, this argument has no effect.

  Returns:
    A `Tensor` based on `value`.

  Raises:
    TypeError: If no conversion function is registered for `value`.
    RuntimeError: If a registered conversion function returns an invalid value.
  """
  return internal_convert_to_tensor(
      value=value,
      dtype=dtype,
      name=name,
      preferred_dtype=preferred_dtype,
      as_ref=False)


def _error_prefix(name):
  """Prefix for conversion error messages: '<name>: ' or empty."""
  return "" if name is None else "%s: " % name


def internal_convert_to_tensor(value,
                               dtype=None,
                               name=None,
                               as_ref=False,
                               preferred_dtype=None,
                               ctx=None):
  """Converts the given `value` to an `Tensor`.

  This function converts Python objects of various types to `Tensor`
  objects. It accepts `Tensor` objects, numpy arrays, Python lists,
  and Python scalars. For example:

  This function can be useful when composing a new operation in Python
  All standard Python op constructors apply this function to each of their
  Tensor-valued inputs, which allows those ops to accept numpy arrays, Python
  lists, and scalars in addition to `Tensor` objects.

  Args:
    value: An object whose type has a registered `Tensor` conversion function.
    dtype: Optional element type for the returned tensor. If missing, the
      type is inferred from the type of `value`.
    name: Optional name to use if a new `Tensor` is created.
    as_ref: True if we want the mutable view of Variables, if applicable.
    preferred_dtype: Optional element type for the returned tensor,
      used when dtype is None. In some cases, a caller may not have a
      dtype in mind when converting to a tensor, so preferred_dtype
      can be used as a soft preference.  If the conversion to
      `preferred_dtype` is not possible, this argument has no effect.
    ctx: Optional: The value of context.context().

  Returns:
    A `Tensor` based on `value`.

  Raises:
    TypeError: If no conversion function is registered for `value`.
    RuntimeError: If a registered conversion function returns an invalid value.

  """
  if ctx is None:
    ctx = context.context()
  if ctx.executing_eagerly():
    # Fast path for EagerTensors that don't need any conversion.
    if isinstance(value, EagerTensor):
      # Note that we don't check that value's dtype matches the dtype
      # argument.  We expect that the C runtime will do that checking
      # when we execute the kernel.
      return value

  if dtype is not None:
    dtype = dtypes.as_dtype(dtype)
  unwrapped_type = type(value)
  # Per-type cache of applicable conversion functions, built on first use
  # under the lock; later lookups are lock-free dict reads.
  conversion_func_list = _tensor_conversion_func_cache.get(unwrapped_type, None)
  if conversion_func_list is None:
    with _tensor_conversion_func_lock:
      conversion_func_list = []
      for _, funcs_at_priority in sorted(
          _tensor_conversion_func_registry.items()):
        for base_type, conversion_func in funcs_at_priority:
          if isinstance(value, base_type):
            conversion_func_list.append((base_type, conversion_func))
      _tensor_conversion_func_cache[unwrapped_type] = conversion_func_list

  for base_type, conversion_func in conversion_func_list:
    # If dtype is None but preferred_dtype is not None, we try to
    # cast to preferred_dtype first.
    ret = None
    if dtype is None and preferred_dtype is not None:
      try:
        ret = conversion_func(
            value, dtype=preferred_dtype, name=name, as_ref=as_ref)
      except (TypeError, ValueError, errors.UnimplementedError,
              errors.InvalidArgumentError):
        # Could not coerce the conversion to use the preferred dtype.
        ret = None

      if ret is not None and ret is not NotImplemented:
        if (ret.dtype.base_dtype !=
            dtypes.as_dtype(preferred_dtype).base_dtype):
          raise TypeError("convert_to_tensor did not convert to "
                          "the preferred dtype: %s vs %s " %
                          (ret.dtype.base_dtype,
                           dtypes.as_dtype(preferred_dtype).base_dtype))

    if ret is None:
      ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)

    if ret is NotImplemented:
      continue

    if not isinstance(ret, Tensor):
      raise RuntimeError(
          "%sConversion function %r for type %s returned non-Tensor: %r" %
          (_error_prefix(name), conversion_func, base_type, ret))
    if dtype and not dtype.is_compatible_with(ret.dtype):
      raise RuntimeError(
          "%sConversion function %r for type %s returned incompatible "
          "dtype: requested = %s, actual = %s" %
          (_error_prefix(name), conversion_func, base_type, dtype.name,
           ret.dtype.name))
    return ret
  raise TypeError("%sCannot convert %r with type %s to Tensor: "
                  "no conversion function registered." %
                  (_error_prefix(name), value, unwrapped_type))


def internal_convert_n_to_tensor(values,
                                 dtype=None,
                                 name=None,
                                 as_ref=False,
                                 preferred_dtype=None,
                                 ctx=None):
  """Converts `values` to a list of `Tensor` objects.

  Args:
    values: A list of objects that can be consumed by `tf.convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` objects.
    name: (Optional.) A name prefix to used when a new `Tensor` is
      created, in which case element `i` will be given the name `name
      + '_' + i`.
    as_ref: True if the caller wants the results as ref tensors.
    preferred_dtype: Optional element type for the returned tensors,
      used when dtype is None. In some cases, a caller may not have a
      dtype in mind when converting to a tensor, so preferred_dtype
      can be used as a soft preference.  If the conversion to
      `preferred_dtype` is not possible, this argument has no effect.
    ctx: The value of context.context().

  Returns:
    A list of `Tensor` and/or `IndexedSlices` objects.

  Raises:
    TypeError: If no conversion function is registered for an element in
      `values`.
    RuntimeError: If a registered conversion function returns an invalid
      value.
  """
  # NOTE(review): collections.Sequence is deprecated in favor of
  # collections.abc.Sequence on Python 3.3+ (removed in 3.10) — confirm the
  # supported Python versions before changing (this file still targets Py2
  # via six).
  if not isinstance(values, collections.Sequence):
    raise TypeError("values must be a list.")
  ret = []
  if ctx is None:
    ctx = context.context()
  for i, value in enumerate(values):
    n = None if name is None else "%s_%d" % (name, i)
    ret.append(
        internal_convert_to_tensor(
            value,
            dtype=dtype,
            name=n,
            as_ref=as_ref,
            preferred_dtype=preferred_dtype,
            ctx=ctx))
  return ret


def convert_n_to_tensor(values, dtype=None, name=None, preferred_dtype=None):
  """Converts `values` to a list of `Tensor` objects.

  Args:
    values: A list of objects that can be consumed by `tf.convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` objects.
    name: (Optional.) A name prefix to used when a new `Tensor` is
      created, in which case element `i` will be given the name `name
      + '_' + i`.
preferred_dtype: Optional element type for the returned tensors, used when dtype is None. In some cases, a caller may not have a dtype in mind when converting to a tensor, so preferred_dtype can be used as a soft preference. If the conversion to `preferred_dtype` is not possible, this argument has no effect. Returns: A list of `Tensor` and/or `IndexedSlices` objects. Raises: TypeError: If no conversion function is registered for an element in `values`. RuntimeError: If a registered conversion function returns an invalid value. """ return internal_convert_n_to_tensor( values=values, dtype=dtype, name=name, preferred_dtype=preferred_dtype, as_ref=False) @tf_export("convert_to_tensor_or_indexed_slices") def convert_to_tensor_or_indexed_slices(value, dtype=None, name=None): """Converts the given object to a `Tensor` or an `IndexedSlices`. If `value` is an `IndexedSlices` or `SparseTensor` it is returned unmodified. Otherwise, it is converted to a `Tensor` using `convert_to_tensor()`. Args: value: An `IndexedSlices`, `SparseTensor`, or an object that can be consumed by `convert_to_tensor()`. dtype: (Optional.) The required `DType` of the returned `Tensor` or `IndexedSlices`. name: (Optional.) A name to use if a new `Tensor` is created. Returns: An `Tensor`, `IndexedSlices`, or `SparseTensor` based on `value`. Raises: ValueError: If `dtype` does not match the element type of `value`. """ return internal_convert_to_tensor_or_indexed_slices( value=value, dtype=dtype, name=name, as_ref=False) def internal_convert_to_tensor_or_indexed_slices(value, dtype=None, name=None, as_ref=False): """Converts the given object to an `Tensor` or an `IndexedSlices`. If `value` is an `IndexedSlices` or `SparseTensor` it is returned unmodified. Otherwise, it is converted to a `Tensor` using `convert_to_tensor()`. Args: value: An `IndexedSlices`, `SparseTensor`, or an object that can be consumed by `convert_to_tensor()`. dtype: (Optional.) 
def internal_convert_n_to_tensor_or_indexed_slices(values, dtype=None,
                                                   name=None, as_ref=False):
  """Converts `values` to a list of `Tensor` or `IndexedSlices` objects.

  Any `IndexedSlices` or `SparseTensor` objects in `values` are returned
  unmodified.

  Args:
    values: A list of `None`, `IndexedSlices`, `SparseTensor`, or objects that
      can be consumed by `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` or
      `IndexedSlices`.
    name: (Optional.) A name prefix to used when a new `Tensor` is created, in
      which case element `i` will be given the name `name + '_' + i`.
    as_ref: True if the caller wants the results as ref tensors.

  Returns:
    A list of `Tensor`, `IndexedSlices`, and/or `SparseTensor` objects.

  Raises:
    TypeError: If no conversion function is registered for an element in
      `values`, or if `values` is not a sequence.
    RuntimeError: If a registered conversion function returns an invalid
      value.
  """
  # `collections.Sequence` was removed from the top-level `collections`
  # namespace in Python 3.10; prefer `collections.abc.Sequence` when it
  # exists while still supporting Python 2, where `collections.abc` is
  # absent.
  sequence_type = getattr(collections, "abc", collections).Sequence
  if not isinstance(values, sequence_type):
    raise TypeError("values must be a list.")
  ret = []
  for i, value in enumerate(values):
    if value is None:
      # `None` entries (e.g. missing gradients) pass through untouched.
      ret.append(value)
    else:
      n = None if name is None else "%s_%d" % (name, i)
      ret.append(
          internal_convert_to_tensor_or_indexed_slices(
              value, dtype=dtype, name=n, as_ref=as_ref))
  return ret


def convert_n_to_tensor_or_indexed_slices(values, dtype=None, name=None):
  """Converts `values` to a list of `Tensor` or `IndexedSlices` objects.

  Any `IndexedSlices` or `SparseTensor` objects in `values` are returned
  unmodified.

  Args:
    values: A list of `None`, `IndexedSlices`, `SparseTensor`, or objects that
      can be consumed by `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` or
      `IndexedSlices`.
    name: (Optional.) A name prefix to used when a new `Tensor` is created, in
      which case element `i` will be given the name `name + '_' + i`.

  Returns:
    A list of `Tensor`, `IndexedSlices`, and/or `SparseTensor` objects.

  Raises:
    TypeError: If no conversion function is registered for an element in
      `values`, or if `values` is not a sequence.
    RuntimeError: If a registered conversion function returns an invalid
      value.
  """
  return internal_convert_n_to_tensor_or_indexed_slices(
      values=values, dtype=dtype, name=name, as_ref=False)
  In this case, the conversion process will continue to try subsequent
  conversion functions.

  If `as_ref` is true, the function must return a `Tensor` reference, such as
  a `Variable`.

  NOTE: The conversion functions will execute in order of priority,
  followed by order of registration. To ensure that a conversion function
  `F` runs before another conversion function `G`, ensure that `F` is
  registered with a smaller priority than `G`.

  Args:
    base_type: The base type or tuple of base types for all objects that
      `conversion_func` accepts.
    conversion_func: A function that converts instances of `base_type` to
      `Tensor`.
    priority: Optional integer that indicates the priority for applying this
      conversion function. Conversion functions with smaller priority values
      run earlier than conversion functions with larger priority values.
      Defaults to 100.

  Raises:
    TypeError: If the arguments do not have the appropriate type.

  """
  global _tensor_conversion_func_cache
  with _tensor_conversion_func_lock:
    if not (isinstance(base_type, type) or
            (isinstance(base_type, tuple) and
             all(isinstance(x, type) for x in base_type))):
      raise TypeError("base_type must be a type or a tuple of types.")
    if not callable(conversion_func):
      raise TypeError("conversion_func must be callable.")
    try:
      funcs_at_priority = _tensor_conversion_func_registry[priority]
    except KeyError:
      funcs_at_priority = []
      _tensor_conversion_func_registry[priority] = funcs_at_priority
    funcs_at_priority.append((base_type, conversion_func))
    # Any previously memoized conversion-function lookups may now be stale,
    # so drop the whole cache under the same lock.
    _tensor_conversion_func_cache = {}


@tf_export("IndexedSlices")
class IndexedSlices(_TensorLike):
  """A sparse representation of a set of tensor slices at given indices.

  This class is a simple wrapper for a pair of `Tensor` objects:

  * `values`: A `Tensor` of any dtype with shape `[D0, D1, ..., Dn]`.
  * `indices`: A 1-D integer `Tensor` with shape `[D0]`.

  An `IndexedSlices` is typically used to represent a subset of a larger
  tensor `dense` of shape `[LARGE0, D1, .. , DN]` where `LARGE0 >> D0`.
  The values in `indices` are the indices in the first dimension of
  the slices that have been extracted from the larger tensor.

  The dense tensor `dense` represented by an `IndexedSlices` `slices` has

  ```python
  dense[slices.indices[i], :, :, :, ...] = slices.values[i, :, :, :, ...]
  ```

  The `IndexedSlices` class is used principally in the definition of
  gradients for operations that have sparse gradients
  (e.g. @{tf.gather}).

  Contrast this representation with
  @{tf.SparseTensor},
  which uses multi-dimensional indices and scalar values.
  """

  def __init__(self, values, indices, dense_shape=None):
    """Creates an `IndexedSlices`."""
    # NOTE(review): called for its side effect of checking that all three
    # tensors belong to one graph; the returned graph is unused — confirm.
    _get_graph_from_inputs([values, indices, dense_shape])
    self._values = values
    self._indices = indices
    self._dense_shape = dense_shape

  @property
  def values(self):
    """A `Tensor` containing the values of the slices."""
    return self._values

  @property
  def indices(self):
    """A 1-D `Tensor` containing the indices of the slices."""
    return self._indices

  @property
  def dense_shape(self):
    """A 1-D `Tensor` containing the shape of the corresponding dense tensor."""
    return self._dense_shape

  @property
  def name(self):
    """The name of this `IndexedSlices`."""
    return self.values.name

  @property
  def device(self):
    """The name of the device on which `values` will be produced, or `None`."""
    return self.values.device

  @property
  def op(self):
    """The `Operation` that produces `values` as an output."""
    return self.values.op

  @property
  def dtype(self):
    """The `DType` of elements in this tensor."""
    return self.values.dtype

  @property
  def graph(self):
    """The `Graph` that contains the values, indices, and shape tensors."""
    return self._values.graph

  def __str__(self):
    # `dense_shape` is optional, so it is only mentioned when present.
    return "IndexedSlices(indices=%s, values=%s%s)" % (
        self._indices, self._values,
        (", dense_shape=%s" % self._dense_shape)
        if self._dense_shape is not None else "")

  def __neg__(self):
    # Negation applies only to the stored values; the indices (and shape)
    # are unchanged.
    return IndexedSlices(-self.values, self.indices, self.dense_shape)


IndexedSlicesValue = collections.namedtuple(
    "IndexedSlicesValue",
["values", "indices", "dense_shape"]) def _device_string(dev_spec): if isinstance(dev_spec, pydev.DeviceSpec): return dev_spec.to_string() else: return dev_spec def _NodeDef(op_type, name, device=None, attrs=None): # pylint: disable=redefined-outer-name """Create a NodeDef proto. Args: op_type: Value for the "op" attribute of the NodeDef proto. name: Value for the "name" attribute of the NodeDef proto. device: string, device, or function from NodeDef to string. Value for the "device" attribute of the NodeDef proto. attrs: Optional dictionary where the key is the attribute name (a string) and the value is the respective "attr" attribute of the NodeDef proto (an AttrValue). Returns: A node_def_pb2.NodeDef protocol buffer. """ node_def = node_def_pb2.NodeDef() node_def.op = compat.as_bytes(op_type) node_def.name = compat.as_bytes(name) if attrs is not None: for k, v in six.iteritems(attrs): node_def.attr[k].CopyFrom(v) if device is not None: if callable(device): node_def.device = device(node_def) else: node_def.device = _device_string(device) return node_def # Copied from core/framework/node_def_util.cc # TODO(mrry,josh11b): Consolidate this validation in C++ code. _VALID_OP_NAME_REGEX = re.compile("^[A-Za-z0-9.][A-Za-z0-9_.\\-/]*$") _VALID_SCOPE_NAME_REGEX = re.compile("^[A-Za-z0-9_.\\-/]*$") def _create_c_op(graph, node_def, inputs, control_inputs): """Creates a TF_Operation. Args: graph: a `Graph`. node_def: `node_def_pb2.NodeDef` for the operation to create. inputs: A list of `Tensor`s (corresponding to scalar inputs) and lists of `Tensor`s (corresponding to sequence inputs, e.g. "int64 * N", "list(int64)"). The length of the list should be equal to the number of inputs specified by this operation's op def. control_inputs: A list of `Operation`s to set as control dependencies. Returns: A wrapped TF_Operation*. 
""" # pylint: disable=protected-access op_desc = c_api.TF_NewOperation(graph._c_graph, compat.as_str(node_def.op), compat.as_str(node_def.name)) # Add inputs for op_input in inputs: if isinstance(op_input, (list, tuple)): c_api.TF_AddInputList(op_desc, [t._as_tf_output() for t in op_input]) else: c_api.TF_AddInput(op_desc, op_input._as_tf_output()) # Add control inputs for control_input in control_inputs: c_api.TF_AddControlInput(op_desc, control_input._c_op) # pylint: enable=protected-access # Add attrs for name, attr_value in node_def.attr.items(): serialized = attr_value.SerializeToString() # TODO(skyewm): this creates and deletes a new TF_Status for every attr. # It might be worth creating a convenient way to re-use the same status. c_api.TF_SetAttrValueProto(op_desc, compat.as_str(name), serialized) try: c_op = c_api.TF_FinishOperation(op_desc) except errors.InvalidArgumentError as e: # Convert to ValueError for backwards compatibility. raise ValueError(str(e)) return c_op @tf_export("Operation") class Operation(object): """Represents a graph node that performs computation on tensors. An `Operation` is a node in a TensorFlow `Graph` that takes zero or more `Tensor` objects as input, and produces zero or more `Tensor` objects as output. Objects of type `Operation` are created by calling a Python op constructor (such as @{tf.matmul}) or @{tf.Graph.create_op}. For example `c = tf.matmul(a, b)` creates an `Operation` of type "MatMul" that takes tensors `a` and `b` as input, and produces `c` as output. After the graph has been launched in a session, an `Operation` can be executed by passing it to @{tf.Session.run}. `op.run()` is a shortcut for calling `tf.get_default_session().run(op)`. """ def __init__(self, node_def, g, inputs=None, output_types=None, control_inputs=None, input_types=None, original_op=None, op_def=None): r"""Creates an `Operation`. NOTE: This constructor validates the name of the `Operation` (passed as `node_def.name`). 
Valid `Operation` names match the following regular expression: [A-Za-z0-9.][A-Za-z0-9_.\\-/]* Args: node_def: `node_def_pb2.NodeDef`. `NodeDef` for the `Operation`. Used for attributes of `node_def_pb2.NodeDef`, typically `name`, `op`, and `device`. The `input` attribute is irrelevant here as it will be computed when generating the model. g: `Graph`. The parent graph. inputs: list of `Tensor` objects. The inputs to this `Operation`. output_types: list of `DType` objects. List of the types of the `Tensors` computed by this operation. The length of this list indicates the number of output endpoints of the `Operation`. control_inputs: list of operations or tensors from which to have a control dependency. input_types: List of `DType` objects representing the types of the tensors accepted by the `Operation`. By default uses `[x.dtype.base_dtype for x in inputs]`. Operations that expect reference-typed inputs must specify these explicitly. original_op: Optional. Used to associate the new `Operation` with an existing `Operation` (for example, a replica with the op that was replicated). op_def: Optional. The `op_def_pb2.OpDef` proto that describes the op type that this `Operation` represents. Raises: TypeError: if control inputs are not Operations or Tensors, or if `node_def` is not a `NodeDef`, or if `g` is not a `Graph`, or if `inputs` are not tensors, or if `inputs` and `input_types` are incompatible. ValueError: if the `node_def` name is not valid. """ # For internal use only: `node_def` can be set to a TF_Operation to create # an Operation for that op. This is useful for creating Operations for ops # indirectly created by C API methods, e.g. the ops created by # TF_ImportGraphDef. When `node_def` is a TF_Operation, all optional fields # should be None. 
if isinstance(node_def, node_def_pb2.NodeDef): if node_def.ByteSize() >= (1 << 31) or node_def.ByteSize() < 0: raise ValueError( "Cannot create a tensor proto whose content is larger than 2GB.") if not _VALID_OP_NAME_REGEX.match(node_def.name): raise ValueError("'%s' is not a valid node name" % node_def.name) c_op = None elif type(node_def).__name__ == "SwigPyObject": assert inputs is None assert output_types is None assert control_inputs is None assert input_types is None assert original_op is None assert op_def is None c_op = node_def else: raise TypeError("node_def needs to be a NodeDef: %s" % node_def) if not isinstance(g, Graph): raise TypeError("g needs to be a Graph: %s" % g) self._graph = g if inputs is None: inputs = [] elif not isinstance(inputs, list): raise TypeError("inputs needs to be a list of Tensors: %s" % inputs) for a in inputs: if not isinstance(a, Tensor): raise TypeError("input needs to be a Tensor: %s" % a) if input_types is None: input_types = [i.dtype.base_dtype for i in inputs] else: if not all( x.is_compatible_with(i.dtype) for i, x in zip(inputs, input_types)): raise TypeError("In op '%s', input types (%s) are not compatible " "with expected types (%s)" % (node_def.name, [i.dtype for i in inputs], input_types)) # Build the list of control inputs. control_input_ops = [] if control_inputs: for c in control_inputs: control_op = None if isinstance(c, Operation): control_op = c elif isinstance(c, (Tensor, IndexedSlices)): control_op = c.op else: raise TypeError("Control input must be an Operation, " "a Tensor, or IndexedSlices: %s" % c) control_input_ops.append(control_op) # Don't set private fields with C API enabled to catch users who need to # switch to public API. # TODO(skyewm): delete these fields once we remove _USE_C_API if not self._graph._c_graph: self._inputs_val = list(inputs) # Defensive copy. 
self._input_types_val = input_types self._control_inputs_val = control_input_ops self._node_def_val = copy.deepcopy(node_def) self._op_def_val = op_def else: # This will be set by self.inputs. self._inputs_val = None self._id_value = self._graph._next_id() # pylint: disable=protected-access self._original_op = original_op self._traceback = self._graph._extract_stack() # pylint: disable=protected-access self._control_flow_context = self.graph._get_control_flow_context() # pylint: disable=protected-access # Initialize self._c_op. if c_op: # TODO(skyewm): remove this assert when we remove USE_C_API assert self._graph._c_graph # pylint: disable=protected-access self._c_op = c_op elif self._graph._c_graph: # pylint: disable=protected-access if op_def is None: op_def = self._graph._get_op_def(node_def.op) # TODO(skyewm): op_def_library.apply_op() flattens the incoming inputs. # Refactor so we don't have to do this here. grouped_inputs = self._reconstruct_sequence_inputs( op_def, inputs, node_def.attr) self._c_op = _create_c_op(self._graph, node_def, grouped_inputs, control_input_ops) else: self._c_op = None # Mark that we consume the inputs. This is unnecessary and unsupported with # the C API enabled, since the C API tracks the tensor consumers instead. if not self._c_op: for input_tensor in self._inputs_val: input_tensor._add_consumer(self) # pylint: disable=protected-access # Initialize self._outputs. if self._c_op: num_outputs = c_api.TF_OperationNumOutputs(self._c_op) output_types = [ c_api.TF_OperationOutputType(c_api_util.tf_output(self._c_op, i)) for i in range(num_outputs)] assert output_types is not None elif output_types is None: output_types = [] self._output_types_val = output_types self._outputs = [ Tensor(self, i, output_type) for i, output_type in enumerate(output_types) ] if not c_op: self._control_flow_post_processing() def _control_flow_post_processing(self): """Add this op to its control flow context. This may add new ops and change this op's inputs. 
    self.inputs must be available before calling this method.
    """
    for input_tensor in self.inputs:
      control_flow_util.CheckInputFromValidContext(self, input_tensor.op)
    if self._control_flow_context is not None:
      # Adding to the context may rewrite this op's inputs, so refresh the
      # cached NodeDef afterwards.
      self._control_flow_context.AddOp(self)
    self._recompute_node_def()

  def _reconstruct_sequence_inputs(self, op_def, inputs, attrs):
    """Regroups a flat list of input tensors into scalar and sequence inputs.

    Args:
      op_def: The `op_def_pb2.OpDef` (for knowing the input types)
      inputs: a list of input `Tensor`s to the op.
      attrs: mapping from attr name to `attr_value_pb2.AttrValue` (these define
        how long each sequence is)

    Returns:
      A list of `Tensor`s (corresponding to scalar inputs) and lists of
      `Tensor`s (corresponding to sequence inputs).
    """
    grouped_inputs = []
    i = 0
    # Walk the op def's declared input arguments, consuming `input_len`
    # flattened tensors for each one.
    for input_arg in op_def.input_arg:
      if input_arg.number_attr:
        # "T * N"-style sequence: the length is stored in an integer attr.
        input_len = attrs[input_arg.number_attr].i
        is_sequence = True
      elif input_arg.type_list_attr:
        # "list(type)"-style sequence: one tensor per declared dtype.
        input_len = len(attrs[input_arg.type_list_attr].list.type)
        is_sequence = True
      else:
        # Plain scalar input.
        input_len = 1
        is_sequence = False

      if is_sequence:
        grouped_inputs.append(inputs[i:i + input_len])
      else:
        grouped_inputs.append(inputs[i])
      i += input_len

    # Every flattened input must be claimed by exactly one input argument.
    assert i == len(inputs)
    return grouped_inputs

  def colocation_groups(self):
    """Returns the list of colocation groups of the op."""
    default_colocation_group = [
        compat.as_bytes("loc:@%s" % self.name)
    ]
    try:
      class_attr = self.get_attr("_class")
    except ValueError:
      # This op has no explicit colocation group, so it is itself its
      # own root of a colocation group.
      return default_colocation_group

    attr_groups = [
        class_name for class_name in class_attr
        if class_name.startswith(b"loc:@")
    ]

    # If there are no colocation groups in the explicit _class field,
    # return the default colocation group.
    return attr_groups if attr_groups else default_colocation_group

  def values(self):
    """DEPRECATED: Use outputs."""
    return tuple(self.outputs)

  def _get_control_flow_context(self):
    """Returns the control flow context of this op.
Returns: A context object. """ return self._control_flow_context def _set_control_flow_context(self, ctx): """Sets the current control flow context of this op. Args: ctx: a context object. """ self._control_flow_context = ctx @property def name(self): """The full name of this operation.""" if self._c_op: return c_api.TF_OperationName(self._c_op) else: return self._node_def_val.name @property def _id(self): """The unique integer id of this operation.""" return self._id_value @property def device(self): """The name of the device to which this op has been assigned, if any. Returns: The string name of the device to which this op has been assigned, or an empty string if it has not been assigned to a device. """ if self._c_op: return c_api.TF_OperationDevice(self._c_op) else: return self._node_def_val.device @property def _output_types(self): """List this operation's output types. Returns: List of the types of the Tensors computed by this operation. Each element in the list is an integer whose value is one of the TF_DataType enums defined in c_api.h The length of this list indicates the number of output endpoints of the operation. """ if self._c_op: num_outputs = c_api.TF_OperationNumOutputs(self._c_op) output_types = [ c_api.TF_OperationOutputType(self._tf_output(i)) for i in xrange(num_outputs) ] # TODO(iga): Remove this assert after converting to C API by default. # Just being a bit paranoid here. assert self._output_types_val == output_types # In all the tests we have output_types that are passed into # Operation.__init__ are a list of ints (which is illegal according # to the docstring), but input_types are instances of DType. # This extra assert is to catch if we ever use DType for output_types. 
if output_types: assert isinstance(output_types[0], int) return output_types else: return self._output_types_val def _tf_output(self, output_idx): """Create and return a new TF_Output for output_idx'th output of this op.""" assert self._c_op tf_output = c_api.TF_Output() tf_output.oper = self._c_op tf_output.index = output_idx return tf_output def _tf_input(self, input_idx): """Create and return a new TF_Input for input_idx'th input of this op.""" assert self._c_op tf_input = c_api.TF_Input() tf_input.oper = self._c_op tf_input.index = input_idx return tf_input def _set_device(self, device): # pylint: disable=redefined-outer-name """Set the device of this operation. Args: device: string or device.. The device to set. """ if self._c_op: c_api.SetRequestedDevice( self._graph._c_graph, # pylint: disable=protected-access self._c_op, # pylint: disable=protected-access compat.as_str(_device_string(device))) else: self._node_def_val.device = _device_string(device) def _add_input(self, tensor, dtype=None): """Add a new input to this operation. Args: tensor: the Tensor to add as an input. dtype: tf.DType: type of the input; defaults to the tensor's dtype. Raises: TypeError: if tensor is not a Tensor, or if input tensor type is not convertible to dtype. ValueError: if the Tensor is from a different graph. """ assert not self._c_op, ( "Operation._add_input doesn't work with C API") if not isinstance(tensor, Tensor): raise TypeError("tensor must be a Tensor: %s" % tensor) _assert_same_graph(self, tensor) if dtype is None: dtype = tensor.dtype else: dtype = dtypes.as_dtype(dtype) if not dtype.is_compatible_with(tensor.dtype): raise TypeError( "Cannot convert a tensor of type %s to an input of type %s" % (tensor.dtype.name, dtype.name)) self._inputs_val.append(tensor) self._input_types_val.append(dtype) tensor._add_consumer(self) # pylint: disable=protected-access self._recompute_node_def() # TODO(skyewm): Remove `update_dtype` when we enable the C API. 
  def _update_input(self, index, tensor, update_dtype=True):
    """Update the input to this operation at the given index.

    NOTE: This is for TF internal use only. Please don't use it.

    Args:
      index: the index of the input to update.
      tensor: the Tensor to be used as the input at the given index.
      update_dtype: If `False`, the type for this input is not updated.

    Raises:
      TypeError: if tensor is not a Tensor,
        or if input tensor type is not convertible to dtype.
      ValueError: if the Tensor is from a different graph.
    """
    if not isinstance(tensor, Tensor):
      raise TypeError("tensor must be a Tensor: %s" % tensor)
    _assert_same_graph(self, tensor)

    # Make sure output shapes are already computed for this op in case we
    # create a cycle (we cannot compute shapes for cycles). Usually shapes are
    # computed lazily upon request.
    if not _USE_C_SHAPES:
      set_shape_and_handle_data_for_outputs(self)

    if self._c_op:
      # Reset cached inputs so the next `inputs` access re-reads the edge
      # list from the C graph.
      self._inputs_val = None
      c_api.UpdateEdge(
          self._graph._c_graph,  # pylint: disable=protected-access
          tensor._as_tf_output(),  # pylint: disable=protected-access
          self._tf_input(index))
    else:
      # Pure-Python graph: detach the old input, splice in the new one, and
      # keep the consumer bookkeeping and NodeDef in sync.
      self._inputs_val[index].consumers().remove(self)
      self._inputs_val[index] = tensor
      if update_dtype:
        self._input_types_val[index] = tensor.dtype
      tensor._add_consumer(self)  # pylint: disable=protected-access
      self._recompute_node_def()

  def _add_control_inputs(self, ops):
    """Add a list of new control inputs to this operation.

    Args:
      ops: the list of Operations to add as control input.

    Raises:
      TypeError: if ops is not a list of Operations.
      ValueError: if any op in ops is from a different graph.
    """
    if self._c_op:
      # NOTE(review): this path skips _assert_same_graph; presumably the C
      # layer rejects cross-graph control edges — confirm.
      for op in ops:
        if not isinstance(op, Operation):
          raise TypeError("op must be an Operation: %s" % op)
        c_api.AddControlInput(self._graph._c_graph, self._c_op, op._c_op)  # pylint: disable=protected-access
    else:
      if ops:
        for op in ops:
          if not isinstance(op, Operation):
            raise TypeError("op must be an Operation: %s" % op)
          _assert_same_graph(self, op)
          self._control_inputs_val.append(op)
        self._recompute_node_def()

  def _add_control_input(self, op):
    """Add a new control input to this operation.

    Args:
      op: the Operation to add as control input.

    Raises:
      TypeError: if op is not an Operation.
      ValueError: if op is from a different graph.
    """
    if self._c_op:
      if not isinstance(op, Operation):
        raise TypeError("op must be an Operation: %s" % op)
      c_api.AddControlInput(self._graph._c_graph, self._c_op, op._c_op)  # pylint: disable=protected-access
    else:
      # Reuse the batch path so validation and NodeDef refresh stay in one
      # place.
      self._add_control_inputs([op])

  def _remove_all_control_inputs(self):
    """Removes any control inputs to this operation."""
    if self._c_op:
      c_api.RemoveAllControlInputs(self._graph._c_graph, self._c_op)  # pylint: disable=protected-access
    else:
      # Clears the underlying control-input list in place.
      del self.control_inputs[:]

  # Methods below are used when building the NodeDef and Graph proto.
def _recompute_node_def(self): # TODO(skyewm): remove this function when we switch to C API if self._c_op: return del self._node_def_val.input[:] # pylint: disable=protected-access self._node_def_val.input.extend( [t._as_node_def_input() for t in self._inputs_val]) # pylint: enable=protected-access if self._control_inputs_val: self._node_def_val.input.extend( ["^%s" % op.name for op in self._control_inputs_val]) def __str__(self): return str(self.node_def) def __repr__(self): return "<tf.Operation '%s' type=%s>" % (self.name, self.type) @property def outputs(self): """The list of `Tensor` objects representing the outputs of this op.""" return self._outputs # pylint: disable=protected-access class _InputList(object): """Immutable input list wrapper.""" def __init__(self, inputs): self._inputs = inputs def __iter__(self): return iter(self._inputs) def __len__(self): return len(self._inputs) def __bool__(self): return bool(self._inputs) # Python 3 wants __bool__, Python 2.7 wants __nonzero__ __nonzero__ = __bool__ def __getitem__(self, i): return self._inputs[i] # pylint: enable=protected-access @property def inputs(self): """The list of `Tensor` objects representing the data inputs of this op.""" if self._c_op: if self._inputs_val is None: tf_outputs = c_api.GetOperationInputs(self._c_op) # pylint: disable=protected-access retval = [ self.graph._get_tensor_by_tf_output(tf_output) for tf_output in tf_outputs ] # pylint: enable=protected-access self._inputs_val = Operation._InputList(retval) return self._inputs_val else: return Operation._InputList(self._inputs_val) @property def _inputs(self): logging.warning("Operation._inputs is private, use Operation.inputs " "instead. 
Operation._inputs will eventually be removed.") return self.inputs @_inputs.setter def _inputs(self, value): raise ValueError("Cannot assign _inputs") @property def _input_types(self): if self._c_op: num_inputs = c_api.TF_OperationNumInputs(self._c_op) input_types = [ dtypes.as_dtype(c_api.TF_OperationInputType(self._tf_input(i))) for i in xrange(num_inputs) ] return input_types else: return self._input_types_val @_input_types.setter def _input_types(self, value): raise ValueError("Cannot assign _input_types") @property def control_inputs(self): """The `Operation` objects on which this op has a control dependency. Before this op is executed, TensorFlow will ensure that the operations in `self.control_inputs` have finished executing. This mechanism can be used to run ops sequentially for performance reasons, or to ensure that the side effects of an op are observed in the correct order. Returns: A list of `Operation` objects. """ if self._c_op: control_c_ops = c_api.TF_OperationGetControlInputs_wrapper(self._c_op) # pylint: disable=protected-access return [ self.graph._get_operation_by_name_unsafe( c_api.TF_OperationName(c_op)) for c_op in control_c_ops ] # pylint: enable=protected-access else: return self._control_inputs_val @property def _control_outputs(self): """The `Operation` objects which have a control dependency on this op. Before any of the ops in self._control_outputs can execute tensorflow will ensure self has finished executing. Returns: A list of `Operation` objects. """ if self._c_op: control_c_ops = c_api.TF_OperationGetControlOutputs_wrapper(self._c_op) # pylint: disable=protected-access return [ self.graph._get_operation_by_name_unsafe( c_api.TF_OperationName(c_op)) for c_op in control_c_ops ] # pylint: enable=protected-access else: # TODO(apassos) this should be less inefficient. 
    return [o for o in self._graph.get_operations() if self in o.control_inputs]

  @property
  def _control_inputs(self):
    # Deprecated private alias for the public `control_inputs` property.
    logging.warning("Operation._control_inputs is private, use "
                    "Operation.control_inputs instead. "
                    "Operation._control_inputs will eventually be removed.")
    return self.control_inputs

  @_control_inputs.setter
  def _control_inputs(self, value):
    # Deprecated private setter; replaces ALL existing control inputs.
    logging.warning("Operation._control_inputs is private, use "
                    "Operation.control_inputs instead. "
                    "Operation._control_inputs will eventually be removed.")
    # Copy value because it may be self._control_inputs_val (in particular if
    # this is called from self._control_inputs += ...), and we don't want to
    # clear value below.
    value = copy.copy(value)
    self._remove_all_control_inputs()
    self._add_control_inputs(value)

  @property
  def type(self):
    """The type of the op (e.g. `"MatMul"`)."""
    if self._c_op:
      # With the C API enabled the op type is owned by the C graph.
      op_type = c_api.TF_OperationOpType(self._c_op)
      return op_type
    else:
      return self._node_def_val.op

  @property
  def graph(self):
    """The `Graph` that contains this operation."""
    return self._graph

  @property
  def node_def(self):
    # pylint: disable=line-too-long
    """Returns the `NodeDef` representation of this operation.

    Returns:
      A
      [`NodeDef`](https://www.tensorflow.org/code/tensorflow/core/framework/node_def.proto)
      protocol buffer.
    """
    # pylint: enable=line-too-long
    if self._c_op:
      # Serialize the NodeDef out of the C graph, then parse it back into a
      # Python protobuf object.
      with c_api_util.tf_buffer() as buf:
        c_api.TF_OperationToNodeDef(self._c_op, buf)
        data = c_api.TF_GetBuffer(buf)
      node_def = node_def_pb2.NodeDef()
      node_def.ParseFromString(compat.as_bytes(data))
      return node_def
    else:
      return self._node_def_val

  @property
  def _node_def(self):
    # Deprecated private alias for the public `node_def` property.
    logging.warning("Operation._node_def is private, use Operation.node_def "
                    "instead. Operation._node_def will eventually be removed.")
    return self.node_def

  @property
  def op_def(self):
    # pylint: disable=line-too-long
    """Returns the `OpDef` proto that represents the type of this op.

    Returns:
      An
      [`OpDef`](https://www.tensorflow.org/code/tensorflow/core/framework/op_def.proto)
      protocol buffer.
    """
    # pylint: enable=line-too-long
    if self._c_op:
      return self._graph._get_op_def(self.type)
    else:
      return self._op_def_val

  @property
  def _op_def(self):
    # Deprecated private alias for the public `op_def` property.
    logging.warning("Operation._op_def is private, use Operation.op_def "
                    "instead. Operation._op_def will eventually be removed.")
    return self.op_def

  @property
  def traceback(self):
    """Returns the call stack from when this operation was constructed."""
    return self._graph._convert_stack(self._traceback)  # pylint: disable=protected-access

  @property
  def traceback_with_start_lines(self):
    """Same as traceback but includes start line of function definition.

    Returns:
      A list of 5-tuples (filename, lineno, name, code, func_start_lineno).
    """
    return self._graph._convert_stack(  # pylint: disable=protected-access
        self._traceback,
        include_func_start_lineno=True)

  def _set_attr(self, attr_name, attr_value):
    """Private method used to set an attribute in the node_def."""
    if self._c_op:
      buf = c_api.TF_NewBufferFromString(
          compat.as_bytes(attr_value.SerializeToString()))
      try:
        # pylint: disable=protected-access
        c_api.SetAttr(self._graph._c_graph, self._c_op, attr_name, buf)
        # pylint: enable=protected-access
      finally:
        # The buffer must be released even if SetAttr raises.
        c_api.TF_DeleteBuffer(buf)
    else:
      self._node_def_val.attr[attr_name].CopyFrom(attr_value)

  def get_attr(self, name):
    """Returns the value of the attr of this op with the given `name`.

    Args:
      name: The name of the attr to fetch.

    Returns:
      The value of the attr, as a Python object.

    Raises:
      ValueError: If this op does not have an attr with the given `name`.
    """
    # The oneof field names of AttrValue, checked in order.
    fields = ["s", "i", "f", "b", "type", "shape", "tensor", "func"]
    if self._c_op:
      try:
        with c_api_util.tf_buffer() as buf:
          c_api.TF_OperationGetAttrValueProto(self._c_op, name, buf)
          data = c_api.TF_GetBuffer(buf)
      except errors.InvalidArgumentError as e:
        # Convert to ValueError for backwards compatibility.
        raise ValueError(str(e))
      x = attr_value_pb2.AttrValue()
      x.ParseFromString(data)
    else:
      if name not in self._node_def_val.attr:
        raise ValueError(
            "No attr named '" + name + "' in " + str(self._node_def_val))
      x = self._node_def_val.attr[name]

    # Treat an empty oneof value as an empty list.
    if not x.WhichOneof("value"):
      return []
    if x.HasField("list"):
      for f in fields:
        if getattr(x.list, f):
          if f == "type":
            # NOTE: the comprehension variable deliberately shadows `x` here;
            # elements of x.list.type are converted to DType objects.
            return [dtypes.as_dtype(x) for x in list(getattr(x.list, f))]
          else:
            return list(getattr(x.list, f))
      return []
    else:
      for f in fields:
        if x.HasField(f):
          if f == "type":
            return dtypes.as_dtype(getattr(x, f))
          else:
            return getattr(x, f)
      assert False, "Unsupported field type in " + str(x)

  def run(self, feed_dict=None, session=None):
    """Runs this operation in a `Session`.

    Calling this method will execute all preceding operations that
    produce the inputs needed for this operation.

    *N.B.* Before invoking `Operation.run()`, its graph must have been
    launched in a session, and either a default session must be
    available, or `session` must be specified explicitly.

    Args:
      feed_dict: A dictionary that maps `Tensor` objects to feed values. See
        @{tf.Session.run} for a description of the valid feed values.
      session: (Optional.) The `Session` to be used to run to this operation.
        If none, the default session will be used.
    """
    _run_using_default_session(self, feed_dict, self.graph, session)


# Global registry mapping op type name -> gradient function.
_gradient_registry = registry.Registry("gradient")


@tf_export("RegisterGradient")
class RegisterGradient(object):
  """A decorator for registering the gradient function for an op type.

  This decorator is only used when defining a new op type. For an op
  with `m` inputs and `n` outputs, the gradient function is a function
  that takes the original `Operation` and `n` `Tensor` objects
  (representing the gradients with respect to each output of the op),
  and returns `m` `Tensor` objects (representing the partial gradients
  with respect to each input of the op).
  For example, assuming that operations of type `"Sub"` take two
  inputs `x` and `y`, and return a single output `x - y`, the
  following gradient function would be registered:

  ```python
  @tf.RegisterGradient("Sub")
  def _sub_grad(unused_op, grad):
    return grad, tf.negative(grad)
  ```

  The decorator argument `op_type` is the string type of an
  operation. This corresponds to the `OpDef.name` field for the proto
  that defines the operation.
  """

  def __init__(self, op_type):
    """Creates a new decorator with `op_type` as the Operation type.

    Args:
      op_type: The string type of an operation. This corresponds to the
        `OpDef.name` field for the proto that defines the operation.
    """
    if not isinstance(op_type, six.string_types):
      raise TypeError("op_type must be a string")
    self._op_type = op_type

  def __call__(self, f):
    """Registers the function `f` as gradient function for `op_type`."""
    _gradient_registry.register(f, self._op_type)
    # Return f unchanged so the decorated function stays usable.
    return f


@tf_export("NoGradient", "NotDifferentiable")
def NotDifferentiable(op_type):
  """Specifies that ops of type `op_type` is not differentiable.

  This function should *not* be used for operations that have a
  well-defined gradient that is not yet implemented.

  This function is only used when defining a new op type. It may be
  used for ops such as `tf.size()` that are not differentiable.  For
  example:

  ```python
  tf.NotDifferentiable("Size")
  ```

  The gradient computed for 'op_type' will then propagate zeros.

  For ops that have a well-defined gradient but are not yet implemented,
  no declaration should be made, and an error *must* be thrown if
  an attempt to request its gradient is made.

  Args:
    op_type: The string type of an operation. This corresponds to the
      `OpDef.name` field for the proto that defines the operation.

  Raises:
    TypeError: If `op_type` is not a string.
  """
  if not isinstance(op_type, six.string_types):
    raise TypeError("op_type must be a string")
  # Registering None marks the op as deliberately non-differentiable.
  _gradient_registry.register(None, op_type)


# Alias for the old name, will be eventually removed.
NoGradient = NotDifferentiable


def get_gradient_function(op):
  """Returns the function that computes gradients for "op"."""
  # Ops with no inputs have no gradient.
  if not op.inputs:
    return None
  try:
    # A "_gradient_op_type" attr (set via Graph.gradient_override_map)
    # takes precedence over the op's own type.
    op_type = op.get_attr("_gradient_op_type")
  except ValueError:
    op_type = op.type
  return _gradient_registry.lookup(op_type)


_shape_registry = registry.Registry("shape functions")
_default_shape_function_registry = registry.Registry("default shape functions")

# These are set to common_shapes.call_cpp_shape_fn by op generated code
# (generated by python_op_gen.cc).
# It is set outside ops.py to avoid a circular dependency.
_call_cpp_shape_fn = None
_call_cpp_shape_fn_and_require_op = None


def _set_call_cpp_shape_fn(call_cpp_shape_fn):
  """Sets default shape fns from passed common_shapes.call_cpp_shape_fn."""
  global _call_cpp_shape_fn, _call_cpp_shape_fn_and_require_op
  if _call_cpp_shape_fn:
    return  # already registered

  def call_without_requiring(op):
    return call_cpp_shape_fn(op, require_shape_fn=False)

  _call_cpp_shape_fn = call_without_requiring

  def call_with_requiring(op):
    return call_cpp_shape_fn(op, require_shape_fn=True)

  _call_cpp_shape_fn_and_require_op = call_with_requiring


class RegisterShape(object):
  """No longer used.

  Was: A decorator for registering a shape function.

  Shape functions must now be registered via the SetShapeFn on the
  original Op specification in C++.
  """

  def __init__(self, op_type):
    """Saves the `op_type` as the `Operation` type."""
    if not isinstance(op_type, six.string_types):
      raise TypeError("op_type must be a string")
    self._op_type = op_type

  def __call__(self, f):
    """Registers "f" as the shape function for "op_type"."""
    if f is None:
      assert _call_cpp_shape_fn

      # None is a special "weak" value that provides a default shape function,
      # and can be overridden by a non-None registration.
      try:
        _default_shape_function_registry.register(_call_cpp_shape_fn,
                                                  self._op_type)
      except KeyError:
        # Ignore duplicate registrations of the weak value.
        # This can occur if the op library input to wrapper generation
        # inadvertently links in one or more of the standard op
        # libraries.
        pass
    else:
      _shape_registry.register(f, self._op_type)
    return f


# TODO(b/74620627): remove when _USE_C_SHAPES is removed
def _set_shape_and_handle_data_for_outputs_c_api(op):
  """Set shapes and resource handle data using info from the C API."""
  assert not _USE_C_SHAPES
  for output in op.outputs:
    output._shape_val = output._c_api_shape()
    # Set the resource handle data for compatibility with the Python shape
    # inference code.
    serialized = c_api.ResourceHandleShapeAndType(op._graph._c_graph,
                                                  output._as_tf_output())
    if serialized:
      output._handle_data = (
          cpp_shape_inference_pb2.CppShapeInferenceResult.HandleData
          .FromString(compat.as_bytes(serialized)))
    else:
      output._handle_data = None


# TODO(b/74620627): remove when _USE_C_SHAPES is removed
def set_shape_and_handle_data_for_outputs(op):
  """Set the shapes and resource handle data for op's outputs.

  When _USE_C_API = True, this is lazily called when a tensor's shape is first
  requested. Usually this should work automatically, but some edge cases may
  require manually calling this first to make sure Tensor._shape_val and
  Tensor._handle_data are set (e.g. manually overriding _handle_data, copying
  a Tensor).
  """
  if _USE_C_SHAPES:
    return

  # Function-call ops have no registered shape functions; their outputs get
  # unknown shapes.
  if op.graph._is_function(op.type):
    for output in op.outputs:
      output._shape_val = tensor_shape.unknown_shape()
    return

  # Lookup order: explicit registration, then "weak" default registration,
  # then the C++ shape function (which requires the op to define one).
  try:
    shape_func = _shape_registry.lookup(op.type)
  except LookupError:
    try:
      shape_func = _default_shape_function_registry.lookup(op.type)
    except LookupError:
      shape_func = _call_cpp_shape_fn_and_require_op

  shapes = shape_func(op)
  if shapes is None:
    raise RuntimeError(
        "Shape function for op %s did not return any shapes" % op)
  elif isinstance(shapes, dict):
    # Returned by call_cpp_shape_fn
    shapes_dict = shapes
    shapes = shapes_dict["shapes"]
    handle_datas = shapes_dict["handle_data"]
    for output, handle_data in zip(op.outputs, handle_datas):
      # Don't override any existing handle data that may have been manually
      # set.
      # pylint: disable=protected-access
      if output._handle_data is None:
        output._handle_data = handle_data
      # pylint: enable=protected-access

  if len(op.outputs) != len(shapes):
    raise RuntimeError(
        "Shape function for op %s returned %d shapes but expected %d %s %s" %
        (op, len(shapes), len(op.outputs), shape_func.__name__, str(shapes)))
  for output, s in zip(op.outputs, shapes):
    # Reset then merge so the inferred shape constrains the output.
    output._shape_val = tensor_shape.unknown_shape()
    output._shape_val = output._shape_val.merge_with(s)


class OpStats(object):
  """A holder for statistics about an operator.

  This class holds information about the resource requirements for an op,
  including the size of its weight parameters on-disk and how many FLOPS it
  requires to execute forward inference.

  If you define a new operation, you can create a function that will return a
  set of information about its usage of the CPU and disk space when serialized.
  The function itself takes a Graph object that's been set up so you can call
  methods like get_tensor_by_name to help calculate the results, and a NodeDef
  argument.
""" def __init__(self, statistic_type, value=None): """Sets up the initial placeholders for the statistics.""" self.statistic_type = statistic_type self.value = value @property def statistic_type(self): return self._statistic_type @statistic_type.setter def statistic_type(self, statistic_type): self._statistic_type = statistic_type @property def value(self): return self._value @value.setter def value(self, value): self._value = value def __iadd__(self, other): if other.statistic_type != self.statistic_type: raise ValueError("Can't add an OpStat of type %s to one of %s." % (self.statistic_type, other.statistic_type)) if self.value is None: self.value = other.value elif other.value is not None: self._value += other.value return self _stats_registry = registry.Registry("statistical functions") class RegisterStatistics(object): """A decorator for registering the statistics function for an op type. This decorator can be defined for an op type so that it gives a report on the resources used by an instance of an operator, in the form of an OpStats object. Well-known types of statistics include these so far: - flops: When running a graph, the bulk of the computation happens doing numerical calculations like matrix multiplications. This type allows a node to return how many floating-point operations it takes to complete. The total number of FLOPs for a graph is a good guide to its expected latency. You can add your own statistics just by picking a new type string, registering functions for the ops you care about, and then calling get_stats_for_node_def. If a statistic for an op is registered multiple times, a KeyError will be raised. Since the statistics is counted on a per-op basis. It is not suitable for model parameters (capacity), which is expected to be counted only once, even if it is shared by multiple ops. (e.g. 
RNN) For example, you can define a new metric called doohickey for a Foo operation by placing this in your code: ```python @ops.RegisterStatistics("Foo", "doohickey") def _calc_foo_bojangles(unused_graph, unused_node_def): return ops.OpStats("doohickey", 20) ``` Then in client code you can retrieve the value by making this call: ```python doohickey = ops.get_stats_for_node_def(graph, node_def, "doohickey") ``` If the NodeDef is for an op with a registered doohickey function, you'll get back the calculated amount in doohickey.value, or None if it's not defined. """ def __init__(self, op_type, statistic_type): """Saves the `op_type` as the `Operation` type.""" if not isinstance(op_type, six.string_types): raise TypeError("op_type must be a string.") if "," in op_type: raise TypeError("op_type must not contain a comma.") self._op_type = op_type if not isinstance(statistic_type, six.string_types): raise TypeError("statistic_type must be a string.") if "," in statistic_type: raise TypeError("statistic_type must not contain a comma.") self._statistic_type = statistic_type def __call__(self, f): """Registers "f" as the statistics function for "op_type".""" _stats_registry.register(f, self._op_type + "," + self._statistic_type) return f def get_stats_for_node_def(graph, node, statistic_type): """Looks up the node's statistics function in the registry and calls it. This function takes a Graph object and a NodeDef from a GraphDef, and if there's an associated statistics method, calls it and returns a result. If no function has been registered for the particular node type, it returns an empty statistics object. Args: graph: A Graph object that's been set up with the node's graph. node: A NodeDef describing the operator. statistic_type: A string identifying the statistic we're interested in. Returns: An OpStats object containing information about resource usage. 
""" try: stats_func = _stats_registry.lookup(node.op + "," + statistic_type) result = stats_func(graph, node) except LookupError: result = OpStats(statistic_type) return result def _name_from_scope_name(name): """Returns the name of an op given the name of its scope. Args: name: the name of the scope. Returns: the name of the op (equal to scope name minus any trailing slash). """ return name[:-1] if (name and name[-1] == "/") else name @tf_export("Graph") class Graph(object): """A TensorFlow computation, represented as a dataflow graph. A `Graph` contains a set of @{tf.Operation} objects, which represent units of computation; and @{tf.Tensor} objects, which represent the units of data that flow between operations. A default `Graph` is always registered, and accessible by calling @{tf.get_default_graph}. To add an operation to the default graph, simply call one of the functions that defines a new `Operation`: ```python c = tf.constant(4.0) assert c.graph is tf.get_default_graph() ``` Another typical usage involves the @{tf.Graph.as_default} context manager, which overrides the current default graph for the lifetime of the context: ```python g = tf.Graph() with g.as_default(): # Define operations and tensors in `g`. c = tf.constant(30.0) assert c.graph is g ``` Important note: This class *is not* thread-safe for graph construction. All operations should be created from a single thread, or external synchronization must be provided. Unless otherwise specified, all methods are not thread-safe. A `Graph` instance supports an arbitrary number of "collections" that are identified by name. For convenience when building a large graph, collections can store groups of related objects: for example, the `tf.Variable` uses a collection (named @{tf.GraphKeys.GLOBAL_VARIABLES}) for all variables that are created during the construction of a graph. The caller may define additional collections by specifying a new name. 
""" def __init__(self): """Creates a new, empty Graph.""" # Protects core state that can be returned via public accessors, as well as # synchronizes Session.run calls with methods that create and mutate ops # (e.g. Graph.create_op()). This synchronization is necessary because it's # illegal to modify an operation after it's been run. Thread-safety is # provided on a best-effort basis to support buggy programs, and is not # guaranteed by the public `tf.Graph` API. # # The lock must be reentrant because create_op can be called recursively due # to control flow. Without a reentrant lock, many methods would also need a # "locked" version or parameter (including generated code). # # NOTE(mrry): This does not protect the various stacks. A warning will # be reported if these are used from multiple threads self._lock = threading.RLock() self._nodes_by_id = dict() # GUARDED_BY(self._lock) self._next_id_counter = 0 # GUARDED_BY(self._lock) self._nodes_by_name = dict() # GUARDED_BY(self._lock) self._version = 0 # GUARDED_BY(self._lock) # Maps a name used in the graph to the next id to use for that name. self._names_in_use = {} self._stack_state_is_thread_local = False self._thread_local = threading.local() # Functions that will be applied to choose a device if none is specified. # After switch_to_thread_local(), self._thread_local._device_function_stack # is used instead. self._graph_device_function_stack = [] # Default original_op applied to new ops. self._default_original_op = None # Current control flow context. It could be either CondContext or # WhileContext defined in ops/control_flow_ops.py self._control_flow_context = None # A new node will depend of the union of all of the nodes in the stack. # After switch_to_thread_local(), # self._thread_local._control_dependencies_stack is used instead. self._graph_control_dependencies_stack = [] # Arbitrary collections of objects. 
self._collections = {} # The graph-level random seed self._seed = None # A dictionary of attributes that should be applied to all ops. self._attr_scope_map = {} # A map from op type to the kernel label that should be used. self._op_to_kernel_label_map = {} # A map from op type to an alternative op type that should be used when # computing gradients. self._gradient_override_map = {} # True if the graph is considered "finalized". In that case no # new operations can be added. self._finalized = False # Functions defined in the graph self._functions = collections.OrderedDict() # Default GraphDef versions self._graph_def_versions = versions_pb2.VersionDef( producer=versions.GRAPH_DEF_VERSION, min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER) self._building_function = False # Stack of colocate_with ops. After switch_to_thread_local(), # self._thread_local._colocation_stack is used instead. self._graph_colocation_stack = [] # Set of tensors that are dangerous to feed! self._unfeedable_tensors = set() # Set of operations that are dangerous to fetch! self._unfetchable_ops = set() # A map of tensor handle placeholder to tensor dtype. self._handle_feeders = {} # A map from tensor handle to its read op. self._handle_readers = {} # A map from tensor handle to its move op. self._handle_movers = {} # A map from tensor handle to its delete op. self._handle_deleters = {} # Allow optimizers and other objects to pseudo-uniquely key graphs (this key # will be shared when defining function graphs, for example, so optimizers # being called inside function definitions behave as if they were seeing the # actual outside graph). self._graph_key = "grap-key-%d/" % (uid(),) # A string with the last reduction method passed to # losses.compute_weighted_loss(), or None. 
self._last_loss_reduction = None self._container = "" self._registered_ops = op_def_registry.get_registered_ops() # TODO(skyewm): fold as much of the above as possible into the C # implementation if self._use_c_api_hack(): self._scoped_c_graph = c_api_util.ScopedTFGraph() # The C API requires all ops to have shape functions. Disable this # requirement (many custom ops do not have shape functions, and we don't # want to break these existing cases). c_api.SetRequireShapeInferenceFns(self._c_graph, False) else: self._scoped_c_graph = None # TODO(apassos) remove once the C API is used by default. def _use_c_api_hack(self): """Temporary hack; can be overridden to force C API usage.""" return _USE_C_API def _convert_stack(self, stack, include_func_start_lineno=False): """Converts a stack extracted using _extract_stack() to a traceback stack. Args: stack: A list of n 5-tuples, (filename, lineno, name, frame_globals, func_start_lineno). include_func_start_lineno: True if function start line number should be included as the 5th entry in return tuples. Returns: A list of n 4-tuples or 5-tuples (filename, lineno, name, code, [optional: func_start_lineno]), where the code tuple element is calculated from the corresponding elements of the input tuple. """ ret = [] for (filename, lineno, name, frame_globals, func_start_lineno, unused_frame_info) in stack: linecache.checkcache(filename) line = linecache.getline(filename, lineno, frame_globals) if line: line = line.strip() else: line = None if include_func_start_lineno: ret.append((filename, lineno, name, line, func_start_lineno)) else: ret.append((filename, lineno, name, line)) return ret # Note: this method is private because the API of tf.Graph() is public and # frozen, and this functionality is still not ready for public visibility. 
  @tf_contextlib.contextmanager
  def _variable_creator_scope(self, creator):
    # This step makes a copy of the existing stack, and it also initializes
    # self._thread_local._variable_creator_stack if it doesn't exist yet.
    old = list(self._variable_creator_stack)
    self._thread_local._variable_creator_stack.append(creator)
    try:
      yield
    finally:
      # Restore the pre-scope stack even if the body raised.
      self._thread_local._variable_creator_stack = old

  # Note: this method is private because the API of tf.Graph() is public and
  # frozen, and this functionality is still not ready for public visibility.
  @property
  def _variable_creator_stack(self):
    if not hasattr(self._thread_local, "_variable_creator_stack"):
      self._thread_local._variable_creator_stack = []
    # Return a copy so callers cannot mutate the thread-local stack in place.
    return list(self._thread_local._variable_creator_stack)

  @_variable_creator_stack.setter
  def _variable_creator_stack(self, variable_creator_stack):
    self._thread_local._variable_creator_stack = variable_creator_stack

  def _extract_stack(self):
    """A lightweight, extensible re-implementation of traceback.extract_stack.

    NOTE(mrry): traceback.extract_stack eagerly retrieves the line of code for
      each stack frame using linecache, which results in an abundance of
      stat() calls. This implementation does not retrieve the code, and any
      consumer should apply _convert_stack to the result to obtain a traceback
      that can be formatted etc. using traceback methods.

    Derived classes can implement _extract_frame_info() to add extra
    information to the traceback.

    Returns:
      A list of 6-tuples
      (filename, lineno, name, frame_globals, func_start_lineno, custom_info)
      corresponding to the call stack of the current thread.
    """
    # Raising/catching is a portable way to obtain the current frame
    # (sys.exc_info()[2].tb_frame) without importing inspect.
    try:
      raise ZeroDivisionError
    except ZeroDivisionError:
      f = sys.exc_info()[2].tb_frame.f_back
    ret = []
    while f is not None:
      lineno = f.f_lineno
      co = f.f_code
      filename = co.co_filename
      name = co.co_name
      frame_globals = f.f_globals
      func_start_lineno = co.co_firstlineno
      frame_info = self._extract_frame_info(f)
      ret.append((filename, lineno, name, frame_globals, func_start_lineno,
                  frame_info))
      f = f.f_back
    # Walk produced innermost-first; reverse to match traceback convention.
    ret.reverse()
    return ret

  def _extract_frame_info(self, frame):  # pylint: disable=unused-argument
    """Extracts custom information from a frame in an op traceback."""
    return None

  def _check_not_finalized(self):
    """Check if the graph is finalized.

    Raises:
      RuntimeError: If the graph finalized.
    """
    if self._finalized:
      raise RuntimeError("Graph is finalized and cannot be modified.")

  def _add_op(self, op):
    """Adds 'op' to the graph.

    Args:
      op: the Operator or Tensor to add.

    Raises:
      TypeError: if op is not an Operation or Tensor.
      ValueError: if the op.name or op._id are already used.
    """
    self._check_not_finalized()
    if not isinstance(op, (Tensor, Operation)):
      raise TypeError("op must be a Tensor or Operation: %s" % op)
    with self._lock:
      # pylint: disable=protected-access
      if op._id in self._nodes_by_id:
        raise ValueError("cannot add an op with id %d as it already "
                         "exists in the graph" % op._id)
      if op.name in self._nodes_by_name:
        raise ValueError("cannot add op with name %s as that name "
                         "is already used" % op.name)
      self._nodes_by_id[op._id] = op
      self._nodes_by_name[op.name] = op
      self._version = max(self._version, op._id)
      # pylint: enable=protected-access

  @property
  def _c_graph(self):
    # The underlying TF_Graph, or None when the C API is not in use.
    if self._scoped_c_graph:
      return self._scoped_c_graph.graph
    return None

  @property
  def version(self):
    """Returns a version number that increases as ops are added to the graph.

    Note that this is unrelated to the
    @{tf.Graph.graph_def_versions}.

    Returns:
      An integer version that increases as ops are added to the graph.
""" if self._finalized: return self._version with self._lock: return self._version @property def graph_def_versions(self): # pylint: disable=line-too-long """The GraphDef version information of this graph. For details on the meaning of each version, see [`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto). Returns: A `VersionDef`. """ # pylint: enable=line-too-long if self._c_graph: with c_api_util.tf_buffer() as buf: c_api.TF_GraphVersions(self._c_graph, buf) data = c_api.TF_GetBuffer(buf) version_def = versions_pb2.VersionDef() version_def.ParseFromString(compat.as_bytes(data)) return version_def else: return self._graph_def_versions @property def seed(self): """The graph-level random seed of this graph.""" return self._seed @seed.setter def seed(self, seed): self._seed = seed @property def finalized(self): """True if this graph has been finalized.""" return self._finalized def finalize(self): """Finalizes this graph, making it read-only. After calling `g.finalize()`, no new operations can be added to `g`. This method is used to ensure that no operations are added to a graph when it is shared between multiple threads, for example when using a @{tf.train.QueueRunner}. """ self._finalized = True def _unsafe_unfinalize(self): """Opposite of `finalize`. Internal interface. NOTE: Unfinalizing a graph could have negative impact on performance, especially in a multi-threaded environment. Unfinalizing a graph when it is in use by a Session may lead to undefined behavior. Ensure that all sessions using a graph are closed before calling this method. """ self._finalized = False def _get_control_flow_context(self): """Returns the current control flow context. Returns: A context object. """ return self._control_flow_context def _set_control_flow_context(self, ctx): """Sets the current control flow context. Args: ctx: a context object. 
""" self._control_flow_context = ctx def _copy_functions_to_graph_def(self, graph_def, starting_bytesize): """If this graph contains functions, copy them to `graph_def`.""" bytesize = starting_bytesize for f in self._functions.values(): bytesize += f.definition.ByteSize() if bytesize >= (1 << 31) or bytesize < 0: raise ValueError("GraphDef cannot be larger than 2GB.") graph_def.library.function.extend([f.definition]) if f.grad_func_name: grad_def = function_pb2.GradientDef() grad_def.function_name = f.name grad_def.gradient_func = f.grad_func_name graph_def.library.gradient.extend([grad_def]) def _as_graph_def(self, from_version=None, add_shapes=False): # pylint: disable=line-too-long """Returns a serialized `GraphDef` representation of this graph. The serialized `GraphDef` can be imported into another `Graph` (using @{tf.import_graph_def}) or used with the [C++ Session API](../../../../api_docs/cc/index.md). This method is thread-safe. Args: from_version: Optional. If this is set, returns a `GraphDef` containing only the nodes that were added to this graph since its `version` property had the given value. add_shapes: If true, adds an "_output_shapes" list attr to each node with the inferred shapes of each of its outputs. Returns: A tuple containing a [`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto) protocol buffer, and the version of the graph to which that `GraphDef` corresponds. Raises: ValueError: If the `graph_def` would be too large. """ # pylint: enable=line-too-long if self._c_graph: with self._lock: with c_api_util.tf_buffer() as buf: c_api.TF_GraphToGraphDef(self._c_graph, buf) data = c_api.TF_GetBuffer(buf) graph = graph_pb2.GraphDef() graph.ParseFromString(compat.as_bytes(data)) # Strip the experimental library field iff it's empty. 
        if not graph.library.function:
          graph.ClearField("library")

        if add_shapes:
          for node in graph.node:
            op = self._nodes_by_name[node.name]
            if op.outputs:
              node.attr["_output_shapes"].list.shape.extend(
                  [output.get_shape().as_proto() for output in op.outputs])
    else:
      with self._lock:
        graph = graph_pb2.GraphDef()
        graph.versions.CopyFrom(self._graph_def_versions)
        bytesize = 0
        # Iterate in id order so node order is deterministic, and so
        # `from_version` filtering (ids are monotonically increasing) works.
        for op_id in sorted(self._nodes_by_id):
          op = self._nodes_by_id[op_id]
          if from_version is None or op_id > from_version:
            graph.node.extend([op.node_def])
            if op.outputs and add_shapes:
              assert "_output_shapes" not in graph.node[-1].attr
              graph.node[-1].attr["_output_shapes"].list.shape.extend(
                  [output.get_shape().as_proto() for output in op.outputs])
            bytesize += op.node_def.ByteSize()
            if bytesize >= (1 << 31) or bytesize < 0:
              raise ValueError("GraphDef cannot be larger than 2GB.")
        self._copy_functions_to_graph_def(graph, bytesize)
    return graph, self._version

  def as_graph_def(self, from_version=None, add_shapes=False):
    # pylint: disable=line-too-long
    """Returns a serialized `GraphDef` representation of this graph.

    The serialized `GraphDef` can be imported into another `Graph`
    (using @{tf.import_graph_def}) or used with the
    [C++ Session API](../../api_docs/cc/index.md).

    This method is thread-safe.

    Args:
      from_version: Optional.  If this is set, returns a `GraphDef` containing
        only the nodes that were added to this graph since its `version`
        property had the given value.
      add_shapes: If true, adds an "_output_shapes" list attr to each node
        with the inferred shapes of each of its outputs.

    Returns:
      A
      [`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
      protocol buffer.

    Raises:
      ValueError: If the `graph_def` would be too large.
    """
    # pylint: enable=line-too-long
    result, _ = self._as_graph_def(from_version, add_shapes)
    return result

  def _is_function(self, name):
    """Tests whether 'name' is registered in this graph's function library.

    Args:
      name: string op name.

    Returns:
      bool indicating whether or not 'name' is registered in function library.
    """
    return name in self._functions

  def _get_function(self, name):
    """Returns the function definition for 'name'.

    Args:
      name: string function name.
    Returns:
      The function def proto.
    """
    return self._functions.get(name, None)

  def _add_function(self, function):
    """Adds a function to the graph.

    After the function has been added, you can call to the function by
    passing the function name in place of an op name to
    `Graph.create_op()`.

    Args:
      function: A `_DefinedFunction` object.

    Raises:
      ValueError: if another function is defined with the same name.
    """
    name = function.name
    # Sanity checks on gradient definition.
    if (function.grad_func_name is not None) and (function.python_grad_func is
                                                  not None):
      raise ValueError("Gradient defined twice for function %s" % name)

    # Add function to graph
    # pylint: disable=protected-access
    if self._c_graph:
      # Handle functions created without using the C API. TODO(apassos,skyewm)
      # remove this when all functions are generated using the C API by
      # default as this will be unnecessary.
      if not function._c_func:
        serialized = function.definition.SerializeToString()
        c_func = c_api.TF_FunctionImportFunctionDef(serialized)
        function._c_func = c_api_util.ScopedTFFunction(c_func)
      gradient = (function._grad_func._c_func.func
                  if function._grad_func else None)
      c_api.TF_GraphCopyFunction(self._c_graph, function._c_func.func,
                                 gradient)
    else:
      # If there is already a function with the same name, raise an error
      # if bodies are different. Else, do nothing. The C API version above
      # has the same behavior.
      previous = self._functions.get(name, None)
      if previous:
        # This check is not ideal as we can have a hash collision with only
        # 32 bits in the hash, but the non C API mode is being deprecated.
        # Don't bother changing it now.
        if previous._hash_str == function._hash_str:
          return
        else:
          raise ValueError("Cannot add function (%s, hash %s) to graph (%s). "
                           "Another function (%s, hash %s) is already defined "
                           "with that name (%s)" % (
                               function, function._hash_str, self,
                               previous, previous._hash_str, name))
    # pylint: enable=protected-access

    self._functions[name] = function

    # Need a new-enough consumer to support the functions we add to the graph.
    if self._graph_def_versions.min_consumer < 12:
      self._graph_def_versions.min_consumer = 12

  @property
  def building_function(self):
    """Returns True iff this graph represents a function."""
    return self._building_function

  # Helper functions to create operations.
  def create_op(
      self,
      op_type,
      inputs,
      dtypes,  # pylint: disable=redefined-outer-name
      input_types=None,
      name=None,
      attrs=None,
      op_def=None,
      compute_shapes=True,
      compute_device=True):
    """Creates an `Operation` in this graph.

    This is a low-level interface for creating an `Operation`. Most
    programs will not call this method directly, and instead use the
    Python op constructors, such as `tf.constant()`, which add ops to
    the default graph.

    Args:
      op_type: The `Operation` type to create. This corresponds to the
        `OpDef.name` field for the proto that defines the operation.
      inputs: A list of `Tensor` objects that will be inputs to the
        `Operation`.
      dtypes: A list of `DType` objects that will be the types of the tensors
        that the operation produces.
      input_types: (Optional.) A list of `DType`s that will be the types of
        the tensors that the operation consumes. By default, uses the base
        `DType` of each input in `inputs`. Operations that expect
        reference-typed inputs must specify `input_types` explicitly.
      name: (Optional.) A string name for the operation. If not specified, a
        name is generated based on `op_type`.
      attrs: (Optional.) A dictionary where the key is the attribute name (a
        string) and the value is the respective `attr` attribute of the
        `NodeDef` proto that will represent the operation (an `AttrValue`
        proto).
      op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
        the operation will have.
      compute_shapes: (Optional.)
If True, shape inference will be performed to compute the shapes of the outputs. compute_device: (Optional.) If True, device functions will be executed to compute the device property of the Operation. Raises: TypeError: if any of the inputs is not a `Tensor`. ValueError: if colocation conflicts with existing device assignment. Returns: An `Operation` object. """ self._check_not_finalized() for idx, a in enumerate(inputs): if not isinstance(a, Tensor): raise TypeError("Input #%d is not a tensor: %s" % (idx, a)) if name is None: name = op_type # If a names ends with a '/' it is a "name scope" and we use it as-is, # after removing the trailing '/'. if name and name[-1] == "/": name = _name_from_scope_name(name) else: name = self.unique_name(name) node_def = _NodeDef(op_type, name, device=None, attrs=attrs) input_ops = set([t.op for t in inputs]) control_inputs = self._control_dependencies_for_inputs(input_ops) # _create_op_helper mutates the new Operation. _lock ensures a Session.run # call cannot occur between creating and mutating the op. with self._lock: ret = Operation( node_def, self, inputs=inputs, output_types=dtypes, control_inputs=control_inputs, input_types=input_types, original_op=self._default_original_op, op_def=op_def) # Note: shapes are lazily computed with the C API enabled. # # TODO(skyewm): unlike in the original Python implementation, the C API # always computes shape information (even for function calls, which the # original Python shape inference code doesn't handle). Deprecate the # compute_shapes argument. if not _USE_C_API and compute_shapes: set_shape_and_handle_data_for_outputs(ret) self._create_op_helper(ret, compute_shapes=compute_shapes, compute_device=compute_device) return ret def _create_op_from_tf_operation(self, c_op, compute_device=True): """Creates an `Operation` in this graph from the supplied TF_Operation. This method is like create_op() except the new Operation is constructed using `c_op`. 
The returned Operation will have `c_op` as its _c_op field. This is used to create Operation objects around TF_Operations created indirectly by the C API (e.g. by TF_ImportGraphDef, TF_FinishWhile). This function does not call Operation._control_flow_post_processing or Graph._control_dependencies_for_inputs (since the inputs may not be available yet). The caller is responsible for calling these methods. Args: c_op: a wrapped TF_Operation compute_device: (Optional.) If True, device functions will be executed to compute the device property of the Operation. Returns: An `Operation` object. """ self._check_not_finalized() ret = Operation(c_op, self) # If a name_scope was created with ret.name but no nodes were created in it, # the name will still appear in _names_in_use even though the name hasn't # been used. This is ok, just leave _names_in_use as-is in this case. # TODO(skyewm): make the C API guarantee no name conflicts. if ret.name not in self._names_in_use: self._names_in_use[ret.name] = 1 self._create_op_helper(ret, compute_device=compute_device) return ret def _create_op_helper(self, op, compute_shapes=True, compute_device=True): """Common logic for creating an op in this graph.""" # TODO(b/XXXX): move to Operation.__init__ once _USE_C_API flag is removed. self._add_op(op) # Apply any additional attributes requested. Do not overwrite any existing # attributes. for key, value in self._attr_scope_map.items(): try: op.get_attr(key) except ValueError: if callable(value): value = value(op.node_def) if not isinstance(value, (type(None), attr_value_pb2.AttrValue)): raise TypeError( "Callable for scope map key '%s' must return either None or " "an AttrValue protocol buffer; but it returned: %s" % (key, value)) if value: op._set_attr(key, value) # pylint: disable=protected-access # Apply a kernel label if one has been specified for this op type. 
try: kernel_label = self._op_to_kernel_label_map[op.type] op._set_attr("_kernel", # pylint: disable=protected-access attr_value_pb2.AttrValue(s=compat.as_bytes(kernel_label))) except KeyError: pass # Apply the overriding op type for gradients if one has been specified for # this op type. try: mapped_op_type = self._gradient_override_map[op.type] op._set_attr("_gradient_op_type", # pylint: disable=protected-access attr_value_pb2.AttrValue(s=compat.as_bytes(mapped_op_type))) except KeyError: pass self._record_op_seen_by_control_dependencies(op) if compute_device: self._apply_device_functions(op) if self._colocation_stack: all_colocation_groups = [] for colocation_op in self._colocation_stack: all_colocation_groups.extend(colocation_op.colocation_groups()) if colocation_op.device: # Make this device match the device of the colocated op, to provide # consistency between the device and the colocation property. if (op.device and pydev.canonical_name(op.device) != pydev.canonical_name(colocation_op.device)): logging.warning("Tried to colocate %s with an op %s that had " "a different device: %s vs %s. Postponing " "error-checking until all devices are assigned.", op.name, colocation_op.name, op.device, colocation_op.device) else: op._set_device(colocation_op.device) # pylint: disable=protected-access all_colocation_groups = sorted(set(all_colocation_groups)) # pylint: disable=protected-access op._set_attr("_class", attr_value_pb2.AttrValue( list=attr_value_pb2.AttrValue.ListValue(s=all_colocation_groups))) # pylint: enable=protected-access # Sets "container" attribute if # (1) self._container is not None # (2) "is_stateful" is set in OpDef # (3) "container" attribute is in OpDef # (4) "container" attribute is None # TODO(skyewm): remove op.op_def check when _USE_C_API is removed. 
if self._container and op.op_def and op.op_def.is_stateful: try: container_attr = op.get_attr("container") except ValueError: # "container" attribute is not in OpDef pass else: if not container_attr: op._set_attr("container", attr_value_pb2.AttrValue( # pylint: disable=protected-access s=compat.as_bytes(self._container))) def _add_new_tf_operations(self, compute_devices=True): """Creates `Operations` in this graph for any new TF_Operations. This is useful for when TF_Operations are indirectly created by the C API outside of the Operation constructor (e.g. by TF_ImportGraphDef, TF_FinishWhile). This ensures there are corresponding Operations for all TF_Operations in the underlying TF_Graph. Args: compute_devices: (Optional.) If True, device functions will be executed to compute the device properties of each new Operation. Returns: A list of the new `Operation` objects. """ # Create all Operation objects before accessing their inputs since an op may # be created before its inputs. new_ops = [ self._create_op_from_tf_operation(c_op, compute_device=compute_devices) for c_op in c_api_util.new_tf_operations(self) ] # pylint: disable=protected-access for op in new_ops: # Operations created by the C API always retrieve shapes from the C API so # we preserve the shapes of ops created in import_graph_def (from the # "_output_shapes" attr of the imported NodeDef). if not _USE_C_SHAPES: _set_shape_and_handle_data_for_outputs_c_api(op) new_control_inputs = self._control_dependencies_for_inputs(op.inputs) op._add_control_inputs(new_control_inputs) op._control_flow_post_processing() # pylint: enable=protected-access return new_ops def as_graph_element(self, obj, allow_tensor=True, allow_operation=True): """Returns the object referred to by `obj`, as an `Operation` or `Tensor`. This function validates that `obj` represents an element of this graph, and gives an informative error message if it is not. 
This function is the canonical way to get/validate an object of one of the allowed types from an external argument reference in the Session API. This method may be called concurrently from multiple threads. Args: obj: A `Tensor`, an `Operation`, or the name of a tensor or operation. Can also be any object with an `_as_graph_element()` method that returns a value of one of these types. allow_tensor: If true, `obj` may refer to a `Tensor`. allow_operation: If true, `obj` may refer to an `Operation`. Returns: The `Tensor` or `Operation` in the Graph corresponding to `obj`. Raises: TypeError: If `obj` is not a type we support attempting to convert to types. ValueError: If `obj` is of an appropriate type but invalid. For example, an invalid string. KeyError: If `obj` is not an object in the graph. """ if self._finalized: return self._as_graph_element_locked(obj, allow_tensor, allow_operation) with self._lock: return self._as_graph_element_locked(obj, allow_tensor, allow_operation) def _as_graph_element_locked(self, obj, allow_tensor, allow_operation): """See `Graph.as_graph_element()` for details.""" # The vast majority of this function is figuring # out what an API user might be doing wrong, so # that we can give helpful error messages. # # Ideally, it would be nice to split it up, but we # need context to generate nice error messages. if allow_tensor and allow_operation: types_str = "Tensor or Operation" elif allow_tensor: types_str = "Tensor" elif allow_operation: types_str = "Operation" else: raise ValueError("allow_tensor and allow_operation can't both be False.") temp_obj = _as_graph_element(obj) if temp_obj is not None: obj = temp_obj # If obj appears to be a name... if isinstance(obj, compat.bytes_or_text_types): name = compat.as_str(obj) if ":" in name and allow_tensor: # Looks like a Tensor name and can be a Tensor. 
try: op_name, out_n = name.split(":") out_n = int(out_n) except: raise ValueError("The name %s looks a like a Tensor name, but is " "not a valid one. Tensor names must be of the " "form \"<op_name>:<output_index>\"." % repr(name)) if op_name in self._nodes_by_name: op = self._nodes_by_name[op_name] else: raise KeyError("The name %s refers to a Tensor which does not " "exist. The operation, %s, does not exist in the " "graph." % (repr(name), repr(op_name))) try: return op.outputs[out_n] except: raise KeyError("The name %s refers to a Tensor which does not " "exist. The operation, %s, exists but only has " "%s outputs." % (repr(name), repr(op_name), len(op.outputs))) elif ":" in name and not allow_tensor: # Looks like a Tensor name but can't be a Tensor. raise ValueError("Name %s appears to refer to a Tensor, not a %s." % (repr(name), types_str)) elif ":" not in name and allow_operation: # Looks like an Operation name and can be an Operation. if name not in self._nodes_by_name: raise KeyError("The name %s refers to an Operation not in the " "graph." % repr(name)) return self._nodes_by_name[name] elif ":" not in name and not allow_operation: # Looks like an Operation name but can't be an Operation. if name in self._nodes_by_name: # Yep, it's an Operation name err_msg = ("The name %s refers to an Operation, not a %s." % (repr(name), types_str)) else: err_msg = ("The name %s looks like an (invalid) Operation name, " "not a %s." % (repr(name), types_str)) err_msg += (" Tensor names must be of the form " "\"<op_name>:<output_index>\".") raise ValueError(err_msg) elif isinstance(obj, Tensor) and allow_tensor: # Actually obj is just the object it's referring to. if obj.graph is not self: raise ValueError("Tensor %s is not an element of this graph." % obj) return obj elif isinstance(obj, Operation) and allow_operation: # Actually obj is just the object it's referring to. if obj.graph is not self: raise ValueError("Operation %s is not an element of this graph." 
% obj) return obj else: # We give up! raise TypeError("Can not convert a %s into a %s." % (type(obj).__name__, types_str)) def get_operations(self): """Return the list of operations in the graph. You can modify the operations in place, but modifications to the list such as inserts/delete have no effect on the list of operations known to the graph. This method may be called concurrently from multiple threads. Returns: A list of Operations. """ if self._finalized: return list(self._nodes_by_id.values()) with self._lock: return list(self._nodes_by_id.values()) def get_operation_by_name(self, name): """Returns the `Operation` with the given `name`. This method may be called concurrently from multiple threads. Args: name: The name of the `Operation` to return. Returns: The `Operation` with the given `name`. Raises: TypeError: If `name` is not a string. KeyError: If `name` does not correspond to an operation in this graph. """ if not isinstance(name, six.string_types): raise TypeError("Operation names are strings (or similar), not %s." % type(name).__name__) return self.as_graph_element(name, allow_tensor=False, allow_operation=True) def _get_operation_by_name_unsafe(self, name): """Returns the `Operation` with the given `name`. This is a internal unsafe version of get_operation_by_name. It skips many checks and does not have user friedly error messages but runs considerably faster. This method may be called concurrently from multiple threads. Args: name: The name of the `Operation` to return. Returns: The `Operation` with the given `name`. Raises: KeyError: If `name` does not correspond to an operation in this graph. """ if self._finalized: return self._nodes_by_name[name] with self._lock: return self._nodes_by_name[name] def _get_operation_by_tf_operation(self, tf_oper): op_name = c_api.TF_OperationName(tf_oper) return self._get_operation_by_name_unsafe(op_name) def get_tensor_by_name(self, name): """Returns the `Tensor` with the given `name`. 
This method may be called concurrently from multiple threads. Args: name: The name of the `Tensor` to return. Returns: The `Tensor` with the given `name`. Raises: TypeError: If `name` is not a string. KeyError: If `name` does not correspond to a tensor in this graph. """ # Names should be strings. if not isinstance(name, six.string_types): raise TypeError("Tensor names are strings (or similar), not %s." % type(name).__name__) return self.as_graph_element(name, allow_tensor=True, allow_operation=False) def _get_tensor_by_tf_output(self, tf_output): """Returns the `Tensor` representing `tf_output`. Note that there is only one such `Tensor`, i.e. multiple calls to this function with the same TF_Output value will always return the same `Tensor` object. Args: tf_output: A wrapped `TF_Output` (the C API equivalent of `Tensor`). Returns: The `Tensor` that represents `tf_output`. """ op = self._get_operation_by_tf_operation(tf_output.oper) return op.outputs[tf_output.index] def _next_id(self): """Id for next Operation instance. Also increments the internal id.""" self._check_not_finalized() with self._lock: self._next_id_counter += 1 return self._next_id_counter @property def _last_id(self): return self._next_id_counter def _get_op_def(self, type): # pylint: disable=redefined-builtin """Returns the `OpDef` proto for `type`. `type` is a string.""" if self._c_graph: with c_api_util.tf_buffer() as buf: # pylint: disable=protected-access c_api.TF_GraphGetOpDef(self._c_graph, compat.as_bytes(type), buf) # pylint: enable=protected-access data = c_api.TF_GetBuffer(buf) op_def = op_def_pb2.OpDef() op_def.ParseFromString(compat.as_bytes(data)) return op_def else: return self._registered_ops[type] def as_default(self): """Returns a context manager that makes this `Graph` the default graph. This method should be used if you want to create multiple graphs in the same process. 
For convenience, a global default graph is provided, and all ops will be added to this graph if you do not create a new graph explicitly. Use this method with the `with` keyword to specify that ops created within the scope of a block should be added to this graph. The default graph is a property of the current thread. If you create a new thread, and wish to use the default graph in that thread, you must explicitly add a `with g.as_default():` in that thread's function. The following code examples are equivalent: ```python # 1. Using Graph.as_default(): g = tf.Graph() with g.as_default(): c = tf.constant(5.0
codeparrot/github-code-clean
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers import pybindgen.settings import warnings class ErrorHandler(pybindgen.settings.ErrorHandler): def handle_error(self, wrapper, exception, traceback_): warnings.warn("exception %r in wrapper %s" % (exception, wrapper)) return True pybindgen.settings.error_handler = ErrorHandler() import sys def module_init(): root_module = Module('ns.tools', cpp_namespace='::ns3') return root_module def register_types(module): root_module = module.get_root() ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class] module.add_class('AttributeConstructionList', import_from_module='ns.core') ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct] module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList']) ## buffer.h (module 'network'): ns3::Buffer [class] module.add_class('Buffer', import_from_module='ns.network') ## buffer.h (module 'network'): ns3::Buffer::Iterator [class] module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer']) ## packet.h (module 'network'): ns3::ByteTagIterator [class] module.add_class('ByteTagIterator', import_from_module='ns.network') ## packet.h (module 'network'): ns3::ByteTagIterator::Item [class] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator']) ## byte-tag-list.h (module 'network'): ns3::ByteTagList [class] module.add_class('ByteTagList', import_from_module='ns.network') ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class] module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList']) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator']) ## callback.h 
(module 'core'): ns3::CallbackBase [class] module.add_class('CallbackBase', import_from_module='ns.core') ## data-output-interface.h (module 'stats'): ns3::DataOutputCallback [class] module.add_class('DataOutputCallback', allow_subclassing=True, import_from_module='ns.stats') ## delay-jitter-estimation.h (module 'tools'): ns3::DelayJitterEstimation [class] module.add_class('DelayJitterEstimation') ## event-garbage-collector.h (module 'tools'): ns3::EventGarbageCollector [class] module.add_class('EventGarbageCollector') ## event-id.h (module 'core'): ns3::EventId [class] module.add_class('EventId', import_from_module='ns.core') ## gnuplot.h (module 'tools'): ns3::Gnuplot [class] module.add_class('Gnuplot') ## gnuplot.h (module 'tools'): ns3::GnuplotCollection [class] module.add_class('GnuplotCollection') ## gnuplot.h (module 'tools'): ns3::GnuplotDataset [class] module.add_class('GnuplotDataset') ## object-base.h (module 'core'): ns3::ObjectBase [class] module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core') ## object.h (module 'core'): ns3::ObjectDeleter [struct] module.add_class('ObjectDeleter', import_from_module='ns.core') ## object-factory.h (module 'core'): ns3::ObjectFactory [class] module.add_class('ObjectFactory', import_from_module='ns.core') ## packet-metadata.h (module 'network'): ns3::PacketMetadata [class] module.add_class('PacketMetadata', import_from_module='ns.network') ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata']) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration] module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network') ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class] module.add_class('ItemIterator', import_from_module='ns.network', 
outer_class=root_module['ns3::PacketMetadata']) ## packet.h (module 'network'): ns3::PacketTagIterator [class] module.add_class('PacketTagIterator', import_from_module='ns.network') ## packet.h (module 'network'): ns3::PacketTagIterator::Item [class] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator']) ## packet-tag-list.h (module 'network'): ns3::PacketTagList [class] module.add_class('PacketTagList', import_from_module='ns.network') ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct] module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList']) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simulator.h (module 'core'): ns3::Simulator [class] module.add_class('Simulator', destructor_visibility='private', import_from_module='ns.core') ## data-calculator.h (module 'stats'): ns3::StatisticalSummary [class] module.add_class('StatisticalSummary', allow_subclassing=True, import_from_module='ns.stats') ## tag.h (module 'network'): ns3::Tag [class] module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase']) ## tag-buffer.h (module 'network'): ns3::TagBuffer [class] module.add_class('TagBuffer', import_from_module='ns.network') ## type-id.h (module 'core'): ns3::TypeId [class] module.add_class('TypeId', import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration] module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], 
outer_class=root_module['ns3::TypeId'], import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct] module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct] module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## empty.h (module 'core'): ns3::empty [class] module.add_class('empty', import_from_module='ns.core') ## int64x64-double.h (module 'core'): ns3::int64x64_t [class] module.add_class('int64x64_t', import_from_module='ns.core') ## chunk.h (module 'network'): ns3::Chunk [class] module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase']) ## gnuplot.h (module 'tools'): ns3::Gnuplot2dDataset [class] module.add_class('Gnuplot2dDataset', parent=root_module['ns3::GnuplotDataset']) ## gnuplot.h (module 'tools'): ns3::Gnuplot2dDataset::Style [enumeration] module.add_enum('Style', ['LINES', 'POINTS', 'LINES_POINTS', 'DOTS', 'IMPULSES', 'STEPS', 'FSTEPS', 'HISTEPS'], outer_class=root_module['ns3::Gnuplot2dDataset']) ## gnuplot.h (module 'tools'): ns3::Gnuplot2dDataset::ErrorBars [enumeration] module.add_enum('ErrorBars', ['NONE', 'X', 'Y', 'XY'], outer_class=root_module['ns3::Gnuplot2dDataset']) ## gnuplot.h (module 'tools'): ns3::Gnuplot2dFunction [class] module.add_class('Gnuplot2dFunction', parent=root_module['ns3::GnuplotDataset']) ## gnuplot.h (module 'tools'): ns3::Gnuplot3dDataset [class] module.add_class('Gnuplot3dDataset', parent=root_module['ns3::GnuplotDataset']) ## gnuplot.h (module 'tools'): ns3::Gnuplot3dFunction [class] module.add_class('Gnuplot3dFunction', parent=root_module['ns3::GnuplotDataset']) ## header.h (module 'network'): ns3::Header [class] module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk']) ## object.h (module 'core'): ns3::Object [class] 
module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >']) ## object.h (module 'core'): ns3::Object::AggregateIterator [class] module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object']) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, 
ns3::DefaultDeleter<ns3::CallbackImplBase> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, 
ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## nstime.h (module 'core'): ns3::Time [class] module.add_class('Time', import_from_module='ns.core') ## nstime.h (module 'core'): ns3::Time::Unit [enumeration] module.add_enum('Unit', ['S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core') ## nstime.h (module 'core'): ns3::Time [class] root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t']) ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class] module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >']) ## trailer.h (module 'network'): ns3::Trailer [class] module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk']) ## attribute.h (module 'core'): ns3::AttributeAccessor [class] module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >']) ## attribute.h (module 'core'): ns3::AttributeChecker [class] module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >']) ## attribute.h (module 'core'): ns3::AttributeValue [class] module.add_class('AttributeValue', allow_subclassing=False, 
automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >']) ## callback.h (module 'core'): ns3::CallbackChecker [class] module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## callback.h (module 'core'): ns3::CallbackImplBase [class] module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >']) ## callback.h (module 'core'): ns3::CallbackValue [class] module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## data-calculator.h (module 'stats'): ns3::DataCalculator [class] module.add_class('DataCalculator', import_from_module='ns.stats', parent=root_module['ns3::Object']) ## data-output-interface.h (module 'stats'): ns3::DataOutputInterface [class] module.add_class('DataOutputInterface', import_from_module='ns.stats', parent=root_module['ns3::Object']) ## attribute.h (module 'core'): ns3::EmptyAttributeValue [class] module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## event-impl.h (module 'core'): ns3::EventImpl [class] module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >']) ## basic-data-calculators.h (module 'stats'): ns3::MinMaxAvgTotalCalculator<double> [class] module.add_class('MinMaxAvgTotalCalculator', import_from_module='ns.stats', template_parameters=['double'], parent=[root_module['ns3::DataCalculator'], root_module['ns3::StatisticalSummary']]) ## nix-vector.h (module 'network'): ns3::NixVector [class] module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, 
ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >']) ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class] module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class] module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## packet.h (module 'network'): ns3::Packet [class] module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >']) ## nstime.h (module 'core'): ns3::TimeChecker [class] module.add_class('TimeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## nstime.h (module 'core'): ns3::TimeValue [class] module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## type-id.h (module 'core'): ns3::TypeIdChecker [class] module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## type-id.h (module 'core'): ns3::TypeIdValue [class] module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## Register a nested module for the namespace FatalImpl nested_module = module.add_cpp_namespace('FatalImpl') register_types_ns3_FatalImpl(nested_module) def register_types_ns3_FatalImpl(module): root_module = module.get_root() def register_methods(root_module): register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList']) register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item']) register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer']) register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator']) register_Ns3ByteTagIterator_methods(root_module, 
    root_module['ns3::ByteTagIterator'])
    register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
    register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
    register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
    register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
    register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
    register_Ns3DataOutputCallback_methods(root_module, root_module['ns3::DataOutputCallback'])
    register_Ns3DelayJitterEstimation_methods(root_module, root_module['ns3::DelayJitterEstimation'])
    register_Ns3EventGarbageCollector_methods(root_module, root_module['ns3::EventGarbageCollector'])
    register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
    register_Ns3Gnuplot_methods(root_module, root_module['ns3::Gnuplot'])
    register_Ns3GnuplotCollection_methods(root_module, root_module['ns3::GnuplotCollection'])
    register_Ns3GnuplotDataset_methods(root_module, root_module['ns3::GnuplotDataset'])
    register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
    register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
    register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
    register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
    register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
    register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
    register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
    register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
    register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
    register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
    register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
    register_Ns3StatisticalSummary_methods(root_module, root_module['ns3::StatisticalSummary'])
    register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
    register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
    register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
    register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
    register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
    register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
    register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
    register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
    register_Ns3Gnuplot2dDataset_methods(root_module, root_module['ns3::Gnuplot2dDataset'])
    register_Ns3Gnuplot2dFunction_methods(root_module, root_module['ns3::Gnuplot2dFunction'])
    register_Ns3Gnuplot3dDataset_methods(root_module, root_module['ns3::Gnuplot3dDataset'])
    register_Ns3Gnuplot3dFunction_methods(root_module, root_module['ns3::Gnuplot3dFunction'])
    register_Ns3Header_methods(root_module, root_module['ns3::Header'])
    register_Ns3Object_methods(root_module, root_module['ns3::Object'])
    register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
    register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
    register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
    register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    register_Ns3Time_methods(root_module, root_module['ns3::Time'])
    register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
    register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
    register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
    register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
    register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
    register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
    register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
    register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
    register_Ns3DataCalculator_methods(root_module, root_module['ns3::DataCalculator'])
    register_Ns3DataOutputInterface_methods(root_module, root_module['ns3::DataOutputInterface'])
    register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
    register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
    register_Ns3MinMaxAvgTotalCalculator__Double_methods(root_module, root_module['ns3::MinMaxAvgTotalCalculator< double >'])
    register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
    register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
    register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
    register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
    register_Ns3TimeChecker_methods(root_module, root_module['ns3::TimeChecker'])
    register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
    register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
    register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
    return

def register_Ns3AttributeConstructionList_methods(root_module, cls):
    """Register Python bindings for the constructors and methods of ns3::AttributeConstructionList.

    Auto-generated by pybindgen from attribute-construction-list.h (module 'core');
    do not edit by hand — regenerate the bindings instead.
    """
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
    cls.add_method('Add', 'void', [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
    ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function]
    cls.add_method('Begin', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True)
    ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function]
    cls.add_method('End', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True)
    ## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('Find', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True)
    return

def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
    """Register Python bindings for ns3::AttributeConstructionList::Item (constructors and public data members).

    Auto-generated by pybindgen; do not edit by hand.
    """
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
    cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
    return

def register_Ns3Buffer_methods(root_module, cls):
    """Register Python bindings for the constructors and methods of ns3::Buffer.

    Auto-generated by pybindgen from buffer.h (module 'network'); do not edit by hand.
    """
    ## buffer.h (module 'network'): ns3::Buffer::Buffer() [constructor]
    cls.add_constructor([])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize) [constructor]
    cls.add_constructor([param('uint32_t', 'dataSize')])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor]
    cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(ns3::Buffer const & o) [copy constructor]
    cls.add_constructor([param('ns3::Buffer const &', 'o')])
    ## buffer.h (module 'network'): bool ns3::Buffer::AddAtEnd(uint32_t end) [member function]
    cls.add_method('AddAtEnd', 'bool', [param('uint32_t', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function]
    cls.add_method('AddAtEnd', 'void', [param('ns3::Buffer const &', 'o')])
    ## buffer.h (module 'network'): bool ns3::Buffer::AddAtStart(uint32_t start) [member function]
    cls.add_method('AddAtStart', 'bool', [param('uint32_t', 'start')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function]
    cls.add_method('Begin', 'ns3::Buffer::Iterator', [], is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function]
    cls.add_method('CopyData', 'void', [param('std::ostream *', 'os'), param('uint32_t', 'size')], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function]
    cls.add_method('CopyData', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')], is_const=True)
    ## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function]
    cls.add_method('CreateFragment', 'ns3::Buffer', [param('uint32_t', 'start'), param('uint32_t', 'length')], is_const=True)
    ## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFullCopy() const [member function]
    cls.add_method('CreateFullCopy', 'ns3::Buffer', [], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::End() const [member function]
    cls.add_method('End', 'ns3::Buffer::Iterator', [], is_const=True)
    ## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentEndOffset() const [member function]
    cls.add_method('GetCurrentEndOffset', 'int32_t', [], is_const=True)
    ## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentStartOffset() const [member function]
    cls.add_method('GetCurrentStartOffset', 'int32_t', [], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSize() const [member function]
    cls.add_method('GetSize', 'uint32_t', [], is_const=True)
    ## buffer.h (module 'network'): uint8_t const * ns3::Buffer::PeekData() const [member function]
    cls.add_method('PeekData', 'uint8_t const *', [], is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function]
    cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtStart(uint32_t start) [member function]
    cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'start')])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize', 'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True)
    return

def register_Ns3BufferIterator_methods(root_module, cls):
    """Register Python bindings for the constructors and methods of ns3::Buffer::Iterator.

    Auto-generated by pybindgen from buffer.h (module 'network'); do not edit by hand.
    """
    ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator(ns3::Buffer::Iterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator() [constructor]
    cls.add_constructor([])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size) [member function]
    cls.add_method('CalculateIpChecksum', 'uint16_t', [param('uint16_t', 'size')])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size, uint32_t initialChecksum) [member function]
    cls.add_method('CalculateIpChecksum', 'uint16_t', [param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetDistanceFrom(ns3::Buffer::Iterator const & o) const [member function]
    cls.add_method('GetDistanceFrom', 'uint32_t', [param('ns3::Buffer::Iterator const &', 'o')], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetSize() const [member function]
    cls.add_method('GetSize', 'uint32_t', [], is_const=True)
    ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsEnd() const [member function]
    cls.add_method('IsEnd', 'bool', [], is_const=True)
    ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsStart() const [member function]
    cls.add_method('IsStart', 'bool', [], is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next() [member function]
    cls.add_method('Next', 'void', [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next(uint32_t delta) [member function]
    cls.add_method('Next', 'void', [param('uint32_t', 'delta')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev() [member function]
    cls.add_method('Prev', 'void', [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev(uint32_t delta) [member function]
    cls.add_method('Prev', 'void', [param('uint32_t', 'delta')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(uint8_t * buffer, uint32_t size) [member function]
    cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadLsbtohU16() [member function]
    cls.add_method('ReadLsbtohU16', 'uint16_t', [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadLsbtohU32() [member function]
    cls.add_method('ReadLsbtohU32', 'uint32_t', [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadLsbtohU64() [member function]
    cls.add_method('ReadLsbtohU64', 'uint64_t', [])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadNtohU16() [member function]
    cls.add_method('ReadNtohU16', 'uint16_t', [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadNtohU32() [member function]
    cls.add_method('ReadNtohU32', 'uint32_t', [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadNtohU64() [member function]
    cls.add_method('ReadNtohU64', 'uint64_t', [])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadU16() [member function]
    cls.add_method('ReadU16', 'uint16_t', [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadU32() [member function]
    cls.add_method('ReadU32', 'uint32_t', [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadU64() [member function]
    cls.add_method('ReadU64', 'uint64_t', [])
    ## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::ReadU8() [member function]
    cls.add_method('ReadU8', 'uint8_t', [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
    cls.add_method('Write', 'void', [param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU16(uint16_t data) [member function]
    cls.add_method('WriteHtolsbU16', 'void', [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU32(uint32_t data) [member function]
    cls.add_method('WriteHtolsbU32', 'void', [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU64(uint64_t data) [member function]
    cls.add_method('WriteHtolsbU64', 'void', [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU16(uint16_t data) [member function]
    cls.add_method('WriteHtonU16', 'void', [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU32(uint32_t data) [member function]
    cls.add_method('WriteHtonU32', 'void', [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU64(uint64_t data) [member function]
    cls.add_method('WriteHtonU64', 'void', [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU16(uint16_t data) [member function]
    cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU32(uint32_t data) [member function]
    cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU64(uint64_t data) [member function]
    cls.add_method('WriteU64', 'void', [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data) [member function]
    cls.add_method('WriteU8', 'void', [param('uint8_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data, uint32_t len) [member function]
    cls.add_method('WriteU8', 'void', [param('uint8_t', 'data'), param('uint32_t', 'len')])
    return

def register_Ns3ByteTagIterator_methods(root_module, cls):
    """Register Python bindings for the constructors and methods of ns3::ByteTagIterator.

    Auto-generated by pybindgen from packet.h (module 'network'); do not edit by hand.
    """
    ## packet.h (module 'network'): ns3::ByteTagIterator::ByteTagIterator(ns3::ByteTagIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')])
    ## packet.h (module 'network'): bool ns3::ByteTagIterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## packet.h (module 'network'): ns3::ByteTagIterator::Item ns3::ByteTagIterator::Next() [member function]
    cls.add_method('Next', 'ns3::ByteTagIterator::Item', [])
    return

def register_Ns3ByteTagIteratorItem_methods(root_module, cls):
    """Register Python bindings for ns3::ByteTagIterator::Item.

    Auto-generated by pybindgen from packet.h (module 'network'); do not edit by hand.
    """
    ## packet.h (module 'network'): ns3::ByteTagIterator::Item::Item(ns3::ByteTagIterator::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')])
    ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetEnd() const [member function]
    cls.add_method('GetEnd', 'uint32_t', [], is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetStart() const [member function]
    cls.add_method('GetStart', 'uint32_t', [], is_const=True)
    ## packet.h (module 'network'): void ns3::ByteTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
    cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True)
    ## packet.h (module 'network'): ns3::TypeId ns3::ByteTagIterator::Item::GetTypeId() const [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    return

def register_Ns3ByteTagList_methods(root_module, cls):
    """Register Python bindings for the constructors and methods of ns3::ByteTagList.

    Auto-generated by pybindgen from byte-tag-list.h (module 'network'); do not edit by hand.
    """
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList() [constructor]
    cls.add_constructor([])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList(ns3::ByteTagList const & o) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagList const &', 'o')])
    ## byte-tag-list.h (module 'network'): ns3::TagBuffer ns3::ByteTagList::Add(ns3::TypeId tid, uint32_t bufferSize, int32_t start, int32_t end) [member function]
    cls.add_method('Add', 'ns3::TagBuffer', [param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Add(ns3::ByteTagList const & o) [member function]
    cls.add_method('Add', 'void', [param('ns3::ByteTagList const &', 'o')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtEnd(int32_t adjustment, int32_t appendOffset) [member function]
    cls.add_method('AddAtEnd', 'void', [param('int32_t', 'adjustment'), param('int32_t', 'appendOffset')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtStart(int32_t adjustment, int32_t prependOffset) [member function]
    cls.add_method('AddAtStart', 'void', [param('int32_t', 'adjustment'), param('int32_t', 'prependOffset')])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator ns3::ByteTagList::Begin(int32_t offsetStart, int32_t offsetEnd) const [member function]
    cls.add_method('Begin', 'ns3::ByteTagList::Iterator', [param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')], is_const=True)
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::RemoveAll() [member function]
    cls.add_method('RemoveAll', 'void', [])
    return

def register_Ns3ByteTagListIterator_methods(root_module, cls):
    """Register Python bindings for ns3::ByteTagList::Iterator.

    Auto-generated by pybindgen from byte-tag-list.h (module 'network'); do not edit by hand.
    """
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Iterator(ns3::ByteTagList::Iterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')])
    ## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Iterator::GetOffsetStart() const [member function]
    cls.add_method('GetOffsetStart', 'uint32_t', [], is_const=True)
    ## byte-tag-list.h (module 'network'): bool ns3::ByteTagList::Iterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item ns3::ByteTagList::Iterator::Next() [member function]
    cls.add_method('Next', 'ns3::ByteTagList::Iterator::Item', [])
    return

def register_Ns3ByteTagListIteratorItem_methods(root_module, cls):
    """Register Python bindings for ns3::ByteTagList::Iterator::Item (constructors and public data members).

    Auto-generated by pybindgen from byte-tag-list.h (module 'network'); do not edit by hand.
    """
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::ByteTagList::Iterator::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::TagBuffer buf) [constructor]
    cls.add_constructor([param('ns3::TagBuffer', 'buf')])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::buf [variable]
    cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::end [variable]
    cls.add_instance_attribute('end', 'int32_t', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::size [variable]
    cls.add_instance_attribute('size', 'uint32_t', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::start [variable]
    cls.add_instance_attribute('start', 'int32_t', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::tid [variable]
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return

def register_Ns3CallbackBase_methods(root_module, cls):
    """Register Python bindings for the constructors and methods of ns3::CallbackBase.

    Auto-generated by pybindgen from callback.h (module 'core'); do not edit by hand.
    """
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
    cls.add_method('GetImpl', 'ns3::Ptr< ns3::CallbackImplBase >', [], is_const=True)
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')], visibility='protected')
    ## callback.h (module 'core'): static std::string ns3::CallbackBase::Demangle(std::string const & mangled) [member function]
    cls.add_method('Demangle', 'std::string', [param('std::string const &', 'mangled')], is_static=True, visibility='protected')
    return

def register_Ns3DataOutputCallback_methods(root_module, cls):
    """Register Python bindings for the constructors and methods of ns3::DataOutputCallback.

    Auto-generated by pybindgen from data-output-interface.h (module 'stats'); do not edit by hand.
    """
    ## data-output-interface.h (module 'stats'): ns3::DataOutputCallback::DataOutputCallback() [constructor]
    cls.add_constructor([])
    ## data-output-interface.h (module 'stats'): ns3::DataOutputCallback::DataOutputCallback(ns3::DataOutputCallback const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::DataOutputCallback const &', 'arg0')])
    ## data-output-interface.h (module 'stats'): void ns3::DataOutputCallback::OutputSingleton(std::string key, std::string variable, int val) [member function]
    cls.add_method('OutputSingleton', 'void', [param('std::string', 'key'), param('std::string', 'variable'), param('int', 'val')], is_pure_virtual=True, is_virtual=True)
    ## data-output-interface.h (module 'stats'): void ns3::DataOutputCallback::OutputSingleton(std::string key, std::string variable, uint32_t val) [member function]
    cls.add_method('OutputSingleton', 'void', [param('std::string', 'key'), param('std::string', 'variable'), param('uint32_t', 'val')], is_pure_virtual=True, is_virtual=True)
    ## data-output-interface.h (module 'stats'): void ns3::DataOutputCallback::OutputSingleton(std::string key, std::string variable, double val) [member function]
    cls.add_method('OutputSingleton', 'void', [param('std::string', 'key'), param('std::string', 'variable'), param('double', 'val')], is_pure_virtual=True, is_virtual=True)
    ## data-output-interface.h (module 'stats'): void ns3::DataOutputCallback::OutputSingleton(std::string key, std::string variable, std::string val) [member function]
    cls.add_method('OutputSingleton', 'void', [param('std::string', 'key'), param('std::string', 'variable'), param('std::string', 'val')], is_pure_virtual=True, is_virtual=True)
    ## data-output-interface.h (module 'stats'): void ns3::DataOutputCallback::OutputSingleton(std::string key, std::string variable, ns3::Time val) [member function]
    cls.add_method('OutputSingleton', 'void', [param('std::string', 'key'), param('std::string', 'variable'), param('ns3::Time', 'val')], is_pure_virtual=True, is_virtual=True)
    ## data-output-interface.h (module 'stats'): void ns3::DataOutputCallback::OutputStatistic(std::string key, std::string variable, ns3::StatisticalSummary const * statSum) [member function]
    cls.add_method('OutputStatistic', 'void', [param('std::string', 'key'), param('std::string', 'variable'), param('ns3::StatisticalSummary const *', 'statSum')], is_pure_virtual=True, is_virtual=True)
    return

def register_Ns3DelayJitterEstimation_methods(root_module, cls):
    """Register Python bindings for the constructors and methods of ns3::DelayJitterEstimation.

    Auto-generated by pybindgen from delay-jitter-estimation.h (module 'tools'); do not edit by hand.
    """
    ## delay-jitter-estimation.h (module 'tools'): ns3::DelayJitterEstimation::DelayJitterEstimation(ns3::DelayJitterEstimation const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::DelayJitterEstimation const &', 'arg0')])
    ## delay-jitter-estimation.h (module 'tools'): ns3::DelayJitterEstimation::DelayJitterEstimation() [constructor]
    cls.add_constructor([])
    ## delay-jitter-estimation.h (module 'tools'): ns3::Time ns3::DelayJitterEstimation::GetLastDelay() const [member function]
    cls.add_method('GetLastDelay', 'ns3::Time', [], is_const=True)
    ## delay-jitter-estimation.h (module 'tools'): uint64_t ns3::DelayJitterEstimation::GetLastJitter() const [member function]
    cls.add_method('GetLastJitter', 'uint64_t', [], is_const=True)
    ## delay-jitter-estimation.h (module 'tools'): static void ns3::DelayJitterEstimation::PrepareTx(ns3::Ptr<const ns3::Packet> packet) [member function]
    cls.add_method('PrepareTx', 'void', [param('ns3::Ptr< ns3::Packet const >', 'packet')], is_static=True)
    ## delay-jitter-estimation.h (module 'tools'): void ns3::DelayJitterEstimation::RecordRx(ns3::Ptr<const ns3::Packet> packet) [member function]
    cls.add_method('RecordRx', 'void', [param('ns3::Ptr< ns3::Packet const >', 'packet')])
    return

def register_Ns3EventGarbageCollector_methods(root_module, cls):
    """Register Python bindings for the constructors and methods of ns3::EventGarbageCollector.

    Auto-generated by pybindgen from event-garbage-collector.h (module 'tools'); do not edit by hand.
    """
    ## event-garbage-collector.h (module 'tools'): ns3::EventGarbageCollector::EventGarbageCollector(ns3::EventGarbageCollector const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EventGarbageCollector const &', 'arg0')])
    ## event-garbage-collector.h (module 'tools'): ns3::EventGarbageCollector::EventGarbageCollector() [constructor]
    cls.add_constructor([])
    ## event-garbage-collector.h (module 'tools'): void ns3::EventGarbageCollector::Track(ns3::EventId event) [member function]
    cls.add_method('Track', 'void', [param('ns3::EventId', 'event')])
    return

def register_Ns3EventId_methods(root_module, cls):
    """Register Python bindings for the operators, constructors and methods of ns3::EventId.

    Auto-generated by pybindgen from event-id.h (module 'core'); do not edit by hand.
    """
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('==')
    ## event-id.h (module 'core'): ns3::EventId::EventId(ns3::EventId const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EventId const &', 'arg0')])
    ## event-id.h (module 'core'): ns3::EventId::EventId() [constructor]
    cls.add_constructor([])
    ## event-id.h (module 'core'): ns3::EventId::EventId(ns3::Ptr<ns3::EventImpl> const & impl, uint64_t ts, uint32_t context, uint32_t uid) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'), param('uint64_t', 'ts'), param('uint32_t', 'context'), param('uint32_t', 'uid')])
    ## event-id.h (module 'core'): void ns3::EventId::Cancel() [member function]
    cls.add_method('Cancel', 'void', [])
    ## event-id.h (module 'core'): uint32_t ns3::EventId::GetContext() const [member function]
    cls.add_method('GetContext', 'uint32_t', [], is_const=True)
    ## event-id.h (module 'core'): uint64_t ns3::EventId::GetTs() const [member function]
    cls.add_method('GetTs', 'uint64_t', [], is_const=True)
    ## event-id.h (module 'core'): uint32_t ns3::EventId::GetUid() const [member function]
    cls.add_method('GetUid', 'uint32_t', [], is_const=True)
    ## event-id.h (module 'core'): bool ns3::EventId::IsExpired() const [member function]
    cls.add_method('IsExpired', 'bool', [], is_const=True)
    ## event-id.h (module 'core'): bool ns3::EventId::IsRunning() const [member function]
    cls.add_method('IsRunning', 'bool', [], is_const=True)
    ## event-id.h (module 'core'): ns3::EventImpl * ns3::EventId::PeekEventImpl() const [member function]
    cls.add_method('PeekEventImpl', 'ns3::EventImpl *', [], is_const=True)
    return

def register_Ns3Gnuplot_methods(root_module, cls):
    """Register Python bindings for the constructors and methods of ns3::Gnuplot.

    Auto-generated by pybindgen from gnuplot.h (module 'tools'); do not edit by hand.
    """
    ## gnuplot.h (module 'tools'): ns3::Gnuplot::Gnuplot(ns3::Gnuplot const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Gnuplot const &', 'arg0')])
    ## gnuplot.h (module 'tools'): ns3::Gnuplot::Gnuplot(std::string const & outputFilename="", std::string const & title="") [constructor]
    cls.add_constructor([param('std::string const &', 'outputFilename', default_value='""'), param('std::string const &', 'title', default_value='""')])
    ## gnuplot.h (module 'tools'): void ns3::Gnuplot::AddDataset(ns3::GnuplotDataset const & dataset) [member function]
    cls.add_method('AddDataset', 'void', [param('ns3::GnuplotDataset const &', 'dataset')])
    ## gnuplot.h (module 'tools'): void ns3::Gnuplot::AppendExtra(std::string const & extra) [member function]
    cls.add_method('AppendExtra', 'void', [param('std::string const &', 'extra')])
    ## gnuplot.h (module 'tools'): static std::string ns3::Gnuplot::DetectTerminal(std::string const & filename) [member function]
    cls.add_method('DetectTerminal', 'std::string', [param('std::string const &', 'filename')], is_static=True)
    ## gnuplot.h (module 'tools'): void ns3::Gnuplot::GenerateOutput(std::ostream & os) const [member function]
    cls.add_method('GenerateOutput', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## gnuplot.h (module 'tools'): void ns3::Gnuplot::SetExtra(std::string const & extra) [member function]
    cls.add_method('SetExtra', 'void', [param('std::string const &', 'extra')])
    ## gnuplot.h (module 'tools'): void ns3::Gnuplot::SetLegend(std::string const & xLegend, std::string const & yLegend) [member function]
    cls.add_method('SetLegend', 'void', [param('std::string const &', 'xLegend'), param('std::string const &', 'yLegend')])
    ## gnuplot.h (module 'tools'): void ns3::Gnuplot::SetTerminal(std::string const & terminal) [member function]
    cls.add_method('SetTerminal', 'void', [param('std::string const &', 'terminal')])
    ## gnuplot.h (module 'tools'): void ns3::Gnuplot::SetTitle(std::string const & title) [member function]
    cls.add_method('SetTitle', 'void', [param('std::string const &', 'title')])
    return

def register_Ns3GnuplotCollection_methods(root_module, cls):
    """Register Python bindings for the constructors and methods of ns3::GnuplotCollection.

    Auto-generated by pybindgen from gnuplot.h (module 'tools'); do not edit by hand.
    """
    ## gnuplot.h (module 'tools'): ns3::GnuplotCollection::GnuplotCollection(ns3::GnuplotCollection const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::GnuplotCollection const &', 'arg0')])
    ## gnuplot.h (module 'tools'): ns3::GnuplotCollection::GnuplotCollection(std::string const & outputFilename) [constructor]
    cls.add_constructor([param('std::string const &', 'outputFilename')])
    ## gnuplot.h (module 'tools'): void ns3::GnuplotCollection::AddPlot(ns3::Gnuplot const & plot) [member function]
    cls.add_method('AddPlot', 'void', [param('ns3::Gnuplot const &', 'plot')])
    ## gnuplot.h (module 'tools'): void ns3::GnuplotCollection::GenerateOutput(std::ostream & os) const [member function]
    cls.add_method('GenerateOutput', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## gnuplot.h (module 'tools'): ns3::Gnuplot & ns3::GnuplotCollection::GetPlot(unsigned int id) [member function]
    cls.add_method('GetPlot', 'ns3::Gnuplot &', [param('unsigned int', 'id')])
    ## gnuplot.h (module 'tools'): void ns3::GnuplotCollection::SetTerminal(std::string const & terminal) [member function]
    cls.add_method('SetTerminal',
'void', [param('std::string const &', 'terminal')]) return def register_Ns3GnuplotDataset_methods(root_module, cls): ## gnuplot.h (module 'tools'): ns3::GnuplotDataset::GnuplotDataset(ns3::GnuplotDataset const & original) [copy constructor] cls.add_constructor([param('ns3::GnuplotDataset const &', 'original')]) ## gnuplot.h (module 'tools'): static void ns3::GnuplotDataset::SetDefaultExtra(std::string const & extra) [member function] cls.add_method('SetDefaultExtra', 'void', [param('std::string const &', 'extra')], is_static=True) ## gnuplot.h (module 'tools'): void ns3::GnuplotDataset::SetExtra(std::string const & extra) [member function] cls.add_method('SetExtra', 'void', [param('std::string const &', 'extra')]) ## gnuplot.h (module 'tools'): void ns3::GnuplotDataset::SetTitle(std::string const & title) [member function] cls.add_method('SetTitle', 'void', [param('std::string const &', 'title')]) ## gnuplot.h (module 'tools'): ns3::GnuplotDataset::GnuplotDataset(ns3::GnuplotDataset::Data * data) [constructor] cls.add_constructor([param('ns3::GnuplotDataset::Data *', 'data')], visibility='protected') return def register_Ns3ObjectBase_methods(root_module, cls): ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor] cls.add_constructor([]) ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')]) ## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function] cls.add_method('GetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')], is_const=True) ## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & attribute) const [member function] cls.add_method('GetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue &', 'attribute')], 
is_const=True) ## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function] cls.add_method('SetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function] cls.add_method('SetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceConnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceConnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceDisconnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function] 
cls.add_method('TraceDisconnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function] cls.add_method('ConstructSelf', 'void', [param('ns3::AttributeConstructionList const &', 'attributes')], visibility='protected') ## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function] cls.add_method('NotifyConstructionCompleted', 'void', [], visibility='protected', is_virtual=True) return def register_Ns3ObjectDeleter_methods(root_module, cls): ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor] cls.add_constructor([]) ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')]) ## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function] cls.add_method('Delete', 'void', [param('ns3::Object *', 'object')], is_static=True) return def register_Ns3ObjectFactory_methods(root_module, cls): cls.add_output_stream_operator() ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(ns3::ObjectFactory const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')]) ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory() [constructor] cls.add_constructor([]) ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(std::string typeId) [constructor] cls.add_constructor([param('std::string', 'typeId')]) ## object-factory.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectFactory::Create() const [member function] cls.add_method('Create', 'ns3::Ptr< ns3::Object >', [], is_const=True) ## object-factory.h (module 'core'): ns3::TypeId ns3::ObjectFactory::GetTypeId() const [member function] 
cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True) ## object-factory.h (module 'core'): void ns3::ObjectFactory::Set(std::string name, ns3::AttributeValue const & value) [member function] cls.add_method('Set', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')]) ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(ns3::TypeId tid) [member function] cls.add_method('SetTypeId', 'void', [param('ns3::TypeId', 'tid')]) ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(char const * tid) [member function] cls.add_method('SetTypeId', 'void', [param('char const *', 'tid')]) ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(std::string tid) [member function] cls.add_method('SetTypeId', 'void', [param('std::string', 'tid')]) return def register_Ns3PacketMetadata_methods(root_module, cls): ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(uint64_t uid, uint32_t size) [constructor] cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(ns3::PacketMetadata const & o) [copy constructor] cls.add_constructor([param('ns3::PacketMetadata const &', 'o')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddAtEnd(ns3::PacketMetadata const & o) [member function] cls.add_method('AddAtEnd', 'void', [param('ns3::PacketMetadata const &', 'o')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddHeader(ns3::Header const & header, uint32_t size) [member function] cls.add_method('AddHeader', 'void', [param('ns3::Header const &', 'header'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddPaddingAtEnd(uint32_t end) [member function] cls.add_method('AddPaddingAtEnd', 'void', [param('uint32_t', 'end')]) ## packet-metadata.h (module 'network'): void 
ns3::PacketMetadata::AddTrailer(ns3::Trailer const & trailer, uint32_t size) [member function] cls.add_method('AddTrailer', 'void', [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::PacketMetadata::BeginItem(ns3::Buffer buffer) const [member function] cls.add_method('BeginItem', 'ns3::PacketMetadata::ItemIterator', [param('ns3::Buffer', 'buffer')], is_const=True) ## packet-metadata.h (module 'network'): ns3::PacketMetadata ns3::PacketMetadata::CreateFragment(uint32_t start, uint32_t end) const [member function] cls.add_method('CreateFragment', 'ns3::PacketMetadata', [param('uint32_t', 'start'), param('uint32_t', 'end')], is_const=True) ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Deserialize(uint8_t const * buffer, uint32_t size) [member function] cls.add_method('Deserialize', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::Enable() [member function] cls.add_method('Enable', 'void', [], is_static=True) ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::EnableChecking() [member function] cls.add_method('EnableChecking', 'void', [], is_static=True) ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## packet-metadata.h (module 'network'): uint64_t ns3::PacketMetadata::GetUid() const [member function] cls.add_method('GetUid', 'uint64_t', [], is_const=True) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtEnd(uint32_t end) [member function] cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'end')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtStart(uint32_t start) [member function] cls.add_method('RemoveAtStart', 'void', 
[param('uint32_t', 'start')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveHeader(ns3::Header const & header, uint32_t size) [member function] cls.add_method('RemoveHeader', 'void', [param('ns3::Header const &', 'header'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveTrailer(ns3::Trailer const & trailer, uint32_t size) [member function] cls.add_method('RemoveTrailer', 'void', [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function] cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True) return def register_Ns3PacketMetadataItem_methods(root_module, cls): ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item() [constructor] cls.add_constructor([]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item(ns3::PacketMetadata::Item const & arg0) [copy constructor] cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::current [variable] cls.add_instance_attribute('current', 'ns3::Buffer::Iterator', is_const=False) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentSize [variable] cls.add_instance_attribute('currentSize', 'uint32_t', is_const=False) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromEnd [variable] cls.add_instance_attribute('currentTrimedFromEnd', 'uint32_t', is_const=False) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromStart [variable] cls.add_instance_attribute('currentTrimedFromStart', 'uint32_t', is_const=False) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::isFragment [variable] cls.add_instance_attribute('isFragment', 
'bool', is_const=False) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::tid [variable] cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False) return def register_Ns3PacketMetadataItemIterator_methods(root_module, cls): ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata::ItemIterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata const * metadata, ns3::Buffer buffer) [constructor] cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')]) ## packet-metadata.h (module 'network'): bool ns3::PacketMetadata::ItemIterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item ns3::PacketMetadata::ItemIterator::Next() [member function] cls.add_method('Next', 'ns3::PacketMetadata::Item', []) return def register_Ns3PacketTagIterator_methods(root_module, cls): ## packet.h (module 'network'): ns3::PacketTagIterator::PacketTagIterator(ns3::PacketTagIterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')]) ## packet.h (module 'network'): bool ns3::PacketTagIterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## packet.h (module 'network'): ns3::PacketTagIterator::Item ns3::PacketTagIterator::Next() [member function] cls.add_method('Next', 'ns3::PacketTagIterator::Item', []) return def register_Ns3PacketTagIteratorItem_methods(root_module, cls): ## packet.h (module 'network'): ns3::PacketTagIterator::Item::Item(ns3::PacketTagIterator::Item const & arg0) [copy constructor] cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')]) ## packet.h (module 'network'): 
void ns3::PacketTagIterator::Item::GetTag(ns3::Tag & tag) const [member function] cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True) ## packet.h (module 'network'): ns3::TypeId ns3::PacketTagIterator::Item::GetTypeId() const [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True) return def register_Ns3PacketTagList_methods(root_module, cls): ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList() [constructor] cls.add_constructor([]) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList(ns3::PacketTagList const & o) [copy constructor] cls.add_constructor([param('ns3::PacketTagList const &', 'o')]) ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::Add(ns3::Tag const & tag) const [member function] cls.add_method('Add', 'void', [param('ns3::Tag const &', 'tag')], is_const=True) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData const * ns3::PacketTagList::Head() const [member function] cls.add_method('Head', 'ns3::PacketTagList::TagData const *', [], is_const=True) ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Peek(ns3::Tag & tag) const [member function] cls.add_method('Peek', 'bool', [param('ns3::Tag &', 'tag')], is_const=True) ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Remove(ns3::Tag & tag) [member function] cls.add_method('Remove', 'bool', [param('ns3::Tag &', 'tag')]) ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::RemoveAll() [member function] cls.add_method('RemoveAll', 'void', []) return def register_Ns3PacketTagListTagData_methods(root_module, cls): ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData() [constructor] cls.add_constructor([]) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData(ns3::PacketTagList::TagData const & arg0) [copy constructor] cls.add_constructor([param('ns3::PacketTagList::TagData const &', 
'arg0')]) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::count [variable] cls.add_instance_attribute('count', 'uint32_t', is_const=False) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::data [variable] cls.add_instance_attribute('data', 'uint8_t [ 20 ]', is_const=False) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::next [variable] cls.add_instance_attribute('next', 'ns3::PacketTagList::TagData *', is_const=False) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::tid [variable] cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False) return def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3Simulator_methods(root_module, cls): ## simulator.h (module 'core'): ns3::Simulator::Simulator(ns3::Simulator const & arg0) [copy constructor] cls.add_constructor([param('ns3::Simulator const &', 'arg0')]) ## simulator.h (module 'core'): static void ns3::Simulator::Cancel(ns3::EventId const & id) [member function] cls.add_method('Cancel', 'void', [param('ns3::EventId const &', 'id')], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::Destroy() [member function] cls.add_method('Destroy', 
'void', [], is_static=True) ## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetContext() [member function] cls.add_method('GetContext', 'uint32_t', [], is_static=True) ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetDelayLeft(ns3::EventId const & id) [member function] cls.add_method('GetDelayLeft', 'ns3::Time', [param('ns3::EventId const &', 'id')], is_static=True) ## simulator.h (module 'core'): static ns3::Ptr<ns3::SimulatorImpl> ns3::Simulator::GetImplementation() [member function] cls.add_method('GetImplementation', 'ns3::Ptr< ns3::SimulatorImpl >', [], is_static=True) ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetMaximumSimulationTime() [member function] cls.add_method('GetMaximumSimulationTime', 'ns3::Time', [], is_static=True) ## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetSystemId() [member function] cls.add_method('GetSystemId', 'uint32_t', [], is_static=True) ## simulator.h (module 'core'): static bool ns3::Simulator::IsExpired(ns3::EventId const & id) [member function] cls.add_method('IsExpired', 'bool', [param('ns3::EventId const &', 'id')], is_static=True) ## simulator.h (module 'core'): static bool ns3::Simulator::IsFinished() [member function] cls.add_method('IsFinished', 'bool', [], is_static=True) ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::Now() [member function] cls.add_method('Now', 'ns3::Time', [], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::Remove(ns3::EventId const & id) [member function] cls.add_method('Remove', 'void', [param('ns3::EventId const &', 'id')], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::SetImplementation(ns3::Ptr<ns3::SimulatorImpl> impl) [member function] cls.add_method('SetImplementation', 'void', [param('ns3::Ptr< ns3::SimulatorImpl >', 'impl')], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::SetScheduler(ns3::ObjectFactory 
schedulerFactory) [member function] cls.add_method('SetScheduler', 'void', [param('ns3::ObjectFactory', 'schedulerFactory')], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::Stop() [member function] cls.add_method('Stop', 'void', [], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::Stop(ns3::Time const & time) [member function] cls.add_method('Stop', 'void', [param('ns3::Time const &', 'time')], is_static=True) return def register_Ns3StatisticalSummary_methods(root_module, cls): ## data-calculator.h (module 'stats'): ns3::StatisticalSummary::StatisticalSummary() [constructor] cls.add_constructor([]) ## data-calculator.h (module 'stats'): ns3::StatisticalSummary::StatisticalSummary(ns3::StatisticalSummary const & arg0) [copy constructor] cls.add_constructor([param('ns3::StatisticalSummary const &', 'arg0')]) ## data-calculator.h (module 'stats'): long int ns3::StatisticalSummary::getCount() const [member function] cls.add_method('getCount', 'long int', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## data-calculator.h (module 'stats'): double ns3::StatisticalSummary::getMax() const [member function] cls.add_method('getMax', 'double', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## data-calculator.h (module 'stats'): double ns3::StatisticalSummary::getMean() const [member function] cls.add_method('getMean', 'double', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## data-calculator.h (module 'stats'): double ns3::StatisticalSummary::getMin() const [member function] cls.add_method('getMin', 'double', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## data-calculator.h (module 'stats'): double ns3::StatisticalSummary::getSqrSum() const [member function] cls.add_method('getSqrSum', 'double', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## data-calculator.h (module 'stats'): double ns3::StatisticalSummary::getStddev() const [member function] 
cls.add_method('getStddev', 'double', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## data-calculator.h (module 'stats'): double ns3::StatisticalSummary::getSum() const [member function] cls.add_method('getSum', 'double', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## data-calculator.h (module 'stats'): double ns3::StatisticalSummary::getVariance() const [member function] cls.add_method('getVariance', 'double', [], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3Tag_methods(root_module, cls): ## tag.h (module 'network'): ns3::Tag::Tag() [constructor] cls.add_constructor([]) ## tag.h (module 'network'): ns3::Tag::Tag(ns3::Tag const & arg0) [copy constructor] cls.add_constructor([param('ns3::Tag const &', 'arg0')]) ## tag.h (module 'network'): void ns3::Tag::Deserialize(ns3::TagBuffer i) [member function] cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_pure_virtual=True, is_virtual=True) ## tag.h (module 'network'): uint32_t ns3::Tag::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## tag.h (module 'network'): static ns3::TypeId ns3::Tag::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## tag.h (module 'network'): void ns3::Tag::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True) ## tag.h (module 'network'): void ns3::Tag::Serialize(ns3::TagBuffer i) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3TagBuffer_methods(root_module, cls): ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [copy constructor] cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')]) ## 
tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor] cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function] cls.add_method('CopyFrom', 'void', [param('ns3::TagBuffer', 'o')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function] cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')]) ## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function] cls.add_method('ReadDouble', 'double', []) ## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function] cls.add_method('ReadU16', 'uint16_t', []) ## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function] cls.add_method('ReadU32', 'uint32_t', []) ## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function] cls.add_method('ReadU64', 'uint64_t', []) ## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function] cls.add_method('ReadU8', 'uint8_t', []) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function] cls.add_method('TrimAtEnd', 'void', [param('uint32_t', 'trim')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function] cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function] cls.add_method('WriteDouble', 'void', [param('double', 'v')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t data) [member function] cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t data) 
[member function] cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function] cls.add_method('WriteU64', 'void', [param('uint64_t', 'v')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function] cls.add_method('WriteU8', 'void', [param('uint8_t', 'v')]) return def register_Ns3TypeId_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor] cls.add_constructor([param('char const *', 'name')]) ## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor] cls.add_constructor([param('ns3::TypeId const &', 'o')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), 
param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function] cls.add_method('AddTraceSource', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function] cls.add_method('GetAttribute', 'ns3::TypeId::AttributeInformation', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function] cls.add_method('GetAttributeFullName', 'std::string', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function] cls.add_method('GetAttributeN', 'uint32_t', [], is_const=True) ## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function] cls.add_method('GetConstructor', 'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function] cls.add_method('GetGroupName', 'std::string', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function] cls.add_method('GetName', 'std::string', [], is_const=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function] cls.add_method('GetParent', 'ns3::TypeId', [], is_const=True) ## type-id.h (module 'core'): static ns3::TypeId 
ns3::TypeId::GetRegistered(uint32_t i) [member function] cls.add_method('GetRegistered', 'ns3::TypeId', [param('uint32_t', 'i')], is_static=True) ## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function] cls.add_method('GetRegisteredN', 'uint32_t', [], is_static=True) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function] cls.add_method('GetTraceSource', 'ns3::TypeId::TraceSourceInformation', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function] cls.add_method('GetTraceSourceN', 'uint32_t', [], is_const=True) ## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function] cls.add_method('GetUid', 'uint16_t', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function] cls.add_method('HasConstructor', 'bool', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function] cls.add_method('HasParent', 'bool', [], is_const=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function] cls.add_method('HideFromDocumentation', 'ns3::TypeId', []) ## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function] cls.add_method('IsChildOf', 'bool', [param('ns3::TypeId', 'other')], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function] cls.add_method('LookupAttributeByName', 'bool', [param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)], is_const=True) ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function] cls.add_method('LookupByName', 'ns3::TypeId', [param('std::string', 'name')], 
is_static=True) ## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function] cls.add_method('LookupTraceSourceByName', 'ns3::Ptr< ns3::TraceSourceAccessor const >', [param('std::string', 'name')], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function] cls.add_method('MustHideFromDocumentation', 'bool', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function] cls.add_method('SetAttributeInitialValue', 'bool', [param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function] cls.add_method('SetGroupName', 'ns3::TypeId', [param('std::string', 'groupName')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function] cls.add_method('SetParent', 'ns3::TypeId', [param('ns3::TypeId', 'tid')]) ## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function] cls.add_method('SetUid', 'void', [param('uint16_t', 'tid')]) return def register_Ns3TypeIdAttributeInformation_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable] cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable] 
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable] cls.add_instance_attribute('flags', 'uint32_t', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable] cls.add_instance_attribute('help', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable] cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable] cls.add_instance_attribute('name', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable] cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False) return def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable] cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable] cls.add_instance_attribute('help', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable] cls.add_instance_attribute('name', 'std::string', is_const=False) return def register_Ns3Empty_methods(root_module, cls): ## empty.h (module 'core'): ns3::empty::empty() [constructor] 
cls.add_constructor([]) ## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor] cls.add_constructor([param('ns3::empty const &', 'arg0')]) return def register_Ns3Int64x64_t_methods(root_module, cls): cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], 
root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], 
param('unsigned int const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right')) cls.add_unary_numeric_operator('-') cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], 
param('unsigned char const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right')) cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('>') cls.add_binary_comparison_operator('!=') cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', 'right')) cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', 'right')) cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', 'right')) cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', 'right')) cls.add_output_stream_operator() cls.add_binary_comparison_operator('<=') cls.add_binary_comparison_operator('==') cls.add_binary_comparison_operator('>=') ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor] cls.add_constructor([]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(double v) [constructor] cls.add_constructor([param('double', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int v) [constructor] cls.add_constructor([param('int', 'v')]) ## int64x64-double.h (module 
'core'): ns3::int64x64_t::int64x64_t(long int v) [constructor] cls.add_constructor([param('long int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int v) [constructor] cls.add_constructor([param('long long int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int v) [constructor] cls.add_constructor([param('unsigned int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int v) [constructor] cls.add_constructor([param('long unsigned int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int v) [constructor] cls.add_constructor([param('long long unsigned int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t hi, uint64_t lo) [constructor] cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [copy constructor] cls.add_constructor([param('ns3::int64x64_t const &', 'o')]) ## int64x64-double.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function] cls.add_method('GetDouble', 'double', [], is_const=True) ## int64x64-double.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function] cls.add_method('GetHigh', 'int64_t', [], is_const=True) ## int64x64-double.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function] cls.add_method('GetLow', 'uint64_t', [], is_const=True) ## int64x64-double.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t v) [member function] cls.add_method('Invert', 'ns3::int64x64_t', [param('uint64_t', 'v')], is_static=True) ## int64x64-double.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function] cls.add_method('MulByInvert', 'void', [param('ns3::int64x64_t const &', 'o')]) return def register_Ns3Chunk_methods(root_module, cls): ## chunk.h 
(module 'network'): ns3::Chunk::Chunk() [constructor] cls.add_constructor([]) ## chunk.h (module 'network'): ns3::Chunk::Chunk(ns3::Chunk const & arg0) [copy constructor] cls.add_constructor([param('ns3::Chunk const &', 'arg0')]) ## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start) [member function] cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_virtual=True) ## chunk.h (module 'network'): static ns3::TypeId ns3::Chunk::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## chunk.h (module 'network'): void ns3::Chunk::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3Gnuplot2dDataset_methods(root_module, cls): ## gnuplot.h (module 'tools'): ns3::Gnuplot2dDataset::Gnuplot2dDataset(ns3::Gnuplot2dDataset const & arg0) [copy constructor] cls.add_constructor([param('ns3::Gnuplot2dDataset const &', 'arg0')]) ## gnuplot.h (module 'tools'): ns3::Gnuplot2dDataset::Gnuplot2dDataset(std::string const & title="Untitled") [constructor] cls.add_constructor([param('std::string const &', 'title', default_value='"Untitled"')]) ## gnuplot.h (module 'tools'): void ns3::Gnuplot2dDataset::Add(double x, double y) [member function] cls.add_method('Add', 'void', [param('double', 'x'), param('double', 'y')]) ## gnuplot.h (module 'tools'): void ns3::Gnuplot2dDataset::Add(double x, double y, double errorDelta) [member function] cls.add_method('Add', 'void', [param('double', 'x'), param('double', 'y'), param('double', 'errorDelta')]) ## gnuplot.h (module 'tools'): void ns3::Gnuplot2dDataset::Add(double x, double y, double xErrorDelta, double yErrorDelta) [member function] cls.add_method('Add', 'void', [param('double', 'x'), param('double', 'y'), param('double', 'xErrorDelta'), param('double', 'yErrorDelta')]) ## 
gnuplot.h (module 'tools'): void ns3::Gnuplot2dDataset::AddEmptyLine() [member function] cls.add_method('AddEmptyLine', 'void', []) ## gnuplot.h (module 'tools'): static void ns3::Gnuplot2dDataset::SetDefaultErrorBars(ns3::Gnuplot2dDataset::ErrorBars errorBars) [member function] cls.add_method('SetDefaultErrorBars', 'void', [param('ns3::Gnuplot2dDataset::ErrorBars', 'errorBars')], is_static=True) ## gnuplot.h (module 'tools'): static void ns3::Gnuplot2dDataset::SetDefaultStyle(ns3::Gnuplot2dDataset::Style style) [member function] cls.add_method('SetDefaultStyle', 'void', [param('ns3::Gnuplot2dDataset::Style', 'style')], is_static=True) ## gnuplot.h (module 'tools'): void ns3::Gnuplot2dDataset::SetErrorBars(ns3::Gnuplot2dDataset::ErrorBars errorBars) [member function] cls.add_method('SetErrorBars', 'void', [param('ns3::Gnuplot2dDataset::ErrorBars', 'errorBars')]) ## gnuplot.h (module 'tools'): void ns3::Gnuplot2dDataset::SetStyle(ns3::Gnuplot2dDataset::Style style) [member function] cls.add_method('SetStyle', 'void', [param('ns3::Gnuplot2dDataset::Style', 'style')]) return def register_Ns3Gnuplot2dFunction_methods(root_module, cls): ## gnuplot.h (module 'tools'): ns3::Gnuplot2dFunction::Gnuplot2dFunction(ns3::Gnuplot2dFunction const & arg0) [copy constructor] cls.add_constructor([param('ns3::Gnuplot2dFunction const &', 'arg0')]) ## gnuplot.h (module 'tools'): ns3::Gnuplot2dFunction::Gnuplot2dFunction(std::string const & title="Untitled", std::string const & function="") [constructor] cls.add_constructor([param('std::string const &', 'title', default_value='"Untitled"'), param('std::string const &', 'function', default_value='""')]) ## gnuplot.h (module 'tools'): void ns3::Gnuplot2dFunction::SetFunction(std::string const & function) [member function] cls.add_method('SetFunction', 'void', [param('std::string const &', 'function')]) return def register_Ns3Gnuplot3dDataset_methods(root_module, cls): ## gnuplot.h (module 'tools'): 
ns3::Gnuplot3dDataset::Gnuplot3dDataset(ns3::Gnuplot3dDataset const & arg0) [copy constructor] cls.add_constructor([param('ns3::Gnuplot3dDataset const &', 'arg0')]) ## gnuplot.h (module 'tools'): ns3::Gnuplot3dDataset::Gnuplot3dDataset(std::string const & title="Untitled") [constructor] cls.add_constructor([param('std::string const &', 'title', default_value='"Untitled"')]) ## gnuplot.h (module 'tools'): void ns3::Gnuplot3dDataset::Add(double x, double y, double z) [member function] cls.add_method('Add', 'void', [param('double', 'x'), param('double', 'y'), param('double', 'z')]) ## gnuplot.h (module 'tools'): void ns3::Gnuplot3dDataset::AddEmptyLine() [member function] cls.add_method('AddEmptyLine', 'void', []) ## gnuplot.h (module 'tools'): static void ns3::Gnuplot3dDataset::SetDefaultStyle(std::string const & style) [member function] cls.add_method('SetDefaultStyle', 'void', [param('std::string const &', 'style')], is_static=True) ## gnuplot.h (module 'tools'): void ns3::Gnuplot3dDataset::SetStyle(std::string const & style) [member function] cls.add_method('SetStyle', 'void', [param('std::string const &', 'style')]) return def register_Ns3Gnuplot3dFunction_methods(root_module, cls): ## gnuplot.h (module 'tools'): ns3::Gnuplot3dFunction::Gnuplot3dFunction(ns3::Gnuplot3dFunction const & arg0) [copy constructor] cls.add_constructor([param('ns3::Gnuplot3dFunction const &', 'arg0')]) ## gnuplot.h (module 'tools'): ns3::Gnuplot3dFunction::Gnuplot3dFunction(std::string const & title="Untitled", std::string const & function="") [constructor] cls.add_constructor([param('std::string const &', 'title', default_value='"Untitled"'), param('std::string const &', 'function', default_value='""')]) ## gnuplot.h (module 'tools'): void ns3::Gnuplot3dFunction::SetFunction(std::string const & function) [member function] cls.add_method('SetFunction', 'void', [param('std::string const &', 'function')]) return def register_Ns3Header_methods(root_module, cls): 
cls.add_output_stream_operator() ## header.h (module 'network'): ns3::Header::Header() [constructor] cls.add_constructor([]) ## header.h (module 'network'): ns3::Header::Header(ns3::Header const & arg0) [copy constructor] cls.add_constructor([param('ns3::Header const &', 'arg0')]) ## header.h (module 'network'): uint32_t ns3::Header::Deserialize(ns3::Buffer::Iterator start) [member function] cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_virtual=True) ## header.h (module 'network'): uint32_t ns3::Header::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## header.h (module 'network'): static ns3::TypeId ns3::Header::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## header.h (module 'network'): void ns3::Header::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True) ## header.h (module 'network'): void ns3::Header::Serialize(ns3::Buffer::Iterator start) const [member function] cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3Object_methods(root_module, cls): ## object.h (module 'core'): ns3::Object::Object() [constructor] cls.add_constructor([]) ## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function] cls.add_method('AggregateObject', 'void', [param('ns3::Ptr< ns3::Object >', 'other')]) ## object.h (module 'core'): void ns3::Object::Dispose() [member function] cls.add_method('Dispose', 'void', []) ## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function] cls.add_method('GetAggregateIterator',
codeparrot/github-code-clean
# -*- coding: utf-8 -*- # PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN: # https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code from ccxt.base.exchange import Exchange import json from ccxt.base.errors import ExchangeError from ccxt.base.errors import AuthenticationError from ccxt.base.errors import BadSymbol from ccxt.base.errors import InsufficientFunds from ccxt.base.errors import OrderNotFound from ccxt.base.decimal_to_precision import TICK_SIZE from ccxt.base.precise import Precise class ndax(Exchange): def describe(self): return self.deep_extend(super(ndax, self).describe(), { 'id': 'ndax', 'name': 'NDAX', 'countries': ['CA'], # Canada 'rateLimit': 1000, 'pro': True, 'has': { 'CORS': None, 'spot': True, 'margin': False, 'swap': False, 'future': False, 'option': False, 'addMargin': False, 'cancelAllOrders': True, 'cancelOrder': True, 'createDepositAddress': True, 'createOrder': True, 'createReduceOnlyOrder': False, 'editOrder': True, 'fetchAccounts': True, 'fetchBalance': True, 'fetchBorrowRate': False, 'fetchBorrowRateHistories': False, 'fetchBorrowRateHistory': False, 'fetchBorrowRates': False, 'fetchBorrowRatesPerSymbol': False, 'fetchCurrencies': True, 'fetchDepositAddress': True, 'fetchDeposits': True, 'fetchFundingHistory': False, 'fetchFundingRate': False, 'fetchFundingRateHistory': False, 'fetchFundingRates': False, 'fetchIndexOHLCV': False, 'fetchIsolatedPositions': False, 'fetchLedger': True, 'fetchLeverage': False, 'fetchLeverageTiers': False, 'fetchMarkets': True, 'fetchMarkOHLCV': False, 'fetchMyTrades': True, 'fetchOHLCV': True, 'fetchOpenOrders': True, 'fetchOrder': True, 'fetchOrderBook': True, 'fetchOrders': True, 'fetchOrderTrades': True, 'fetchPosition': False, 'fetchPositions': False, 'fetchPositionsRisk': False, 'fetchPremiumIndexOHLCV': False, 'fetchTicker': True, 'fetchTrades': True, 'fetchWithdrawals': True, 'reduceMargin': False, 'setLeverage': False, 'setMarginMode': False, 
'setPositionMode': False, 'signIn': True, 'withdraw': True, }, 'timeframes': { '1m': '60', '5m': '300', '15m': '900', '30m': '1800', '1h': '3600', '2h': '7200', '4h': '14400', '6h': '21600', '12h': '43200', '1d': '86400', '1w': '604800', '1M': '2419200', '4M': '9676800', }, 'urls': { 'logo': 'https://user-images.githubusercontent.com/1294454/108623144-67a3ef00-744e-11eb-8140-75c6b851e945.jpg', 'test': { 'public': 'https://ndaxmarginstaging.cdnhop.net:8443/AP', 'private': 'https://ndaxmarginstaging.cdnhop.net:8443/AP', }, 'api': { 'public': 'https://api.ndax.io:8443/AP', 'private': 'https://api.ndax.io:8443/AP', }, 'www': 'https://ndax.io', 'doc': [ 'https://apidoc.ndax.io/', ], 'fees': 'https://ndax.io/fees', 'referral': 'https://one.ndax.io/bfQiSL', }, 'api': { 'public': { 'get': [ 'Activate2FA', 'Authenticate2FA', 'AuthenticateUser', 'GetL2Snapshot', 'GetLevel1', 'GetValidate2FARequiredEndpoints', 'LogOut', 'GetTickerHistory', 'GetProduct', 'GetProducts', 'GetInstrument', 'GetInstruments', 'Ping', 'trades', # undocumented 'GetLastTrades', # undocumented 'SubscribeLevel1', 'SubscribeLevel2', 'SubscribeTicker', 'SubscribeTrades', 'SubscribeBlockTrades', 'UnsubscribeBlockTrades', 'UnsubscribeLevel1', 'UnsubscribeLevel2', 'UnsubscribeTicker', 'UnsubscribeTrades', 'Authenticate', # undocumented ], }, 'private': { 'get': [ 'GetUserAccountInfos', 'GetUserAccounts', 'GetUserAffiliateCount', 'GetUserAffiliateTag', 'GetUserConfig', 'GetAllUnredactedUserConfigsForUser', 'GetUnredactedUserConfigByKey', 'GetUserDevices', 'GetUserReportTickets', 'GetUserReportWriterResultRecords', 'GetAccountInfo', 'GetAccountPositions', 'GetAllAccountConfigs', 'GetTreasuryProductsForAccount', 'GetAccountTrades', 'GetAccountTransactions', 'GetOpenTradeReports', 'GetAllOpenTradeReports', 'GetTradesHistory', 'GetOpenOrders', 'GetOpenQuotes', 'GetOrderFee', 'GetOrderHistory', 'GetOrdersHistory', 'GetOrderStatus', 'GetOmsFeeTiers', 'GetAccountDepositTransactions', 'GetAccountWithdrawTransactions', 
'GetAllDepositRequestInfoTemplates', 'GetDepositInfo', 'GetDepositRequestInfoTemplate', 'GetDeposits', 'GetDepositTicket', 'GetDepositTickets', 'GetOMSWithdrawFees', 'GetWithdrawFee', 'GetWithdraws', 'GetWithdrawTemplate', 'GetWithdrawTemplateTypes', 'GetWithdrawTicket', 'GetWithdrawTickets', ], 'post': [ 'AddUserAffiliateTag', 'CancelUserReport', 'RegisterNewDevice', 'SubscribeAccountEvents', 'UpdateUserAffiliateTag', 'GenerateTradeActivityReport', 'GenerateTransactionActivityReport', 'GenerateTreasuryActivityReport', 'ScheduleTradeActivityReport', 'ScheduleTransactionActivityReport', 'ScheduleTreasuryActivityReport', 'CancelAllOrders', 'CancelOrder', 'CancelQuote', 'CancelReplaceOrder', 'CreateQuote', 'ModifyOrder', 'SendOrder', 'SubmitBlockTrade', 'UpdateQuote', 'CancelWithdraw', 'CreateDepositTicket', 'CreateWithdrawTicket', 'SubmitDepositTicketComment', 'SubmitWithdrawTicketComment', 'GetOrderHistoryByOrderId', ], }, }, 'fees': { 'trading': { 'tierBased': False, 'percentage': True, 'maker': 0.2 / 100, 'taker': 0.25 / 100, }, }, 'requiredCredentials': { 'apiKey': True, 'secret': True, 'uid': True, # these credentials are required for signIn() and withdraw() 'login': True, 'password': True, # 'twofa': True, }, 'precisionMode': TICK_SIZE, 'exceptions': { 'exact': { 'Not_Enough_Funds': InsufficientFunds, # {"status":"Rejected","errormsg":"Not_Enough_Funds","errorcode":101} 'Server Error': ExchangeError, # {"result":false,"errormsg":"Server Error","errorcode":102,"detail":null} 'Resource Not Found': OrderNotFound, # {"result":false,"errormsg":"Resource Not Found","errorcode":104,"detail":null} }, 'broad': { 'Invalid InstrumentId': BadSymbol, # {"result":false,"errormsg":"Invalid InstrumentId: 10000","errorcode":100,"detail":null} 'This endpoint requires 2FACode along with the payload': AuthenticationError, }, }, 'options': { 'omsId': 1, 'orderTypes': { 'Market': 1, 'Limit': 2, 'StopMarket': 3, 'StopLimit': 4, 'TrailingStopMarket': 5, 'TrailingStopLimit': 6, 
'BlockTrade': 7, }, }, }) def sign_in(self, params={}): self.check_required_credentials() if self.login is None or self.password is None: raise AuthenticationError(self.id + ' signIn() requires exchange.login, exchange.password') request = { 'grant_type': 'client_credentials', # the only supported value } response = self.publicGetAuthenticate(self.extend(request, params)) # # { # "Authenticated":true, # "Requires2FA":true, # "AuthType":"Google", # "AddtlInfo":"", # "Pending2FaToken": "6f5c4e66-f3ee-493e-9227-31cc0583b55f" # } # sessionToken = self.safe_string(response, 'SessionToken') if sessionToken is not None: self.options['sessionToken'] = sessionToken return response pending2faToken = self.safe_string(response, 'Pending2FaToken') if pending2faToken is not None: if self.twofa is None: raise AuthenticationError(self.id + ' signIn() requires exchange.twofa credentials') self.options['pending2faToken'] = pending2faToken request = { 'Code': self.oath(), } response = self.publicGetAuthenticate2FA(self.extend(request, params)) # # { # "Authenticated": True, # "UserId":57765, # "SessionToken":"4a2a5857-c4e5-4fac-b09e-2c4c30b591a0" # } # sessionToken = self.safe_string(response, 'SessionToken') self.options['sessionToken'] = sessionToken return response return response def fetch_currencies(self, params={}): omsId = self.safe_integer(self.options, 'omsId', 1) request = { 'omsId': omsId, } response = self.publicGetGetProducts(self.extend(request, params)) # # [ # { # "OMSId":1, # "ProductId":1, # "Product":"BTC", # "ProductFullName":"Bitcoin", # "ProductType":"CryptoCurrency", # "DecimalPlaces":8, # "TickSize":0.0000000100000000000000000000, # "NoFees":false, # "IsDisabled":false, # "MarginEnabled":false # }, # ] # result = {} for i in range(0, len(response)): currency = response[i] id = self.safe_string(currency, 'ProductId') name = self.safe_string(currency, 'ProductFullName') type = self.safe_string(currency, 'ProductType') code = 
self.safe_currency_code(self.safe_string(currency, 'Product')) precision = self.safe_number(currency, 'TickSize') isDisabled = self.safe_value(currency, 'IsDisabled') active = not isDisabled result[code] = { 'id': id, 'name': name, 'code': code, 'type': type, 'precision': precision, 'info': currency, 'active': active, 'deposit': None, 'withdraw': None, 'fee': None, 'limits': { 'amount': { 'min': None, 'max': None, }, 'withdraw': { 'min': None, 'max': None, }, }, } return result def fetch_markets(self, params={}): omsId = self.safe_integer(self.options, 'omsId', 1) request = { 'omsId': omsId, } response = self.publicGetGetInstruments(self.extend(request, params)) # # [ # { # "OMSId":1, # "InstrumentId":3, # "Symbol":"LTCBTC", # "Product1":3, # "Product1Symbol":"LTC", # "Product2":1, # "Product2Symbol":"BTC", # "InstrumentType":"Standard", # "VenueInstrumentId":3, # "VenueId":1, # "SortIndex":0, # "SessionStatus":"Running", # "PreviousSessionStatus":"Stopped", # "SessionStatusDateTime":"2020-11-25T19:42:15.245Z", # "SelfTradePrevention":true, # "QuantityIncrement":0.0000000100000000000000000000, # "PriceIncrement":0.0000000100000000000000000000, # "MinimumQuantity":0.0100000000000000000000000000, # "MinimumPrice":0.0000010000000000000000000000, # "VenueSymbol":"LTCBTC", # "IsDisable":false, # "MasterDataId":0, # "PriceCollarThreshold":0.0000000000000000000000000000, # "PriceCollarPercent":0.0000000000000000000000000000, # "PriceCollarEnabled":false, # "PriceFloorLimit":0.0000000000000000000000000000, # "PriceFloorLimitEnabled":false, # "PriceCeilingLimit":0.0000000000000000000000000000, # "PriceCeilingLimitEnabled":false, # "CreateWithMarketRunning":true, # "AllowOnlyMarketMakerCounterParty":false, # "PriceCollarIndexDifference":0.0000000000000000000000000000, # "PriceCollarConvertToOtcEnabled":false, # "PriceCollarConvertToOtcClientUserId":0, # "PriceCollarConvertToOtcAccountId":0, # "PriceCollarConvertToOtcThreshold":0.0000000000000000000000000000, # 
"OtcConvertSizeThreshold":0.0000000000000000000000000000, # "OtcConvertSizeEnabled":false, # "OtcTradesPublic":true, # "PriceTier":0 # }, # ] # result = [] for i in range(0, len(response)): market = response[i] id = self.safe_string(market, 'InstrumentId') # lowercaseId = self.safe_string_lower(market, 'symbol') baseId = self.safe_string(market, 'Product1') quoteId = self.safe_string(market, 'Product2') base = self.safe_currency_code(self.safe_string(market, 'Product1Symbol')) quote = self.safe_currency_code(self.safe_string(market, 'Product2Symbol')) sessionStatus = self.safe_string(market, 'SessionStatus') isDisable = self.safe_value(market, 'IsDisable') sessionRunning = (sessionStatus == 'Running') result.append({ 'id': id, 'symbol': base + '/' + quote, 'base': base, 'quote': quote, 'settle': None, 'baseId': baseId, 'quoteId': quoteId, 'settleId': None, 'type': 'spot', 'spot': True, 'margin': False, 'swap': False, 'future': False, 'option': False, 'active': (sessionRunning and not isDisable), 'contract': False, 'linear': None, 'inverse': None, 'contractSize': None, 'expiry': None, 'expiryDatetime': None, 'strike': None, 'optionType': None, 'precision': { 'amount': self.safe_number(market, 'QuantityIncrement'), 'price': self.safe_number(market, 'PriceIncrement'), }, 'limits': { 'leverage': { 'min': None, 'max': None, }, 'amount': { 'min': self.safe_number(market, 'MinimumQuantity'), 'max': None, }, 'price': { 'min': self.safe_number(market, 'MinimumPrice'), 'max': None, }, 'cost': { 'min': None, 'max': None, }, }, 'info': market, }) return result def parse_order_book(self, orderbook, symbol, timestamp=None, bidsKey='bids', asksKey='asks', priceKey=6, amountKey=8): nonce = None result = { 'symbol': symbol, 'bids': [], 'asks': [], 'timestamp': None, 'datetime': None, 'nonce': None, } for i in range(0, len(orderbook)): level = orderbook[i] if timestamp is None: timestamp = self.safe_integer(level, 2) else: newTimestamp = self.safe_integer(level, 2) timestamp = 
max(timestamp, newTimestamp) if nonce is None: nonce = self.safe_integer(level, 0) else: newNonce = self.safe_integer(level, 0) nonce = max(nonce, newNonce) bidask = self.parse_bid_ask(level, priceKey, amountKey) levelSide = self.safe_integer(level, 9) side = asksKey if levelSide else bidsKey result[side].append(bidask) result['bids'] = self.sort_by(result['bids'], 0, True) result['asks'] = self.sort_by(result['asks'], 0) result['timestamp'] = timestamp result['datetime'] = self.iso8601(timestamp) result['nonce'] = nonce return result def fetch_order_book(self, symbol, limit=None, params={}): omsId = self.safe_integer(self.options, 'omsId', 1) self.load_markets() market = self.market(symbol) limit = 100 if (limit is None) else limit # default 100 request = { 'omsId': omsId, 'InstrumentId': market['id'], 'Depth': limit, # default 100 } response = self.publicGetGetL2Snapshot(self.extend(request, params)) # # [ # [ # 0, # 0 MDUpdateId # 1, # 1 Number of Unique Accounts # 123, # 2 ActionDateTime in Posix format X 1000 # 0, # 3 ActionType 0(New), 1(Update), 2(Delete) # 0.0, # 4 LastTradePrice # 0, # 5 Number of Orders # 0.0, # 6 Price # 0, # 7 ProductPairCode # 0.0, # 8 Quantity # 0, # 9 Side # ], # [97244115,1,1607456142963,0,19069.32,1,19069.31,8,0.140095,0], # [97244115,0,1607456142963,0,19069.32,1,19068.64,8,0.0055,0], # [97244115,0,1607456142963,0,19069.32,1,19068.26,8,0.021291,0], # [97244115,1,1607456142964,0,19069.32,1,19069.32,8,0.099636,1], # [97244115,0,1607456142964,0,19069.32,1,19069.98,8,0.1,1], # [97244115,0,1607456142964,0,19069.32,1,19069.99,8,0.141604,1], # ] # return self.parse_order_book(response, symbol) def parse_ticker(self, ticker, market=None): # # fetchTicker # # { # "OMSId":1, # "InstrumentId":8, # "BestBid":19069.31, # "BestOffer":19069.32, # "LastTradedPx":19069.32, # "LastTradedQty":0.0001, # "LastTradeTime":1607040406424, # "SessionOpen":19069.32, # "SessionHigh":19069.32, # "SessionLow":19069.32, # "SessionClose":19069.32, # 
"Volume":0.0001, # "CurrentDayVolume":0.0001, # "CurrentDayNotional":1.906932, # "CurrentDayNumTrades":1, # "CurrentDayPxChange":0.00, # "Rolling24HrVolume":0.000000000000000000000000000, # "Rolling24HrNotional":0.00000000000000000000000, # "Rolling24NumTrades":0, # "Rolling24HrPxChange":0, # "TimeStamp":"1607040406425", # "BidQty":0, # "AskQty":0, # "BidOrderCt":0, # "AskOrderCt":0, # "Rolling24HrPxChangePercent":0, # } # timestamp = self.safe_integer(ticker, 'TimeStamp') marketId = self.safe_string(ticker, 'InstrumentId') market = self.safe_market(marketId, market) symbol = self.safe_symbol(marketId, market) last = self.safe_number(ticker, 'LastTradedPx') percentage = self.safe_number(ticker, 'Rolling24HrPxChangePercent') change = self.safe_number(ticker, 'Rolling24HrPxChange') open = self.safe_number(ticker, 'SessionOpen') baseVolume = self.safe_number(ticker, 'Rolling24HrVolume') quoteVolume = self.safe_number(ticker, 'Rolling24HrNotional') vwap = self.vwap(baseVolume, quoteVolume) return self.safe_ticker({ 'symbol': symbol, 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), 'high': self.safe_number(ticker, 'SessionHigh'), 'low': self.safe_number(ticker, 'SessionLow'), 'bid': self.safe_number(ticker, 'BestBid'), 'bidVolume': None, # self.safe_number(ticker, 'BidQty'), always shows 0 'ask': self.safe_number(ticker, 'BestOffer'), 'askVolume': None, # self.safe_number(ticker, 'AskQty'), always shows 0 'vwap': vwap, 'open': open, 'close': last, 'last': last, 'previousClose': None, 'change': change, 'percentage': percentage, 'average': None, 'baseVolume': baseVolume, 'quoteVolume': quoteVolume, 'info': ticker, }, market) def fetch_ticker(self, symbol, params={}): omsId = self.safe_integer(self.options, 'omsId', 1) self.load_markets() market = self.market(symbol) request = { 'omsId': omsId, 'InstrumentId': market['id'], } response = self.publicGetGetLevel1(self.extend(request, params)) # # { # "OMSId":1, # "InstrumentId":8, # "BestBid":19069.31, # 
"BestOffer":19069.32, # "LastTradedPx":19069.32, # "LastTradedQty":0.0001, # "LastTradeTime":1607040406424, # "SessionOpen":19069.32, # "SessionHigh":19069.32, # "SessionLow":19069.32, # "SessionClose":19069.32, # "Volume":0.0001, # "CurrentDayVolume":0.0001, # "CurrentDayNotional":1.906932, # "CurrentDayNumTrades":1, # "CurrentDayPxChange":0.00, # "Rolling24HrVolume":0.000000000000000000000000000, # "Rolling24HrNotional":0.00000000000000000000000, # "Rolling24NumTrades":0, # "Rolling24HrPxChange":0, # "TimeStamp":"1607040406425", # "BidQty":0, # "AskQty":0, # "BidOrderCt":0, # "AskOrderCt":0, # "Rolling24HrPxChangePercent":0, # } # return self.parse_ticker(response, market) def parse_ohlcv(self, ohlcv, market=None): # # [ # 1501603632000, # 0 DateTime # 2700.33, # 1 High # 2687.01, # 2 Low # 2687.01, # 3 Open # 2687.01, # 4 Close # 24.86100992, # 5 Volume # 0, # 6 Inside Bid Price # 2870.95, # 7 Inside Ask Price # 1 # 8 InstrumentId # ] # return [ self.safe_integer(ohlcv, 0), self.safe_number(ohlcv, 3), self.safe_number(ohlcv, 1), self.safe_number(ohlcv, 2), self.safe_number(ohlcv, 4), self.safe_number(ohlcv, 5), ] def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}): omsId = self.safe_integer(self.options, 'omsId', 1) self.load_markets() market = self.market(symbol) request = { 'omsId': omsId, 'InstrumentId': market['id'], 'Interval': self.timeframes[timeframe], } duration = self.parse_timeframe(timeframe) now = self.milliseconds() if since is None: if limit is not None: request['FromDate'] = self.ymdhms(now - duration * limit * 1000) request['ToDate'] = self.ymdhms(now) else: request['FromDate'] = self.ymdhms(since) if limit is None: request['ToDate'] = self.ymdhms(now) else: request['ToDate'] = self.ymdhms(self.sum(since, duration * limit * 1000)) response = self.publicGetGetTickerHistory(self.extend(request, params)) # # [ # [1607299260000,19069.32,19069.32,19069.32,19069.32,0,19069.31,19069.32,8,1607299200000], # 
[1607299320000,19069.32,19069.32,19069.32,19069.32,0,19069.31,19069.32,8,1607299260000], # [1607299380000,19069.32,19069.32,19069.32,19069.32,0,19069.31,19069.32,8,1607299320000], # ] # return self.parse_ohlcvs(response, market, timeframe, since, limit) def parse_trade(self, trade, market=None): # # fetchTrades(public) # # [ # 6913253, # 0 TradeId # 8, # 1 ProductPairCode # 0.03340802, # 2 Quantity # 19116.08, # 3 Price # 2543425077, # 4 Order1 # 2543425482, # 5 Order2 # 1606935922416, # 6 Tradetime # 0, # 7 Direction # 1, # 8 TakerSide # 0, # 9 BlockTrade # 0, # 10 Either Order1ClientId or Order2ClientId # ] # # fetchMyTrades(private) # # { # "OMSId":1, # "ExecutionId":16916567, # "TradeId":14476351, # "OrderId":2543565231, # "AccountId":449, # "AccountName":"igor@ccxt.trade", # "SubAccountId":0, # "ClientOrderId":0, # "InstrumentId":8, # "Side":"Sell", # "OrderType":"Market", # "Quantity":0.1230000000000000000000000000, # "RemainingQuantity":0.0000000000000000000000000000, # "Price":19069.310000000000000000000000, # "Value":2345.5251300000000000000000000, # "CounterParty":"7", # "OrderTradeRevision":1, # "Direction":"NoChange", # "IsBlockTrade":false, # "Fee":1.1727625650000000000000000000, # "FeeProductId":8, # "OrderOriginator":446, # "UserName":"igor@ccxt.trade", # "TradeTimeMS":1607565031569, # "MakerTaker":"Taker", # "AdapterTradeId":0, # "InsideBid":19069.310000000000000000000000, # "InsideBidSize":0.2400950000000000000000000000, # "InsideAsk":19069.320000000000000000000000, # "InsideAskSize":0.0997360000000000000000000000, # "IsQuote":false, # "CounterPartyClientUserId":1, # "NotionalProductId":2, # "NotionalRate":1.0000000000000000000000000000, # "NotionalValue":2345.5251300000000000000000000, # "NotionalHoldAmount":0, # "TradeTime":637431618315686826 # } # # fetchOrderTrades # # { # "Side":"Sell", # "OrderId":2543565235, # "Price":18600.000000000000000000000000, # "Quantity":0.0000000000000000000000000000, # 
"DisplayQuantity":0.0000000000000000000000000000, # "Instrument":8, # "Account":449, # "AccountName":"igor@ccxt.trade", # "OrderType":"Limit", # "ClientOrderId":0, # "OrderState":"FullyExecuted", # "ReceiveTime":1607585844956, # "ReceiveTimeTicks":637431826449564182, # "LastUpdatedTime":1607585844959, # "LastUpdatedTimeTicks":637431826449593893, # "OrigQuantity":0.1230000000000000000000000000, # "QuantityExecuted":0.1230000000000000000000000000, # "GrossValueExecuted":2345.3947500000000000000000000, # "ExecutableValue":0.0000000000000000000000000000, # "AvgPrice":19068.250000000000000000000000, # "CounterPartyId":0, # "ChangeReason":"Trade", # "OrigOrderId":2543565235, # "OrigClOrdId":0, # "EnteredBy":446, # "UserName":"igor@ccxt.trade", # "IsQuote":false, # "InsideAsk":19069.320000000000000000000000, # "InsideAskSize":0.0997360000000000000000000000, # "InsideBid":19068.250000000000000000000000, # "InsideBidSize":1.3300010000000000000000000000, # "LastTradePrice":19068.250000000000000000000000, # "RejectReason":"", # "IsLockedIn":false, # "CancelReason":"", # "OrderFlag":"0", # "UseMargin":false, # "StopPrice":0.0000000000000000000000000000, # "PegPriceType":"Unknown", # "PegOffset":0.0000000000000000000000000000, # "PegLimitOffset":0.0000000000000000000000000000, # "IpAddress":"x.x.x.x", # "ClientOrderIdUuid":null, # "OMSId":1 # } # priceString = None amountString = None cost = None timestamp = None id = None marketId = None side = None orderId = None takerOrMaker = None fee = None type = None if isinstance(trade, list): priceString = self.safe_string(trade, 3) amountString = self.safe_string(trade, 2) timestamp = self.safe_integer(trade, 6) id = self.safe_string(trade, 0) marketId = self.safe_string(trade, 1) takerSide = self.safe_value(trade, 8) side = 'sell' if takerSide else 'buy' orderId = self.safe_string(trade, 4) else: timestamp = self.safe_integer_2(trade, 'TradeTimeMS', 'ReceiveTime') id = self.safe_string(trade, 'TradeId') orderId = 
self.safe_string_2(trade, 'OrderId', 'OrigOrderId') marketId = self.safe_string_2(trade, 'InstrumentId', 'Instrument') priceString = self.safe_string(trade, 'Price') amountString = self.safe_string(trade, 'Quantity') cost = self.safe_number_2(trade, 'Value', 'GrossValueExecuted') takerOrMaker = self.safe_string_lower(trade, 'MakerTaker') side = self.safe_string_lower(trade, 'Side') type = self.safe_string_lower(trade, 'OrderType') feeCost = self.safe_number(trade, 'Fee') if feeCost is not None: feeCurrencyId = self.safe_string(trade, 'FeeProductId') feeCurrencyCode = self.safe_currency_code(feeCurrencyId) fee = { 'cost': feeCost, 'currency': feeCurrencyCode, } price = self.parse_number(priceString) amount = self.parse_number(amountString) if cost is None: cost = self.parse_number(Precise.string_mul(priceString, amountString)) symbol = self.safe_symbol(marketId, market) return { 'info': trade, 'id': id, 'symbol': symbol, 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), 'order': orderId, 'type': type, 'side': side, 'takerOrMaker': takerOrMaker, 'price': price, 'amount': amount, 'cost': cost, 'fee': fee, } def fetch_trades(self, symbol, since=None, limit=None, params={}): omsId = self.safe_integer(self.options, 'omsId', 1) self.load_markets() market = self.market(symbol) request = { 'omsId': omsId, 'InstrumentId': market['id'], } if limit is not None: request['Count'] = limit response = self.publicGetGetLastTrades(self.extend(request, params)) # # [ # [6913253,8,0.03340802,19116.08,2543425077,2543425482,1606935922416,0,1,0,0], # [6913254,8,0.01391671,19117.42,2543427510,2543427811,1606935927998,1,1,0,0], # [6913255,8,0.000006,19107.81,2543430495,2543430793,1606935933881,2,0,0,0], # ] # return self.parse_trades(response, market, since, limit) def fetch_accounts(self, params={}): if not self.login: raise AuthenticationError(self.id + ' fetchAccounts() requires exchange.login email credential') omsId = self.safe_integer(self.options, 'omsId', 1) 
self.check_required_credentials() request = { 'omsId': omsId, 'UserId': self.uid, 'UserName': self.login, } response = self.privateGetGetUserAccounts(self.extend(request, params)) # # [449] # comma-separated list of account ids # result = [] for i in range(0, len(response)): accountId = self.safe_string(response, i) result.append({ 'id': accountId, 'type': None, 'currency': None, 'info': accountId, }) return result def parse_balance(self, response): result = { 'info': response, 'timestamp': None, 'datetime': None, } for i in range(0, len(response)): balance = response[i] currencyId = self.safe_string(balance, 'ProductId') if currencyId in self.currencies_by_id: code = self.safe_currency_code(currencyId) account = self.account() account['total'] = self.safe_string(balance, 'Amount') account['used'] = self.safe_string(balance, 'Hold') result[code] = account return self.safe_balance(result) def fetch_balance(self, params={}): omsId = self.safe_integer(self.options, 'omsId', 1) self.load_markets() self.load_accounts() defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id'])) accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId) params = self.omit(params, ['accountId', 'AccountId']) request = { 'omsId': omsId, 'AccountId': accountId, } response = self.privateGetGetAccountPositions(self.extend(request, params)) # # [ # { # "OMSId":1, # "AccountId":449, # "ProductSymbol":"BTC", # "ProductId":1, # "Amount":10.000000000000000000000000000, # "Hold":0, # "PendingDeposits":0.0000000000000000000000000000, # "PendingWithdraws":0.0000000000000000000000000000, # "TotalDayDeposits":10.000000000000000000000000000, # "TotalMonthDeposits":10.000000000000000000000000000, # "TotalYearDeposits":10.000000000000000000000000000, # "TotalDayDepositNotional":10.000000000000000000000000000, # "TotalMonthDepositNotional":10.000000000000000000000000000, # "TotalYearDepositNotional":10.000000000000000000000000000, # 
"TotalDayWithdraws":0, # "TotalMonthWithdraws":0, # "TotalYearWithdraws":0, # "TotalDayWithdrawNotional":0, # "TotalMonthWithdrawNotional":0, # "TotalYearWithdrawNotional":0, # "NotionalProductId":8, # "NotionalProductSymbol":"USDT", # "NotionalValue":10.000000000000000000000000000, # "NotionalHoldAmount":0, # "NotionalRate":1 # }, # ] # return self.parse_balance(response) def parse_ledger_entry_type(self, type): types = { 'Trade': 'trade', 'Deposit': 'transaction', 'Withdraw': 'transaction', 'Transfer': 'transfer', 'OrderHold': 'trade', 'WithdrawHold': 'transaction', 'DepositHold': 'transaction', 'MarginHold': 'trade', 'ManualHold': 'trade', 'ManualEntry': 'trade', 'MarginAcquisition': 'trade', 'MarginRelinquish': 'trade', 'MarginQuoteHold': 'trade', } return self.safe_string(types, type, type) def parse_ledger_entry(self, item, currency=None): # # { # "TransactionId":2663709493, # "ReferenceId":68, # "OMSId":1, # "AccountId":449, # "CR":10.000000000000000000000000000, # "DR":0.0000000000000000000000000000, # "Counterparty":3, # "TransactionType":"Other", # "ReferenceType":"Deposit", # "ProductId":1, # "Balance":10.000000000000000000000000000, # "TimeStamp":1607532331591 # } # id = self.safe_string(item, 'TransactionId') account = self.safe_string(item, 'AccountId') referenceId = self.safe_string(item, 'ReferenceId') referenceAccount = self.safe_string(item, 'Counterparty') type = self.parse_ledger_entry_type(self.safe_string(item, 'ReferenceType')) currencyId = self.safe_string(item, 'ProductId') code = self.safe_currency_code(currencyId, currency) credit = self.safe_number(item, 'CR') debit = self.safe_number(item, 'DR') amount = None direction = None if credit > 0: amount = credit direction = 'in' elif debit > 0: amount = debit direction = 'out' timestamp = self.safe_integer(item, 'TimeStamp') before = None after = self.safe_number(item, 'Balance') if direction == 'out': before = self.sum(after, amount) elif direction == 'in': before = max(0, after - amount) 
status = 'ok' return { 'info': item, 'id': id, 'direction': direction, 'account': account, 'referenceId': referenceId, 'referenceAccount': referenceAccount, 'type': type, 'currency': code, 'amount': amount, 'before': before, 'after': after, 'status': status, 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), 'fee': None, } def fetch_ledger(self, code=None, since=None, limit=None, params={}): omsId = self.safe_integer(self.options, 'omsId', 1) self.load_markets() self.load_accounts() defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id'])) accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId) params = self.omit(params, ['accountId', 'AccountId']) request = { 'omsId': omsId, 'AccountId': accountId, } if limit is not None: request['Depth'] = limit response = self.privateGetGetAccountTransactions(self.extend(request, params)) # # [ # { # "TransactionId":2663709493, # "ReferenceId":68, # "OMSId":1, # "AccountId":449, # "CR":10.000000000000000000000000000, # "DR":0.0000000000000000000000000000, # "Counterparty":3, # "TransactionType":"Other", # "ReferenceType":"Deposit", # "ProductId":1, # "Balance":10.000000000000000000000000000, # "TimeStamp":1607532331591 # }, # ] # currency = None if code is not None: currency = self.currency(code) return self.parse_ledger(response, currency, since, limit) def parse_order_status(self, status): statuses = { 'Accepted': 'open', 'Rejected': 'rejected', 'Working': 'open', 'Canceled': 'canceled', 'Expired': 'expired', 'FullyExecuted': 'closed', } return self.safe_string(statuses, status, status) def parse_order(self, order, market=None): # # createOrder # # { # "status":"Accepted", # "errormsg":"", # "OrderId": 2543565231 # } # # editOrder # # { # "ReplacementOrderId": 1234, # "ReplacementClOrdId": 1561, # "OrigOrderId": 5678, # "OrigClOrdId": 91011, # } # # fetchOpenOrders, fetchClosedOrders # # { # "Side":"Buy", # "OrderId":2543565233, # 
"Price":19010, # "Quantity":0.345, # "DisplayQuantity":0.345, # "Instrument":8, # "Account":449, # "AccountName":"igor@ccxt.trade", # "OrderType":"Limit", # "ClientOrderId":0, # "OrderState":"Working", # "ReceiveTime":1607579326003, # "ReceiveTimeTicks":637431761260028981, # "LastUpdatedTime":1607579326005, # "LastUpdatedTimeTicks":637431761260054714, # "OrigQuantity":0.345, # "QuantityExecuted":0, # "GrossValueExecuted":0, # "ExecutableValue":0, # "AvgPrice":0, # "CounterPartyId":0, # "ChangeReason":"NewInputAccepted", # "OrigOrderId":2543565233, # "OrigClOrdId":0, # "EnteredBy":446, # "UserName":"igor@ccxt.trade", # "IsQuote":false, # "InsideAsk":19069.32, # "InsideAskSize":0.099736, # "InsideBid":19068.25, # "InsideBidSize":1.330001, # "LastTradePrice":19068.25, # "RejectReason":"", # "IsLockedIn":false, # "CancelReason":"", # "OrderFlag":"AddedToBook", # "UseMargin":false, # "StopPrice":0, # "PegPriceType":"Unknown", # "PegOffset":0, # "PegLimitOffset":0, # "IpAddress":null, # "ClientOrderIdUuid":null, # "OMSId":1 # } # id = self.safe_string_2(order, 'ReplacementOrderId', 'OrderId') timestamp = self.safe_integer(order, 'ReceiveTime') lastTradeTimestamp = self.safe_integer(order, 'LastUpdatedTime') marketId = self.safe_string(order, 'Instrument') symbol = self.safe_symbol(marketId, market) side = self.safe_string_lower(order, 'Side') type = self.safe_string_lower(order, 'OrderType') clientOrderId = self.safe_string_2(order, 'ReplacementClOrdId', 'ClientOrderId') price = self.safe_string(order, 'Price') amount = self.safe_string(order, 'OrigQuantity') filled = self.safe_string(order, 'QuantityExecuted') cost = self.safe_string(order, 'GrossValueExecuted') average = self.safe_string(order, 'AvgPrice') stopPrice = self.parse_number(self.omit_zero(self.safe_string(order, 'StopPrice'))) status = self.parse_order_status(self.safe_string(order, 'OrderState')) return self.safe_order({ 'id': id, 'clientOrderId': clientOrderId, 'info': order, 'timestamp': timestamp, 
'datetime': self.iso8601(timestamp), 'lastTradeTimestamp': lastTradeTimestamp, 'status': status, 'symbol': symbol, 'type': type, 'timeInForce': None, 'postOnly': None, 'side': side, 'price': price, 'stopPrice': stopPrice, 'cost': cost, 'amount': amount, 'filled': filled, 'average': average, 'remaining': None, 'fee': None, 'trades': None, }, market) def create_order(self, symbol, type, side, amount, price=None, params={}): omsId = self.safe_integer(self.options, 'omsId', 1) self.load_markets() self.load_accounts() defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id'])) accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId) clientOrderId = self.safe_integer_2(params, 'ClientOrderId', 'clientOrderId') params = self.omit(params, ['accountId', 'AccountId', 'clientOrderId', 'ClientOrderId']) market = self.market(symbol) orderSide = 0 if (side == 'buy') else 1 request = { 'InstrumentId': int(market['id']), 'omsId': omsId, 'AccountId': accountId, 'TimeInForce': 1, # 0 Unknown, 1 GTC by default, 2 OPG execute as close to opening price as possible, 3 IOC immediate or canceled, 4 FOK fill-or-kill, 5 GTX good 'til executed, 6 GTD good 'til date # 'ClientOrderId': clientOrderId, # defaults to 0 # If self order is order A, OrderIdOCO refers to the order ID of an order B(which is not the order being created by self call). # If order B executes, then order A created by self call is canceled. # You can also set up order B to watch order A in the same way, but that may require an update to order B to make it watch self one, which could have implications for priority in the order book. # See CancelReplaceOrder and ModifyOrder. # 'OrderIdOCO': 0, # The order ID if One Cancels the Other. 
# 'UseDisplayQuantity': False, # If you enter a Limit order with a reserve, you must set UseDisplayQuantity to True 'Side': orderSide, # 0 Buy, 1 Sell, 2 Short, 3 unknown an error condition 'Quantity': float(self.amount_to_precision(symbol, amount)), 'OrderType': self.safe_integer(self.options['orderTypes'], self.capitalize(type)), # 0 Unknown, 1 Market, 2 Limit, 3 StopMarket, 4 StopLimit, 5 TrailingStopMarket, 6 TrailingStopLimit, 7 BlockTrade # 'PegPriceType': 3, # 1 Last, 2 Bid, 3 Ask, 4 Midpoint # 'LimitPrice': float(self.price_to_precision(symbol, price)), } # If OrderType=1(Market), Side=0(Buy), and LimitPrice is supplied, the Market order will execute up to the value specified if price is not None: request['LimitPrice'] = float(self.price_to_precision(symbol, price)) if clientOrderId is not None: request['ClientOrderId'] = clientOrderId response = self.privatePostSendOrder(self.extend(request, params)) # # { # "status":"Accepted", # "errormsg":"", # "OrderId": 2543565231 # } # return self.parse_order(response, market) def edit_order(self, id, symbol, type, side, amount, price=None, params={}): omsId = self.safe_integer(self.options, 'omsId', 1) self.load_markets() self.load_accounts() defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id'])) accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId) clientOrderId = self.safe_integer_2(params, 'ClientOrderId', 'clientOrderId') params = self.omit(params, ['accountId', 'AccountId', 'clientOrderId', 'ClientOrderId']) market = self.market(symbol) orderSide = 0 if (side == 'buy') else 1 request = { 'OrderIdToReplace': int(id), 'InstrumentId': int(market['id']), 'omsId': omsId, 'AccountId': accountId, 'TimeInForce': 1, # 0 Unknown, 1 GTC by default, 2 OPG execute as close to opening price as possible, 3 IOC immediate or canceled, 4 FOK fill-or-kill, 5 GTX good 'til executed, 6 GTD good 'til date # 'ClientOrderId': clientOrderId, # defaults 
to 0 # If self order is order A, OrderIdOCO refers to the order ID of an order B(which is not the order being created by self call). # If order B executes, then order A created by self call is canceled. # You can also set up order B to watch order A in the same way, but that may require an update to order B to make it watch self one, which could have implications for priority in the order book. # See CancelReplaceOrder and ModifyOrder. # 'OrderIdOCO': 0, # The order ID if One Cancels the Other. # 'UseDisplayQuantity': False, # If you enter a Limit order with a reserve, you must set UseDisplayQuantity to True 'Side': orderSide, # 0 Buy, 1 Sell, 2 Short, 3 unknown an error condition 'Quantity': float(self.amount_to_precision(symbol, amount)), 'OrderType': self.safe_integer(self.options['orderTypes'], self.capitalize(type)), # 0 Unknown, 1 Market, 2 Limit, 3 StopMarket, 4 StopLimit, 5 TrailingStopMarket, 6 TrailingStopLimit, 7 BlockTrade # 'PegPriceType': 3, # 1 Last, 2 Bid, 3 Ask, 4 Midpoint # 'LimitPrice': float(self.price_to_precision(symbol, price)), } # If OrderType=1(Market), Side=0(Buy), and LimitPrice is supplied, the Market order will execute up to the value specified if price is not None: request['LimitPrice'] = float(self.price_to_precision(symbol, price)) if clientOrderId is not None: request['ClientOrderId'] = clientOrderId response = self.privatePostCancelReplaceOrder(self.extend(request, params)) # # { # "replacementOrderId": 1234, # "replacementClOrdId": 1561, # "origOrderId": 5678, # "origClOrdId": 91011, # } # return self.parse_order(response, market) def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}): omsId = self.safe_integer(self.options, 'omsId', 1) self.load_markets() self.load_accounts() defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id'])) accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId) params = self.omit(params, ['accountId', 
'AccountId']) request = { 'omsId': omsId, 'AccountId': accountId, # 'InstrumentId': market['id'], # 'TradeId': 123, # If you specify TradeId, GetTradesHistory can return all states for a single trade # 'OrderId': 456, # If specified, the call returns all trades associated with the order # 'UserId': integer. The ID of the logged-in user. If not specified, the call returns trades associated with the users belonging to the default account for the logged-in user of self OMS. # 'StartTimeStamp': long integer. The historical date and time at which to begin the trade report, in POSIX format. If not specified, reverts to the start date of self account on the trading venue. # 'EndTimeStamp': long integer. Date at which to end the trade report, in POSIX format. # 'Depth': integer. In self case, the count of trades to return, counting from the StartIndex. If Depth is not specified, returns all trades between BeginTimeStamp and EndTimeStamp, beginning at StartIndex. # 'StartIndex': 0 # from the most recent trade 0 and moving backwards in time # 'ExecutionId': 123, # The ID of the individual buy or sell execution. If not specified, returns all. 
} market = None if symbol is not None: market = self.market(symbol) request['InstrumentId'] = market['id'] if since is not None: request['StartTimeStamp'] = int(since / 1000) if limit is not None: request['Depth'] = limit response = self.privateGetGetTradesHistory(self.extend(request, params)) # # [ # { # "OMSId":1, # "ExecutionId":16916567, # "TradeId":14476351, # "OrderId":2543565231, # "AccountId":449, # "AccountName":"igor@ccxt.trade", # "SubAccountId":0, # "ClientOrderId":0, # "InstrumentId":8, # "Side":"Sell", # "OrderType":"Market", # "Quantity":0.1230000000000000000000000000, # "RemainingQuantity":0.0000000000000000000000000000, # "Price":19069.310000000000000000000000, # "Value":2345.5251300000000000000000000, # "CounterParty":"7", # "OrderTradeRevision":1, # "Direction":"NoChange", # "IsBlockTrade":false, # "Fee":1.1727625650000000000000000000, # "FeeProductId":8, # "OrderOriginator":446, # "UserName":"igor@ccxt.trade", # "TradeTimeMS":1607565031569, # "MakerTaker":"Taker", # "AdapterTradeId":0, # "InsideBid":19069.310000000000000000000000, # "InsideBidSize":0.2400950000000000000000000000, # "InsideAsk":19069.320000000000000000000000, # "InsideAskSize":0.0997360000000000000000000000, # "IsQuote":false, # "CounterPartyClientUserId":1, # "NotionalProductId":2, # "NotionalRate":1.0000000000000000000000000000, # "NotionalValue":2345.5251300000000000000000000, # "NotionalHoldAmount":0, # "TradeTime":637431618315686826 # } # ] # return self.parse_trades(response, market, since, limit) def cancel_all_orders(self, symbol=None, params={}): omsId = self.safe_integer(self.options, 'omsId', 1) self.load_markets() self.load_accounts() defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id'])) accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId) params = self.omit(params, ['accountId', 'AccountId']) request = { 'omsId': omsId, 'AccountId': accountId, } if symbol is not None: market = 
self.market(symbol) request['IntrumentId'] = market['id'] response = self.privatePostCancelAllOrders(self.extend(request, params)) # # { # "result":true, # "errormsg":null, # "errorcode":0, # "detail":null # } # return response def cancel_order(self, id, symbol=None, params={}): omsId = self.safe_integer(self.options, 'omsId', 1) self.load_markets() self.load_accounts() # defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id'])) # accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId) # params = self.omit(params, ['accountId', 'AccountId']) market = None if symbol is not None: market = self.market(symbol) request = { 'omsId': omsId, # 'AccountId': accountId, } clientOrderId = self.safe_integer_2(params, 'clientOrderId', 'ClOrderId') if clientOrderId is not None: request['ClOrderId'] = clientOrderId else: request['OrderId'] = int(id) params = self.omit(params, ['clientOrderId', 'ClOrderId']) response = self.privatePostCancelOrder(self.extend(request, params)) order = self.parse_order(response, market) return self.extend(order, { 'id': id, 'clientOrderId': clientOrderId, }) def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}): omsId = self.safe_integer(self.options, 'omsId', 1) self.load_markets() self.load_accounts() defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id'])) accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId) params = self.omit(params, ['accountId', 'AccountId']) market = None if symbol is not None: market = self.market(symbol) request = { 'omsId': omsId, 'AccountId': accountId, } response = self.privateGetGetOpenOrders(self.extend(request, params)) # # [ # { # "Side":"Buy", # "OrderId":2543565233, # "Price":19010, # "Quantity":0.345, # "DisplayQuantity":0.345, # "Instrument":8, # "Account":449, # "AccountName":"igor@ccxt.trade", # "OrderType":"Limit", # 
"ClientOrderId":0, # "OrderState":"Working", # "ReceiveTime":1607579326003, # "ReceiveTimeTicks":637431761260028981, # "LastUpdatedTime":1607579326005, # "LastUpdatedTimeTicks":637431761260054714, # "OrigQuantity":0.345, # "QuantityExecuted":0, # "GrossValueExecuted":0, # "ExecutableValue":0, # "AvgPrice":0, # "CounterPartyId":0, # "ChangeReason":"NewInputAccepted", # "OrigOrderId":2543565233, # "OrigClOrdId":0, # "EnteredBy":446, # "UserName":"igor@ccxt.trade", # "IsQuote":false, # "InsideAsk":19069.32, # "InsideAskSize":0.099736, # "InsideBid":19068.25, # "InsideBidSize":1.330001, # "LastTradePrice":19068.25, # "RejectReason":"", # "IsLockedIn":false, # "CancelReason":"", # "OrderFlag":"AddedToBook", # "UseMargin":false, # "StopPrice":0, # "PegPriceType":"Unknown", # "PegOffset":0, # "PegLimitOffset":0, # "IpAddress":null, # "ClientOrderIdUuid":null, # "OMSId":1 # } # ] # return self.parse_orders(response, market, since, limit) def fetch_orders(self, symbol=None, since=None, limit=None, params={}): omsId = self.safe_integer(self.options, 'omsId', 1) self.load_markets() self.load_accounts() defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id'])) accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId) params = self.omit(params, ['accountId', 'AccountId']) request = { 'omsId': omsId, 'AccountId': accountId, # 'ClientOrderId': clientOrderId, # 'OriginalOrderId': id, # 'OriginalClientOrderId': long integer, # 'UserId': integer, # 'InstrumentId': market['id'], # 'StartTimestamp': since, # 'EndTimestamp': self.milliseconds(), # 'Depth': limit, # 'StartIndex': 0, } market = None if symbol is not None: market = self.market(symbol) request['InstrumentId'] = market['id'] if since is not None: request['StartTimeStamp'] = int(since / 1000) if limit is not None: request['Depth'] = limit response = self.privateGetGetOrdersHistory(self.extend(request, params)) # # [ # { # "Side":"Buy", # 
"OrderId":2543565233, # "Price":19010.000000000000000000000000, # "Quantity":0.0000000000000000000000000000, # "DisplayQuantity":0.3450000000000000000000000000, # "Instrument":8, # "Account":449, # "AccountName":"igor@ccxt.trade", # "OrderType":"Limit", # "ClientOrderId":0, # "OrderState":"Canceled", # "ReceiveTime":1607579326003, # "ReceiveTimeTicks":637431761260028981, # "LastUpdatedTime":1607580965346, # "LastUpdatedTimeTicks":637431777653463754, # "OrigQuantity":0.3450000000000000000000000000, # "QuantityExecuted":0.0000000000000000000000000000, # "GrossValueExecuted":0.0000000000000000000000000000, # "ExecutableValue":0.0000000000000000000000000000, # "AvgPrice":0.0000000000000000000000000000, # "CounterPartyId":0, # "ChangeReason":"UserModified", # "OrigOrderId":2543565233, # "OrigClOrdId":0, # "EnteredBy":446, # "UserName":"igor@ccxt.trade", # "IsQuote":false, # "InsideAsk":19069.320000000000000000000000, # "InsideAskSize":0.0997360000000000000000000000, # "InsideBid":19068.250000000000000000000000, # "InsideBidSize":1.3300010000000000000000000000, # "LastTradePrice":19068.250000000000000000000000, # "RejectReason":"", # "IsLockedIn":false, # "CancelReason":"UserModified", # "OrderFlag":"AddedToBook, RemovedFromBook", # "UseMargin":false, # "StopPrice":0.0000000000000000000000000000, # "PegPriceType":"Unknown", # "PegOffset":0.0000000000000000000000000000, # "PegLimitOffset":0.0000000000000000000000000000, # "IpAddress":"x.x.x.x", # "ClientOrderIdUuid":null, # "OMSId":1 # }, # ] # return self.parse_orders(response, market, since, limit) def fetch_order(self, id, symbol=None, params={}): omsId = self.safe_integer(self.options, 'omsId', 1) self.load_markets() self.load_accounts() defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id'])) accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId) params = self.omit(params, ['accountId', 'AccountId']) market = None if symbol is not None: 
market = self.market(symbol) request = { 'omsId': omsId, 'AccountId': accountId, 'OrderId': int(id), } response = self.privateGetGetOrderStatus(self.extend(request, params)) # # { # "Side":"Sell", # "OrderId":2543565232, # "Price":0.0000000000000000000000000000, # "Quantity":0.0000000000000000000000000000, # "DisplayQuantity":0.0000000000000000000000000000, # "Instrument":8, # "Account":449, # "AccountName":"igor@ccxt.trade", # "OrderType":"Market", # "ClientOrderId":0, # "OrderState":"FullyExecuted", # "ReceiveTime":1607569475591, # "ReceiveTimeTicks":637431662755912377, # "LastUpdatedTime":1607569475596, # "LastUpdatedTimeTicks":637431662755960902, # "OrigQuantity":1.0000000000000000000000000000, # "QuantityExecuted":1.0000000000000000000000000000, # "GrossValueExecuted":19068.270478610000000000000000, # "ExecutableValue":0.0000000000000000000000000000, # "AvgPrice":19068.270478610000000000000000, # "CounterPartyId":0, # "ChangeReason":"Trade", # "OrigOrderId":2543565232, # "OrigClOrdId":0, # "EnteredBy":446, # "UserName":"igor@ccxt.trade", # "IsQuote":false, # "InsideAsk":19069.320000000000000000000000, # "InsideAskSize":0.0997360000000000000000000000, # "InsideBid":19069.310000000000000000000000, # "InsideBidSize":0.2400950000000000000000000000, # "LastTradePrice":19069.310000000000000000000000, # "RejectReason":"", # "IsLockedIn":false, # "CancelReason":"", # "OrderFlag":"0", # "UseMargin":false, # "StopPrice":0.0000000000000000000000000000, # "PegPriceType":"Unknown", # "PegOffset":0.0000000000000000000000000000, # "PegLimitOffset":0.0000000000000000000000000000, # "IpAddress":"x.x.x.x", # "ClientOrderIdUuid":null, # "OMSId":1 # } # return self.parse_order(response, market) def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}): omsId = self.safe_integer(self.options, 'omsId', 1) self.load_markets() self.load_accounts() # defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id'])) # 
accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId) # params = self.omit(params, ['accountId', 'AccountId']) market = None if symbol is not None: market = self.market(symbol) request = { 'OMSId': int(omsId), # 'AccountId': accountId, 'OrderId': int(id), } response = self.privatePostGetOrderHistoryByOrderId(self.extend(request, params)) # # [ # { # "Side":"Sell", # "OrderId":2543565235, # "Price":18600.000000000000000000000000, # "Quantity":0.0000000000000000000000000000, # "DisplayQuantity":0.0000000000000000000000000000, # "Instrument":8, # "Account":449, # "AccountName":"igor@ccxt.trade", # "OrderType":"Limit", # "ClientOrderId":0, # "OrderState":"FullyExecuted", # "ReceiveTime":1607585844956, # "ReceiveTimeTicks":637431826449564182, # "LastUpdatedTime":1607585844959, # "LastUpdatedTimeTicks":637431826449593893, # "OrigQuantity":0.1230000000000000000000000000, # "QuantityExecuted":0.1230000000000000000000000000, # "GrossValueExecuted":2345.3947500000000000000000000, # "ExecutableValue":0.0000000000000000000000000000, # "AvgPrice":19068.250000000000000000000000, # "CounterPartyId":0, # "ChangeReason":"Trade", # "OrigOrderId":2543565235, # "OrigClOrdId":0, # "EnteredBy":446, # "UserName":"igor@ccxt.trade", # "IsQuote":false, # "InsideAsk":19069.320000000000000000000000, # "InsideAskSize":0.0997360000000000000000000000, # "InsideBid":19068.250000000000000000000000, # "InsideBidSize":1.3300010000000000000000000000, # "LastTradePrice":19068.250000000000000000000000, # "RejectReason":"", # "IsLockedIn":false, # "CancelReason":"", # "OrderFlag":"0", # "UseMargin":false, # "StopPrice":0.0000000000000000000000000000, # "PegPriceType":"Unknown", # "PegOffset":0.0000000000000000000000000000, # "PegLimitOffset":0.0000000000000000000000000000, # "IpAddress":"x.x.x.x", # "ClientOrderIdUuid":null, # "OMSId":1 # }, # ] # grouped = self.group_by(response, 'ChangeReason') trades = self.safe_value(grouped, 'Trade', []) return 
self.parse_trades(trades, market, since, limit) def fetch_deposit_address(self, code, params={}): omsId = self.safe_integer(self.options, 'omsId', 1) self.load_markets() self.load_accounts() defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id'])) accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId) params = self.omit(params, ['accountId', 'AccountId']) currency = self.currency(code) request = { 'omsId': omsId, 'AccountId': accountId, 'ProductId': currency['id'], 'GenerateNewKey': False, } response = self.privateGetGetDepositInfo(self.extend(request, params)) # # { # "result":true, # "errormsg":null, # "statuscode":0, # "AssetManagerId":1, # "AccountId":57922, # "AssetId":16, # "ProviderId":23, # "DepositInfo":"[\"0x8A27564b5c30b91C93B1591821642420F323a210\"]" # } # return self.parse_deposit_address(response, currency) def parse_deposit_address(self, depositAddress, currency=None): # # fetchDepositAddress, createDepositAddress # # { # "result":true, # "errormsg":null, # "statuscode":0, # "AssetManagerId":1, # "AccountId":449, # "AssetId":1, # "ProviderId":1, # "DepositInfo":"[\"r3e95RwVsLH7yCbnMfyh7SA8FdwUJCB4S2?memo=241452010\"]" # } # depositInfoString = self.safe_string(depositAddress, 'DepositInfo') depositInfo = json.loads(depositInfoString) depositInfoLength = len(depositInfo) lastString = self.safe_string(depositInfo, depositInfoLength - 1) parts = lastString.split('?memo=') address = self.safe_string(parts, 0) tag = self.safe_string(parts, 1) code = None if currency is not None: code = currency['code'] self.check_address(address) return { 'currency': code, 'address': address, 'tag': tag, 'network': None, 'info': depositAddress, } def create_deposit_address(self, code, params={}): request = { 'GenerateNewKey': True, } return self.fetch_deposit_address(code, self.extend(request, params)) def fetch_deposits(self, code=None, since=None, limit=None, params={}): omsId = 
self.safe_integer(self.options, 'omsId', 1) self.load_markets() self.load_accounts() defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id'])) accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId) params = self.omit(params, ['accountId', 'AccountId']) currency = None if code is not None: currency = self.currency(code) request = { 'omsId': omsId, 'AccountId': accountId, } response = self.privateGetGetDeposits(self.extend(request, params)) # # [ # { # "OMSId":1, # "DepositId":44, # "AccountId":449, # "SubAccountId":0, # "ProductId":4, # "Amount":200.00000000000000000000000000, # "LastUpdateTimeStamp":637431291261187806, # "ProductType":"CryptoCurrency", # "TicketStatus":"FullyProcessed", # "DepositInfo":"{}", # "DepositCode":"ab0e23d5-a9ce-4d94-865f-9ab464fb1de3", # "TicketNumber":71, # "NotionalProductId":13, # "NotionalValue":200.00000000000000000000000000, # "FeeAmount":0.0000000000000000000000000000, # }, # ] # return self.parse_transactions(response, currency, since, limit) def fetch_withdrawals(self, code=None, since=None, limit=None, params={}): omsId = self.safe_integer(self.options, 'omsId', 1) self.load_markets() self.load_accounts() defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id'])) accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId) params = self.omit(params, ['accountId', 'AccountId']) currency = None if code is not None: currency = self.currency(code) request = { 'omsId': omsId, 'AccountId': accountId, } response = self.privateGetGetWithdraws(self.extend(request, params)) # # [ # { # "Amount": 0.0, # "FeeAmount": 0.0, # "NotionalValue": 0.0, # "WithdrawId": 0, # "AssetManagerId": 0, # "AccountId": 0, # "AssetId": 0, # "TemplateForm": "{\"TemplateType\": \"TetherRPCWithdraw\",\"Comment\": \"TestWithdraw\",\"ExternalAddress\": \"ms6C3pKAAr8gRCcnVebs8VRkVrjcvqNYv3\"}", # "TemplateFormType": 
"TetherRPCWithdraw", # "omsId": 0, # "TicketStatus": 0, # "TicketNumber": 0, # "WithdrawTransactionDetails": "", # "WithdrawType": "", # "WithdrawCode": "490b4fa3-53fc-44f4-bd29-7e16be86fba3", # "AssetType": 0, # "Reaccepted": True, # "NotionalProductId": 0 # }, # ] # return self.parse_transactions(response, currency, since, limit) def parse_transaction_status_by_type(self, status, type=None): statusesByType = { 'deposit': { 'New': 'pending', # new ticket awaiting operator review 'AdminProcessing': 'pending', # an admin is looking at the ticket 'Accepted': 'pending', # an admin accepts the ticket 'Rejected': 'rejected', # admin rejects the ticket 'SystemProcessing': 'pending', # automatic processing; an unlikely status for a deposit 'FullyProcessed': 'ok', # the deposit has concluded 'Failed': 'failed', # the deposit has failed for some reason 'Pending': 'pending', # Account Provider has set status to pending 'Confirmed': 'pending', # Account Provider confirms the deposit 'AmlProcessing': 'pending', # anti-money-laundering process underway 'AmlAccepted': 'pending', # anti-money-laundering process successful 'AmlRejected': 'rejected', # deposit did not stand up to anti-money-laundering process 'AmlFailed': 'failed', # anti-money-laundering process failed/did not complete 'LimitsAccepted': 'pending', # deposit meets limits for fiat or crypto asset 'LimitsRejected': 'rejected', # deposit does not meet limits for fiat or crypto asset }, 'withdrawal': { 'New': 'pending', # awaiting operator review 'AdminProcessing': 'pending', # An admin is looking at the ticket 'Accepted': 'pending', # withdrawal will proceed 'Rejected': 'rejected', # admin or automatic rejection 'SystemProcessing': 'pending', # automatic processing underway 'FullyProcessed': 'ok', # the withdrawal has concluded 'Failed': 'failed', # the withdrawal failed for some reason 'Pending': 'pending', # the admin has placed the withdrawal in pending status 'Pending2Fa': 'pending', # user must click 2-factor 
authentication confirmation link 'AutoAccepted': 'pending', # withdrawal will be automatically processed 'Delayed': 'pending', # waiting for funds to be allocated for the withdrawal 'UserCanceled': 'canceled', # withdraw canceled by user or Superuser 'AdminCanceled': 'canceled', # withdraw canceled by Superuser 'AmlProcessing': 'pending', # anti-money-laundering process underway 'AmlAccepted': 'pending', # anti-money-laundering process complete 'AmlRejected': 'rejected', # withdrawal did not stand up to anti-money-laundering process 'AmlFailed': 'failed', # withdrawal did not complete anti-money-laundering process 'LimitsAccepted': 'pending', # withdrawal meets limits for fiat or crypto asset 'LimitsRejected': 'rejected', # withdrawal does not meet limits for fiat or crypto asset 'Submitted': 'pending', # withdrawal sent to Account Provider; awaiting blockchain confirmation 'Confirmed': 'pending', # Account Provider confirms that withdrawal is on the blockchain 'ManuallyConfirmed': 'pending', # admin has sent withdrawal via wallet or admin function directly; marks ticket as FullyProcessed; debits account 'Confirmed2Fa': 'pending', # user has confirmed withdraw via 2-factor authentication. 
}, } statuses = self.safe_value(statusesByType, type, {}) return self.safe_string(statuses, status, status) def parse_transaction(self, transaction, currency=None): # # fetchDeposits # # { # "OMSId":1, # "DepositId":44, # "AccountId":449, # "SubAccountId":0, # "ProductId":4, # "Amount":200.00000000000000000000000000, # "LastUpdateTimeStamp":637431291261187806, # "ProductType":"CryptoCurrency", # "TicketStatus":"FullyProcessed", # "DepositInfo":"{}", # "DepositCode":"ab0e23d5-a9ce-4d94-865f-9ab464fb1de3", # "TicketNumber":71, # "NotionalProductId":13, # "NotionalValue":200.00000000000000000000000000, # "FeeAmount":0.0000000000000000000000000000, # } # # fetchWithdrawals # # { # "Amount": 0.0, # "FeeAmount": 0.0, # "NotionalValue": 0.0, # "WithdrawId": 0, # "AssetManagerId": 0, # "AccountId": 0, # "AssetId": 0, # "TemplateForm": "{\"TemplateType\": \"TetherRPCWithdraw\",\"Comment\": \"TestWithdraw\",\"ExternalAddress\": \"ms6C3pKAAr8gRCcnVebs8VRkVrjcvqNYv3\"}", # "TemplateFormType": "TetherRPCWithdraw", # "omsId": 0, # "TicketStatus": 0, # "TicketNumber": 0, # "WithdrawTransactionDetails": "", # "WithdrawType": "", # "WithdrawCode": "490b4fa3-53fc-44f4-bd29-7e16be86fba3", # "AssetType": 0, # "Reaccepted": True, # "NotionalProductId": 0 # } # id = self.safe_string(transaction, 'DepositId') txid = None currencyId = self.safe_string(transaction, 'ProductId') code = self.safe_currency_code(currencyId, currency) timestamp = None type = None if 'DepositId' in transaction: type = 'deposit' elif 'WithdrawId' in transaction: type = 'withdrawal' templateFormString = self.safe_string(transaction, 'TemplateForm') address = None updated = self.safe_integer(transaction, 'LastUpdateTimeStamp') if templateFormString is not None: templateForm = json.loads(templateFormString) address = self.safe_string(templateForm, 'ExternalAddress') txid = self.safe_string(templateForm, 'TxId') timestamp = self.safe_integer(templateForm, 'TimeSubmitted') updated = self.safe_integer(templateForm, 
'LastUpdated', updated) addressTo = address status = self.parse_transaction_status_by_type(self.safe_string(transaction, 'TicketStatus'), type) amount = self.safe_number(transaction, 'Amount') feeCost = self.safe_number(transaction, 'FeeAmount') fee = None if feeCost is not None: fee = {'currency': code, 'cost': feeCost} return { 'info': transaction, 'id': id, 'txid': txid, 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), 'address': address, 'addressTo': addressTo, 'addressFrom': None, 'tag': None, 'tagTo': None, 'tagFrom': None, 'type': type, 'amount': amount, 'currency': code, 'status': status, 'updated': updated, 'fee': fee, } def withdraw(self, code, amount, address, tag=None, params={}): tag, params = self.handle_withdraw_tag_and_params(tag, params) # self method required login, password and twofa key sessionToken = self.safe_string(self.options, 'sessionToken') if sessionToken is None: raise AuthenticationError(self.id + ' call signIn() method to obtain a session token') if self.twofa is None: raise AuthenticationError(self.id + ' withdraw() requires exchange.twofa credentials') self.check_address(address) omsId = self.safe_integer(self.options, 'omsId', 1) self.load_markets() self.load_accounts() defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id'])) accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId) params = self.omit(params, ['accountId', 'AccountId']) currency = self.currency(code) withdrawTemplateTypesRequest = { 'omsId': omsId, 'AccountId': accountId, 'ProductId': currency['id'], } withdrawTemplateTypesResponse = self.privateGetGetWithdrawTemplateTypes(withdrawTemplateTypesRequest) # # { # result: True, # errormsg: null, # statuscode: "0", # TemplateTypes: [ # {AccountProviderId: "14", TemplateName: "ToExternalBitcoinAddress", AccountProviderName: "BitgoRPC-BTC"}, # {AccountProviderId: "20", TemplateName: "ToExternalBitcoinAddress", AccountProviderName: 
"TrezorBTC"}, # {AccountProviderId: "31", TemplateName: "BTC", AccountProviderName: "BTC Fireblocks 1"} # ] # } # templateTypes = self.safe_value(withdrawTemplateTypesResponse, 'TemplateTypes', []) firstTemplateType = self.safe_value(templateTypes, 0) if firstTemplateType is None: raise ExchangeError(self.id + ' withdraw() could not find a withdraw template type for ' + currency['code']) templateName = self.safe_string(firstTemplateType, 'TemplateName') withdrawTemplateRequest = { 'omsId': omsId, 'AccountId': accountId, 'ProductId': currency['id'], 'TemplateType': templateName, 'AccountProviderId': firstTemplateType['AccountProviderId'], } withdrawTemplateResponse = self.privateGetGetWithdrawTemplate(withdrawTemplateRequest) # # { # result: True, # errormsg: null, # statuscode: "0", # Template: "{\"TemplateType\":\"ToExternalBitcoinAddress\",\"Comment\":\"\",\"ExternalAddress\":\"\"}" # } # template = self.safe_string(withdrawTemplateResponse, 'Template') if template is None: raise ExchangeError(self.id + ' withdraw() could not find a withdraw template for ' + currency['code']) withdrawTemplate = json.loads(template) withdrawTemplate['ExternalAddress'] = address if tag is not None: if 'Memo' in withdrawTemplate: withdrawTemplate['Memo'] = tag withdrawPayload = { 'omsId': omsId, 'AccountId': accountId, 'ProductId': currency['id'], 'TemplateForm': self.json(withdrawTemplate), 'TemplateType': templateName, } withdrawRequest = { 'TfaType': 'Google', 'TFaCode': self.oath(), 'Payload': self.json(withdrawPayload), } response = self.privatePostCreateWithdrawTicket(self.deep_extend(withdrawRequest, params)) return { 'info': response, 'id': self.safe_string(response, 'Id'), } def nonce(self): return self.milliseconds() def sign(self, path, api='public', method='GET', params={}, headers=None, body=None): url = self.urls['api'][api] + '/' + self.implode_params(path, params) query = self.omit(params, self.extract_params(path)) if api == 'public': if path == 'Authenticate': auth 
= self.login + ':' + self.password auth64 = self.string_to_base64(auth) headers = { 'Authorization': 'Basic ' + self.decode(auth64), # 'Content-Type': 'application/json', } elif path == 'Authenticate2FA': pending2faToken = self.safe_string(self.options, 'pending2faToken') if pending2faToken is not None: headers = { 'Pending2FaToken': pending2faToken, # 'Content-Type': 'application/json', } query = self.omit(query, 'pending2faToken') if query: url += '?' + self.urlencode(query) elif api == 'private': self.check_required_credentials() sessionToken = self.safe_string(self.options, 'sessionToken') if sessionToken is None: nonce = str(self.nonce()) auth = nonce + self.uid + self.apiKey signature = self.hmac(self.encode(auth), self.encode(self.secret)) headers = { 'Nonce': nonce, 'APIKey': self.apiKey, 'Signature': signature, 'UserId': self.uid, } else: headers = { 'APToken': sessionToken, } if method == 'POST': headers['Content-Type'] = 'application/json' body = self.json(query) else: if query: url += '?' + self.urlencode(query) return {'url': url, 'method': method, 'body': body, 'headers': headers} def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody): if code == 404: raise AuthenticationError(self.id + ' ' + body) if response is None: return # # {"status":"Rejected","errormsg":"Not_Enough_Funds","errorcode":101} # {"result":false,"errormsg":"Server Error","errorcode":102,"detail":null} # message = self.safe_string(response, 'errormsg') if (message is not None) and (message != ''): feedback = self.id + ' ' + body self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback) self.throw_broadly_matched_exception(self.exceptions['broad'], body, feedback) raise ExchangeError(feedback)
codeparrot/github-code-clean
""" KVM test utility functions. @copyright: 2008-2009 Red Hat Inc. """ import time, string, random, socket, os, signal, re, logging, commands, cPickle import fcntl, shelve, ConfigParser, threading, sys, UserDict, inspect, tarfile import struct, shutil, glob from autotest_lib.client.bin import utils, os_dep from autotest_lib.client.common_lib import error, logging_config from autotest_lib.client.common_lib import logging_manager, git import rss_client, aexpect import platform try: import koji KOJI_INSTALLED = True except ImportError: KOJI_INSTALLED = False ARCH = platform.machine() if ARCH == "ppc64": # From include/linux/sockios.h SIOCSIFHWADDR = 0x8924 SIOCGIFHWADDR = 0x8927 SIOCSIFFLAGS = 0x8914 SIOCGIFINDEX = 0x8933 SIOCBRADDIF = 0x89a2 # From linux/include/linux/if_tun.h TUNSETIFF = 0x800454ca TUNGETIFF = 0x400454d2 TUNGETFEATURES = 0x400454cf IFF_TAP = 0x2 IFF_NO_PI = 0x1000 IFF_VNET_HDR = 0x4000 # From linux/include/linux/if.h IFF_UP = 0x1 else: # From include/linux/sockios.h SIOCSIFHWADDR = 0x8924 SIOCGIFHWADDR = 0x8927 SIOCSIFFLAGS = 0x8914 SIOCGIFINDEX = 0x8933 SIOCBRADDIF = 0x89a2 # From linux/include/linux/if_tun.h TUNSETIFF = 0x400454ca TUNGETIFF = 0x800454d2 TUNGETFEATURES = 0x800454cf IFF_TAP = 0x0002 IFF_NO_PI = 0x1000 IFF_VNET_HDR = 0x4000 # From linux/include/linux/if.h IFF_UP = 0x1 def _lock_file(filename): f = open(filename, "w") fcntl.lockf(f, fcntl.LOCK_EX) return f def _unlock_file(f): fcntl.lockf(f, fcntl.LOCK_UN) f.close() def is_vm(obj): """ Tests whether a given object is a VM object. @param obj: Python object. 
""" return obj.__class__.__name__ == "VM" class NetError(Exception): pass class TAPModuleError(NetError): def __init__(self, devname, action="open", details=None): NetError.__init__(self, devname) self.devname = devname self.details = details def __str__(self): e_msg = "Can't %s %s" % (self.action, self.devname) if self.details is not None: e_msg += " : %s" % self.details return e_msg class TAPNotExistError(NetError): def __init__(self, ifname): NetError.__init__(self, ifname) self.ifname = ifname def __str__(self): return "Interface %s does not exist" % self.ifname class TAPCreationError(NetError): def __init__(self, ifname, details=None): NetError.__init__(self, ifname, details) self.ifname = ifname self.details = details def __str__(self): e_msg = "Cannot create TAP device %s" % self.ifname if self.details is not None: e_msg += ": %s" % self.details return e_msg class TAPBringUpError(NetError): def __init__(self, ifname): NetError.__init__(self, ifname) self.ifname = ifname def __str__(self): return "Cannot bring up TAP %s" % self.ifname class BRAddIfError(NetError): def __init__(self, ifname, brname, details): NetError.__init__(self, ifname, brname, details) self.ifname = ifname self.brname = brname self.details = details def __str__(self): return ("Can not add if %s to bridge %s: %s" % (self.ifname, self.brname, self.details)) class HwAddrSetError(NetError): def __init__(self, ifname, mac): NetError.__init__(self, ifname, mac) self.ifname = ifname self.mac = mac def __str__(self): return "Can not set mac %s to interface %s" % (self.mac, self.ifname) class HwAddrGetError(NetError): def __init__(self, ifname): NetError.__init__(self, ifname) self.ifname = ifname def __str__(self): return "Can not get mac of interface %s" % self.ifname class Env(UserDict.IterableUserDict): """ A dict-like object containing global objects used by tests. """ def __init__(self, filename=None, version=0): """ Create an empty Env object or load an existing one from a file. 
If the version recorded in the file is lower than version, or if some error occurs during unpickling, or if filename is not supplied, create an empty Env object. @param filename: Path to an env file. @param version: Required env version (int). """ UserDict.IterableUserDict.__init__(self) empty = {"version": version} if filename: self._filename = filename try: if os.path.isfile(filename): f = open(filename, "r") env = cPickle.load(f) f.close() if env.get("version", 0) >= version: self.data = env else: logging.warn("Incompatible env file found. Not using it.") self.data = empty else: # No previous env file found, proceed... self.data = empty # Almost any exception can be raised during unpickling, so let's # catch them all except Exception, e: logging.warn(e) self.data = empty else: self.data = empty def save(self, filename=None): """ Pickle the contents of the Env object into a file. @param filename: Filename to pickle the dict into. If not supplied, use the filename from which the dict was loaded. """ filename = filename or self._filename f = open(filename, "w") cPickle.dump(self.data, f) f.close() def get_all_vms(self): """ Return a list of all VM objects in this Env object. """ return [o for o in self.values() if is_vm(o)] def get_vm(self, name): """ Return a VM object by its name. @param name: VM name. """ return self.get("vm__%s" % name) def register_vm(self, name, vm): """ Register a VM in this Env object. @param name: VM name. @param vm: VM object. """ self["vm__%s" % name] = vm def unregister_vm(self, name): """ Remove a given VM. @param name: VM name. """ del self["vm__%s" % name] def register_installer(self, installer): """ Register a installer that was just run The installer will be available for other tests, so that information about the installed KVM modules and qemu-kvm can be used by them. 
""" self['last_installer'] = installer def previous_installer(self): """ Return the last installer that was registered """ return self.get('last_installer') class Params(UserDict.IterableUserDict): """ A dict-like object passed to every test. """ def objects(self, key): """ Return the names of objects defined using a given key. @param key: The name of the key whose value lists the objects (e.g. 'nics'). """ return self.get(key, "").split() def object_params(self, obj_name): """ Return a dict-like object containing the parameters of an individual object. This method behaves as follows: the suffix '_' + obj_name is removed from all key names that have it. Other key names are left unchanged. The values of keys with the suffix overwrite the values of their suffixless versions. @param obj_name: The name of the object (objects are listed by the objects() method). """ suffix = "_" + obj_name new_dict = self.copy() for key in self: if key.endswith(suffix): new_key = key.split(suffix)[0] new_dict[new_key] = self[key] return new_dict # Functions related to MAC/IP addresses def _open_mac_pool(lock_mode): lock_file = open("/tmp/mac_lock", "w+") fcntl.lockf(lock_file, lock_mode) pool = shelve.open("/tmp/address_pool") return pool, lock_file def _close_mac_pool(pool, lock_file): pool.close() fcntl.lockf(lock_file, fcntl.LOCK_UN) lock_file.close() def _generate_mac_address_prefix(mac_pool): """ Generate a random MAC address prefix and add it to the MAC pool dictionary. If there's a MAC prefix there already, do not update the MAC pool and just return what's in there. By convention we will set KVM autotest MAC addresses to start with 0x9a. @param mac_pool: The MAC address pool object. @return: The MAC address prefix. 
""" if "prefix" in mac_pool: prefix = mac_pool["prefix"] else: r = random.SystemRandom() prefix = "9a:%02x:%02x:%02x:" % (r.randint(0x00, 0xff), r.randint(0x00, 0xff), r.randint(0x00, 0xff)) mac_pool["prefix"] = prefix return prefix def generate_mac_address(vm_instance, nic_index): """ Randomly generate a MAC address and add it to the MAC address pool. Try to generate a MAC address based on a randomly generated MAC address prefix and add it to a persistent dictionary. key = VM instance + NIC index, value = MAC address e.g. {'20100310-165222-Wt7l:0': '9a:5d:94:6a:9b:f9'} @param vm_instance: The instance attribute of a VM. @param nic_index: The index of the NIC. @return: MAC address string. """ mac_pool, lock_file = _open_mac_pool(fcntl.LOCK_EX) key = "%s:%s" % (vm_instance, nic_index) if key in mac_pool: mac = mac_pool[key] else: prefix = _generate_mac_address_prefix(mac_pool) r = random.SystemRandom() while key not in mac_pool: mac = prefix + "%02x:%02x" % (r.randint(0x00, 0xff), r.randint(0x00, 0xff)) if mac in mac_pool.values(): continue mac_pool[key] = mac _close_mac_pool(mac_pool, lock_file) return mac def free_mac_address(vm_instance, nic_index): """ Remove a MAC address from the address pool. @param vm_instance: The instance attribute of a VM. @param nic_index: The index of the NIC. """ mac_pool, lock_file = _open_mac_pool(fcntl.LOCK_EX) key = "%s:%s" % (vm_instance, nic_index) if key in mac_pool: del mac_pool[key] _close_mac_pool(mac_pool, lock_file) def set_mac_address(vm_instance, nic_index, mac): """ Set a MAC address in the pool. @param vm_instance: The instance attribute of a VM. @param nic_index: The index of the NIC. """ mac_pool, lock_file = _open_mac_pool(fcntl.LOCK_EX) mac_pool["%s:%s" % (vm_instance, nic_index)] = mac _close_mac_pool(mac_pool, lock_file) def get_mac_address(vm_instance, nic_index): """ Return a MAC address from the pool. @param vm_instance: The instance attribute of a VM. @param nic_index: The index of the NIC. 
@return: MAC address string. """ mac_pool, lock_file = _open_mac_pool(fcntl.LOCK_SH) mac = mac_pool.get("%s:%s" % (vm_instance, nic_index)) _close_mac_pool(mac_pool, lock_file) return mac def verify_ip_address_ownership(ip, macs, timeout=10.0): """ Use arping and the ARP cache to make sure a given IP address belongs to one of the given MAC addresses. @param ip: An IP address. @param macs: A list or tuple of MAC addresses. @return: True iff ip is assigned to a MAC address in macs. """ # Compile a regex that matches the given IP address and any of the given # MAC addresses mac_regex = "|".join("(%s)" % mac for mac in macs) regex = re.compile(r"\b%s\b.*\b(%s)\b" % (ip, mac_regex), re.IGNORECASE) # Check the ARP cache o = commands.getoutput("%s -n" % find_command("arp")) if regex.search(o): return True # Get the name of the bridge device for arping o = commands.getoutput("%s route get %s" % (find_command("ip"), ip)) dev = re.findall("dev\s+\S+", o, re.IGNORECASE) if not dev: return False dev = dev[0].split()[-1] # Send an ARP request o = commands.getoutput("%s -f -c 3 -I %s %s" % (find_command("arping"), dev, ip)) return bool(regex.search(o)) # Utility functions for dealing with external processes def find_command(cmd): for dir in ["/usr/local/sbin", "/usr/local/bin", "/usr/sbin", "/usr/bin", "/sbin", "/bin"]: file = os.path.join(dir, cmd) if os.path.exists(file): return file raise ValueError('Missing command: %s' % cmd) def pid_exists(pid): """ Return True if a given PID exists. @param pid: Process ID number. """ try: os.kill(pid, 0) return True except Exception: return False def safe_kill(pid, signal): """ Attempt to send a signal to a given process that may or may not exist. @param signal: Signal number. """ try: os.kill(pid, signal) return True except Exception: return False def kill_process_tree(pid, sig=signal.SIGKILL): """Signal a process and all of its children. If the process does not exist -- return. @param pid: The pid of the process to signal. 
    @param sig: The signal to send to the processes.
    """
    # Stop the parent first so it cannot spawn new children while the tree
    # is walked; if even SIGSTOP fails, the process is already gone.
    if not safe_kill(pid, signal.SIGSTOP):
        return
    children = commands.getoutput("ps --ppid=%d -o pid=" % pid).split()
    for child in children:
        kill_process_tree(int(child), sig)
    safe_kill(pid, sig)
    # Resume the parent in case sig did not terminate it.
    safe_kill(pid, signal.SIGCONT)


def check_kvm_source_dir(source_dir):
    """
    Inspects the kvm source directory and verifies its disposition. In some
    occasions build may be dependant on the source directory disposition.
    The reason why the return codes are numbers is that we might have more
    changes on the source directory layout, so it's not scalable to just use
    strings like 'old_repo', 'new_repo' and such.

    @param source_dir: Source code path that will be inspected.
    """
    os.chdir(source_dir)
    has_qemu_dir = os.path.isdir('qemu')
    has_kvm_dir = os.path.isdir('kvm')
    if has_qemu_dir:
        logging.debug("qemu directory detected, source dir layout 1")
        return 1
    if has_kvm_dir and not has_qemu_dir:
        logging.debug("kvm directory detected, source dir layout 2")
        return 2
    else:
        raise error.TestError("Unknown source dir layout, cannot proceed.")


# Functions and classes used for logging into guests and transferring files

class LoginError(Exception):
    """Base class for remote login failures; carries the client output."""
    def __init__(self, msg, output):
        Exception.__init__(self, msg, output)
        self.msg = msg
        self.output = output

    def __str__(self):
        return "%s (output: %r)" % (self.msg, self.output)


class LoginAuthenticationError(LoginError):
    """Raised when the remote end rejects the supplied credentials."""
    pass


class LoginTimeoutError(LoginError):
    """Raised when no expected prompt appears within the timeout."""
    def __init__(self, output):
        LoginError.__init__(self, "Login timeout expired", output)


class LoginProcessTerminatedError(LoginError):
    """Raised when the remote shell client process dies during login."""
    def __init__(self, status, output):
        LoginError.__init__(self, None, output)
        self.status = status

    def __str__(self):
        return ("Client process terminated (status: %s, output: %r)" %
                (self.status, self.output))


class LoginBadClientError(LoginError):
    """Raised when an unsupported remote shell client is requested."""
    def __init__(self, client):
        LoginError.__init__(self, None, None)
        self.client = client

    def __str__(self):
        return "Unknown remote shell client: %r" % self.client


class SCPError(Exception):
    """Base class for SCP transfer failures; carries the client output."""
    def __init__(self, msg, output):
        Exception.__init__(self, msg, output)
        self.msg = msg
        self.output = output

    def __str__(self):
        return "%s (output: %r)" % (self.msg, self.output)


class SCPAuthenticationError(SCPError):
    """Raised when SCP authentication fails."""
    pass


class SCPAuthenticationTimeoutError(SCPAuthenticationError):
    """Raised when no password prompt appears within the login timeout."""
    def __init__(self, output):
        SCPAuthenticationError.__init__(self, "Authentication timeout expired",
                                        output)


class SCPTransferTimeoutError(SCPError):
    """Raised when the transfer does not complete within its timeout."""
    def __init__(self, output):
        SCPError.__init__(self, "Transfer timeout expired", output)


class SCPTransferFailedError(SCPError):
    """Raised when the SCP client exits with a nonzero status."""
    def __init__(self, status, output):
        SCPError.__init__(self, None, output)
        self.status = status

    def __str__(self):
        return ("SCP transfer failed (status: %s, output: %r)" %
                (self.status, self.output))


def _remote_login(session, username, password, prompt, timeout=10,
                  debug=False):
    """
    Log into a remote host (guest) using SSH or Telnet. Wait for questions
    and provide answers. If timeout expires while waiting for output from the
    child (e.g. a password prompt or a shell prompt) -- fail.

    @brief: Log into a remote host (guest) using SSH or Telnet.

    @param session: An Expect or ShellSession instance to operate on
    @param username: The username to send in reply to a login prompt
    @param password: The password to send in reply to a password prompt
    @param prompt: The shell prompt that indicates a successful login
    @param timeout: The maximal time duration (in seconds) to wait for each
            step of the login procedure (i.e.
the "Are you sure" prompt, the password prompt, the shell prompt, etc) @raise LoginTimeoutError: If timeout expires @raise LoginAuthenticationError: If authentication fails @raise LoginProcessTerminatedError: If the client terminates during login @raise LoginError: If some other error occurs """ password_prompt_count = 0 login_prompt_count = 0 while True: try: match, text = session.read_until_last_line_matches( [r"[Aa]re you sure", r"[Pp]assword:\s*$", r"[Ll]ogin:\s*$", r"[Cc]onnection.*closed", r"[Cc]onnection.*refused", r"[Pp]lease wait", r"[Ww]arning", prompt], timeout=timeout, internal_timeout=0.5) if match == 0: # "Are you sure you want to continue connecting" if debug: logging.debug("Got 'Are you sure...', sending 'yes'") session.sendline("yes") continue elif match == 1: # "password:" if password_prompt_count == 0: if debug: logging.debug("Got password prompt, sending '%s'", password) session.sendline(password) password_prompt_count += 1 continue else: raise LoginAuthenticationError("Got password prompt twice", text) elif match == 2: # "login:" if login_prompt_count == 0 and password_prompt_count == 0: if debug: logging.debug("Got username prompt; sending '%s'", username) session.sendline(username) login_prompt_count += 1 continue else: if login_prompt_count > 0: msg = "Got username prompt twice" else: msg = "Got username prompt after password prompt" raise LoginAuthenticationError(msg, text) elif match == 3: # "Connection closed" raise LoginError("Client said 'connection closed'", text) elif match == 4: # "Connection refused" raise LoginError("Client said 'connection refused'", text) elif match == 5: # "Please wait" if debug: logging.debug("Got 'Please wait'") timeout = 30 continue elif match == 6: # "Warning added RSA" if debug: logging.debug("Got 'Warning added RSA to known host list") continue elif match == 7: # prompt if debug: logging.debug("Got shell prompt -- logged in") break except aexpect.ExpectTimeoutError, e: raise LoginTimeoutError(e.output) 
except aexpect.ExpectProcessTerminatedError, e: raise LoginProcessTerminatedError(e.status, e.output) def remote_login(client, host, port, username, password, prompt, linesep="\n", log_filename=None, timeout=10): """ Log into a remote host (guest) using SSH/Telnet/Netcat. @param client: The client to use ('ssh', 'telnet' or 'nc') @param host: Hostname or IP address @param port: Port to connect to @param username: Username (if required) @param password: Password (if required) @param prompt: Shell prompt (regular expression) @param linesep: The line separator to use when sending lines (e.g. '\\n' or '\\r\\n') @param log_filename: If specified, log all output to this file @param timeout: The maximal time duration (in seconds) to wait for each step of the login procedure (i.e. the "Are you sure" prompt or the password prompt) @raise LoginBadClientError: If an unknown client is requested @raise: Whatever _remote_login() raises @return: A ShellSession object. """ if client == "ssh": cmd = ("ssh -o UserKnownHostsFile=/dev/null " "-o PreferredAuthentications=password -p %s %s@%s" % (port, username, host)) elif client == "telnet": cmd = "telnet -l %s %s %s" % (username, host, port) elif client == "nc": cmd = "nc %s %s" % (host, port) else: raise LoginBadClientError(client) logging.debug("Login command: '%s'", cmd) session = aexpect.ShellSession(cmd, linesep=linesep, prompt=prompt) try: _remote_login(session, username, password, prompt, timeout) except Exception: session.close() raise if log_filename: session.set_output_func(log_line) session.set_output_params((log_filename,)) return session def wait_for_login(client, host, port, username, password, prompt, linesep="\n", log_filename=None, timeout=240, internal_timeout=10): """ Make multiple attempts to log into a remote host (guest) until one succeeds or timeout expires. 
@param timeout: Total time duration to wait for a successful login @param internal_timeout: The maximal time duration (in seconds) to wait for each step of the login procedure (e.g. the "Are you sure" prompt or the password prompt) @see: remote_login() @raise: Whatever remote_login() raises @return: A ShellSession object. """ logging.debug("Attempting to log into %s:%s using %s (timeout %ds)", host, port, client, timeout) end_time = time.time() + timeout while time.time() < end_time: try: return remote_login(client, host, port, username, password, prompt, linesep, log_filename, internal_timeout) except LoginError, e: logging.debug(e) time.sleep(2) # Timeout expired; try one more time but don't catch exceptions return remote_login(client, host, port, username, password, prompt, linesep, log_filename, internal_timeout) def _remote_scp(session, password_list, transfer_timeout=600, login_timeout=20): """ Transfer file(s) to a remote host (guest) using SCP. Wait for questions and provide answers. If login_timeout expires while waiting for output from the child (e.g. a password prompt), fail. If transfer_timeout expires while waiting for the transfer to complete, fail. @brief: Transfer files using SCP, given a command line. @param session: An Expect or ShellSession instance to operate on @param password_list: Password list to send in reply to the password prompt @param transfer_timeout: The time duration (in seconds) to wait for the transfer to complete. @param login_timeout: The maximal time duration (in seconds) to wait for each step of the login procedure (i.e. 
the "Are you sure" prompt or the password prompt) @raise SCPAuthenticationError: If authentication fails @raise SCPTransferTimeoutError: If the transfer fails to complete in time @raise SCPTransferFailedError: If the process terminates with a nonzero exit code @raise SCPError: If some other error occurs """ password_prompt_count = 0 timeout = login_timeout authentication_done = False scp_type = len(password_list) while True: try: match, text = session.read_until_last_line_matches( [r"[Aa]re you sure", r"[Pp]assword:\s*$", r"lost connection"], timeout=timeout, internal_timeout=0.5) if match == 0: # "Are you sure you want to continue connecting" logging.debug("Got 'Are you sure...', sending 'yes'") session.sendline("yes") continue elif match == 1: # "password:" if password_prompt_count == 0: logging.debug("Got password prompt, sending '%s'" % password_list[password_prompt_count]) session.sendline(password_list[password_prompt_count]) password_prompt_count += 1 timeout = transfer_timeout if scp_type == 1: authentication_done = True continue elif password_prompt_count == 1 and scp_type == 2: logging.debug("Got password prompt, sending '%s'" % password_list[password_prompt_count]) session.sendline(password_list[password_prompt_count]) password_prompt_count += 1 timeout = transfer_timeout authentication_done = True continue else: raise SCPAuthenticationError("Got password prompt twice", text) elif match == 2: # "lost connection" raise SCPError("SCP client said 'lost connection'", text) except aexpect.ExpectTimeoutError, e: if authentication_done: raise SCPTransferTimeoutError(e.output) else: raise SCPAuthenticationTimeoutError(e.output) except aexpect.ExpectProcessTerminatedError, e: if e.status == 0: logging.debug("SCP process terminated with status 0") break else: raise SCPTransferFailedError(e.status, e.output) def remote_scp(command, password_list, log_filename=None, transfer_timeout=600, login_timeout=20): """ Transfer file(s) to a remote host (guest) using SCP. 
@brief: Transfer files using SCP, given a command line. @param command: The command to execute (e.g. "scp -r foobar root@localhost:/tmp/"). @param password_list: Password list to send in reply to a password prompt. @param log_filename: If specified, log all output to this file @param transfer_timeout: The time duration (in seconds) to wait for the transfer to complete. @param login_timeout: The maximal time duration (in seconds) to wait for each step of the login procedure (i.e. the "Are you sure" prompt or the password prompt) @raise: Whatever _remote_scp() raises """ logging.debug("Trying to SCP with command '%s', timeout %ss", command, transfer_timeout) if log_filename: output_func = log_line output_params = (log_filename,) else: output_func = None output_params = () session = aexpect.Expect(command, output_func=output_func, output_params=output_params) try: _remote_scp(session, password_list, transfer_timeout, login_timeout) finally: session.close() def scp_to_remote(host, port, username, password, local_path, remote_path, log_filename=None, timeout=600): """ Copy files to a remote host (guest) through scp. @param host: Hostname or IP address @param username: Username (if required) @param password: Password (if required) @param local_path: Path on the local machine where we are copying from @param remote_path: Path on the remote machine where we are copying to @param log_filename: If specified, log all output to this file @param timeout: The time duration (in seconds) to wait for the transfer to complete. 
@raise: Whatever remote_scp() raises """ command = ("scp -v -o UserKnownHostsFile=/dev/null " "-o PreferredAuthentications=password -r -P %s %s %s@%s:%s" % (port, local_path, username, host, remote_path)) password_list = [] password_list.append(password) return remote_scp(command, password_list, log_filename, timeout) def scp_from_remote(host, port, username, password, remote_path, local_path, log_filename=None, timeout=600): """ Copy files from a remote host (guest). @param host: Hostname or IP address @param username: Username (if required) @param password: Password (if required) @param local_path: Path on the local machine where we are copying from @param remote_path: Path on the remote machine where we are copying to @param log_filename: If specified, log all output to this file @param timeout: The time duration (in seconds) to wait for the transfer to complete. @raise: Whatever remote_scp() raises """ command = ("scp -v -o UserKnownHostsFile=/dev/null " "-o PreferredAuthentications=password -r -P %s %s@%s:%s %s" % (port, username, host, remote_path, local_path)) password_list = [] password_list.append(password) remote_scp(command, password_list, log_filename, timeout) def scp_between_remotes(src, dst, port, s_passwd, d_passwd, s_name, d_name, s_path, d_path, log_filename=None, timeout=600): """ Copy files from a remote host (guest) to another remote host (guest). @param src/dst: Hostname or IP address of src and dst @param s_name/d_name: Username (if required) @param s_passwd/d_passwd: Password (if required) @param s_path/d_path: Path on the remote machine where we are copying from/to @param log_filename: If specified, log all output to this file @param timeout: The time duration (in seconds) to wait for the transfer to complete. @return: True on success and False on failure. 
""" command = ("scp -v -o UserKnownHostsFile=/dev/null -o " "PreferredAuthentications=password -r -P %s %s@%s:%s %s@%s:%s" % (port, s_name, src, s_path, d_name, dst, d_path)) password_list = [] password_list.append(s_passwd) password_list.append(d_passwd) return remote_scp(command, password_list, log_filename, timeout) def copy_files_to(address, client, username, password, port, local_path, remote_path, log_filename=None, verbose=False, timeout=600): """ Copy files to a remote host (guest) using the selected client. @param client: Type of transfer client @param username: Username (if required) @param password: Password (if requried) @param local_path: Path on the local machine where we are copying from @param remote_path: Path on the remote machine where we are copying to @param address: Address of remote host(guest) @param log_filename: If specified, log all output to this file (SCP only) @param verbose: If True, log some stats using logging.debug (RSS only) @param timeout: The time duration (in seconds) to wait for the transfer to complete. @raise: Whatever remote_scp() raises """ if client == "scp": scp_to_remote(address, port, username, password, local_path, remote_path, log_filename, timeout) elif client == "rss": log_func = None if verbose: log_func = logging.debug c = rss_client.FileUploadClient(address, port, log_func) c.upload(local_path, remote_path, timeout) c.close() def copy_files_from(address, client, username, password, port, remote_path, local_path, log_filename=None, verbose=False, timeout=600): """ Copy files from a remote host (guest) using the selected client. 
    @param client: Type of transfer client
    @param username: Username (if required)
    @param password: Password (if required)
    @param remote_path: Path on the remote machine where we are copying from
    @param local_path: Path on the local machine where we are copying to
    @param address: Address of remote host(guest)
    @param log_filename: If specified, log all output to this file (SCP only)
    @param verbose: If True, log some stats using logging.debug (RSS only)
    @param timeout: The time duration (in seconds) to wait for the transfer
            to complete.
    @raise: Whatever remote_scp() raises
    """
    if client == "scp":
        scp_from_remote(address, port, username, password, remote_path,
                        local_path, log_filename, timeout)
    elif client == "rss":
        log_func = None
        if verbose:
            log_func = logging.debug
        c = rss_client.FileDownloadClient(address, port, log_func)
        c.download(remote_path, local_path, timeout)
        c.close()


# The following are utility functions related to ports.

def is_port_free(port, address):
    """
    Return True if the given port is available for use.

    @param port: Port number
    """
    try:
        s = socket.socket()
        #s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        if address == "localhost":
            # Locally, being able to bind means nothing is listening.
            s.bind(("localhost", port))
            free = True
        else:
            # Remotely, a successful connect means the port is in use.
            s.connect((address, port))
            free = False
    except socket.error:
        if address == "localhost":
            free = False
        else:
            free = True
    s.close()
    return free


def find_free_port(start_port, end_port, address="localhost"):
    """
    Return a host free port in the range [start_port, end_port].

    @param start_port: First port that will be checked.
    @param end_port: Port immediately after the last one that will be checked.
    @return: A free port number, or None if none is available in the range.
    """
    for i in range(start_port, end_port):
        if is_port_free(i, address):
            return i
    return None


def find_free_ports(start_port, end_port, count, address="localhost"):
    """
    Return count of host free ports in the range [start_port, end_port].

    @param count: Number of free ports requested.
    @param start_port: First port that will be checked.
@param end_port: Port immediately after the last one that will be checked. """ ports = [] i = start_port while i < end_port and count > 0: if is_port_free(i, address): ports.append(i) count -= 1 i += 1 return ports # An easy way to log lines to files when the logging system can't be used _open_log_files = {} _log_file_dir = "/tmp" def log_line(filename, line): """ Write a line to a file. '\n' is appended to the line. @param filename: Path of file to write to, either absolute or relative to the dir set by set_log_file_dir(). @param line: Line to write. """ global _open_log_files, _log_file_dir if filename not in _open_log_files: path = get_path(_log_file_dir, filename) try: os.makedirs(os.path.dirname(path)) except OSError: pass _open_log_files[filename] = open(path, "w") timestr = time.strftime("%Y-%m-%d %H:%M:%S") _open_log_files[filename].write("%s: %s\n" % (timestr, line)) _open_log_files[filename].flush() def set_log_file_dir(dir): """ Set the base directory for log files created by log_line(). @param dir: Directory for log files. """ global _log_file_dir _log_file_dir = dir # The following are miscellaneous utility functions. def get_path(base_path, user_path): """ Translate a user specified path to a real path. If user_path is relative, append it to base_path. If user_path is absolute, return it as is. @param base_path: The base path of relative user specified paths. @param user_path: The user specified path. """ if os.path.isabs(user_path): return user_path else: return os.path.join(base_path, user_path) def generate_random_string(length): """ Return a random string using alphanumeric characters. @length: length of the string that will be generated. """ r = random.SystemRandom() str = "" chars = string.letters + string.digits while length > 0: str += r.choice(chars) length -= 1 return str def generate_random_id(): """ Return a random string suitable for use as a qemu id. 
""" return "id" + generate_random_string(6) def generate_tmp_file_name(file, ext=None, dir='/tmp/'): """ Returns a temporary file name. The file is not created. """ while True: file_name = (file + '-' + time.strftime("%Y%m%d-%H%M%S-") + generate_random_string(4)) if ext: file_name += '.' + ext file_name = os.path.join(dir, file_name) if not os.path.exists(file_name): break return file_name def format_str_for_message(str): """ Format str so that it can be appended to a message. If str consists of one line, prefix it with a space. If str consists of multiple lines, prefix it with a newline. @param str: string that will be formatted. """ lines = str.splitlines() num_lines = len(lines) str = "\n".join(lines) if num_lines == 0: return "" elif num_lines == 1: return " " + str else: return "\n" + str def wait_for(func, timeout, first=0.0, step=1.0, text=None): """ If func() evaluates to True before timeout expires, return the value of func(). Otherwise return None. @brief: Wait until func() evaluates to True. @param timeout: Timeout in seconds @param first: Time to sleep before first attempt @param steps: Time to sleep between attempts in seconds @param text: Text to print while waiting, for debug purposes """ start_time = time.time() end_time = time.time() + timeout time.sleep(first) while time.time() < end_time: if text: logging.debug("%s (%f secs)", text, (time.time() - start_time)) output = func() if output: return output time.sleep(step) return None def get_hash_from_file(hash_path, dvd_basename): """ Get the a hash from a given DVD image from a hash file (Hash files are usually named MD5SUM or SHA1SUM and are located inside the download directories of the DVDs) @param hash_path: Local path to a hash file. 
@param cd_image: Basename of a CD image """ hash_file = open(hash_path, 'r') for line in hash_file.readlines(): if dvd_basename in line: return line.split()[0] def run_tests(parser, job): """ Runs the sequence of KVM tests based on the list of dictionaries generated by the configuration system, handling dependencies. @param parser: Config parser object. @param job: Autotest job object. @return: True, if all tests ran passed, False if any of them failed. """ for i, d in enumerate(parser.get_dicts()): logging.info("Test %4d: %s" % (i + 1, d["shortname"])) status_dict = {} failed = False for dict in parser.get_dicts(): if dict.get("skip") == "yes": continue dependencies_satisfied = True for dep in dict.get("dep"): for test_name in status_dict.keys(): if not dep in test_name: continue # So the only really non-fatal state is WARN, # All the others make it not safe to proceed with dependency # execution if status_dict[test_name] not in ['GOOD', 'WARN']: dependencies_satisfied = False break test_iterations = int(dict.get("iterations", 1)) test_tag = dict.get("shortname") if dependencies_satisfied: # Setting up profilers during test execution. profilers = dict.get("profilers", "").split() for profiler in profilers: job.profilers.add(profiler) # We need only one execution, profiled, hence we're passing # the profile_only parameter to job.run_test(). 
            profile_only = bool(profilers) or None
            current_status = job.run_test_detail(dict.get("vm_type"),
                                                 params=dict,
                                                 tag=test_tag,
                                                 iterations=test_iterations,
                                                 profile_only=profile_only)
            for profiler in profilers:
                job.profilers.delete(profiler)
        else:
            # We will force the test to fail as TestNA during preprocessing
            dict['dependency_failed'] = 'yes'
            current_status = job.run_test_detail(dict.get("vm_type"),
                                                 params=dict,
                                                 tag=test_tag,
                                                 iterations=test_iterations)
        if not current_status:
            failed = True
        status_dict[dict.get("name")] = current_status

    return not failed


def display_attributes(instance):
    """
    Inspects a given class instance attributes and displays them,
    convenient for debugging.
    """
    logging.debug("Attributes set:")
    for member in inspect.getmembers(instance):
        name, value = member
        attribute = getattr(instance, name)
        # Skip dunder names, callables and empty/false values.
        if not (name.startswith("__") or callable(attribute) or not value):
            logging.debug(" %s: %s", name, value)


def get_full_pci_id(pci_id):
    """
    Get full PCI ID of pci_id.

    @param pci_id: PCI ID of a device.
    @return: Full PCI ID string, or None if lspci fails.
    """
    cmd = "lspci -D | awk '/%s/ {print $1}'" % pci_id
    status, full_id = commands.getstatusoutput(cmd)
    if status != 0:
        return None
    return full_id


def get_vendor_from_pci_id(pci_id):
    """
    Check out the device vendor ID according to pci_id.

    @param pci_id: PCI ID of a device.
    """
    cmd = "lspci -n | awk '/%s/ {print $3}'" % pci_id
    # Convert the "vendor:device" form printed by lspci -n to
    # "vendor device".
    return re.sub(":", " ", commands.getoutput(cmd))


class Flag(str):
    """
    Class for easy merge cpuflags.
""" aliases = {} def __new__(cls, flag): if flag in Flag.aliases: flag = Flag.aliases[flag] return str.__new__(cls, flag) def __eq__(self, other): s = set(self.split("|")) o = set(other.split("|")) if s & o: return True else: return False def __hash__(self, *args, **kwargs): return 0 kvm_map_flags_to_test = { Flag('avx') :set(['avx']), Flag('sse3') :set(['sse3']), Flag('ssse3') :set(['ssse3']), Flag('sse4.1|sse4_1|sse4.2|sse4_2'):set(['sse4']), Flag('aes') :set(['aes','pclmul']), Flag('pclmuldq') :set(['pclmul']), Flag('pclmulqdq') :set(['pclmul']), Flag('rdrand') :set(['rdrand']), Flag('sse4a') :set(['sse4a']), Flag('fma4') :set(['fma4']), Flag('xop') :set(['xop']), } kvm_map_flags_aliases = { 'sse4.1' :'sse4_1', 'sse4.2' :'sse4_2', 'pclmulqdq' :'pclmuldq', } def kvm_flags_to_stresstests(flags): """ Covert [cpu flags] to [tests] @param cpuflags: list of cpuflags @return: Return tests like string. """ tests = set([]) for f in flags: tests |= kvm_map_flags_to_test[f] param = "" for f in tests: param += ","+f return param def get_cpu_flags(): """ Returns a list of the CPU flags """ flags_re = re.compile(r'^flags\s*:(.*)') for line in open('/proc/cpuinfo').readlines(): match = flags_re.match(line) if match: return match.groups()[0].split() return [] def get_cpu_vendor(cpu_flags=[], verbose=True): """ Returns the name of the CPU vendor, either intel, amd or unknown """ if not cpu_flags: cpu_flags = get_cpu_flags() if 'vmx' in cpu_flags: vendor = 'intel' elif 'svm' in cpu_flags: vendor = 'amd' else: vendor = 'unknown' if verbose: logging.debug("Detected CPU vendor as '%s'", vendor) return vendor def get_archive_tarball_name(source_dir, tarball_name, compression): ''' Get the name for a tarball file, based on source, name and compression ''' if tarball_name is None: tarball_name = os.path.basename(source_dir) if not tarball_name.endswith('.tar'): tarball_name = '%s.tar' % tarball_name if compression and not tarball_name.endswith('.%s' % compression): tarball_name = 
'%s.%s' % (tarball_name, compression) return tarball_name def archive_as_tarball(source_dir, dest_dir, tarball_name=None, compression='bz2', verbose=True): ''' Saves the given source directory to the given destination as a tarball If the name of the archive is omitted, it will be taken from the source_dir. If it is an absolute path, dest_dir will be ignored. But, if both the destination directory and tarball anem is given, and the latter is not an absolute path, they will be combined. For archiving directory '/tmp' in '/net/server/backup' as file 'tmp.tar.bz2', simply use: >>> virt_utils.archive_as_tarball('/tmp', '/net/server/backup') To save the file it with a different name, say 'host1-tmp.tar.bz2' and save it under '/net/server/backup', use: >>> virt_utils.archive_as_tarball('/tmp', '/net/server/backup', 'host1-tmp') To save with gzip compression instead (resulting in the file '/net/server/backup/host1-tmp.tar.gz'), use: >>> virt_utils.archive_as_tarball('/tmp', '/net/server/backup', 'host1-tmp', 'gz') ''' tarball_name = get_archive_tarball_name(source_dir, tarball_name, compression) if not os.path.isabs(tarball_name): tarball_path = os.path.join(dest_dir, tarball_name) else: tarball_path = tarball_name if verbose: logging.debug('Archiving %s as %s' % (source_dir, tarball_path)) os.chdir(os.path.dirname(source_dir)) tarball = tarfile.TarFile(name=tarball_path, mode='w') tarball = tarball.open(name=tarball_path, mode='w:%s' % compression) tarball.add(os.path.basename(source_dir)) tarball.close() class Thread(threading.Thread): """ Run a function in a background thread. """ def __init__(self, target, args=(), kwargs={}): """ Initialize the instance. @param target: Function to run in the thread. @param args: Arguments to pass to target. @param kwargs: Keyword arguments to pass to target. """ threading.Thread.__init__(self) self._target = target self._args = args self._kwargs = kwargs def run(self): """ Run target (passed to the constructor). 
No point in calling this function directly. Call start() to make this function run in a new thread. """ self._e = None self._retval = None try: try: self._retval = self._target(*self._args, **self._kwargs) except Exception: self._e = sys.exc_info() raise finally: # Avoid circular references (start() may be called only once so # it's OK to delete these) del self._target, self._args, self._kwargs def join(self, timeout=None, suppress_exception=False): """ Join the thread. If target raised an exception, re-raise it. Otherwise, return the value returned by target. @param timeout: Timeout value to pass to threading.Thread.join(). @param suppress_exception: If True, don't re-raise the exception. """ threading.Thread.join(self, timeout) try: if self._e: if not suppress_exception: # Because the exception was raised in another thread, we # need to explicitly insert the current context into it s = error.exception_context(self._e[1]) s = error.join_contexts(error.get_context(), s) error.set_exception_context(self._e[1], s) raise self._e[0], self._e[1], self._e[2] else: return self._retval finally: # Avoid circular references (join() may be called multiple times # so we can't delete these) self._e = None self._retval = None def parallel(targets): """ Run multiple functions in parallel. @param targets: A sequence of tuples or functions. If it's a sequence of tuples, each tuple will be interpreted as (target, args, kwargs) or (target, args) or (target,) depending on its length. If it's a sequence of functions, the functions will be called without arguments. @return: A list of the values returned by the functions called. 
""" threads = [] for target in targets: if isinstance(target, tuple) or isinstance(target, list): t = Thread(*target) else: t = Thread(target) threads.append(t) t.start() return [t.join() for t in threads] class VirtLoggingConfig(logging_config.LoggingConfig): """ Used with the sole purpose of providing convenient logging setup for the KVM test auxiliary programs. """ def configure_logging(self, results_dir=None, verbose=False): super(VirtLoggingConfig, self).configure_logging(use_console=True, verbose=verbose) class PciAssignable(object): """ Request PCI assignable devices on host. It will check whether to request PF (physical Functions) or VF (Virtual Functions). """ def __init__(self, type="vf", driver=None, driver_option=None, names=None, devices_requested=None): """ Initialize parameter 'type' which could be: vf: Virtual Functions pf: Physical Function (actual hardware) mixed: Both includes VFs and PFs If pass through Physical NIC cards, we need to specify which devices to be assigned, e.g. 'eth1 eth2'. If pass through Virtual Functions, we need to specify how many vfs are going to be assigned, e.g. passthrough_count = 8 and max_vfs in config file. @param type: PCI device type. @param driver: Kernel module for the PCI assignable device. @param driver_option: Module option to specify the maximum number of VFs (eg 'max_vfs=7') @param names: Physical NIC cards correspondent network interfaces, e.g.'eth1 eth2 ...' @param devices_requested: Number of devices being requested. """ self.type = type self.driver = driver self.driver_option = driver_option if names: self.name_list = names.split() if devices_requested: self.devices_requested = int(devices_requested) else: self.devices_requested = None def _get_pf_pci_id(self, name, search_str): """ Get the PF PCI ID according to name. @param name: Name of the PCI device. @param search_str: Search string to be used on lspci. 
""" cmd = "ethtool -i %s | awk '/bus-info/ {print $2}'" % name s, pci_id = commands.getstatusoutput(cmd) if not (s or "Cannot get driver information" in pci_id): return pci_id[5:] cmd = "lspci | awk '/%s/ {print $1}'" % search_str pci_ids = [id for id in commands.getoutput(cmd).splitlines()] nic_id = int(re.search('[0-9]+', name).group(0)) if (len(pci_ids) - 1) < nic_id: return None return pci_ids[nic_id] def _release_dev(self, pci_id): """ Release a single PCI device. @param pci_id: PCI ID of a given PCI device. """ base_dir = "/sys/bus/pci" full_id = get_full_pci_id(pci_id) vendor_id = get_vendor_from_pci_id(pci_id) drv_path = os.path.join(base_dir, "devices/%s/driver" % full_id) if 'pci-stub' in os.readlink(drv_path): cmd = "echo '%s' > %s/new_id" % (vendor_id, drv_path) if os.system(cmd): return False stub_path = os.path.join(base_dir, "drivers/pci-stub") cmd = "echo '%s' > %s/unbind" % (full_id, stub_path) if os.system(cmd): return False driver = self.dev_drivers[pci_id] cmd = "echo '%s' > %s/bind" % (full_id, driver) if os.system(cmd): return False return True def get_vf_devs(self): """ Catch all VFs PCI IDs. @return: List with all PCI IDs for the Virtual Functions avaliable """ if not self.sr_iov_setup(): return [] cmd = "lspci | awk '/Virtual Function/ {print $1}'" return commands.getoutput(cmd).split() def get_pf_devs(self): """ Catch all PFs PCI IDs. @return: List with all PCI IDs for the physical hardware requested """ pf_ids = [] for name in self.name_list: pf_id = self._get_pf_pci_id(name, "Ethernet") if not pf_id: continue pf_ids.append(pf_id) return pf_ids def get_devs(self, count): """ Check out all devices' PCI IDs according to their name. 
@param count: count number of PCI devices needed for pass through @return: a list of all devices' PCI IDs """ if self.type == "vf": vf_ids = self.get_vf_devs() elif self.type == "pf": vf_ids = self.get_pf_devs() elif self.type == "mixed": vf_ids = self.get_vf_devs() vf_ids.extend(self.get_pf_devs()) return vf_ids[0:count] def get_vfs_count(self): """ Get VFs count number according to lspci. """ # FIXME: Need to think out a method of identify which # 'virtual function' belongs to which physical card considering # that if the host has more than one 82576 card. PCI_ID? cmd = "lspci | grep 'Virtual Function' | wc -l" return int(commands.getoutput(cmd)) def check_vfs_count(self): """ Check VFs count number according to the parameter driver_options. """ # Network card 82576 has two network interfaces and each can be # virtualized up to 7 virtual functions, therefore we multiply # two for the value of driver_option 'max_vfs'. expected_count = int((re.findall("(\d)", self.driver_option)[0])) * 2 return (self.get_vfs_count == expected_count) def is_binded_to_stub(self, full_id): """ Verify whether the device with full_id is already binded to pci-stub. @param full_id: Full ID for the given PCI device """ base_dir = "/sys/bus/pci" stub_path = os.path.join(base_dir, "drivers/pci-stub") if os.path.exists(os.path.join(stub_path, full_id)): return True return False def sr_iov_setup(self): """ Ensure the PCI device is working in sr_iov mode. Check if the PCI hardware device drive is loaded with the appropriate, parameters (number of VFs), and if it's not, perform setup. @return: True, if the setup was completed successfuly, False otherwise. 
""" re_probe = False s, o = commands.getstatusoutput('lsmod | grep %s' % self.driver) if s: re_probe = True elif not self.check_vfs_count(): os.system("modprobe -r %s" % self.driver) re_probe = True else: return True # Re-probe driver with proper number of VFs if re_probe: cmd = "modprobe %s %s" % (self.driver, self.driver_option) logging.info("Loading the driver '%s' with option '%s'", self.driver, self.driver_option) s, o = commands.getstatusoutput(cmd) if s: return False return True def request_devs(self): """ Implement setup process: unbind the PCI device and then bind it to the pci-stub driver. @return: a list of successfully requested devices' PCI IDs. """ base_dir = "/sys/bus/pci" stub_path = os.path.join(base_dir, "drivers/pci-stub") self.pci_ids = self.get_devs(self.devices_requested) logging.debug("The following pci_ids were found: %s", self.pci_ids) requested_pci_ids = [] self.dev_drivers = {} # Setup all devices specified for assignment to guest for pci_id in self.pci_ids: full_id = get_full_pci_id(pci_id) if not full_id: continue drv_path = os.path.join(base_dir, "devices/%s/driver" % full_id) dev_prev_driver = os.path.realpath(os.path.join(drv_path, os.readlink(drv_path))) self.dev_drivers[pci_id] = dev_prev_driver # Judge whether the device driver has been binded to stub if not self.is_binded_to_stub(full_id): logging.debug("Binding device %s to stub", full_id) vendor_id = get_vendor_from_pci_id(pci_id) stub_new_id = os.path.join(stub_path, 'new_id') unbind_dev = os.path.join(drv_path, 'unbind') stub_bind = os.path.join(stub_path, 'bind') info_write_to_files = [(vendor_id, stub_new_id), (full_id, unbind_dev), (full_id, stub_bind)] for content, file in info_write_to_files: try: utils.open_write_close(file, content) except IOError: logging.debug("Failed to write %s to file %s", content, file) continue if not self.is_binded_to_stub(full_id): logging.error("Binding device %s to stub failed", pci_id) continue else: logging.debug("Device %s already binded 
to stub", pci_id) requested_pci_ids.append(pci_id) self.pci_ids = requested_pci_ids return self.pci_ids def release_devs(self): """ Release all PCI devices currently assigned to VMs back to the virtualization host. """ try: for pci_id in self.dev_drivers: if not self._release_dev(pci_id): logging.error("Failed to release device %s to host", pci_id) else: logging.info("Released device %s successfully", pci_id) except Exception: return class KojiClient(object): """ Stablishes a connection with the build system, either koji or brew. This class provides convenience methods to retrieve information on packages and the packages themselves hosted on the build system. Packages should be specified in the KojiPgkSpec syntax. """ CMD_LOOKUP_ORDER = ['/usr/bin/brew', '/usr/bin/koji' ] CONFIG_MAP = {'/usr/bin/brew': '/etc/brewkoji.conf', '/usr/bin/koji': '/etc/koji.conf'} def __init__(self, cmd=None): """ Verifies whether the system has koji or brew installed, then loads the configuration file that will be used to download the files. @type cmd: string @param cmd: Optional command name, either 'brew' or 'koji'. If not set, get_default_command() is used and to look for one of them. 
        @raise: ValueError
        """
        if not KOJI_INSTALLED:
            raise ValueError('No koji/brew installed on the machine')

        # Instance variables used by many methods
        self.command = None
        self.config = None
        self.config_options = {}
        self.session = None

        # Set koji command or get default
        if cmd is None:
            self.command = self.get_default_command()
        else:
            self.command = cmd

        # Check koji command
        if not self.is_command_valid():
            raise ValueError('Koji command "%s" is not valid' % self.command)

        # Assuming command is valid, set configuration file and read it
        self.config = self.CONFIG_MAP[self.command]
        self.read_config()

        # Setup koji session
        server_url = self.config_options['server']
        session_options = self.get_session_options()
        self.session = koji.ClientSession(server_url,
                                          session_options)

    def read_config(self, check_is_valid=True):
        '''
        Reads options from the Koji configuration file

        By default it checks if the koji configuration is valid

        @type check_is_valid: boolean
        @param check_is_valid: whether to include a check on the configuration
        @raises: ValueError
        @returns: None
        '''
        if check_is_valid:
            if not self.is_config_valid():
                raise ValueError('Koji config "%s" is not valid' % self.config)

        config = ConfigParser.ConfigParser()
        config.read(self.config)

        # Options live in the section named after the command's basename
        # ('koji' or 'brew').
        basename = os.path.basename(self.command)
        for name, value in config.items(basename):
            self.config_options[name] = value

    def get_session_options(self):
        '''
        Filter only options necessary for setting up a koji client session

        @returns: only the options used for session setup
        '''
        session_options = {}
        for name, value in self.config_options.items():
            if name in ('user', 'password', 'debug_xmlrpc', 'debug'):
                session_options[name] = value
        return session_options

    def is_command_valid(self):
        '''
        Checks if the currently set koji command is valid

        @returns: True or False
        '''
        koji_command_ok = True

        if not os.path.isfile(self.command):
            logging.error('Koji command "%s" is not a regular file',
                          self.command)
            koji_command_ok = False

        # Non-executable is only a warning, deliberately not fatal.
        if not os.access(self.command, os.X_OK):
            logging.warn('Koji command "%s" is not executable: this is '
                         'not fatal but indicates an unexpected situation',
                         self.command)

        if not self.command in self.CONFIG_MAP.keys():
            logging.error('Koji command "%s" does not have a configuration '
                          'file associated to it', self.command)
            koji_command_ok = False

        return koji_command_ok

    def is_config_valid(self):
        '''
        Checks if the currently set koji configuration is valid

        @returns: True or False
        '''
        koji_config_ok = True

        if not os.path.isfile(self.config):
            logging.error('Koji config "%s" is not a regular file', self.config)
            koji_config_ok = False

        if not os.access(self.config, os.R_OK):
            logging.error('Koji config "%s" is not readable', self.config)
            koji_config_ok = False

        config = ConfigParser.ConfigParser()
        config.read(self.config)
        basename = os.path.basename(self.command)
        if not config.has_section(basename):
            logging.error('Koji configuration file "%s" does not have a '
                          'section "%s", named after the base name of the '
                          'currently set koji command "%s"', self.config,
                          basename, self.command)
            koji_config_ok = False

        return koji_config_ok

    def get_default_command(self):
        '''
        Looks up for koji or brew "binaries" on the system

        Systems with plain koji usually don't have a brew cmd, while systems
        with koji, have *both* koji and brew utilities. So we look for brew
        first, and if found, we consider that the system is configured for
        brew. If not, we consider this is a system with plain koji.

        @returns: either koji or brew command line executable path, or None
        '''
        koji_command = None
        for command in self.CMD_LOOKUP_ORDER:
            if os.path.isfile(command):
                koji_command = command
                break
            else:
                # Not at the canonical path; try to resolve the bare command
                # name through the system PATH.
                koji_command_basename = os.path.basename(command)
                try:
                    koji_command = os_dep.command(koji_command_basename)
                    break
                except ValueError:
                    pass
        return koji_command

    def get_pkg_info(self, pkg):
        '''
        Returns information from Koji on the package

        @type pkg: KojiPkgSpec
        @param pkg: information about the package, as a KojiPkgSpec instance

        @returns: information from Koji about the specified package
        '''
        info = {}
        if pkg.build is not None:
            info = self.session.getBuild(int(pkg.build))
        elif pkg.tag is not None and pkg.package is not None:
            builds = self.session.listTagged(pkg.tag, latest=True,
                                             inherit=True,
                                             package=pkg.package)
            if builds:
                info = builds[0]
        return info

    def is_pkg_valid(self, pkg):
        '''
        Checks if this package is altogether valid on Koji

        This verifies if the build or tag specified in the package
        specification actually exist on the Koji server

        @returns: True or False
        '''
        valid = True
        if pkg.build:
            if not self.is_pkg_spec_build_valid(pkg):
                valid = False
        elif pkg.tag:
            if not self.is_pkg_spec_tag_valid(pkg):
                valid = False
        else:
            valid = False
        return valid

    def is_pkg_spec_build_valid(self, pkg):
        '''
        Checks if build is valid on Koji

        @param pkg: a Pkg instance
        '''
        if pkg.build is not None:
            info = self.session.getBuild(int(pkg.build))
            if info:
                return True
        return False

    def is_pkg_spec_tag_valid(self, pkg):
        '''
        Checks if tag is valid on Koji

        @type pkg: KojiPkgSpec
        @param pkg: a package specification
        '''
        if pkg.tag is not None:
            tag = self.session.getTag(pkg.tag)
            if tag:
                return True
        return False

    def get_pkg_rpm_info(self, pkg, arch=None):
        '''
        Returns a list of infomation on the RPM packages found on koji

        @type pkg: KojiPkgSpec
        @param pkg: a package specification
        @type arch: string
        @param arch: packages built for this architecture, but also including
                architecture independent (noarch) packages
        '''
        if arch is None:
            arch = utils.get_arch()
        rpms = []
        info = self.get_pkg_info(pkg)
        if info:
            rpms = self.session.listRPMs(buildID=info['id'],
                                         arches=[arch, 'noarch'])
            if pkg.subpackages:
                # Narrow down to the explicitly requested subpackages only
                rpms = [d for d in rpms if d['name'] in pkg.subpackages]
        return rpms

    def get_pkg_rpm_names(self, pkg, arch=None):
        '''
        Gets the names for the RPM packages specified in pkg

        @type pkg: KojiPkgSpec
        @param pkg: a package specification
        @type arch: string
        @param arch: packages built for this architecture, but also including
                architecture independent (noarch) packages
        '''
        if arch is None:
            arch = utils.get_arch()
        rpms = self.get_pkg_rpm_info(pkg, arch)
        return [rpm['name'] for rpm in rpms]

    def get_pkg_rpm_file_names(self, pkg, arch=None):
        '''
        Gets the file names for the RPM packages specified in pkg

        @type pkg: KojiPkgSpec
        @param pkg: a package specification
        @type arch: string
        @param arch: packages built for this architecture, but also including
                architecture independent (noarch) packages
        '''
        if arch is None:
            arch = utils.get_arch()
        rpm_names = []
        rpms = self.get_pkg_rpm_info(pkg, arch)
        for rpm in rpms:
            arch_rpm_name = koji.pathinfo.rpm(rpm)
            rpm_name = os.path.basename(arch_rpm_name)
            rpm_names.append(rpm_name)
        return rpm_names

    def get_pkg_urls(self, pkg, arch=None):
        '''
        Gets the urls for the packages specified in pkg

        @type pkg: KojiPkgSpec
        @param pkg: a package specification
        @type arch: string
        @param arch: packages built for this architecture, but also including
                architecture independent (noarch) packages
        '''
        info = self.get_pkg_info(pkg)
        rpms = self.get_pkg_rpm_info(pkg, arch)
        rpm_urls = []
        # Prefer an explicit 'pkgurl'; otherwise derive from 'topurl'
        if self.config_options.has_key('pkgurl'):
            base_url = self.config_options['pkgurl']
        else:
            base_url = "%s/%s" % (self.config_options['topurl'],
                                  'packages')
        for rpm in rpms:
            rpm_name = koji.pathinfo.rpm(rpm)
            url = ("%s/%s/%s/%s/%s" % (base_url,
                                       info['package_name'],
                                       info['version'], info['release'],
                                       rpm_name))
            rpm_urls.append(url)
        return rpm_urls

    def get_pkgs(self, pkg, dst_dir, arch=None):
        '''
        Download the
packages @type pkg: KojiPkgSpec @param pkg: a package specification @type dst_dir: string @param dst_dir: the destination directory, where the downloaded packages will be saved on @type arch: string @param arch: packages built for this architecture, but also including architecture independent (noarch) packages ''' rpm_urls = self.get_pkg_urls(pkg, arch) for url in rpm_urls: utils.get_file(url, os.path.join(dst_dir, os.path.basename(url))) DEFAULT_KOJI_TAG = None def set_default_koji_tag(tag): ''' Sets the default tag that will be used ''' global DEFAULT_KOJI_TAG DEFAULT_KOJI_TAG = tag def get_default_koji_tag(): return DEFAULT_KOJI_TAG class KojiPkgSpec(object): ''' A package specification syntax parser for Koji This holds information on either tag or build, and packages to be fetched from koji and possibly installed (features external do this class). New objects can be created either by providing information in the textual format or by using the actual parameters for tag, build, package and sub- packages. The textual format is useful for command line interfaces and configuration files, while using parameters is better for using this in a programatic fashion. The following sets of examples are interchangeable. 
Specifying all packages part of build number 1000: >>> from kvm_utils import KojiPkgSpec >>> pkg = KojiPkgSpec('1000') >>> pkg = KojiPkgSpec(build=1000) Specifying only a subset of packages of build number 1000: >>> pkg = KojiPkgSpec('1000:kernel,kernel-devel') >>> pkg = KojiPkgSpec(build=1000, subpackages=['kernel', 'kernel-devel']) Specifying the latest build for the 'kernel' package tagged with 'dist-f14': >>> pkg = KojiPkgSpec('dist-f14:kernel') >>> pkg = KojiPkgSpec(tag='dist-f14', package='kernel') Specifying the 'kernel' package using the default tag: >>> kvm_utils.set_default_koji_tag('dist-f14') >>> pkg = KojiPkgSpec('kernel') >>> pkg = KojiPkgSpec(package='kernel') Specifying the 'kernel' package using the default tag: >>> kvm_utils.set_default_koji_tag('dist-f14') >>> pkg = KojiPkgSpec('kernel') >>> pkg = KojiPkgSpec(package='kernel') If you do not specify a default tag, and give a package name without an explicit tag, your package specification is considered invalid: >>> print kvm_utils.get_default_koji_tag() None >>> print kvm_utils.KojiPkgSpec('kernel').is_valid() False >>> print kvm_utils.KojiPkgSpec(package='kernel').is_valid() False ''' SEP = ':' def __init__(self, text='', tag=None, build=None, package=None, subpackages=[]): ''' Instantiates a new KojiPkgSpec object @type text: string @param text: a textual representation of a package on Koji that will be parsed @type tag: string @param tag: a koji tag, example: Fedora-14-RELEASE (see U{http://fedoraproject.org/wiki/Koji#Tags_and_Targets}) @type build: number @param build: a koji build, example: 1001 (see U{http://fedoraproject.org/wiki/Koji#Koji_Architecture}) @type package: string @param package: a koji package, example: python (see U{http://fedoraproject.org/wiki/Koji#Koji_Architecture}) @type subpackages: list of strings @param subpackages: a list of package names, usually a subset of the RPM packages generated by a given build ''' # Set to None to indicate 'not set' (and be able to use 'is') 
self.tag = None self.build = None self.package = None self.subpackages = [] self.default_tag = None # Textual representation takes precedence (most common use case) if text: self.parse(text) else: self.tag = tag self.build = build self.package = package self.subpackages = subpackages # Set the default tag, if set, as a fallback if not self.build and not self.tag: default_tag = get_default_koji_tag() if default_tag is not None: self.tag = default_tag def parse(self, text): ''' Parses a textual representation of a package specification @type text: string @param text: textual representation of a package in koji ''' parts = text.count(self.SEP) + 1 if parts == 1: if text.isdigit(): self.build = text else: self.package = text elif parts == 2: part1, part2 = text.split(self.SEP) if part1.isdigit(): self.build = part1 self.subpackages = part2.split(',') else: self.tag = part1 self.package = part2 elif parts >= 3: # Instead of erroring on more arguments, we simply ignore them # This makes the parser suitable for future syntax additions, such # as specifying the package architecture part1, part2, part3 = text.split(self.SEP)[0:3] self.tag = part1 self.package = part2 self.subpackages = part3.split(',') def _is_invalid_neither_tag_or_build(self): ''' Checks if this package is invalid due to not having either a valid tag or build set, that is, both are empty. @returns: True if this is invalid and False if it's valid ''' return (self.tag is None and self.build is None) def _is_invalid_package_but_no_tag(self): ''' Checks if this package is invalid due to having a package name set but tag or build set, that is, both are empty. @returns: True if this is invalid and False if it's valid ''' return (self.package and not self.tag) def _is_invalid_subpackages_but_no_main_package(self): ''' Checks if this package is invalid due to having a tag set (this is Ok) but specifying subpackage names without specifying the main package name. 
Specifying subpackages without a main package name is only valid when a build is used instead of a tag. @returns: True if this is invalid and False if it's valid ''' return (self.tag and self.subpackages and not self.package) def is_valid(self): ''' Checks if this package specification is valid. Being valid means that it has enough and not conflicting information. It does not validate that the packages specified actually existe on the Koji server. @returns: True or False ''' if self._is_invalid_neither_tag_or_build(): return False elif self._is_invalid_package_but_no_tag(): return False elif self._is_invalid_subpackages_but_no_main_package(): return False return True def describe_invalid(self): ''' Describes why this is not valid, in a human friendly way ''' if self._is_invalid_neither_tag_or_build(): return 'neither a tag or build are set, and of them should be set' elif self._is_invalid_package_but_no_tag(): return 'package name specified but no tag is set' elif self._is_invalid_subpackages_but_no_main_package(): return 'subpackages specified but no main package is set' return 'unkwown reason, seems to be valid' def describe(self): ''' Describe this package specification, in a human friendly way @returns: package specification description ''' if self.is_valid(): description = '' if not self.subpackages: description += 'all subpackages from %s ' % self.package else: description += ('only subpackage(s) %s from package %s ' % (', '.join(self.subpackages), self.package)) if self.build: description += 'from build %s' % self.build elif self.tag: description += 'tagged with %s' % self.tag else: raise ValueError, 'neither build or tag is set' return description else: return ('Invalid package specification: %s' % self.describe_invalid()) def to_text(self): ''' Return the textual representation of this package spec The output should be consumable by parse() and produce the same package specification. 
We find that it's acceptable to put the currently set default tag as the package explicit tag in the textual definition for completeness. @returns: package specification in a textual representation ''' default_tag = get_default_koji_tag() if self.build: if self.subpackages: return "%s:%s" % (self.build, ",".join(self.subpackages)) else: return "%s" % self.build elif self.tag: if self.subpackages: return "%s:%s:%s" % (self.tag, self.package, ",".join(self.subpackages)) else: return "%s:%s" % (self.tag, self.package) elif default_tag is not None: # neither build or tag is set, try default_tag as a fallback if self.subpackages: return "%s:%s:%s" % (default_tag, self.package, ",".join(self.subpackages)) else: return "%s:%s" % (default_tag, self.package) else: raise ValueError, 'neither build or tag is set' def __repr__(self): return ("<KojiPkgSpec tag=%s build=%s pkg=%s subpkgs=%s>" % (self.tag, self.build, self.package, ", ".join(self.subpackages))) def umount(src, mount_point, type): """ Umount the src mounted in mount_point. @src: mount source @mount_point: mount point @type: file system type """ mount_string = "%s %s %s" % (src, mount_point, type) if mount_string in file("/etc/mtab").read(): umount_cmd = "umount %s" % mount_point try: utils.system(umount_cmd) return True except error.CmdError: return False else: logging.debug("%s is not mounted under %s", src, mount_point) return True def mount(src, mount_point, type, perm="rw"): """ Mount the src into mount_point of the host. 
@src: mount source @mount_point: mount point @type: file system type @perm: mount premission """ umount(src, mount_point, type) mount_string = "%s %s %s %s" % (src, mount_point, type, perm) if mount_string in file("/etc/mtab").read(): logging.debug("%s is already mounted in %s with %s", src, mount_point, perm) return True mount_cmd = "mount -t %s %s %s -o %s" % (type, src, mount_point, perm) try: utils.system(mount_cmd) except error.CmdError: return False logging.debug("Verify the mount through /etc/mtab") if mount_string in file("/etc/mtab").read(): logging.debug("%s is successfully mounted", src) return True else: logging.error("Can't find mounted NFS share - /etc/mtab contents \n%s", file("/etc/mtab").read()) return False class GitRepoParamHelper(git.GitRepoHelper): ''' Helps to deal with git repos specified in cartersian config files This class attempts to make it simple to manage a git repo, by using a naming standard that follows this basic syntax: <prefix>_name_<suffix> <prefix> is always 'git_repo' and <suffix> sets options for this git repo. Example for repo named foo: git_repo_foo_uri = git://git.foo.org/foo.git git_repo_foo_base_uri = /home/user/code/foo git_repo_foo_branch = master git_repo_foo_lbranch = master git_repo_foo_commit = bb5fb8e678aabe286e74c4f2993dc2a9e550b627 ''' def __init__(self, params, name, destination_dir): ''' Instantiates a new GitRepoParamHelper ''' self.params = params self.name = name self.destination_dir = destination_dir self._parse_params() def _parse_params(self): ''' Parses the params items for entries related to this repo This method currently does everything that the parent class __init__() method does, that is, sets all instance variables needed by other methods. That means it's not strictly necessary to call parent's __init__(). 
''' config_prefix = 'git_repo_%s' % self.name logging.debug('Parsing parameters for git repo %s, configuration ' 'prefix is %s' % (self.name, config_prefix)) self.base_uri = self.params.get('%s_base_uri' % config_prefix) if self.base_uri is None: logging.debug('Git repo %s base uri is not set' % self.name) else: logging.debug('Git repo %s base uri: %s' % (self.name, self.base_uri)) self.uri = self.params.get('%s_uri' % config_prefix) logging.debug('Git repo %s uri: %s' % (self.name, self.uri)) self.branch = self.params.get('%s_branch' % config_prefix, 'master') logging.debug('Git repo %s branch: %s' % (self.name, self.branch)) self.lbranch = self.params.get('%s_lbranch' % config_prefix) if self.lbranch is None: self.lbranch = self.branch logging.debug('Git repo %s lbranch: %s' % (self.name, self.lbranch)) self.commit = self.params.get('%s_commit' % config_prefix) if self.commit is None: logging.debug('Git repo %s commit is not set' % self.name) else: logging.debug('Git repo %s commit: %s' % (self.name, self.commit)) self.cmd = os_dep.command('git') class LocalSourceDirHelper(object): ''' Helper class to deal with source code sitting somewhere in the filesystem ''' def __init__(self, source_dir, destination_dir): ''' @param source_dir: @param destination_dir: @return: new LocalSourceDirHelper instance ''' self.source = source_dir self.destination = destination_dir def execute(self): ''' Copies the source directory to the destination directory ''' if os.path.isdir(self.destination): shutil.rmtree(self.destination) if os.path.isdir(self.source): shutil.copytree(self.source, self.destination) class LocalSourceDirParamHelper(LocalSourceDirHelper): ''' Helps to deal with source dirs specified in cartersian config files This class attempts to make it simple to manage a source dir, by using a naming standard that follows this basic syntax: <prefix>_name_<suffix> <prefix> is always 'local_src' and <suffix> sets options for this source dir. 
Example for source dir named foo: local_src_foo_path = /home/user/foo ''' def __init__(self, params, name, destination_dir): ''' Instantiate a new LocalSourceDirParamHelper ''' self.params = params self.name = name self.destination_dir = destination_dir self._parse_params() def _parse_params(self): ''' Parses the params items for entries related to source dir ''' config_prefix = 'local_src_%s' % self.name logging.debug('Parsing parameters for local source %s, configuration ' 'prefix is %s' % (self.name, config_prefix)) self.path = self.params.get('%s_path' % config_prefix) logging.debug('Local source directory %s path: %s' % (self.name, self.path)) self.source = self.path self.destination = self.destination_dir class LocalTarHelper(object): ''' Helper class to deal with source code in a local tarball ''' def __init__(self, source, destination_dir): self.source = source self.destination = destination_dir def extract(self): ''' Extracts the tarball into the destination directory ''' if os.path.isdir(self.destination): shutil.rmtree(self.destination) if os.path.isfile(self.source) and tarfile.is_tarfile(self.source): name = os.path.basename(self.destination) temp_dir = os.path.join(os.path.dirname(self.destination), '%s.tmp' % name) logging.debug('Temporary directory for extracting tarball is %s' % temp_dir) if not os.path.isdir(temp_dir): os.makedirs(temp_dir) tarball = tarfile.open(self.source) tarball.extractall(temp_dir) # # If there's a directory at the toplevel of the tarfile, assume # it's the root for the contents, usually source code # tarball_info = tarball.members[0] if tarball_info.isdir(): content_path = os.path.join(temp_dir, tarball_info.name) else: content_path = temp_dir # # Now move the content directory to the final destination # shutil.move(content_path, self.destination) else: raise OSError("%s is not a file or tar file" % self.source) def execute(self): ''' Executes all action this helper is suposed to perform This is the main entry point method 
for this class, and all other helper classes. ''' self.extract() class LocalTarParamHelper(LocalTarHelper): ''' Helps to deal with source tarballs specified in cartersian config files This class attempts to make it simple to manage a tarball with source code, by using a naming standard that follows this basic syntax: <prefix>_name_<suffix> <prefix> is always 'local_tar' and <suffix> sets options for this source tarball. Example for source tarball named foo: local_tar_foo_path = /tmp/foo-1.0.tar.gz ''' def __init__(self, params, name, destination_dir): ''' Instantiates a new LocalTarParamHelper ''' self.params = params self.name = name self.destination_dir = destination_dir self._parse_params() def _parse_params(self): ''' Parses the params items for entries related to this local tar helper ''' config_prefix = 'local_tar_%s' % self.name logging.debug('Parsing parameters for local tar %s, configuration ' 'prefix is %s' % (self.name, config_prefix)) self.path = self.params.get('%s_path' % config_prefix) logging.debug('Local source tar %s path: %s' % (self.name, self.path)) self.source = self.path self.destination = self.destination_dir class RemoteTarHelper(LocalTarHelper): ''' Helper that fetches a tarball and extracts it locally ''' def __init__(self, source_uri, destination_dir): self.source = source_uri self.destination = destination_dir def execute(self): ''' Executes all action this helper class is suposed to perform This is the main entry point method for this class, and all other helper classes. This implementation fetches the remote tar file and then extracts it using the functionality present in the parent class. 
''' name = os.path.basename(self.source) base_dest = os.path.dirname(self.destination_dir) dest = os.path.join(base_dest, name) utils.get_file(self.source, dest) self.source = dest self.extract() class RemoteTarParamHelper(RemoteTarHelper): ''' Helps to deal with remote source tarballs specified in cartersian config This class attempts to make it simple to manage a tarball with source code, by using a naming standard that follows this basic syntax: <prefix>_name_<suffix> <prefix> is always 'local_tar' and <suffix> sets options for this source tarball. Example for source tarball named foo: remote_tar_foo_uri = http://foo.org/foo-1.0.tar.gz ''' def __init__(self, params, name, destination_dir): ''' Instantiates a new RemoteTarParamHelper instance ''' self.params = params self.name = name self.destination_dir = destination_dir self._parse_params() def _parse_params(self): ''' Parses the params items for entries related to this remote tar helper ''' config_prefix = 'remote_tar_%s' % self.name logging.debug('Parsing parameters for remote tar %s, configuration ' 'prefix is %s' % (self.name, config_prefix)) self.uri = self.params.get('%s_uri' % config_prefix) logging.debug('Remote source tar %s uri: %s' % (self.name, self.uri)) self.source = self.uri self.destination = self.destination_dir class PatchHelper(object): ''' Helper that encapsulates the patching of source code with patch files ''' def __init__(self, source_dir, patches): ''' Initializes a new PatchHelper ''' self.source_dir = source_dir self.patches = patches def download(self): ''' Copies patch files from remote locations to the source directory ''' for patch in self.patches: utils.get_file(patch, os.path.join(self.source_dir, os.path.basename(patch))) def patch(self): ''' Patches the source dir with all patch files ''' os.chdir(self.source_dir) for patch in self.patches: patch_file = os.path.join(self.source_dir, os.path.basename(patch)) utils.system('patch -p1 < %s' % os.path.basename(patch)) def 
execute(self): ''' Performs all steps necessary to download patches and apply them ''' self.download() self.patch() class PatchParamHelper(PatchHelper): ''' Helps to deal with patches specified in cartersian config files This class attempts to make it simple to patch source coude, by using a naming standard that follows this basic syntax: [<git_repo>|<local_src>|<local_tar>|<remote_tar>]_<name>_patches <prefix> is either a 'local_src' or 'git_repo', that, together with <name> specify a directory containing source code to receive the patches. That is, for source code coming from git repo foo, patches would be specified as: git_repo_foo_patches = ['http://foo/bar.patch', 'http://foo/baz.patch'] And for for patches to be applied on local source code named also foo: local_src_foo_patches = ['http://foo/bar.patch', 'http://foo/baz.patch'] ''' def __init__(self, params, prefix, source_dir): ''' Initializes a new PatchParamHelper instance ''' self.params = params self.prefix = prefix self.source_dir = source_dir self._parse_params() def _parse_params(self): ''' Parses the params items for entries related to this set of patches This method currently does everything that the parent class __init__() method does, that is, sets all instance variables needed by other methods. That means it's not strictly necessary to call parent's __init__(). 
''' logging.debug('Parsing patch parameters for prefix %s' % self.prefix) patches_param_key = '%s_patches' % self.prefix self.patches_str = self.params.get(patches_param_key, '[]') logging.debug('Patches config for prefix %s: %s' % (self.prefix, self.patches_str)) self.patches = eval(self.patches_str) logging.debug('Patches for prefix %s: %s' % (self.prefix, ", ".join(self.patches))) class GnuSourceBuildInvalidSource(Exception): ''' Exception raised when build source dir/file is not valid ''' pass class SourceBuildFailed(Exception): ''' Exception raised when building with parallel jobs fails This serves as feedback for code using *BuildHelper ''' pass class SourceBuildParallelFailed(Exception): ''' Exception raised when building with parallel jobs fails This serves as feedback for code using *BuildHelper ''' pass class GnuSourceBuildHelper(object): ''' Handles software installation of GNU-like source code This basically means that the build will go though the classic GNU autotools steps: ./configure, make, make install ''' def __init__(self, source, build_dir, prefix, configure_options=[]): ''' @type source: string @param source: source directory or tarball @type prefix: string @param prefix: installation prefix @type build_dir: string @param build_dir: temporary directory used for building the source code @type configure_options: list @param configure_options: options to pass to configure @throws: GnuSourceBuildInvalidSource ''' self.source = source self.build_dir = build_dir self.prefix = prefix self.configure_options = configure_options self.install_debug_info = True self.include_pkg_config_path() def include_pkg_config_path(self): ''' Adds the current prefix to the list of paths that pkg-config searches This is currently not optional as there is no observed adverse side effects of enabling this. 
As the "prefix" is usually only valid during a test run, we believe that having other pkg-config files (*.pc) in either '<prefix>/share/pkgconfig' or '<prefix>/lib/pkgconfig' is exactly for the purpose of using them. @returns: None ''' env_var = 'PKG_CONFIG_PATH' include_paths = [os.path.join(self.prefix, 'share', 'pkgconfig'), os.path.join(self.prefix, 'lib', 'pkgconfig')] if os.environ.has_key(env_var): paths = os.environ[env_var].split(':') for include_path in include_paths: if include_path not in paths: paths.append(include_path) os.environ[env_var] = ':'.join(paths) else: os.environ[env_var] = ':'.join(include_paths) logging.debug('PKG_CONFIG_PATH is: %s' % os.environ['PKG_CONFIG_PATH']) def get_configure_path(self): ''' Checks if 'configure' exists, if not, return 'autogen.sh' as a fallback ''' configure_path = os.path.abspath(os.path.join(self.source, "configure")) autogen_path = os.path.abspath(os.path.join(self.source, "autogen.sh")) if os.path.exists(configure_path): return configure_path elif os.path.exists(autogen_path): return autogen_path else: raise GnuSourceBuildInvalidSource('configure script does not exist') def get_available_configure_options(self): ''' Return the list of available options of a GNU like configure script This will run the "configure" script at the source directory @returns: list of options accepted by configure script ''' help_raw = utils.system_output('%s --help' % self.get_configure_path(), ignore_status=True) help_output = help_raw.split("\n") option_list = [] for line in help_output: cleaned_line = line.lstrip() if cleaned_line.startswith("--"): option = cleaned_line.split()[0] option = option.split("=")[0] option_list.append(option) return option_list def enable_debug_symbols(self): ''' Enables option that leaves debug symbols on compiled software This makes debugging a lot easier. 
''' enable_debug_option = "--disable-strip" if enable_debug_option in self.get_available_configure_options(): self.configure_options.append(enable_debug_option) logging.debug('Enabling debug symbols with option: %s' % enable_debug_option) def get_configure_command(self): ''' Formats configure script with all options set @returns: string with all configure options, including prefix ''' prefix_option = "--prefix=%s" % self.prefix options = self.configure_options options.append(prefix_option) return "%s %s" % (self.get_configure_path(), " ".join(options)) def configure(self): ''' Runs the "configure" script passing apropriate command line options ''' configure_command = self.get_configure_command() logging.info('Running configure on build dir') os.chdir(self.build_dir) utils.system(configure_command) def make_parallel(self): ''' Runs "make" using the correct number of parallel jobs ''' parallel_make_jobs = utils.count_cpus() make_command = "make -j %s" % parallel_make_jobs logging.info("Running parallel make on build dir") os.chdir(self.build_dir) utils.system(make_command) def make_non_parallel(self): ''' Runs "make", using a single job ''' os.chdir(self.build_dir) utils.system("make") def make_clean(self): ''' Runs "make clean" ''' os.chdir(self.build_dir) utils.system("make clean") def make(self, failure_feedback=True): ''' Runs a parallel make, falling back to a single job in failure @param failure_feedback: return information on build failure by raising the appropriate exceptions @raise: SourceBuildParallelFailed if parallel build fails, or SourceBuildFailed if single job build fails ''' try: self.make_parallel() except error.CmdError: try: self.make_clean() self.make_non_parallel() except error.CmdError: if failure_feedback: raise SourceBuildFailed if failure_feedback: raise SourceBuildParallelFailed def make_install(self): ''' Runs "make install" ''' os.chdir(self.build_dir) utils.system("make install") install = make_install def execute(self): ''' Runs 
        appropriate steps for *building* this source code tree
        '''
        if self.install_debug_info:
            self.enable_debug_symbols()
        self.configure()
        self.make()


class LinuxKernelBuildHelper(object):
    '''
    Handles Building Linux Kernel.
    '''
    def __init__(self, params, prefix, source):
        '''
        @type params: dict
        @param params: dictionary containing the test parameters
        @type source: string
        @param source: source directory or tarball
        @type prefix: string
        @param prefix: installation prefix
        '''
        self.params = params
        self.prefix = prefix
        self.source = source
        self._parse_params()

    def _parse_params(self):
        '''
        Parses the params items for entries related to guest kernel
        '''
        configure_opt_key = '%s_config' % self.prefix
        self.config = self.params.get(configure_opt_key, '')

        build_image_key = '%s_build_image' % self.prefix
        self.build_image = self.params.get(build_image_key,
                                           'arch/x86/boot/bzImage')

        build_target_key = '%s_build_target' % self.prefix
        self.build_target = self.params.get(build_target_key, 'bzImage')

        kernel_path_key = '%s_kernel_path' % self.prefix
        default_kernel_path = os.path.join('/tmp/kvm_autotest_root/images',
                                           self.build_target)
        self.kernel_path = self.params.get(kernel_path_key,
                                           default_kernel_path)

        logging.info('Parsing Linux kernel build parameters for %s',
                     self.prefix)

    def make_guest_kernel(self):
        '''
        Builds the guest kernel with a parallel make

        (Original docstring incorrectly said "single job": the method
        computes a -j job count from utils.count_cpus() below.)
        '''
        os.chdir(self.source)
        logging.info("Building guest kernel")
        logging.debug("Kernel config is %s" % self.config)
        utils.get_file(self.config, '.config')

        # FIXME currently no support for builddir
        # run old config
        utils.system('yes "" | make oldconfig > /dev/null')
        parallel_make_jobs = utils.count_cpus()
        make_command = "make -j %s %s" % (parallel_make_jobs,
                                          self.build_target)
        logging.info("Running parallel make on src dir")
        utils.system(make_command)

    def make_clean(self):
        '''
        Runs "make clean"
        '''
        os.chdir(self.source)
        utils.system("make clean")

    def make(self, failure_feedback=True):
        '''
        Runs a parallel make

        @param failure_feedback: return information on
        build failure by raising the appropriate exceptions

        @raise: SourceBuildParallelFailed if parallel build fails
        '''
        try:
            self.make_clean()
            self.make_guest_kernel()
        except error.CmdError:
            if failure_feedback:
                raise SourceBuildParallelFailed

    def cp_linux_kernel(self):
        '''
        Copying Linux kernel to target path
        '''
        os.chdir(self.source)
        utils.force_copy(self.build_image, self.kernel_path)

    install = cp_linux_kernel

    def execute(self):
        '''
        Runs appropriate steps for *building* this source code tree
        '''
        self.make()


class GnuSourceBuildParamHelper(GnuSourceBuildHelper):
    '''
    Helps to deal with gnu_autotools build helper in cartesian config files

    This class attempts to make it simple to build source code, by using a
    naming standard that follows this basic syntax:

    [<git_repo>|<local_src>]_<name>_<option> = value

    To pass extra options to the configure script, while building foo from a
    git repo, set the following variable:

    git_repo_foo_configure_options = --enable-feature
    '''
    def __init__(self, params, name, destination_dir, install_prefix):
        '''
        Instantiates a new GnuSourceBuildParamHelper
        '''
        self.params = params
        self.name = name
        self.destination_dir = destination_dir
        self.install_prefix = install_prefix
        # NOTE: parent __init__() is deliberately not called; _parse_params()
        # sets every instance attribute the inherited methods rely on
        self._parse_params()

    def _parse_params(self):
        '''
        Parses the params items for entries related to source directory

        This method currently does everything that the parent class
        __init__() method does, that is, sets all instance variables needed
        by other methods. That means it's not strictly necessary to call
        parent's __init__().
        '''
        logging.debug('Parsing gnu_autotools build parameters for %s' %
                      self.name)

        configure_opt_key = '%s_configure_options' % self.name
        configure_options = self.params.get(configure_opt_key, '').split()
        logging.debug('Configure options for %s: %s' % (self.name,
                                                        configure_options))

        # mirror the attribute layout GnuSourceBuildHelper.__init__ creates
        self.source = self.destination_dir
        self.build_dir = self.destination_dir
        self.prefix = self.install_prefix
        self.configure_options = configure_options
        self.include_pkg_config_path()

        # Support the install_debug_info feature, that automatically
        # adds/keeps debug information on generated libraries/binaries
        install_debug_info_cfg = self.params.get("install_debug_info", "yes")
        self.install_debug_info = install_debug_info_cfg != "no"


def install_host_kernel(job, params):
    """
    Install a host kernel, given the appropriate params.

    @param job: Job object.
    @param params: Dict with host kernel install params.
    """
    install_type = params.get('host_kernel_install_type')

    if install_type == 'rpm':
        logging.info('Installing host kernel through rpm')
        rpm_url = params.get('host_kernel_rpm_url')

        dst = os.path.join("/tmp", os.path.basename(rpm_url))
        k = utils.get_file(rpm_url, dst)
        host_kernel = job.kernel(k)
        host_kernel.install(install_vmlinux=False)
        host_kernel.boot()

    elif install_type in ['koji', 'brew']:
        logging.info('Installing host kernel through koji/brew')

        koji_cmd = params.get('host_kernel_koji_cmd')
        koji_build = params.get('host_kernel_koji_build')
        koji_tag = params.get('host_kernel_koji_tag')

        k_deps = KojiPkgSpec(tag=koji_tag, package='kernel',
                             subpackages=['kernel-devel', 'kernel-firmware'])
        k = KojiPkgSpec(tag=koji_tag, package='kernel',
                        subpackages=['kernel'])

        c = KojiClient(koji_cmd)
        logging.info('Fetching kernel dependencies (-devel, -firmware)')
        c.get_pkgs(k_deps, job.tmpdir)
        logging.info('Installing kernel dependencies (-devel, -firmware) '
                     'through %s', install_type)
        k_deps_rpm_file_names = [os.path.join(job.tmpdir, rpm_file_name) for
                                 rpm_file_name in
                                 c.get_pkg_rpm_file_names(k_deps)]
        utils.run('rpm -U --force %s' % " ".join(k_deps_rpm_file_names))

        c.get_pkgs(k, job.tmpdir)
        k_rpm = os.path.join(job.tmpdir,
                             c.get_pkg_rpm_file_names(k)[0])
        host_kernel = job.kernel(k_rpm)
        host_kernel.install(install_vmlinux=False)
        host_kernel.boot()

    elif install_type == 'git':
        logging.info('Chose to install host kernel through git, proceeding')

        repo = params.get('host_kernel_git_repo')
        repo_base = params.get('host_kernel_git_repo_base', None)
        branch = params.get('host_kernel_git_branch')
        commit = params.get('host_kernel_git_commit')
        patch_list = params.get('host_kernel_patch_list')
        if patch_list:
            patch_list = patch_list.split()
        kernel_config = params.get('host_kernel_config')

        repodir = os.path.join("/tmp", 'kernel_src')
        r = git.get_repo(uri=repo, branch=branch, destination_dir=repodir,
                         commit=commit, base_uri=repo_base)
        host_kernel = job.kernel(r)
        if patch_list:
            host_kernel.patch(patch_list)
        host_kernel.config(kernel_config)
        host_kernel.build()
        host_kernel.install()
        host_kernel.boot()

    else:
        logging.info('Chose %s, using the current kernel for the host',
                     install_type)


def if_nametoindex(ifname):
    """
    Map an interface name into its corresponding index.
    Returns 0 on error, as 0 is not a valid index

    @param ifname: interface name
    """
    # NOTE(review): the initial 0 is dead — fcntl.ioctl() raises on failure
    # instead of letting this function return 0, despite the docstring
    index = 0
    ctrl_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
    ifr = struct.pack("16si", ifname, 0)
    r = fcntl.ioctl(ctrl_sock, SIOCGIFINDEX, ifr)
    index = struct.unpack("16si", r)[1]
    ctrl_sock.close()
    return index


def vnet_hdr_probe(tapfd):
    """
    Check if the IFF_VNET_HDR is support by tun.

    @param tapfd: the file descriptor of /dev/net/tun
    """
    u = struct.pack("I", 0)
    try:
        r = fcntl.ioctl(tapfd, TUNGETFEATURES, u)
    except OverflowError:
        return False
    flags = struct.unpack("I", r)[0]
    if flags & IFF_VNET_HDR:
        return True
    else:
        return False


def open_tap(devname, ifname, vnet_hdr=True):
    """
    Open a tap device and returns its file descriptor which is used by
    fd=<fd> parameter of qemu-kvm.
@param ifname: TAP interface name @param vnet_hdr: Whether enable the vnet header """ try: tapfd = os.open(devname, os.O_RDWR) except OSError, e: raise TAPModuleError(devname, "open", e) flags = IFF_TAP | IFF_NO_PI if vnet_hdr and vnet_hdr_probe(tapfd): flags |= IFF_VNET_HDR ifr = struct.pack("16sh", ifname, flags) try: r = fcntl.ioctl(tapfd, TUNSETIFF, ifr) except IOError, details: raise TAPCreationError(ifname, details) ifname = struct.unpack("16sh", r)[0].strip("\x00") return tapfd def add_to_bridge(ifname, brname): """ Add a TAP device to bridge @param ifname: Name of TAP device @param brname: Name of the bridge """ ctrl_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0) index = if_nametoindex(ifname) if index == 0: raise TAPNotExistError(ifname) ifr = struct.pack("16si", brname, index) try: r = fcntl.ioctl(ctrl_sock, SIOCBRADDIF, ifr) except IOError, details: raise BRAddIfError(ifname, brname, details) ctrl_sock.close() def bring_up_ifname(ifname): """ Bring up an interface @param ifname: Name of the interface """ ctrl_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0) ifr = struct.pack("16si", ifname, IFF_UP) try: fcntl.ioctl(ctrl_sock, SIOCSIFFLAGS, ifr) except IOError: raise TAPBringUpError(ifname) ctrl_sock.close() def if_set_macaddress(ifname, mac): """ Set the mac address for an interface @param ifname: Name of the interface @mac: Mac address """ ctrl_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0) ifr = struct.pack("256s", ifname) try: mac_dev = fcntl.ioctl(ctrl_sock, SIOCGIFHWADDR, ifr)[18:24] mac_dev = ":".join(["%02x" % ord(m) for m in mac_dev]) except IOError, e: raise HwAddrGetError(ifname) if mac_dev.lower() == mac.lower(): return ifr = struct.pack("16sH14s", ifname, 1, "".join([chr(int(m, 16)) for m in mac.split(":")])) try: fcntl.ioctl(ctrl_sock, SIOCSIFHWADDR, ifr) except IOError, e: logging.info(e) raise HwAddrSetError(ifname, mac) ctrl_sock.close() def check_iso(url, destination, iso_sha1): """ Verifies if ISO that 
can be find on url is on destination with right hash. This function will verify the SHA1 hash of the ISO image. If the file turns out to be missing or corrupted, let the user know we can download it. @param url: URL where the ISO file can be found. @param destination: Directory in local disk where we'd like the iso to be. @param iso_sha1: SHA1 hash for the ISO image. """ file_ok = False if not os.path.isdir(destination): os.makedirs(destination) iso_path = os.path.join(destination, os.path.basename(url)) if not os.path.isfile(iso_path): logging.warning("File %s not found", iso_path) logging.warning("Expected SHA1 sum: %s", iso_sha1) answer = utils.ask("Would you like to download it from %s?" % url) if answer == 'y': utils.interactive_download(url, iso_path, 'ISO Download') else: logging.warning("Missing file %s", iso_path) logging.warning("Please download it or put an existing copy on the " "appropriate location") return else: logging.info("Found %s", iso_path) logging.info("Expected SHA1 sum: %s", iso_sha1) answer = utils.ask("Would you like to check %s? It might take a while" % iso_path) if answer == 'y': actual_iso_sha1 = utils.hash_file(iso_path, method='sha1') if actual_iso_sha1 != iso_sha1: logging.error("Actual SHA1 sum: %s", actual_iso_sha1) else: logging.info("SHA1 sum check OK") else: logging.info("File %s present, but chose to not verify it", iso_path) return if file_ok: logging.info("%s present, with proper checksum", iso_path) def virt_test_assistant(test_name, test_dir, base_dir, default_userspace_paths, check_modules, online_docs_url): """ Common virt test assistant module. @param test_name: Test name, such as "kvm". @param test_dir: Path with the test directory. @param base_dir: Base directory used to hold images and isos. @param default_userspace_paths: Important programs for a successful test execution. @param check_modules: Whether we want to verify if a given list of modules is loaded in the system. 
@param online_docs_url: URL to an online documentation system, such as an wiki page. """ logging_manager.configure_logging(VirtLoggingConfig(), verbose=True) logging.info("%s test config helper", test_name) step = 0 common_dir = os.path.dirname(sys.modules[__name__].__file__) logging.info("") step += 1 logging.info("%d - Verifying directories (check if the directory structure " "expected by the default test config is there)", step) sub_dir_list = ["images", "isos", "steps_data"] for sub_dir in sub_dir_list: sub_dir_path = os.path.join(base_dir, sub_dir) if not os.path.isdir(sub_dir_path): logging.debug("Creating %s", sub_dir_path) os.makedirs(sub_dir_path) else: logging.debug("Dir %s exists, not creating" % sub_dir_path) logging.info("") step += 1 logging.info("%d - Creating config files from samples (copy the default " "config samples to actual config files)", step) config_file_list = glob.glob(os.path.join(test_dir, "*.cfg.sample")) config_file_list += glob.glob(os.path.join(common_dir, "*.cfg.sample")) for config_file in config_file_list: src_file = config_file dst_file = os.path.join(test_dir, os.path.basename(config_file)) dst_file = dst_file.rstrip(".sample") if not os.path.isfile(dst_file): logging.debug("Creating config file %s from sample", dst_file) shutil.copyfile(src_file, dst_file) else: logging.debug("Config file %s exists, not touching" % dst_file) logging.info("") step += 1 logging.info("%s - Verifying iso (make sure we have the OS ISO needed for " "the default test set)", step) iso_name = "Fedora-16-x86_64-DVD.iso" fedora_dir = "pub/fedora/linux/releases/16/Fedora/x86_64/iso" url = os.path.join("http://download.fedoraproject.org/", fedora_dir, iso_name) iso_sha1 = "76dd59c37e9a0ec2af56263fa892ff571c92c89a" destination = os.path.join(base_dir, 'isos', 'linux') check_iso(url, destination, iso_sha1) logging.info("") step += 1 logging.info("%d - Verifying winutils.iso (make sure we have the utility " "ISO needed for Windows testing)", step) 
    logging.info("In order to run the KVM autotests in Windows guests, we "
                 "provide you an ISO that this script can download")

    url = "http://people.redhat.com/mrodrigu/kvm/winutils.iso"
    iso_sha1 = "02930224756510e383c44c49bffb760e35d6f892"
    destination = os.path.join(base_dir, 'isos', 'windows')
    # NOTE(review): dead assignment — iso_name still holds the *Fedora* iso
    # name here, and 'path' is immediately shadowed by the loop below
    path = os.path.join(destination, iso_name)
    check_iso(url, destination, iso_sha1)

    logging.info("")
    step += 1
    logging.info("%d - Checking if the appropriate userspace programs are "
                 "installed", step)
    for path in default_userspace_paths:
        if not os.path.isfile(path):
            logging.warning("No %s found. You might need to install %s.",
                            path, os.path.basename(path))
        else:
            logging.debug("%s present", path)
    logging.info("If you wish to change any userspace program path, "
                 "you will have to modify tests.cfg")

    if check_modules:
        logging.info("")
        step += 1
        logging.info("%d - Checking for modules %s", step,
                     ",".join(check_modules))
        for module in check_modules:
            if not utils.module_is_loaded(module):
                logging.warning("Module %s is not loaded. You might want to "
                                "load it", module)
            else:
                logging.debug("Module %s loaded", module)

    if online_docs_url:
        logging.info("")
        step += 1
        logging.info("%d - Verify needed packages to get started", step)
        logging.info("Please take a look at the online documentation: %s",
                     online_docs_url)

    client_dir = os.path.abspath(os.path.join(test_dir, "..", ".."))
    autotest_bin = os.path.join(client_dir, 'bin', 'autotest')
    control_file = os.path.join(test_dir, 'control')

    logging.info("")
    logging.info("When you are done fixing eventual warnings found, "
                 "you can run the test using this command line AS ROOT:")
    logging.info("%s %s", autotest_bin, control_file)
    logging.info("Autotest prints the results dir, so you can look at DEBUG "
                 "logs if something went wrong")
    logging.info("You can also edit the test config files")


class NumaNode(object):
    """
    Numa node to control processes and shared memory.
    """
    def __init__(self, i=-1):
        # total node count as reported by numactl (a *string*; callers below
        # convert with int())
        self.num = self.get_node_num()
        # negative i counts from the last node (default -1 == last node);
        # positive i is a 1-based node index
        if i < 0:
            self.cpus = self.get_node_cpus(int(self.num) + i).split()
        else:
            self.cpus = self.get_node_cpus(i - 1).split()
        self.dict = {}
        for i in self.cpus:
            # maps cpu id (string) -> "free" or the pinned process id (string)
            self.dict[i] = "free"

    def get_node_num(self):
        """
        Get the number of nodes of current host.
        """
        cmd = utils.run("numactl --hardware")
        return re.findall("available: (\d+) nodes", cmd.stdout)[0]

    def get_node_cpus(self, i):
        """
        Get cpus of a specific node

        @param i: Index of the CPU inside the node.
        """
        cmd = utils.run("numactl --hardware")
        return re.findall("node %s cpus: (.*)" % i, cmd.stdout)[0]

    def free_cpu(self, i):
        """
        Release pin of one node.

        @param i: Index of the node.
        """
        self.dict[i] = "free"

    def _flush_pin(self):
        """
        Flush pin dict, remove the record of exited process.
        """
        cmd = utils.run("ps -eLf | awk '{print $4}'")
        all_pids = cmd.stdout
        for i in self.cpus:
            # NOTE(review): substring test against the raw ps output — pid
            # "12" also matches "123"; splitting all_pids into a set would be
            # exact. TODO confirm before changing behavior.
            if self.dict[i] != "free" and self.dict[i] not in all_pids:
                self.free_cpu(i)

    @error.context_aware
    def pin_cpu(self, process):
        """
        Pin one process to a single cpu.

        Returns the cpu id used, or None if every cpu is already pinned.

        @param process: Process ID.
        """
        self._flush_pin()
        error.context("Pinning process %s to the CPU" % process)
        for i in self.cpus:
            if self.dict[i] == "free":
                self.dict[i] = str(process)
                # affinity mask with only bit <i> set
                cmd = "taskset -p %s %s" % (hex(2 ** int(i)), process)
                logging.debug("NumaNode (%s): " % i + cmd)
                utils.run(cmd)
                return i

    def show(self):
        """
        Display the record dict in a convenient way.
        """
        logging.info("Numa Node record dict:")
        for i in self.cpus:
            logging.info("    %s: %s" % (i, self.dict[i]))
# codeparrot/github-code-clean -- dataset extraction artifact; marks the
# boundary between two unrelated source files concatenated in this chunk
"""This file contains code for use with "Think Stats" and "Think Bayes", both by Allen B. Downey, available from greenteapress.com Copyright 2014 Allen B. Downey License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html """ from __future__ import print_function, division """This file contains class definitions for: Hist: represents a histogram (map from values to integer frequencies). Pmf: represents a probability mass function (map from values to probs). _DictWrapper: private parent class for Hist and Pmf. Cdf: represents a discrete cumulative distribution function Pdf: represents a continuous probability density function """ import bisect import copy import logging import math import random import re from collections import Counter from operator import itemgetter import thinkplot import numpy as np import pandas import scipy from scipy import stats from scipy import special from scipy import ndimage from io import open ROOT2 = math.sqrt(2) def RandomSeed(x): """Initialize the random and np.random generators. x: int seed """ random.seed(x) np.random.seed(x) def Odds(p): """Computes odds for a given probability. Example: p=0.75 means 75 for and 25 against, or 3:1 odds in favor. Note: when p=1, the formula for odds divides by zero, which is normally undefined. But I think it is reasonable to define Odds(1) to be infinity, so that's what this function does. p: float 0-1 Returns: float odds """ if p == 1: return float('inf') return p / (1 - p) def Probability(o): """Computes the probability corresponding to given odds. Example: o=2 means 2:1 odds in favor, or 2/3 probability o: float odds, strictly positive Returns: float probability """ return o / (o + 1) def Probability2(yes, no): """Computes the probability corresponding to given odds. Example: yes=2, no=1 means 2:1 odds in favor, or 2/3 probability. yes, no: int or float odds in favor """ return yes / (yes + no) class Interpolator(object): """Represents a mapping between sorted sequences; performs linear interp. 
    Attributes:
        xs: sorted list
        ys: sorted list
    """

    def __init__(self, xs, ys):
        self.xs = xs
        self.ys = ys

    def Lookup(self, x):
        """Looks up x and returns the corresponding value of y."""
        return self._Bisect(x, self.xs, self.ys)

    def Reverse(self, y):
        """Looks up y and returns the corresponding value of x."""
        return self._Bisect(y, self.ys, self.xs)

    def _Bisect(self, x, xs, ys):
        """Helper function."""
        # clamp to the endpoints outside the observed range
        if x <= xs[0]:
            return ys[0]
        if x >= xs[-1]:
            return ys[-1]
        i = bisect.bisect(xs, x)
        frac = 1.0 * (x - xs[i - 1]) / (xs[i] - xs[i - 1])
        y = ys[i - 1] + frac * 1.0 * (ys[i] - ys[i - 1])
        return y


class _DictWrapper(object):
    """An object that contains a dictionary."""

    def __init__(self, obj=None, label=None):
        """Initializes the distribution.

        obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs
        label: string label
        """
        self.label = label if label is not None else '_nolegend_'
        self.d = {}

        # flag whether the distribution is under a log transform
        self.log = False

        if obj is None:
            return

        if isinstance(obj, (_DictWrapper, Cdf, Pdf)):
            self.label = label if label is not None else obj.label

        if isinstance(obj, dict):
            self.d.update(obj.items())
        elif isinstance(obj, (_DictWrapper, Cdf, Pdf)):
            self.d.update(obj.Items())
        elif isinstance(obj, pandas.Series):
            # NOTE(review): Series.iteritems() was removed in pandas 2.x;
            # items() is the forward-compatible spelling — TODO confirm the
            # targeted pandas version before changing
            self.d.update(obj.value_counts().iteritems())
        else:
            # finally, treat it like a list
            self.d.update(Counter(obj))

        if len(self) > 0 and isinstance(self, Pmf):
            self.Normalize()

    def __hash__(self):
        # NOTE(review): identity hash while __eq__ compares contents violates
        # the hash/eq contract (equal objects may hash differently)
        return id(self)

    def __str__(self):
        cls = self.__class__.__name__
        return '%s(%s)' % (cls, str(self.d))

    __repr__ = __str__

    def __eq__(self, other):
        # raises AttributeError when other is not a _DictWrapper
        return self.d == other.d

    def __len__(self):
        return len(self.d)

    def __iter__(self):
        return iter(self.d)

    def iterkeys(self):
        """Returns an iterator over keys."""
        return iter(self.d)

    def __contains__(self, value):
        return value in self.d

    def __getitem__(self, value):
        # missing values report 0 freq/prob rather than raising
        return self.d.get(value, 0)

    def __setitem__(self, value, prob):
        self.d[value] = prob

    def __delitem__(self, value):
        del self.d[value]

    def 
    Copy(self, label=None):
        """Returns a copy.

        Make a shallow copy of d.  If you want a deep copy of d,
        use copy.deepcopy on the whole object.

        label: string label for the new Hist

        returns: new _DictWrapper with the same type
        """
        new = copy.copy(self)
        new.d = copy.copy(self.d)
        new.label = label if label is not None else self.label
        return new

    def Scale(self, factor):
        """Multiplies the values by a factor.

        factor: what to multiply by

        Returns: new object
        """
        new = self.Copy()
        new.d.clear()

        for val, prob in self.Items():
            new.Set(val * factor, prob)
        return new

    def Log(self, m=None):
        """Log transforms the probabilities.

        Removes values with probability 0.

        Normalizes so that the largest logprob is 0.
        """
        if self.log:
            raise ValueError("Pmf/Hist already under a log transform")
        self.log = True

        if m is None:
            m = self.MaxLike()

        for x, p in self.d.items():
            if p:
                self.Set(x, math.log(p / m))
            else:
                self.Remove(x)

    def Exp(self, m=None):
        """Exponentiates the probabilities.

        m: how much to shift the ps before exponentiating

        If m is None, normalizes so that the largest prob is 1.
        """
        if not self.log:
            raise ValueError("Pmf/Hist not under a log transform")
        self.log = False

        if m is None:
            m = self.MaxLike()

        for x, p in self.d.items():
            self.Set(x, math.exp(p - m))

    def GetDict(self):
        """Gets the dictionary."""
        return self.d

    def SetDict(self, d):
        """Sets the dictionary."""
        self.d = d

    def Values(self):
        """Gets an unsorted sequence of values.

        Note: one source of confusion is that the keys of this
        dictionary are the values of the Hist/Pmf, and the
        values of the dictionary are frequencies/probabilities.
        """
        return self.d.keys()

    def Items(self):
        """Gets an unsorted sequence of (value, freq/prob) pairs."""
        return self.d.items()

    def Render(self, **options):
        """Generates a sequence of points suitable for plotting.
Note: options are ignored Returns: tuple of (sorted value sequence, freq/prob sequence) """ if min(self.d.keys()) is np.nan: logging.warning('Hist: contains NaN, may not render correctly.') return zip(*sorted(self.Items())) def MakeCdf(self, label=None): """Makes a Cdf.""" label = label if label is not None else self.label return Cdf(self, label=label) def Print(self): """Prints the values and freqs/probs in ascending order.""" for val, prob in sorted(self.d.items()): print(val, prob) def Set(self, x, y=0): """Sets the freq/prob associated with the value x. Args: x: number value y: number freq or prob """ self.d[x] = y def Incr(self, x, term=1): """Increments the freq/prob associated with the value x. Args: x: number value term: how much to increment by """ self.d[x] = self.d.get(x, 0) + term def Mult(self, x, factor): """Scales the freq/prob associated with the value x. Args: x: number value factor: how much to multiply by """ self.d[x] = self.d.get(x, 0) * factor def Remove(self, x): """Removes a value. Throws an exception if the value is not there. Args: x: value to remove """ del self.d[x] def Total(self): """Returns the total of the frequencies/probabilities in the map.""" total = sum(self.d.values()) return total def MaxLike(self): """Returns the largest frequency/probability in the map.""" return max(self.d.values()) def Largest(self, n=10): """Returns the largest n values, with frequency/probability. n: number of items to return """ return sorted(self.d.items(), reverse=True)[:n] def Smallest(self, n=10): """Returns the smallest n values, with frequency/probability. n: number of items to return """ return sorted(self.d.items(), reverse=False)[:n] class Hist(_DictWrapper): """Represents a histogram, which is a map from values to frequencies. Values can be any hashable type; frequencies are integer counters. """ def Freq(self, x): """Gets the frequency associated with the value x. 
        Args:
            x: number value

        Returns:
            int frequency
        """
        return self.d.get(x, 0)

    def Freqs(self, xs):
        """Gets frequencies for a sequence of values."""
        return [self.Freq(x) for x in xs]

    def IsSubset(self, other):
        """Checks whether the values in this histogram are a subset of
        the values in the given histogram."""
        for val, freq in self.Items():
            if freq > other.Freq(val):
                return False
        return True

    def Subtract(self, other):
        """Subtracts the values in the given histogram from this histogram."""
        for val, freq in other.Items():
            self.Incr(val, -freq)


class Pmf(_DictWrapper):
    """Represents a probability mass function.

    Values can be any hashable type; probabilities are floating-point.
    Pmfs are not necessarily normalized.
    """

    def Prob(self, x, default=0):
        """Gets the probability associated with the value x.

        Args:
            x: number value
            default: value to return if the key is not there

        Returns:
            float probability
        """
        return self.d.get(x, default)

    def Probs(self, xs):
        """Gets probabilities for a sequence of values."""
        return [self.Prob(x) for x in xs]

    def Percentile(self, percentage):
        """Computes a percentile of a given Pmf.

        Note: this is not super efficient.  If you are planning
        to compute more than a few percentiles, compute the Cdf.

        Returns None for an empty Pmf.

        percentage: float 0-100

        returns: value from the Pmf
        """
        p = percentage / 100.0
        total = 0
        for val, prob in sorted(self.Items()):
            total += prob
            if total >= p:
                return val

    def ProbGreater(self, x):
        """Probability that a sample from this Pmf exceeds x.

        x: number

        returns: float probability
        """
        if isinstance(x, _DictWrapper):
            # delegate to the module-level pairwise comparison helper
            return PmfProbGreater(self, x)
        else:
            t = [prob for (val, prob) in self.d.items() if val > x]
            return sum(t)

    def ProbLess(self, x):
        """Probability that a sample from this Pmf is less than x.

        x: number

        returns: float probability
        """
        if isinstance(x, _DictWrapper):
            return PmfProbLess(self, x)
        else:
            t = [prob for (val, prob) in self.d.items() if val < x]
            return sum(t)

    def __lt__(self, obj):
        """Less than.
        obj: number or _DictWrapper

        returns: float probability
        """
        return self.ProbLess(obj)

    def __gt__(self, obj):
        """Greater than.

        obj: number or _DictWrapper

        returns: float probability
        """
        return self.ProbGreater(obj)

    def __ge__(self, obj):
        """Greater than or equal.

        obj: number or _DictWrapper

        returns: float probability
        """
        # complement of strict less-than, so ties are included
        return 1 - (self < obj)

    def __le__(self, obj):
        """Less than or equal.

        obj: number or _DictWrapper

        returns: float probability
        """
        return 1 - (self > obj)

    def Normalize(self, fraction=1.0):
        """Normalizes this PMF so the sum of all probs is fraction.

        Args:
            fraction: what the total should be after normalization

        Returns: the total probability before normalizing
        """
        if self.log:
            raise ValueError("Normalize: Pmf is under a log transform")

        total = self.Total()
        if total == 0.0:
            raise ValueError('Normalize: total probability is zero.')
            #logging.warning('Normalize: total probability is zero.')
            #return total

        factor = fraction / total
        for x in self.d:
            self.d[x] *= factor

        return total

    def Random(self):
        """Chooses a random element from this PMF.

        Note: this is not very efficient.  If you plan to call
        this more than a few times, consider converting to a CDF.

        Returns: float value from the Pmf
        """
        target = random.random()
        total = 0.0
        for x, p in self.d.items():
            total += p
            if total >= target:
                return x

        # we shouldn't get here
        raise ValueError('Random: Pmf might not be normalized.')

    def Mean(self):
        """Computes the mean of a PMF.

        Returns:
            float mean
        """
        mean = 0.0
        for x, p in self.d.items():
            mean += p * x
        return mean

    def Var(self, mu=None):
        """Computes the variance of a PMF.

        mu: the point around which the variance is computed;
                if omitted, computes the mean

        returns: float variance
        """
        if mu is None:
            mu = self.Mean()

        var = 0.0
        for x, p in self.d.items():
            var += p * (x - mu) ** 2
        return var

    def Std(self, mu=None):
        """Computes the standard deviation of a PMF.
mu: the point around which the variance is computed; if omitted, computes the mean returns: float standard deviation """ var = self.Var(mu) return math.sqrt(var) def MaximumLikelihood(self): """Returns the value with the highest probability. Returns: float probability """ _, val = max((prob, val) for val, prob in self.Items()) return val def CredibleInterval(self, percentage=90): """Computes the central credible interval. If percentage=90, computes the 90% CI. Args: percentage: float between 0 and 100 Returns: sequence of two floats, low and high """ cdf = self.MakeCdf() return cdf.CredibleInterval(percentage) def __add__(self, other): """Computes the Pmf of the sum of values drawn from self and other. other: another Pmf or a scalar returns: new Pmf """ try: return self.AddPmf(other) except AttributeError: return self.AddConstant(other) def AddPmf(self, other): """Computes the Pmf of the sum of values drawn from self and other. other: another Pmf returns: new Pmf """ pmf = Pmf() for v1, p1 in self.Items(): for v2, p2 in other.Items(): pmf.Incr(v1 + v2, p1 * p2) return pmf def AddConstant(self, other): """Computes the Pmf of the sum a constant and values from self. other: a number returns: new Pmf """ pmf = Pmf() for v1, p1 in self.Items(): pmf.Set(v1 + other, p1) return pmf def __sub__(self, other): """Computes the Pmf of the diff of values drawn from self and other. other: another Pmf returns: new Pmf """ try: return self.SubPmf(other) except AttributeError: return self.AddConstant(-other) def SubPmf(self, other): """Computes the Pmf of the diff of values drawn from self and other. other: another Pmf returns: new Pmf """ pmf = Pmf() for v1, p1 in self.Items(): for v2, p2 in other.Items(): pmf.Incr(v1 - v2, p1 * p2) return pmf def __mul__(self, other): """Computes the Pmf of the product of values drawn from self and other. 
other: another Pmf returns: new Pmf """ try: return self.MulPmf(other) except AttributeError: return self.MulConstant(other) def MulPmf(self, other): """Computes the Pmf of the diff of values drawn from self and other. other: another Pmf returns: new Pmf """ pmf = Pmf() for v1, p1 in self.Items(): for v2, p2 in other.Items(): pmf.Incr(v1 * v2, p1 * p2) return pmf def MulConstant(self, other): """Computes the Pmf of the product of a constant and values from self. other: a number returns: new Pmf """ pmf = Pmf() for v1, p1 in self.Items(): pmf.Set(v1 * other, p1) return pmf def __div__(self, other): """Computes the Pmf of the ratio of values drawn from self and other. other: another Pmf returns: new Pmf """ try: return self.DivPmf(other) except AttributeError: return self.MulConstant(1/other) __truediv__ = __div__ def DivPmf(self, other): """Computes the Pmf of the ratio of values drawn from self and other. other: another Pmf returns: new Pmf """ pmf = Pmf() for v1, p1 in self.Items(): for v2, p2 in other.Items(): pmf.Incr(v1 / v2, p1 * p2) return pmf def Max(self, k): """Computes the CDF of the maximum of k selections from this dist. k: int returns: new Cdf """ cdf = self.MakeCdf() return cdf.Max(k) class Joint(Pmf): """Represents a joint distribution. The values are sequences (usually tuples) """ def Marginal(self, i, label=None): """Gets the marginal distribution of the indicated variable. i: index of the variable we want Returns: Pmf """ pmf = Pmf(label=label) for vs, prob in self.Items(): pmf.Incr(vs[i], prob) return pmf def Conditional(self, i, j, val, label=None): """Gets the conditional distribution of the indicated variable. Distribution of vs[i], conditioned on vs[j] = val. 
i: index of the variable we want j: which variable is conditioned on val: the value the jth variable has to have Returns: Pmf """ pmf = Pmf(label=label) for vs, prob in self.Items(): if vs[j] != val: continue pmf.Incr(vs[i], prob) pmf.Normalize() return pmf def MaxLikeInterval(self, percentage=90): """Returns the maximum-likelihood credible interval. If percentage=90, computes a 90% CI containing the values with the highest likelihoods. percentage: float between 0 and 100 Returns: list of values from the suite """ interval = [] total = 0 t = [(prob, val) for val, prob in self.Items()] t.sort(reverse=True) for prob, val in t: interval.append(val) total += prob if total >= percentage / 100.0: break return interval def MakeJoint(pmf1, pmf2): """Joint distribution of values from pmf1 and pmf2. Assumes that the PMFs represent independent random variables. Args: pmf1: Pmf object pmf2: Pmf object Returns: Joint pmf of value pairs """ joint = Joint() for v1, p1 in pmf1.Items(): for v2, p2 in pmf2.Items(): joint.Set((v1, v2), p1 * p2) return joint def MakeHistFromList(t, label=None): """Makes a histogram from an unsorted sequence of values. Args: t: sequence of numbers label: string label for this histogram Returns: Hist object """ return Hist(t, label=label) def MakeHistFromDict(d, label=None): """Makes a histogram from a map from values to frequencies. Args: d: dictionary that maps values to frequencies label: string label for this histogram Returns: Hist object """ return Hist(d, label) def MakePmfFromList(t, label=None): """Makes a PMF from an unsorted sequence of values. Args: t: sequence of numbers label: string label for this PMF Returns: Pmf object """ return Pmf(t, label=label) def MakePmfFromDict(d, label=None): """Makes a PMF from a map from values to probabilities. 
Args: d: dictionary that maps values to probabilities label: string label for this PMF Returns: Pmf object """ return Pmf(d, label=label) def MakePmfFromItems(t, label=None): """Makes a PMF from a sequence of value-probability pairs Args: t: sequence of value-probability pairs label: string label for this PMF Returns: Pmf object """ return Pmf(dict(t), label=label) def MakePmfFromHist(hist, label=None): """Makes a normalized PMF from a Hist object. Args: hist: Hist object label: string label Returns: Pmf object """ if label is None: label = hist.label return Pmf(hist, label=label) def MakeMixture(metapmf, label='mix'): """Make a mixture distribution. Args: metapmf: Pmf that maps from Pmfs to probs. label: string label for the new Pmf. Returns: Pmf object. """ mix = Pmf(label=label) for pmf, p1 in metapmf.Items(): for x, p2 in pmf.Items(): mix.Incr(x, p1 * p2) return mix def MakeUniformPmf(low, high, n): """Make a uniform Pmf. low: lowest value (inclusive) high: highest value (inclusize) n: number of values """ pmf = Pmf() for x in np.linspace(low, high, n): pmf.Set(x, 1) pmf.Normalize() return pmf class Cdf(object): """Represents a cumulative distribution function. Attributes: xs: sequence of values ps: sequence of probabilities label: string used as a graph label. """ def __init__(self, obj=None, ps=None, label=None): """Initializes. If ps is provided, obj must be the corresponding list of values. 
obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs ps: list of cumulative probabilities label: string label """ self.label = label if label is not None else '_nolegend_' if isinstance(obj, (_DictWrapper, Cdf, Pdf)): if not label: self.label = label if label is not None else obj.label if obj is None: # caller does not provide obj, make an empty Cdf self.xs = np.asarray([]) self.ps = np.asarray([]) if ps is not None: logging.warning("Cdf: can't pass ps without also passing xs.") return else: # if the caller provides xs and ps, just store them if ps is not None: if isinstance(ps, str): logging.warning("Cdf: ps can't be a string") self.xs = np.asarray(obj) self.ps = np.asarray(ps) return # caller has provided just obj, not ps if isinstance(obj, Cdf): self.xs = copy.copy(obj.xs) self.ps = copy.copy(obj.ps) return if isinstance(obj, _DictWrapper): dw = obj else: dw = Hist(obj) if len(dw) == 0: self.xs = np.asarray([]) self.ps = np.asarray([]) return xs, freqs = zip(*sorted(dw.Items())) self.xs = np.asarray(xs) self.ps = np.cumsum(freqs, dtype=np.float) self.ps /= self.ps[-1] def __str__(self): return 'Cdf(%s, %s)' % (str(self.xs), str(self.ps)) __repr__ = __str__ def __len__(self): return len(self.xs) def __getitem__(self, x): return self.Prob(x) def __setitem__(self): raise UnimplementedMethodException() def __delitem__(self): raise UnimplementedMethodException() def __eq__(self, other): return np.all(self.xs == other.xs) and np.all(self.ps == other.ps) def Copy(self, label=None): """Returns a copy of this Cdf. label: string label for the new Cdf """ if label is None: label = self.label return Cdf(list(self.xs), list(self.ps), label=label) def MakePmf(self, label=None): """Makes a Pmf.""" if label is None: label = self.label return Pmf(self, label=label) def Values(self): """Returns a sorted list of values. """ return self.xs def Items(self): """Returns a sorted sequence of (value, probability) pairs. Note: in Python3, returns an iterator. 
""" # TODO: rethink this function: should it just iterate # over xs and ps (cumulative probabilities) and not compute # differences? a = self.ps b = np.roll(a, 1) b[0] = 0 return zip(self.xs, a-b) def Shift(self, term): """Adds a term to the xs. term: how much to add """ new = self.Copy() # don't use +=, or else an int array + float yields int array new.xs = new.xs + term return new def Scale(self, factor): """Multiplies the xs by a factor. factor: what to multiply by """ new = self.Copy() # don't use *=, or else an int array * float yields int array new.xs = new.xs * factor return new def Prob(self, x): """Returns CDF(x), the probability that corresponds to value x. Args: x: number Returns: float probability """ if x < self.xs[0]: return 0.0 index = bisect.bisect(self.xs, x) p = self.ps[index-1] return p def Probs(self, xs): """Gets probabilities for a sequence of values. xs: any sequence that can be converted to NumPy array returns: NumPy array of cumulative probabilities """ xs = np.asarray(xs) index = np.searchsorted(self.xs, xs, side='right') ps = self.ps[index-1] ps[xs < self.xs[0]] = 0.0 return ps ProbArray = Probs def Value(self, p): """Returns InverseCDF(p), the value that corresponds to probability p. Args: p: number in the range [0, 1] Returns: number value """ if p < 0 or p > 1: raise ValueError('Probability p must be in range [0, 1]') index = bisect.bisect_left(self.ps, p) return self.xs[index] def ValueArray(self, ps): """Returns InverseCDF(p), the value that corresponds to probability p. Args: ps: NumPy array of numbers in the range [0, 1] Returns: NumPy array of values """ ps = np.asarray(ps) if np.any(ps < 0) or np.any(ps > 1): raise ValueError('Probability p must be in range [0, 1]') index = np.searchsorted(self.ps, ps, side='left') return self.xs[index] def Percentile(self, p): """Returns the value that corresponds to percentile p. 
Args: p: number in the range [0, 100] Returns: number value """ return self.Value(p / 100.0) def PercentileRank(self, x): """Returns the percentile rank of the value x. x: potential value in the CDF returns: percentile rank in the range 0 to 100 """ return self.Prob(x) * 100.0 def Random(self): """Chooses a random value from this distribution.""" return self.Value(random.random()) def Sample(self, n): """Generates a random sample from this distribution. n: int length of the sample returns: NumPy array """ ps = np.random.random(n) return self.ValueArray(ps) def Mean(self): """Computes the mean of a CDF. Returns: float mean """ old_p = 0 total = 0.0 for x, new_p in zip(self.xs, self.ps): p = new_p - old_p total += p * x old_p = new_p return total def CredibleInterval(self, percentage=90): """Computes the central credible interval. If percentage=90, computes the 90% CI. Args: percentage: float between 0 and 100 Returns: sequence of two floats, low and high """ prob = (1 - percentage / 100.0) / 2 interval = self.Value(prob), self.Value(1 - prob) return interval ConfidenceInterval = CredibleInterval def _Round(self, multiplier=1000.0): """ An entry is added to the cdf only if the percentile differs from the previous value in a significant digit, where the number of significant digits is determined by multiplier. The default is 1000, which keeps log10(1000) = 3 significant digits. """ # TODO(write this method) raise UnimplementedMethodException() def Render(self, **options): """Generates a sequence of points suitable for plotting. An empirical CDF is a step function; linear interpolation can be misleading. 
Note: options are ignored Returns: tuple of (xs, ps) """ def interleave(a, b): c = np.empty(a.shape[0] + b.shape[0]) c[::2] = a c[1::2] = b return c a = np.array(self.xs) xs = interleave(a, a) shift_ps = np.roll(self.ps, 1) shift_ps[0] = 0 ps = interleave(shift_ps, self.ps) return xs, ps def Max(self, k): """Computes the CDF of the maximum of k selections from this dist. k: int returns: new Cdf """ cdf = self.Copy() cdf.ps **= k return cdf def MakeCdfFromItems(items, label=None): """Makes a cdf from an unsorted sequence of (value, frequency) pairs. Args: items: unsorted sequence of (value, frequency) pairs label: string label for this CDF Returns: cdf: list of (value, fraction) pairs """ return Cdf(dict(items), label=label) def MakeCdfFromDict(d, label=None): """Makes a CDF from a dictionary that maps values to frequencies. Args: d: dictionary that maps values to frequencies. label: string label for the data. Returns: Cdf object """ return Cdf(d, label=label) def MakeCdfFromList(seq, label=None): """Creates a CDF from an unsorted sequence. Args: seq: unsorted sequence of sortable values label: string label for the cdf Returns: Cdf object """ return Cdf(seq, label=label) def MakeCdfFromHist(hist, label=None): """Makes a CDF from a Hist object. Args: hist: Pmf.Hist object label: string label for the data. Returns: Cdf object """ if label is None: label = hist.label return Cdf(hist, label=label) def MakeCdfFromPmf(pmf, label=None): """Makes a CDF from a Pmf object. Args: pmf: Pmf.Pmf object label: string label for the data. Returns: Cdf object """ if label is None: label = pmf.label return Cdf(pmf, label=label) class UnimplementedMethodException(Exception): """Exception if someone calls a method that should be overridden.""" class Suite(Pmf): """Represents a suite of hypotheses and their probabilities.""" def Update(self, data): """Updates each hypothesis based on the data. 
data: any representation of the data returns: the normalizing constant """ for hypo in self.Values(): like = self.Likelihood(data, hypo) self.Mult(hypo, like) return self.Normalize() def LogUpdate(self, data): """Updates a suite of hypotheses based on new data. Modifies the suite directly; if you want to keep the original, make a copy. Note: unlike Update, LogUpdate does not normalize. Args: data: any representation of the data """ for hypo in self.Values(): like = self.LogLikelihood(data, hypo) self.Incr(hypo, like) def UpdateSet(self, dataset): """Updates each hypothesis based on the dataset. This is more efficient than calling Update repeatedly because it waits until the end to Normalize. Modifies the suite directly; if you want to keep the original, make a copy. dataset: a sequence of data returns: the normalizing constant """ for data in dataset: for hypo in self.Values(): like = self.Likelihood(data, hypo) self.Mult(hypo, like) return self.Normalize() def LogUpdateSet(self, dataset): """Updates each hypothesis based on the dataset. Modifies the suite directly; if you want to keep the original, make a copy. dataset: a sequence of data returns: None """ for data in dataset: self.LogUpdate(data) def Likelihood(self, data, hypo): """Computes the likelihood of the data under the hypothesis. hypo: some representation of the hypothesis data: some representation of the data """ raise UnimplementedMethodException() def LogLikelihood(self, data, hypo): """Computes the log likelihood of the data under the hypothesis. hypo: some representation of the hypothesis data: some representation of the data """ raise UnimplementedMethodException() def Print(self): """Prints the hypotheses and their probabilities.""" for hypo, prob in sorted(self.Items()): print(hypo, prob) def MakeOdds(self): """Transforms from probabilities to odds. Values with prob=0 are removed. 
""" for hypo, prob in self.Items(): if prob: self.Set(hypo, Odds(prob)) else: self.Remove(hypo) def MakeProbs(self): """Transforms from odds to probabilities.""" for hypo, odds in self.Items(): self.Set(hypo, Probability(odds)) def MakeSuiteFromList(t, label=None): """Makes a suite from an unsorted sequence of values. Args: t: sequence of numbers label: string label for this suite Returns: Suite object """ hist = MakeHistFromList(t, label=label) d = hist.GetDict() return MakeSuiteFromDict(d) def MakeSuiteFromHist(hist, label=None): """Makes a normalized suite from a Hist object. Args: hist: Hist object label: string label Returns: Suite object """ if label is None: label = hist.label # make a copy of the dictionary d = dict(hist.GetDict()) return MakeSuiteFromDict(d, label) def MakeSuiteFromDict(d, label=None): """Makes a suite from a map from values to probabilities. Args: d: dictionary that maps values to probabilities label: string label for this suite Returns: Suite object """ suite = Suite(label=label) suite.SetDict(d) suite.Normalize() return suite class Pdf(object): """Represents a probability density function (PDF).""" def Density(self, x): """Evaluates this Pdf at x. Returns: float or NumPy array of probability density """ raise UnimplementedMethodException() def GetLinspace(self): """Get a linspace for plotting. Not all subclasses of Pdf implement this. Returns: numpy array """ raise UnimplementedMethodException() def MakePmf(self, **options): """Makes a discrete version of this Pdf. options can include label: string low: low end of range high: high end of range n: number of places to evaluate Returns: new Pmf """ label = options.pop('label', '') xs, ds = self.Render(**options) return Pmf(dict(zip(xs, ds)), label=label) def Render(self, **options): """Generates a sequence of points suitable for plotting. If options includes low and high, it must also include n; in that case the density is evaluated an n locations between low and high, including both. 
If options includes xs, the density is evaluate at those location. Otherwise, self.GetLinspace is invoked to provide the locations. Returns: tuple of (xs, densities) """ low, high = options.pop('low', None), options.pop('high', None) if low is not None and high is not None: n = options.pop('n', 101) xs = np.linspace(low, high, n) else: xs = options.pop('xs', None) if xs is None: xs = self.GetLinspace() ds = self.Density(xs) return xs, ds def Items(self): """Generates a sequence of (value, probability) pairs. """ return zip(*self.Render()) class NormalPdf(Pdf): """Represents the PDF of a Normal distribution.""" def __init__(self, mu=0, sigma=1, label=None): """Constructs a Normal Pdf with given mu and sigma. mu: mean sigma: standard deviation label: string """ self.mu = mu self.sigma = sigma self.label = label if label is not None else '_nolegend_' def __str__(self): return 'NormalPdf(%f, %f)' % (self.mu, self.sigma) def GetLinspace(self): """Get a linspace for plotting. Returns: numpy array """ low, high = self.mu-3*self.sigma, self.mu+3*self.sigma return np.linspace(low, high, 101) def Density(self, xs): """Evaluates this Pdf at xs. xs: scalar or sequence of floats returns: float or NumPy array of probability density """ return stats.norm.pdf(xs, self.mu, self.sigma) class ExponentialPdf(Pdf): """Represents the PDF of an exponential distribution.""" def __init__(self, lam=1, label=None): """Constructs an exponential Pdf with given parameter. lam: rate parameter label: string """ self.lam = lam self.label = label if label is not None else '_nolegend_' def __str__(self): return 'ExponentialPdf(%f)' % (self.lam) def GetLinspace(self): """Get a linspace for plotting. Returns: numpy array """ low, high = 0, 5.0/self.lam return np.linspace(low, high, 101) def Density(self, xs): """Evaluates this Pdf at xs. 
xs: scalar or sequence of floats returns: float or NumPy array of probability density """ return stats.expon.pdf(xs, scale=1.0/self.lam) class EstimatedPdf(Pdf): """Represents a PDF estimated by KDE.""" def __init__(self, sample, label=None): """Estimates the density function based on a sample. sample: sequence of data label: string """ self.label = label if label is not None else '_nolegend_' self.kde = stats.gaussian_kde(sample) low = min(sample) high = max(sample) self.linspace = np.linspace(low, high, 101) def __str__(self): return 'EstimatedPdf(label=%s)' % str(self.label) def GetLinspace(self): """Get a linspace for plotting. Returns: numpy array """ return self.linspace def Density(self, xs): """Evaluates this Pdf at xs. returns: float or NumPy array of probability density """ return self.kde.evaluate(xs) def Sample(self, n): """Generates a random sample from the estimated Pdf. n: size of sample """ # NOTE: we have to flatten because resample returns a 2-D # array for some reason. return self.kde.resample(n).flatten() def CredibleInterval(pmf, percentage=90): """Computes a credible interval for a given distribution. If percentage=90, computes the 90% CI. Args: pmf: Pmf object representing a posterior distribution percentage: float between 0 and 100 Returns: sequence of two floats, low and high """ cdf = pmf.MakeCdf() prob = (1 - percentage / 100.0) / 2 interval = cdf.Value(prob), cdf.Value(1 - prob) return interval def PmfProbLess(pmf1, pmf2): """Probability that a value from pmf1 is less than a value from pmf2. Args: pmf1: Pmf object pmf2: Pmf object Returns: float probability """ total = 0.0 for v1, p1 in pmf1.Items(): for v2, p2 in pmf2.Items(): if v1 < v2: total += p1 * p2 return total def PmfProbGreater(pmf1, pmf2): """Probability that a value from pmf1 is less than a value from pmf2. 
Args: pmf1: Pmf object pmf2: Pmf object Returns: float probability """ total = 0.0 for v1, p1 in pmf1.Items(): for v2, p2 in pmf2.Items(): if v1 > v2: total += p1 * p2 return total def PmfProbEqual(pmf1, pmf2): """Probability that a value from pmf1 equals a value from pmf2. Args: pmf1: Pmf object pmf2: Pmf object Returns: float probability """ total = 0.0 for v1, p1 in pmf1.Items(): for v2, p2 in pmf2.Items(): if v1 == v2: total += p1 * p2 return total def RandomSum(dists): """Chooses a random value from each dist and returns the sum. dists: sequence of Pmf or Cdf objects returns: numerical sum """ total = sum(dist.Random() for dist in dists) return total def SampleSum(dists, n): """Draws a sample of sums from a list of distributions. dists: sequence of Pmf or Cdf objects n: sample size returns: new Pmf of sums """ pmf = Pmf(RandomSum(dists) for i in range(n)) return pmf def EvalNormalPdf(x, mu, sigma): """Computes the unnormalized PDF of the normal distribution. x: value mu: mean sigma: standard deviation returns: float probability density """ return stats.norm.pdf(x, mu, sigma) def MakeNormalPmf(mu, sigma, num_sigmas, n=201): """Makes a PMF discrete approx to a Normal distribution. mu: float mean sigma: float standard deviation num_sigmas: how many sigmas to extend in each direction n: number of values in the Pmf returns: normalized Pmf """ pmf = Pmf() low = mu - num_sigmas * sigma high = mu + num_sigmas * sigma for x in np.linspace(low, high, n): p = EvalNormalPdf(x, mu, sigma) pmf.Set(x, p) pmf.Normalize() return pmf def EvalBinomialPmf(k, n, p): """Evaluates the binomial PMF. Returns the probabily of k successes in n trials with probability p. """ return stats.binom.pmf(k, n, p) def MakeBinomialPmf(n, p): """Evaluates the binomial PMF. Returns the distribution of successes in n trials with probability p. """ pmf = Pmf() for k in range(n+1): pmf[k] = stats.binom.pmf(k, n, p) return pmf def EvalHypergeomPmf(k, N, K, n): """Evaluates the hypergeometric PMF. 
Returns the probabily of k successes in n trials from a population N with K successes in it. """ return stats.hypergeom.pmf(k, N, K, n) def EvalPoissonPmf(k, lam): """Computes the Poisson PMF. k: number of events lam: parameter lambda in events per unit time returns: float probability """ # don't use the scipy function (yet). for lam=0 it returns NaN; # should be 0.0 # return stats.poisson.pmf(k, lam) return lam ** k * math.exp(-lam) / special.gamma(k+1) def MakePoissonPmf(lam, high, step=1): """Makes a PMF discrete approx to a Poisson distribution. lam: parameter lambda in events per unit time high: upper bound of the Pmf returns: normalized Pmf """ pmf = Pmf() for k in range(0, high + 1, step): p = EvalPoissonPmf(k, lam) pmf.Set(k, p) pmf.Normalize() return pmf def EvalExponentialPdf(x, lam): """Computes the exponential PDF. x: value lam: parameter lambda in events per unit time returns: float probability density """ return lam * math.exp(-lam * x) def EvalExponentialCdf(x, lam): """Evaluates CDF of the exponential distribution with parameter lam.""" return 1 - math.exp(-lam * x) def MakeExponentialPmf(lam, high, n=200): """Makes a PMF discrete approx to an exponential distribution. lam: parameter lambda in events per unit time high: upper bound n: number of values in the Pmf returns: normalized Pmf """ pmf = Pmf() for x in np.linspace(0, high, n): p = EvalExponentialPdf(x, lam) pmf.Set(x, p) pmf.Normalize() return pmf def StandardNormalCdf(x): """Evaluates the CDF of the standard Normal distribution. See http://en.wikipedia.org/wiki/Normal_distribution #Cumulative_distribution_function Args: x: float Returns: float """ return (math.erf(x / ROOT2) + 1) / 2 def EvalNormalCdf(x, mu=0, sigma=1): """Evaluates the CDF of the normal distribution. 
Args: x: float mu: mean parameter sigma: standard deviation parameter Returns: float """ return stats.norm.cdf(x, loc=mu, scale=sigma) def EvalNormalCdfInverse(p, mu=0, sigma=1): """Evaluates the inverse CDF of the normal distribution. See http://en.wikipedia.org/wiki/Normal_distribution#Quantile_function Args: p: float mu: mean parameter sigma: standard deviation parameter Returns: float """ return stats.norm.ppf(p, loc=mu, scale=sigma) def EvalLognormalCdf(x, mu=0, sigma=1): """Evaluates the CDF of the lognormal distribution. x: float or sequence mu: mean parameter sigma: standard deviation parameter Returns: float or sequence """ return stats.lognorm.cdf(x, loc=mu, scale=sigma) def RenderExpoCdf(lam, low, high, n=101): """Generates sequences of xs and ps for an exponential CDF. lam: parameter low: float high: float n: number of points to render returns: numpy arrays (xs, ps) """ xs = np.linspace(low, high, n) ps = 1 - np.exp(-lam * xs) #ps = stats.expon.cdf(xs, scale=1.0/lam) return xs, ps def RenderNormalCdf(mu, sigma, low, high, n=101): """Generates sequences of xs and ps for a Normal CDF. mu: parameter sigma: parameter low: float high: float n: number of points to render returns: numpy arrays (xs, ps) """ xs = np.linspace(low, high, n) ps = stats.norm.cdf(xs, mu, sigma) return xs, ps def RenderParetoCdf(xmin, alpha, low, high, n=50): """Generates sequences of xs and ps for a Pareto CDF. xmin: parameter alpha: parameter low: float high: float n: number of points to render returns: numpy arrays (xs, ps) """ if low < xmin: low = xmin xs = np.linspace(low, high, n) ps = 1 - (xs / xmin) ** -alpha #ps = stats.pareto.cdf(xs, scale=xmin, b=alpha) return xs, ps class Beta(object): """Represents a Beta distribution. 
See http://en.wikipedia.org/wiki/Beta_distribution """ def __init__(self, alpha=1, beta=1, label=None): """Initializes a Beta distribution.""" self.alpha = alpha self.beta = beta self.label = label if label is not None else '_nolegend_' def Update(self, data): """Updates a Beta distribution. data: pair of int (heads, tails) """ heads, tails = data self.alpha += heads self.beta += tails def Mean(self): """Computes the mean of this distribution.""" return self.alpha / (self.alpha + self.beta) def Random(self): """Generates a random variate from this distribution.""" return random.betavariate(self.alpha, self.beta) def Sample(self, n): """Generates a random sample from this distribution. n: int sample size """ size = n, return np.random.beta(self.alpha, self.beta, size) def EvalPdf(self, x): """Evaluates the PDF at x.""" return x ** (self.alpha - 1) * (1 - x) ** (self.beta - 1) def MakePmf(self, steps=101, label=None): """Returns a Pmf of this distribution. Note: Normally, we just evaluate the PDF at a sequence of points and treat the probability density as a probability mass. But if alpha or beta is less than one, we have to be more careful because the PDF goes to infinity at x=0 and x=1. In that case we evaluate the CDF and compute differences. The result is a little funny, because the values at 0 and 1 are not symmetric. Nevertheless, it is a reasonable discrete model of the continuous distribution, and behaves well as the number of values increases. """ if self.alpha < 1 or self.beta < 1: cdf = self.MakeCdf() pmf = cdf.MakePmf() return pmf xs = [i / (steps - 1.0) for i in range(steps)] probs = [self.EvalPdf(x) for x in xs] pmf = Pmf(dict(zip(xs, probs)), label=label) return pmf def MakeCdf(self, steps=101): """Returns the CDF of this distribution.""" xs = [i / (steps - 1.0) for i in range(steps)] ps = special.betainc(self.alpha, self.beta, xs) cdf = Cdf(xs, ps) return cdf def Percentile(self, ps): """Returns the given percentiles from this distribution. 
ps: scalar, array, or list of [0-100] """ ps = np.asarray(ps) / 100 xs = special.betaincinv(self.alpha, self.beta, ps) return xs class Dirichlet(object): """Represents a Dirichlet distribution. See http://en.wikipedia.org/wiki/Dirichlet_distribution """ def __init__(self, n, conc=1, label=None): """Initializes a Dirichlet distribution. n: number of dimensions conc: concentration parameter (smaller yields more concentration) label: string label """ if n < 2: raise ValueError('A Dirichlet distribution with ' 'n<2 makes no sense') self.n = n self.params = np.ones(n, dtype=np.float) * conc self.label = label if label is not None else '_nolegend_' def Update(self, data): """Updates a Dirichlet distribution. data: sequence of observations, in order corresponding to params """ m = len(data) self.params[:m] += data def Random(self): """Generates a random variate from this distribution. Returns: normalized vector of fractions """ p = np.random.gamma(self.params) return p / p.sum() def Likelihood(self, data): """Computes the likelihood of the data. Selects a random vector of probabilities from this distribution. Returns: float probability """ m = len(data) if self.n < m: return 0 x = data p = self.Random() q = p[:m] ** x return q.prod() def LogLikelihood(self, data): """Computes the log likelihood of the data. Selects a random vector of probabilities from this distribution. Returns: float log probability """ m = len(data) if self.n < m: return float('-inf') x = self.Random() y = np.log(x[:m]) * data return y.sum() def MarginalBeta(self, i): """Computes the marginal distribution of the ith element. See http://en.wikipedia.org/wiki/Dirichlet_distribution #Marginal_distributions i: int Returns: Beta object """ alpha0 = self.params.sum() alpha = self.params[i] return Beta(alpha, alpha0 - alpha) def PredictivePmf(self, xs, label=None): """Makes a predictive distribution. 
xs: values to go into the Pmf Returns: Pmf that maps from x to the mean prevalence of x """ alpha0 = self.params.sum() ps = self.params / alpha0 return Pmf(zip(xs, ps), label=label) def BinomialCoef(n, k): """Compute the binomial coefficient "n choose k". n: number of trials k: number of successes Returns: float """ return scipy.misc.comb(n, k) def LogBinomialCoef(n, k): """Computes the log of the binomial coefficient. http://math.stackexchange.com/questions/64716/ approximating-the-logarithm-of-the-binomial-coefficient n: number of trials k: number of successes Returns: float """ return n * math.log(n) - k * math.log(k) - (n - k) * math.log(n - k) def NormalProbability(ys, jitter=0.0): """Generates data for a normal probability plot. ys: sequence of values jitter: float magnitude of jitter added to the ys returns: numpy arrays xs, ys """ n = len(ys) xs = np.random.normal(0, 1, n) xs.sort() if jitter: ys = Jitter(ys, jitter) else: ys = np.array(ys) ys.sort() return xs, ys def Jitter(values, jitter=0.5): """Jitters the values by adding a uniform variate in (-jitter, jitter). values: sequence jitter: scalar magnitude of jitter returns: new numpy array """ n = len(values) return np.random.normal(0, jitter, n) + values def NormalProbabilityPlot(sample, fit_color='0.8', **options): """Makes a normal probability plot with a fitted line. sample: sequence of numbers fit_color: color string for the fitted line options: passed along to Plot """ xs, ys = NormalProbability(sample) mean, var = MeanVar(sample) std = math.sqrt(var) fit = FitLine(xs, mean, std) thinkplot.Plot(*fit, color=fit_color, label='model') xs, ys = NormalProbability(sample) thinkplot.Plot(xs, ys, **options) def Mean(xs): """Computes mean. xs: sequence of values returns: float mean """ return np.mean(xs) def Var(xs, mu=None, ddof=0): """Computes variance. 
def Var(xs, mu=None, ddof=0):
    """Computes variance.

    xs: sequence of values
    mu: optional known mean
    ddof: delta degrees of freedom

    returns: float
    """
    xs = np.asarray(xs)

    if mu is None:
        mu = xs.mean()

    ds = xs - mu
    return np.dot(ds, ds) / (len(xs) - ddof)


def Std(xs, mu=None, ddof=0):
    """Computes standard deviation.

    xs: sequence of values
    mu: optional known mean
    ddof: delta degrees of freedom

    returns: float
    """
    var = Var(xs, mu, ddof)
    return math.sqrt(var)


def MeanVar(xs, ddof=0):
    """Computes mean and variance.

    Based on http://stackoverflow.com/questions/19391149/
    numpy-mean-and-variance-from-single-function

    xs: sequence of values
    ddof: delta degrees of freedom

    returns: pair of float, mean and var
    """
    xs = np.asarray(xs)
    mean = xs.mean()
    s2 = Var(xs, mean, ddof)
    return mean, s2


def Trim(t, p=0.01):
    """Trims the largest and smallest elements of t.

    Args:
        t: sequence of numbers
        p: fraction of values to trim off each end

    Returns:
        sequence of values
    """
    n = int(p * len(t))
    t = sorted(t)
    # BUG FIX: the original returned sorted(t)[n:-n] unconditionally; when
    # n == 0 (short sequences) the slice [0:-0] == [0:0] is empty, so every
    # short list was silently trimmed to nothing.  Only slice when n > 0.
    if n > 0:
        t = t[n:-n]
    return t


def TrimmedMean(t, p=0.01):
    """Computes the trimmed mean of a sequence of numbers.

    Args:
        t: sequence of numbers
        p: fraction of values to trim off each end

    Returns:
        float
    """
    t = Trim(t, p)
    return Mean(t)


def TrimmedMeanVar(t, p=0.01):
    """Computes the trimmed mean and variance of a sequence of numbers.

    Side effect: sorts the list.

    Args:
        t: sequence of numbers
        p: fraction of values to trim off each end

    Returns:
        float
    """
    t = Trim(t, p)
    mu, var = MeanVar(t)
    return mu, var


def CohenEffectSize(group1, group2):
    """Compute Cohen's d.

    group1: Series or NumPy array
    group2: Series or NumPy array

    returns: float
    """
    diff = group1.mean() - group2.mean()

    n1, n2 = len(group1), len(group2)
    var1 = group1.var()
    var2 = group2.var()

    pooled_var = (n1 * var1 + n2 * var2) / (n1 + n2)
    d = diff / math.sqrt(pooled_var)
    return d
def Cov(xs, ys, meanx=None, meany=None):
    """Computes Cov(X, Y).

    Args:
        xs: sequence of values
        ys: sequence of values
        meanx: optional float mean of xs
        meany: optional float mean of ys

    Returns:
        Cov(X, Y)
    """
    xs = np.asarray(xs)
    ys = np.asarray(ys)

    if meanx is None:
        meanx = np.mean(xs)
    if meany is None:
        meany = np.mean(ys)

    return np.dot(xs - meanx, ys - meany) / len(xs)


def Corr(xs, ys):
    """Computes Corr(X, Y).

    Args:
        xs: sequence of values
        ys: sequence of values

    Returns:
        Corr(X, Y)
    """
    xs = np.asarray(xs)
    ys = np.asarray(ys)

    meanx, varx = MeanVar(xs)
    meany, vary = MeanVar(ys)

    return Cov(xs, ys, meanx, meany) / math.sqrt(varx * vary)


def SerialCorr(series, lag=1):
    """Computes the serial correlation of a series.

    series: Series
    lag: integer number of intervals to shift

    returns: float correlation
    """
    shifted = series.shift(lag)
    return Corr(series[lag:], shifted[lag:])


def SpearmanCorr(xs, ys):
    """Computes Spearman's rank correlation.

    Args:
        xs: sequence of values
        ys: sequence of values

    Returns:
        float Spearman's correlation
    """
    return Corr(pandas.Series(xs).rank(), pandas.Series(ys).rank())


def MapToRanks(t):
    """Returns a list of ranks corresponding to the elements in t.

    Args:
        t: sequence of numbers

    Returns:
        list of integer ranks, starting at 1
    """
    # indices of t sorted by value (stable, so ties keep original order)
    order = sorted(range(len(t)), key=lambda i: t[i])

    # element at original position order[i] receives rank i+1
    ranks = [0] * len(t)
    for rank, index in enumerate(order, start=1):
        ranks[index] = rank
    return ranks


def LeastSquares(xs, ys):
    """Computes a linear least squares fit for ys as a function of xs.

    Args:
        xs: sequence of values
        ys: sequence of values

    Returns:
        tuple of (intercept, slope)
    """
    meanx, varx = MeanVar(xs)
    meany = Mean(ys)

    slope = Cov(xs, ys, meanx, meany) / varx
    intercept = meany - slope * meanx
    return intercept, slope
def FitLine(xs, inter, slope):
    """Fits a line to the given data.

    xs: sequence of x

    returns: tuple of numpy arrays (sorted xs, fit ys)
    """
    sorted_xs = np.sort(xs)
    return sorted_xs, inter + slope * sorted_xs


def Residuals(xs, ys, inter, slope):
    """Computes residuals for a linear fit with parameters inter and slope.

    Args:
        xs: independent variable
        ys: dependent variable
        inter: float intercept
        slope: float slope

    Returns:
        list of residuals
    """
    predicted = inter + slope * np.asarray(xs)
    return np.asarray(ys) - predicted


def CoefDetermination(ys, res):
    """Computes the coefficient of determination (R^2) for given residuals.

    Args:
        ys: dependent variable
        res: residuals

    Returns:
        float coefficient of determination
    """
    return 1 - Var(res) / Var(ys)


def CorrelatedGenerator(rho):
    """Generates standard normal variates with serial correlation.

    rho: target coefficient of correlation

    Returns: iterable
    """
    x = random.gauss(0, 1)
    yield x

    sigma = math.sqrt(1 - rho**2)
    while True:
        x = random.gauss(x * rho, sigma)
        yield x


def CorrelatedNormalGenerator(mu, sigma, rho):
    """Generates normal variates with serial correlation.

    mu: mean of variate
    sigma: standard deviation of variate
    rho: target coefficient of correlation

    Returns: iterable
    """
    return (x * sigma + mu for x in CorrelatedGenerator(rho))


def RawMoment(xs, k):
    """Computes the kth raw moment of xs.
    """
    total = 0
    for x in xs:
        total += x ** k
    return total / len(xs)


def CentralMoment(xs, k):
    """Computes the kth central moment of xs.
    """
    mean = RawMoment(xs, 1)
    total = 0
    for x in xs:
        total += (x - mean) ** k
    return total / len(xs)


def StandardizedMoment(xs, k):
    """Computes the kth standardized moment of xs.
    """
    std = math.sqrt(CentralMoment(xs, 2))
    return CentralMoment(xs, k) / std**k


def Skewness(xs):
    """Computes skewness.
    """
    return StandardizedMoment(xs, 3)


def Median(xs):
    """Computes the median (50th percentile) of a sequence.

    xs: sequence or anything else that can initialize a Cdf

    returns: float
    """
    return Cdf(xs).Value(0.5)
def IQR(xs):
    """Computes the interquartile range of a sequence.

    xs: sequence or anything else that can initialize a Cdf

    returns: pair of floats
    """
    cdf = Cdf(xs)
    return cdf.Value(0.25), cdf.Value(0.75)


def PearsonMedianSkewness(xs):
    """Computes the Pearson median skewness.
    """
    median = Median(xs)
    mean = RawMoment(xs, 1)
    var = CentralMoment(xs, 2)
    std = math.sqrt(var)
    gp = 3 * (mean - median) / std
    return gp


class FixedWidthVariables(object):
    """Represents a set of variables in a fixed width file."""

    def __init__(self, variables, index_base=0):
        """Initializes.

        variables: DataFrame
        index_base: are the indices 0 or 1 based?

        Attributes:
        colspecs: list of (start, end) index tuples
        names: list of string variable names
        """
        self.variables = variables

        # note: by default, subtract 1 from colspecs
        self.colspecs = variables[['start', 'end']] - index_base

        # convert colspecs to a list of pair of int
        # BUG FIX: np.int was removed in NumPy 1.24; the builtin int is the
        # documented replacement and produces the same integer column specs.
        self.colspecs = self.colspecs.astype(int).values.tolist()
        self.names = variables['name']

    def ReadFixedWidth(self, filename, **options):
        """Reads a fixed width ASCII file.

        filename: string filename

        returns: DataFrame
        """
        df = pandas.read_fwf(filename,
                             colspecs=self.colspecs,
                             names=self.names,
                             **options)
        return df
def ReadStataDct(dct_file, **options):
    """Reads a Stata dictionary file.

    dct_file: string filename
    options: dict of options passed to open()

    returns: FixedWidthVariables object
    """
    type_map = dict(byte=int, int=int, long=int, float=float, double=float)

    var_info = []
    # BUG FIX: the original iterated over open(...) without ever closing the
    # handle; a context manager guarantees the file is released.
    with open(dct_file, **options) as f:
        for line in f:
            match = re.search(r'_column\(([^)]*)\)', line)
            if match:
                start = int(match.group(1))
                t = line.split()
                vtype, name, fstring = t[1:4]
                name = name.lower()
                if vtype.startswith('str'):
                    vtype = str
                else:
                    vtype = type_map[vtype]
                long_desc = ' '.join(t[4:]).strip('"')
                var_info.append((start, vtype, name, fstring, long_desc))

    columns = ['start', 'type', 'name', 'fstring', 'desc']
    variables = pandas.DataFrame(var_info, columns=columns)

    # fill in the end column by shifting the start column
    variables['end'] = variables.start.shift(-1)
    variables.loc[len(variables) - 1, 'end'] = 0

    dct = FixedWidthVariables(variables, index_base=1)
    return dct


def Resample(xs, n=None):
    """Draw a sample from xs with the same length as xs.

    xs: sequence
    n: sample size (default: len(xs))

    returns: NumPy array
    """
    if n is None:
        n = len(xs)
    return np.random.choice(xs, n, replace=True)


def SampleRows(df, nrows, replace=False):
    """Choose a sample of rows from a DataFrame.

    df: DataFrame
    nrows: number of rows
    replace: whether to sample with replacement

    returns: DataFrame
    """
    indices = np.random.choice(df.index, nrows, replace=replace)
    sample = df.loc[indices]
    return sample


def ResampleRows(df):
    """Resamples rows from a DataFrame.

    df: DataFrame

    returns: DataFrame
    """
    return SampleRows(df, len(df), replace=True)


def ResampleRowsWeighted(df, column='finalwgt'):
    """Resamples a DataFrame using probabilities proportional to given column.

    df: DataFrame
    column: string column name to use as weights

    returns: DataFrame
    """
    weights = df[column].copy()
    weights /= sum(weights)
    indices = np.random.choice(df.index, len(df), replace=True, p=weights)
    sample = df.loc[indices]
    return sample
def PercentileRow(array, p):
    """Selects the row from a sorted array that maps to percentile p.

    p: float 0--100

    returns: NumPy array (one row)
    """
    rows, cols = array.shape
    # BUG FIX: for p == 100 the original computed index == rows, one past
    # the last row, and raised IndexError; clamp to the final row.
    index = min(int(rows * p / 100), rows - 1)
    return array[index,]


def PercentileRows(ys_seq, percents):
    """Given a collection of lines, selects percentiles along vertical axis.

    For example, if ys_seq contains simulation results like ys as a
    function of time, and percents contains (5, 95), the result would
    be a 90% CI for each vertical slice of the simulation results.

    ys_seq: sequence of lines (y values)
    percents: list of percentiles (0-100) to select

    returns: list of NumPy arrays, one for each percentile
    """
    nrows = len(ys_seq)
    ncols = len(ys_seq[0])
    array = np.zeros((nrows, ncols))

    for i, ys in enumerate(ys_seq):
        array[i,] = ys

    # sort each vertical slice independently, then pick the requested rows
    array = np.sort(array, axis=0)

    rows = [PercentileRow(array, p) for p in percents]
    return rows


def Smooth(xs, sigma=2, **options):
    """Smooths a NumPy array with a Gaussian filter.

    xs: sequence
    sigma: standard deviation of the filter
    """
    # FIX: the scipy.ndimage.filters namespace was deprecated in SciPy 1.8
    # and later removed; the function lives directly in scipy.ndimage.
    return ndimage.gaussian_filter1d(xs, sigma, **options)
""" def VertLine(x): """Draws a vertical line at x.""" thinkplot.Plot([x, x], [0, 1], color='0.8') VertLine(self.actual) thinkplot.Cdf(self.test_cdf, label=label) def TestStatistic(self, data): """Computes the test statistic. data: data in whatever form is relevant """ raise UnimplementedMethodException() def MakeModel(self): """Build a model of the null hypothesis. """ pass def RunModel(self): """Run the model of the null hypothesis. returns: simulated data """ raise UnimplementedMethodException() def main(): pass if __name__ == '__main__': main()
# codeparrot/github-code-clean — dataset artifact marking the boundary between two unrelated concatenated source files
from header_common import *
from header_presentations import *
from header_mission_templates import *
from header_operations import *
from header_triggers import *
from header_items import *
from module_constants import *
import header_debug as dbg
import header_lazy_evaluation as lazy

####################################################################################################################
# Each presentation record contains the following fields:
# 1) Presentation id: used for referencing presentations in other files. The prefix prsnt_ is automatically added before each presentation id.
# 2) Presentation flags. See header_presentations.py for a list of available flags
# 3) Presentation background mesh: See module_meshes.py for a list of available background meshes
# 4) Triggers: Simple triggers that are associated with the presentation
####################################################################################################################

presentations = []

# ---------------------------------------------------------------------------
# game_credits
#
# Shows str_credits_0 .. str_credits_8 one after another (fade in, hold,
# fade out), then a three-overlay screen (str_credits_10/11/12), and finally
# the long scrolling credits text (str_credits_9).  The original source
# spelled out thirteen nearly identical try-branches by hand; since module
# system files are plain Python evaluated at compile time, the operation
# lists are generated here instead.  The constants below reproduce the
# original timings exactly (slide i appears at i*3500 ms and starts fading
# at i*3500 + 2000 ms).
# ---------------------------------------------------------------------------

credits_fade_time = 1000  # ms for each fade-in / fade-out animation
credits_hold_time = 2000  # ms a slide stays before its fade-out begins
credits_gap_time = 1500   # ms between a slide's fade-out and the next slide


def credits_obj(index):
    """Global variable holding the overlay id for credits slot `index`."""
    return "$g_presentation_credits_obj_%d" % index


def credits_alpha_flag(index):
    """Global variable flagging that overlay `index` has begun fading out."""
    return "$g_presentation_credits_obj_%d_alpha" % index


def credits_text_ops(index, string_id, size, pos_x, pos_y,
                     flags=tf_center_justify|tf_double_space|tf_vertical_align_center):
    """Ops that create one credits text overlay (invisible) and fade it in."""
    obj = credits_obj(index)
    return [
      (str_store_string, s1, string_id),
      (create_text_overlay, obj, s1, flags),
      (overlay_set_color, obj, 0),
      (overlay_set_alpha, obj, 0),
      (position_set_x, pos1, size),
      (position_set_y, pos1, size),
      (overlay_set_size, obj, pos1),
      (position_set_x, pos1, pos_x),
      (position_set_y, pos1, pos_y),
      (overlay_set_position, obj, pos1),
      (overlay_animate_to_alpha, obj, credits_fade_time, 0xFF),
    ]


def credits_load_ops():
    """ti_on_presentation_load: reset all overlay ids and fade-out flags."""
    ops = [(assign, credits_obj(i), -1) for i in range(13)]
    ops += [(assign, credits_alpha_flag(i), 0) for i in range(10)]
    return ops


def credits_run_ops():
    """ti_on_presentation_run: the full timed try-chain for the credits."""
    step = credits_hold_time + credits_gap_time  # 3500 ms per slide
    ops = [
      (store_trigger_param_1, ":cur_time"),
      (set_fixed_point_multiplier, 1000),
      (presentation_set_duration, 1000000),
      # any of the usual "skip" keys ends the credits immediately
      (try_begin),
        (this_or_next|key_clicked, key_space),
        (this_or_next|key_clicked, key_enter),
        (this_or_next|key_clicked, key_escape),
        (this_or_next|key_clicked, key_back_space),
        (this_or_next|key_clicked, key_left_mouse_button),
        (key_clicked, key_right_mouse_button),
        (presentation_set_duration, 0),
      (try_end),
      (try_begin),
    ]

    # slides 0..8: str_credits_i fades in at i*step, fades out at
    # i*step + hold.  The first two slides use a slightly smaller size.
    for i in range(9):
        size = 1500 if i < 2 else 1750
        if i > 0:
            ops += [(else_try), (gt, ":cur_time", i * step)]
        ops += [(lt, credits_obj(i), 0)]
        ops += credits_text_ops(i, "str_credits_%d" % i, size, 500, 375)
        ops += [
          (else_try),
          (gt, ":cur_time", i * step + credits_hold_time),
          (eq, credits_alpha_flag(i), 0),
          (assign, credits_alpha_flag(i), 1),
          (overlay_animate_to_alpha, credits_obj(i), credits_fade_time, 0),
        ]

    # the three-part screen (str_credits_10/11/12) appears together and is
    # keyed on slot 9's fade flag
    ops += [(else_try), (gt, ":cur_time", 9 * step), (lt, credits_obj(9), 0)]
    ops += credits_text_ops(9, "str_credits_10", 750, 250, 485)
    ops += credits_text_ops(10, "str_credits_11", 750, 750, 470)
    ops += credits_text_ops(11, "str_credits_12", 750, 500, 105)

    thanks_end = 12 * credits_hold_time + 9 * credits_gap_time  # 37500 ms
    ops += [
      (else_try),
      (gt, ":cur_time", thanks_end),
      (eq, credits_alpha_flag(9), 0),
      (assign, credits_alpha_flag(9), 1),
      (overlay_animate_to_alpha, credits_obj(9), credits_fade_time, 0),
      (overlay_animate_to_alpha, credits_obj(10), credits_fade_time, 0),
      (overlay_animate_to_alpha, credits_obj(11), credits_fade_time, 0),
    ]

    # the long scrolling credits text (str_credits_9): created fully opaque
    # below the screen and animated upward over 70 seconds
    scroll_start = 12 * credits_hold_time + 10 * credits_gap_time  # 39000 ms
    scroll_duration = 70000
    scroll_obj = credits_obj(12)
    ops += [
      (else_try),
      (gt, ":cur_time", scroll_start),
      (lt, scroll_obj, 0),
      (str_store_string, s1, "str_credits_9"),
      (create_text_overlay, scroll_obj, s1, tf_center_justify|tf_double_space),
      (overlay_set_color, scroll_obj, 0),
      (overlay_set_alpha, scroll_obj, 0xFF),
      (position_set_x, pos1, 1000),
      (position_set_y, pos1, 1000),
      (overlay_set_size, scroll_obj, pos1),
      (position_set_x, pos1, 500),
      (position_set_y, pos1, -4800),
      (overlay_set_position, scroll_obj, pos1),
      (position_set_x, pos1, 500),
      (position_set_y, pos1, 760),
      (overlay_animate_to_position, scroll_obj, scroll_duration, pos1),
      # once the scroll has finished, close the presentation
      (else_try),
      (gt, ":cur_time", scroll_start + scroll_duration),
      (presentation_set_duration, 0),
      (try_end),
    ]
    return ops


presentations.extend([
  ("game_credits", prsntf_read_only, "mesh_load_window",
   [
     (ti_on_presentation_load, credits_load_ops()),
     (ti_on_presentation_run, credits_run_ops()),
   ]),
])
[] for i, option in enumerate(profile_options): overlay_var = "$g_presentation_obj_profile_options_" + option[3:] block.extend([(create_text_overlay, reg0, lazy.add(profile_option_strings_begin, i, opmask_string), 0), (position_set_y, pos1, ":label_y"), (overlay_set_position, reg0, pos1), (val_sub, ":label_y", admin_panel_item_height), (create_check_box_overlay, overlay_var, "mesh_checkbox_off", "mesh_checkbox_on"), (position_set_y, pos2, ":checkbox_y"), (overlay_set_position, overlay_var, pos2), (overlay_set_val, overlay_var, option), (val_sub, ":checkbox_y", admin_panel_item_height), ]) return lazy.block(block) def prsnt_check_profile_options_overlays(): block = [] for option in profile_options: overlay_var = "$g_presentation_obj_profile_options_" + option[3:] block.extend([(else_try), (eq, ":object", overlay_var), (assign, option, ":value"), ]) return lazy.block(block) presentations.extend([ ("game_profile_banner_selection", 0, "mesh_load_window", # converted to a profile options selection [(ti_on_presentation_load, [(set_fixed_point_multiplier, 1000), (call_script, "script_load_profile_options"), (create_button_overlay, "$g_presentation_obj_profile_options_done", "str_done", tf_center_justify), (position_set_x, pos1, 500), (position_set_y, pos1, 50), (overlay_set_position, "$g_presentation_obj_profile_options_done", pos1), (position_set_x, pos1, 1500), (position_set_y, pos1, 1500), (overlay_set_size, "$g_presentation_obj_profile_options_done", pos1), (str_clear, s0), (create_text_overlay, reg0, s0, tf_scrollable), (position_set_x, pos1, 50), (position_set_y, pos1, 75), (overlay_set_position, reg0, pos1), (position_set_x, pos1, 900), (position_set_y, pos1, 625), (overlay_set_area_size, reg0, pos1), (set_container_overlay, reg0), (assign, ":label_y", 20 + (len(profile_options) * admin_panel_item_height)), (assign, ":checkbox_y", 27 + (len(profile_options) * admin_panel_item_height)), (position_set_x, pos1, 30), (position_set_x, pos2, 7), 
prsnt_create_profile_options_overlays(), (presentation_set_duration, 999999), ]), (ti_on_presentation_event_state_change, [(store_trigger_param_1, ":object"), (store_trigger_param_2, ":value"), (try_begin), (eq, ":object", "$g_presentation_obj_profile_options_done"), (presentation_set_duration, 0), (call_script, "script_store_profile_options"), prsnt_check_profile_options_overlays(), (try_end), ]), (ti_on_presentation_run, [(store_trigger_param_1, ":cur_time"), (try_begin), (key_clicked, key_escape), (gt, ":cur_time", 200), (presentation_set_duration, 0), (try_end), ]), ]), ("game_custom_battle_designer", prsntf_manual_end_only, 0, []), ("game_multiplayer_admin_panel", prsntf_manual_end_only, 0, # called by the game both when hosting a server from the client (scene editing mode in PW) and the admin panel [(ti_on_presentation_load, [(set_fixed_point_multiplier, 1000), (multiplayer_get_my_player, ":my_player_id"), (try_begin), # if no player, hosting a client server must have been selected, so give a list of scenes to edit (le, ":my_player_id", 0), (store_sub, ":num_scenes", scenes_end, scenes_begin), (try_begin), (gt, ":num_scenes", 20), (create_combo_label_overlay, "$g_presentation_obj_edit_mode_choose_scene"), (else_try), (create_combo_button_overlay, "$g_presentation_obj_edit_mode_choose_scene"), (try_end), (position_set_x, pos1, 800), (position_set_y, pos1, 800), (overlay_set_size, "$g_presentation_obj_edit_mode_choose_scene", pos1), (position_set_x, pos1, 500), (position_set_y, pos1, 600), (overlay_set_position, "$g_presentation_obj_edit_mode_choose_scene", pos1), (try_for_range_backwards, ":scene_id", scenes_begin, scenes_end), # add to the list from the end so the scenes are in the correct order (store_sub, ":name_string_id", ":scene_id", scenes_begin), (val_add, ":name_string_id", scene_names_begin), (str_store_string, s0, ":name_string_id"), (overlay_add_item, "$g_presentation_obj_edit_mode_choose_scene", s0), (try_end), (val_clamp, "$g_selected_scene", 
scenes_begin, scenes_end), (store_sub, ":scene_index", scenes_end, "$g_selected_scene"), (val_sub, ":scene_index", 1), (overlay_set_val, "$g_presentation_obj_edit_mode_choose_scene", ":scene_index"), (create_button_overlay, "$g_presentation_obj_edit_mode_start_scene", "str_edit_scene", tf_center_justify), (position_set_x, pos1, 480), (position_set_y, pos1, 200), (overlay_set_position, "$g_presentation_obj_edit_mode_start_scene", pos1), (position_set_x, pos1, 1500), (position_set_y, pos1, 1500), (overlay_set_size, "$g_presentation_obj_edit_mode_start_scene", pos1), (presentation_set_duration, 999999), (else_try), (assign, "$g_presentation_obj_edit_mode_choose_scene", -1), (assign, "$g_presentation_obj_edit_mode_start_scene", -1), (try_end), (gt, ":my_player_id", 0), # otherwise when connected to a server, show the admin panel (create_mesh_overlay, reg0, "mesh_mp_ui_host_maps_randomp"), (position_set_x, pos1, -1), (position_set_y, pos1, 550), (overlay_set_position, reg0, pos1), (position_set_x, pos1, 1002), (position_set_y, pos1, 1002), (overlay_set_size, reg0, pos1), (create_mesh_overlay, reg0, "mesh_mp_ui_host_main"), (position_set_x, pos1, -1), (position_set_y, pos1, -1), (overlay_set_position, reg0, pos1), (position_set_x, pos1, 1002), (position_set_y, pos1, 1002), (overlay_set_size, reg0, pos1), (assign, ":cur_y", 20 + (21 * admin_panel_item_height)), # fixed offset plus the number of panel items multiplied by the height (str_clear, s0), (create_text_overlay, reg0, s0, tf_scrollable), (position_set_x, pos1, 59), (position_set_y, pos1, 50), (overlay_set_position, reg0, pos1), (position_set_x, pos1, 640), (position_set_y, pos1, 520), (overlay_set_area_size, reg0, pos1), (set_container_overlay, reg0), (create_text_overlay, reg0, "str_add_to_game_servers_list", 0), (position_set_x, pos1, 30), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (create_check_box_overlay, "$g_presentation_obj_admin_panel_add_to_servers_list", "mesh_checkbox_off", 
"mesh_checkbox_on"), (position_set_x, pos1, 7), (store_add, ":special_cur_y", ":cur_y", 7), (position_set_y, pos1, ":special_cur_y"), (overlay_set_position, "$g_presentation_obj_admin_panel_add_to_servers_list", pos1), (server_get_add_to_game_servers_list, ":add_to_servers_list"), (overlay_set_val, "$g_presentation_obj_admin_panel_add_to_servers_list", ":add_to_servers_list"), (val_sub, ":cur_y", admin_panel_item_height), (create_text_overlay, reg0, "str_anti_cheat", 0), (position_set_x, pos1, 30), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (create_check_box_overlay, "$g_presentation_obj_admin_panel_anti_cheat", "mesh_checkbox_off", "mesh_checkbox_on"), (position_set_x, pos1, 7), (store_add, ":special_cur_y", ":cur_y", 7), (position_set_y, pos1, ":special_cur_y"), (overlay_set_position, "$g_presentation_obj_admin_panel_anti_cheat", pos1), (server_get_anti_cheat, ":server_anti_cheat"), (overlay_set_val, "$g_presentation_obj_admin_panel_anti_cheat", ":server_anti_cheat"), (val_sub, ":cur_y", admin_panel_item_height), (create_text_overlay, reg0, "str_server_name", 0), (position_set_x, pos1, 0), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (str_store_server_name, s0), (try_begin), (eq, "$g_renaming_server_allowed", 1), (create_simple_text_box_overlay, "$g_presentation_obj_admin_panel_server_name"), (position_set_x, pos1, 390), (position_set_y, pos1, ":cur_y"), (overlay_set_position, "$g_presentation_obj_admin_panel_server_name", pos1), (overlay_set_text, "$g_presentation_obj_admin_panel_server_name", s0), (else_try), (assign, "$g_presentation_obj_admin_panel_server_name", -1), (create_text_overlay, reg0, s0, 0), (position_set_x, pos1, 385), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (try_end), (val_sub, ":cur_y", admin_panel_item_height), (create_text_overlay, reg0, "str_game_password", 0), (position_set_x, pos1, 0), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), 
(create_simple_text_box_overlay, "$g_presentation_obj_admin_panel_password"), (position_set_x, pos1, 390), (position_set_y, pos1, ":cur_y"), (overlay_set_position, "$g_presentation_obj_admin_panel_password", pos1), (str_store_server_password, s0), (overlay_set_text, "$g_presentation_obj_admin_panel_password", s0), (val_sub, ":cur_y", admin_panel_item_height), (create_text_overlay, reg0, "str_welcome_message", 0), (position_set_x, pos1, 0), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (create_simple_text_box_overlay, "$g_presentation_obj_admin_panel_welcome_message"), (position_set_x, pos1, 390), (position_set_y, pos1, ":cur_y"), (overlay_set_position, "$g_presentation_obj_admin_panel_welcome_message", pos1), (str_store_welcome_message, s0), (overlay_set_text, "$g_presentation_obj_admin_panel_welcome_message", s0), (val_sub, ":cur_y", admin_panel_item_height), (create_text_overlay, reg0, "str_game_type", 0), (position_set_x, pos1, 0), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (create_combo_button_overlay, "$g_presentation_obj_admin_panel_game_type"), (position_set_x, pos1, 800), (position_set_y, pos1, 800), (overlay_set_size, "$g_presentation_obj_admin_panel_game_type", pos1), (position_set_x, pos1, 490), (position_set_y, pos1, ":cur_y"), (overlay_set_position, "$g_presentation_obj_admin_panel_game_type", pos1), (assign, ":name_string_id", game_type_names_end), (try_for_range_backwards, ":unused", game_type_mission_templates_begin, game_type_mission_templates_end), (val_sub, ":name_string_id", 1), (str_store_string, s0, ":name_string_id"), (overlay_add_item, "$g_presentation_obj_admin_panel_game_type", s0), (try_end), (assign, "$g_selected_game_type", "$g_game_type"), (val_clamp, "$g_selected_game_type", game_type_mission_templates_begin, game_type_mission_templates_end), (store_sub, ":game_type_index", lazy.sub(game_type_mission_templates_end, 1), "$g_selected_game_type"), (overlay_set_val, 
"$g_presentation_obj_admin_panel_game_type", ":game_type_index"), (val_sub, ":cur_y", admin_panel_item_height), (create_text_overlay, reg0, "str_scene", 0), (position_set_x, pos1, 0), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (store_sub, ":num_scenes", scenes_end, scenes_begin), (try_begin), (gt, ":num_scenes", 20), (create_combo_label_overlay, "$g_presentation_obj_admin_panel_scene"), (else_try), (create_combo_button_overlay, "$g_presentation_obj_admin_panel_scene"), (try_end), (position_set_x, pos1, 800), (position_set_y, pos1, 800), (overlay_set_size, "$g_presentation_obj_admin_panel_scene", pos1), (position_set_x, pos1, 490), (position_set_y, pos1, ":cur_y"), (overlay_set_position, "$g_presentation_obj_admin_panel_scene", pos1), (try_for_range_backwards, ":scene_id", scenes_begin, scenes_end), (store_sub, ":name_string_id", ":scene_id", scenes_begin), (val_add, ":name_string_id", scene_names_begin), (str_store_string, s0, ":name_string_id"), (overlay_add_item, "$g_presentation_obj_admin_panel_scene", s0), (try_end), (store_current_scene, "$g_selected_scene"), (store_sub, ":scene_index", lazy.sub(scenes_end, 1), "$g_selected_scene"), (overlay_set_val, "$g_presentation_obj_admin_panel_scene", ":scene_index"), (val_sub, ":cur_y", admin_panel_item_height), (assign, reg1, 1), (create_text_overlay, reg0, "str_max_players", 0), (position_set_x, pos1, 0), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (create_number_box_overlay, "$g_presentation_obj_admin_panel_max_players", min_num_players, max_num_players), (position_set_x, pos1, 390), (position_set_y, pos1, ":cur_y"), (overlay_set_position, "$g_presentation_obj_admin_panel_max_players", pos1), (server_get_max_num_players, ":max_players"), (overlay_set_val, "$g_presentation_obj_admin_panel_max_players", ":max_players"), (val_sub, ":cur_y", admin_panel_item_height), (create_text_overlay, reg0, "str_ghost_mode", 0), (position_set_x, pos1, 0), (position_set_y, pos1, 
":cur_y"), (overlay_set_position, reg0, pos1), (create_combo_button_overlay, "$g_presentation_obj_admin_panel_ghost_mode"), (position_set_x, pos1, 800), (position_set_y, pos1, 800), (overlay_set_size, "$g_presentation_obj_admin_panel_ghost_mode", pos1), (position_set_x, pos1, 490), (position_set_y, pos1, ":cur_y"), (overlay_set_position, "$g_presentation_obj_admin_panel_ghost_mode", pos1), (overlay_add_item, "$g_presentation_obj_admin_panel_ghost_mode", "str_free"), (overlay_add_item, "$g_presentation_obj_admin_panel_ghost_mode", "str_stick_to_any_player"), (overlay_add_item, "$g_presentation_obj_admin_panel_ghost_mode", "str_stick_to_team_members"), (overlay_add_item, "$g_presentation_obj_admin_panel_ghost_mode", "str_stick_to_team_members_view"), (server_get_ghost_mode, ":server_ghost_mode"), (overlay_set_val, "$g_presentation_obj_admin_panel_ghost_mode", ":server_ghost_mode"), (val_sub, ":cur_y", admin_panel_item_height), (create_text_overlay, reg0, "str_control_block_direction", 0), (position_set_x, pos1, 0), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (create_combo_button_overlay, "$g_presentation_obj_admin_panel_block_dir"), (position_set_x, pos1, 800), (position_set_y, pos1, 800), (overlay_set_size, "$g_presentation_obj_admin_panel_block_dir", pos1), (position_set_x, pos1, 490), (position_set_y, pos1, ":cur_y"), (overlay_set_position, "$g_presentation_obj_admin_panel_block_dir", pos1), (overlay_add_item, "$g_presentation_obj_admin_panel_block_dir", "str_automatic"), (overlay_add_item, "$g_presentation_obj_admin_panel_block_dir", "str_by_mouse_movement"), (server_get_control_block_dir, ":server_control_block_dir"), (overlay_set_val, "$g_presentation_obj_admin_panel_block_dir", ":server_control_block_dir"), (val_sub, ":cur_y", admin_panel_item_height), (create_text_overlay, reg0, "str_combat_speed", 0), (position_set_x, pos1, 0), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (create_combo_button_overlay, 
"$g_presentation_obj_admin_panel_combat_speed"), (position_set_x, pos1, 800), (position_set_y, pos1, 800), (overlay_set_size, "$g_presentation_obj_admin_panel_combat_speed", pos1), (position_set_x, pos1, 490), (position_set_y, pos1, ":cur_y"), (overlay_set_position, "$g_presentation_obj_admin_panel_combat_speed", pos1), (overlay_add_item, "$g_presentation_obj_admin_panel_combat_speed", "str_combat_speed_0"), (overlay_add_item, "$g_presentation_obj_admin_panel_combat_speed", "str_combat_speed_1"), (overlay_add_item, "$g_presentation_obj_admin_panel_combat_speed", "str_combat_speed_2"), (overlay_add_item, "$g_presentation_obj_admin_panel_combat_speed", "str_combat_speed_3"), (overlay_add_item, "$g_presentation_obj_admin_panel_combat_speed", "str_combat_speed_4"), (server_get_combat_speed, ":server_combat_speed"), (overlay_set_val, "$g_presentation_obj_admin_panel_combat_speed", ":server_combat_speed"), (val_sub, ":cur_y", admin_panel_item_height), (create_text_overlay, reg0, "str_game_time_limit", 0), (position_set_x, pos1, 0), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (create_number_box_overlay, "$g_presentation_obj_admin_panel_time_limit", 5, 40321), (position_set_x, pos1, 390), (position_set_y, pos1, ":cur_y"), (overlay_set_position, "$g_presentation_obj_admin_panel_time_limit", pos1), (overlay_set_val, "$g_presentation_obj_admin_panel_time_limit", "$g_game_time_limit"), (val_sub, ":cur_y", admin_panel_item_height), (create_text_overlay, reg0, "str_respawn_period", 0), (position_set_x, pos1, 0), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (create_number_box_overlay, "$g_presentation_obj_admin_panel_respawn_period", min_respawn_period, max_respawn_period), (position_set_x, pos1, 390), (position_set_y, pos1, ":cur_y"), (overlay_set_position, "$g_presentation_obj_admin_panel_respawn_period", pos1), (overlay_set_val, "$g_presentation_obj_admin_panel_respawn_period", "$g_respawn_period"), (val_sub, ":cur_y", 
admin_panel_item_height), (create_text_overlay, reg0, "str_starting_gold", 0), (position_set_x, pos1, 0), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (create_number_box_overlay, "$g_presentation_obj_admin_panel_starting_gold", 0, 10001), (position_set_x, pos1, 390), (position_set_y, pos1, ":cur_y"), (overlay_set_position, "$g_presentation_obj_admin_panel_starting_gold", pos1), (overlay_set_val, "$g_presentation_obj_admin_panel_starting_gold", "$g_starting_gold_multiplier"), (val_sub, ":cur_y", admin_panel_item_height), (create_text_overlay, reg0, "str_combat_gold_bonus", 0), (position_set_x, pos1, 0), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (create_number_box_overlay, "$g_presentation_obj_admin_panel_combat_gold", 0, 10001), (position_set_x, pos1, 390), (position_set_y, pos1, ":cur_y"), (overlay_set_position, "$g_presentation_obj_admin_panel_combat_gold", pos1), (overlay_set_val, "$g_presentation_obj_admin_panel_combat_gold", "$g_combat_gold_multiplier"), (val_sub, ":cur_y", admin_panel_item_height), (create_text_overlay, reg0, "str_kick_voteable", 0), (position_set_x, pos1, 30), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (create_check_box_overlay, "$g_presentation_obj_admin_panel_kick_voteable", "mesh_checkbox_off", "mesh_checkbox_on"), (position_set_x, pos1, 7), (store_add, ":special_cur_y", ":cur_y", 7), (position_set_y, pos1, ":special_cur_y"), (overlay_set_position, "$g_presentation_obj_admin_panel_kick_voteable", pos1), (overlay_set_val, "$g_presentation_obj_admin_panel_kick_voteable", "$g_kick_voteable"), (val_sub, ":cur_y", admin_panel_item_height), (create_text_overlay, reg0, "str_ban_voteable", 0), (position_set_x, pos1, 30), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (create_check_box_overlay, "$g_presentation_obj_admin_panel_ban_voteable", "mesh_checkbox_off", "mesh_checkbox_on"), (position_set_x, pos1, 7), (store_add, ":special_cur_y", 
":cur_y", 7), (position_set_y, pos1, ":special_cur_y"), (overlay_set_position, "$g_presentation_obj_admin_panel_ban_voteable", pos1), (overlay_set_val, "$g_presentation_obj_admin_panel_ban_voteable", "$g_ban_voteable"), (val_sub, ":cur_y", admin_panel_item_height), (create_text_overlay, reg0, "str_scenes_voteable", 0), (position_set_x, pos1, 30), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (create_check_box_overlay, "$g_presentation_obj_admin_panel_scenes_voteable", "mesh_checkbox_off", "mesh_checkbox_on"), (position_set_x, pos1, 7), (store_add, ":special_cur_y", ":cur_y", 7), (position_set_y, pos1, ":special_cur_y"), (overlay_set_position, "$g_presentation_obj_admin_panel_scenes_voteable", pos1), (overlay_set_val, "$g_presentation_obj_admin_panel_scenes_voteable", "$g_scenes_voteable"), (val_sub, ":cur_y", admin_panel_item_height), (create_text_overlay, reg0, "str_valid_vote_ratio", 0), (position_set_x, pos1, 0), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (create_number_box_overlay, "$g_presentation_obj_admin_panel_vote_ratio", 50, 101), (position_set_x, pos1, 390), (position_set_y, pos1, ":cur_y"), (overlay_set_position, "$g_presentation_obj_admin_panel_vote_ratio", pos1), (overlay_set_val, "$g_presentation_obj_admin_panel_vote_ratio", "$g_valid_vote_ratio"), (val_sub, ":cur_y", admin_panel_item_height), (create_text_overlay, reg0, "str_team_point_limit", 0), (position_set_x, pos1, 0), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (create_number_box_overlay, "$g_presentation_obj_admin_panel_victory_condition", 0, 1441), (position_set_x, pos1, 390), (position_set_y, pos1, ":cur_y"), (overlay_set_position, "$g_presentation_obj_admin_panel_victory_condition", pos1), (overlay_set_val, "$g_presentation_obj_admin_panel_victory_condition", "$g_victory_condition"), (val_sub, ":cur_y", admin_panel_item_height), (create_text_overlay, reg0, "str_num_bots_voteable", 0), (position_set_x, pos1, 0), 
(position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (create_number_box_overlay, "$g_presentation_obj_admin_panel_max_herd_animals", 0, 1001), (position_set_x, pos1, 390), (position_set_y, pos1, ":cur_y"), (overlay_set_position, "$g_presentation_obj_admin_panel_max_herd_animals", pos1), (overlay_set_val, "$g_presentation_obj_admin_panel_max_herd_animals", "$g_max_herd_animal_count"), (val_sub, ":cur_y", admin_panel_item_height), (create_text_overlay, reg0, "str_bot_count", 0), (position_set_x, pos1, 0), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (create_number_box_overlay, "$g_presentation_obj_admin_panel_bot_count", 0, 101), (position_set_x, pos1, 390), (position_set_y, pos1, ":cur_y"), (overlay_set_position, "$g_presentation_obj_admin_panel_bot_count", pos1), (overlay_set_val, "$g_presentation_obj_admin_panel_bot_count", "$g_bot_count"), (set_container_overlay, -1), (create_button_overlay, "$g_presentation_obj_admin_panel_done", "str_done", tf_center_justify), (position_set_x, pos1, 825), (position_set_y, pos1, 50), (overlay_set_position, "$g_presentation_obj_admin_panel_done", pos1), (position_set_x, pos1, 1500), (position_set_y, pos1, 1500), (overlay_set_size, "$g_presentation_obj_admin_panel_done", pos1), (create_button_overlay, "$g_presentation_obj_admin_panel_start_scene", "str_start_scene", tf_center_justify), (position_set_x, pos1, 825), (position_set_y, pos1, 90), (overlay_set_position, "$g_presentation_obj_admin_panel_start_scene", pos1), (position_set_x, pos1, 1500), (position_set_y, pos1, 1500), (overlay_set_size, "$g_presentation_obj_admin_panel_start_scene", pos1), (presentation_set_duration, 999999), ]), (ti_on_presentation_event_state_change, [(store_trigger_param_1, ":object"), (store_trigger_param_2, ":value"), (try_begin), # edit scene mode (gt, "$g_presentation_obj_edit_mode_choose_scene", -1), (try_begin), (eq, ":object", "$g_presentation_obj_edit_mode_choose_scene"), (store_sub, "$g_selected_scene", 
scenes_end, ":value"), (val_sub, "$g_selected_scene", 1), (else_try), (eq, ":object", "$g_presentation_obj_edit_mode_start_scene"), (team_set_faction, 0, 0), (team_set_faction, 1, 0), (start_multiplayer_mission, "mt_edit_scene", "$g_selected_scene", 1), (try_end), (try_end), (eq, "$g_presentation_obj_edit_mode_choose_scene", -1), (try_begin), # admin panel (eq, ":object", "$g_presentation_obj_admin_panel_add_to_servers_list"), (multiplayer_send_2_int_to_server, client_event_admin_set_game_rule, command_set_add_to_game_servers_list, ":value"), (else_try), (eq, ":object", "$g_presentation_obj_admin_panel_anti_cheat"), (multiplayer_send_2_int_to_server, client_event_admin_set_game_rule, command_set_anti_cheat, ":value"), (else_try), (eq, ":object", "$g_presentation_obj_admin_panel_server_name"), (multiplayer_send_string_to_server, client_event_admin_set_server_name, s0), (else_try), (eq, ":object", "$g_presentation_obj_admin_panel_password"), (multiplayer_send_string_to_server, client_event_admin_set_password, s0), (else_try), (eq, ":object", "$g_presentation_obj_admin_panel_welcome_message"), (server_set_welcome_message, s0), (multiplayer_send_string_to_server, client_event_admin_set_welcome_message, s0), (else_try), (eq, ":object", "$g_presentation_obj_admin_panel_game_type"), (store_sub, "$g_selected_game_type", lazy.sub(game_type_mission_templates_end, 1), ":value"), (else_try), (eq, ":object", "$g_presentation_obj_admin_panel_scene"), (store_sub, "$g_selected_scene", lazy.sub(scenes_end, 1), ":value"), (else_try), (eq, ":object", "$g_presentation_obj_admin_panel_start_scene"), (multiplayer_send_3_int_to_server, client_event_admin_set_game_rule, command_start_scene, "$g_selected_scene", "$g_selected_game_type"), (presentation_set_duration, 0), (else_try), (eq, ":object", "$g_presentation_obj_admin_panel_max_players"), (multiplayer_send_2_int_to_server, client_event_admin_set_game_rule, command_set_max_players, ":value"), (else_try), (eq, ":object", 
"$g_presentation_obj_admin_panel_ghost_mode"), (multiplayer_send_2_int_to_server, client_event_admin_set_game_rule, command_set_ghost_mode, ":value"), (else_try), (eq, ":object", "$g_presentation_obj_admin_panel_block_dir"), (multiplayer_send_2_int_to_server, client_event_admin_set_game_rule, command_set_control_block_direction, ":value"), (else_try), (eq, ":object", "$g_presentation_obj_admin_panel_combat_speed"), (multiplayer_send_2_int_to_server, client_event_admin_set_game_rule, command_set_combat_speed, ":value"), (else_try), (eq, ":object", "$g_presentation_obj_admin_panel_time_limit"), (multiplayer_send_2_int_to_server, client_event_admin_set_game_rule, command_set_map_time_limit, ":value"), (else_try), (eq, ":object", "$g_presentation_obj_admin_panel_respawn_period"), (multiplayer_send_2_int_to_server, client_event_admin_set_game_rule, command_set_respawn_period, ":value"), (else_try), (eq, ":object", "$g_presentation_obj_admin_panel_time_limit"), (multiplayer_send_2_int_to_server, client_event_admin_set_game_rule, command_set_map_time_limit, ":value"), (else_try), (eq, ":object", "$g_presentation_obj_admin_panel_starting_gold"), (multiplayer_send_2_int_to_server, client_event_admin_set_game_rule, command_set_starting_gold, ":value"), (else_try), (eq, ":object", "$g_presentation_obj_admin_panel_combat_gold"), (multiplayer_send_2_int_to_server, client_event_admin_set_game_rule, command_set_combat_gold_bonus, ":value"), (else_try), (eq, ":object", "$g_presentation_obj_admin_panel_kick_voteable"), (multiplayer_send_2_int_to_server, client_event_admin_set_game_rule, command_set_kick_voteable, ":value"), (else_try), (eq, ":object", "$g_presentation_obj_admin_panel_ban_voteable"), (multiplayer_send_2_int_to_server, client_event_admin_set_game_rule, command_set_ban_voteable, ":value"), (else_try), (eq, ":object", "$g_presentation_obj_admin_panel_scenes_voteable"), (multiplayer_send_2_int_to_server, client_event_admin_set_game_rule, command_set_maps_voteable, 
":value"), (else_try), (eq, ":object", "$g_presentation_obj_admin_panel_vote_ratio"), (multiplayer_send_2_int_to_server, client_event_admin_set_game_rule, command_set_valid_vote_ratio, ":value"), (else_try), (eq, ":object", "$g_presentation_obj_admin_panel_victory_condition"), (multiplayer_send_2_int_to_server, client_event_admin_set_game_rule, command_set_team_point_limit, ":value"), (else_try), (eq, ":object", "$g_presentation_obj_admin_panel_max_herd_animals"), (multiplayer_send_2_int_to_server, client_event_admin_set_game_rule, command_set_num_bots_voteable, ":value"), (else_try), (eq, ":object", "$g_presentation_obj_admin_panel_bot_count"), (multiplayer_send_2_int_to_server, client_event_admin_set_game_rule, command_set_bot_count, ":value"), (else_try), (eq, ":object", "$g_presentation_obj_admin_panel_done"), (presentation_set_duration, 0), (try_end), ]), (ti_on_presentation_run, [(store_trigger_param_1, ":cur_time"), (try_begin), (key_clicked, key_escape), (gt, ":cur_time", 200), (presentation_set_duration, 0), (try_end), ]), ]), ("game_before_quit", 0, "mesh_load_window", []), ("game_rules", prsntf_manual_end_only, 0, # lists server settings [(ti_on_presentation_load, [(set_fixed_point_multiplier, 1000), (create_mesh_overlay, reg0, "mesh_mp_ui_welcome_panel"), (position_set_x, pos1, 200), (position_set_y, pos1, 400), (overlay_set_position, reg0, pos1), (str_store_welcome_message, s0), (try_begin), (neg|str_is_empty, s0), (str_clear, s3), (str_store_string, s2, s0), (str_store_string, s2, "str_s2_s3"), (str_store_string, s2, "str_s2_s3"), (else_try), (str_clear, s2), (try_end), (str_store_string, s3, "str_game_rules"), (str_store_string, s2, "str_s2_s3"), (str_store_string, s0, "str_server_name"), (str_store_server_name, s1), (str_store_string, s3, "str_s0_s1"), (str_store_string, s2, "str_s2_s3"), (store_current_scene, ":current_scene"), (val_sub, ":current_scene", scenes_begin), (val_add, ":current_scene", scene_names_begin), (str_store_string, s0, 
"str_scene_name"), (str_store_string, s1, ":current_scene"), (str_store_string, s3, "str_s0_s1"), (str_store_string, s2, "str_s2_s3"), (store_mission_timer_a, ":mission_timer"), (val_add, ":mission_timer", "$g_server_mission_timer_when_player_joined"), (store_mul, ":remaining_seconds", "$g_game_time_limit", 60), (val_sub, ":remaining_seconds", ":mission_timer"), (store_div, reg0, ":remaining_seconds", 3600), (val_mod, ":remaining_seconds", 3600), (store_div, reg1, ":remaining_seconds", 60), (store_mod, reg2, ":remaining_seconds", 60), (try_begin), (ge, reg1, 10), (str_clear, s0), (else_try), (str_store_string, s0, "str_zero"), (try_end), (try_begin), (ge, reg2, 10), (str_clear, s1), (else_try), (str_store_string, s1, "str_zero"), (try_end), (str_store_string, s3, "str_remaining_time_reg0_s0reg1_s1reg2"), (str_store_string, s2, "str_s2_s3"), (assign, ":loop_end", 100), (try_for_range, ":current_option", 0, ":loop_end"), (assign, reg0, -12345), (call_script, "script_game_get_multiplayer_server_option_for_mission_template", 0, ":current_option"), (try_begin), (eq, reg0, -12345), (assign, ":loop_end", 0), (else_try), (call_script, "script_game_multiplayer_server_option_for_mission_template_to_string", 0, ":current_option", reg0), (str_store_string, s3, s0), (str_store_string, s2, "str_s2_s3"), (try_end), (try_end), (create_text_overlay, reg0, s2, tf_scrollable), (overlay_set_color, reg0, 0xFFFFFF), (position_set_x, pos1, 230), (position_set_y, pos1, 425), (overlay_set_position, reg0, pos1), (position_set_x, pos1, 900), (position_set_y, pos1, 900), (overlay_set_size, reg0, pos1), (position_set_x, pos1, 540), (position_set_y, pos1, 150), (overlay_set_area_size, reg0, pos1), (presentation_set_duration, 999999), ]), (ti_on_presentation_run, [(store_trigger_param_1, ":cur_time"), (try_begin), (key_clicked, key_escape), (gt, ":cur_time", 200), (presentation_set_duration, 0), (try_end), ]), ]), ("escape_menu", prsntf_manual_end_only, 0, [(ti_on_presentation_load, 
[(set_fixed_point_multiplier, 1000),
      # Menu background.
      (create_mesh_overlay, reg0, "mesh_mp_ingame_menu"),
      (position_set_x, pos1, 250),
      (position_set_y, pos1, 80),
      (overlay_set_position, reg0, pos1),
      (position_set_x, pos1, 1000),
      (position_set_y, pos1, 1000),
      (overlay_set_size, reg0, pos1),
      # Scrollable container that will hold the menu buttons.
      (str_clear, s0),
      (create_text_overlay, reg0, s0, tf_scrollable_style_2),
      (position_set_x, pos1, 285),
      (position_set_y, pos1, 125),
      (overlay_set_position, reg0, pos1),
      (position_set_x, pos1, 405),
      (position_set_y, pos1, 500),
      (overlay_set_area_size, reg0, pos1),
      (set_container_overlay, reg0),
      # pos2 is the shared 90% size applied to every button below.
      (init_position, pos2),
      (position_set_x, pos2, 900),
      (position_set_y, pos2, 900),
      (assign, ":cur_y", 10),
      (multiplayer_get_my_player, ":my_player_id"),
      (player_get_team_no, ":my_team", ":my_player_id"),
      # First button reads "join game" before the first spawn / for spectators,
      # otherwise "spectate".
      (try_begin),
        (this_or_next|eq, "$g_player_has_spawned_after_connecting", 0),
        (eq, ":my_team", team_spectators),
        (assign, ":string_id", "str_join_game"),
      (else_try),
        (assign, ":string_id", "str_spectate"),
      (try_end),
      (create_button_overlay, reg0, ":string_id", 0),
      (assign, "$g_presentation_obj_escape_menu_spectate", reg0),
      (overlay_set_color, reg0, 0xFFFFFF),
      (overlay_set_size, reg0, pos2),
      (val_add, ":cur_y", escape_menu_item_height),
      (create_button_overlay, reg0, "str_change_options", 0),
      (assign, "$g_presentation_obj_escape_menu_options", reg0),
      (overlay_set_color, reg0, 0xFFFFFF),
      (overlay_set_size, reg0, pos2),
      (val_add, ":cur_y", escape_menu_item_height),
      (create_button_overlay, reg0, "str_change_controls", 0),
      (assign, "$g_presentation_obj_escape_menu_controls", reg0),
      (overlay_set_color, reg0, 0xFFFFFF),
      (overlay_set_size, reg0, pos2),
      (val_add, ":cur_y", escape_menu_item_height),
      (create_button_overlay, reg0, "str_show_rules", 0),
      (assign, "$g_presentation_obj_escape_menu_show_rules", reg0),
      (overlay_set_color, reg0, 0xFFFFFF),
      (overlay_set_size, reg0, pos2),
      (val_add, ":cur_y", escape_menu_item_height),
      (create_button_overlay, reg0, "str_show_info", 0),
      (assign, "$g_presentation_obj_escape_menu_show_info", reg0),
      (overlay_set_color, reg0, 0xFFFFFF),
      (overlay_set_size, reg0, pos2),
      (val_add, ":cur_y", escape_menu_item_height),
      (create_button_overlay, reg0, "str_request_poll", 0),
      (assign, "$g_presentation_obj_escape_menu_request_poll", reg0),
      (overlay_set_color, reg0, 0xFFFFFF),
      (overlay_set_size, reg0, pos2),
      (val_add, ":cur_y", escape_menu_item_height),
      # Admin-only entries; -1 marks "button not created".
      (assign, "$g_presentation_obj_escape_menu_admin_panel", -1),
      (assign, "$g_presentation_obj_escape_menu_admin_tools", -1),
      (assign, "$g_presentation_obj_escape_menu_admin_items", -1),
      (try_begin),
        (ge, ":my_player_id", 0),
        (player_is_admin, ":my_player_id"),
        # Admin panel button, unless disabled via the player's admin slot.
        (try_begin),
          (player_slot_eq, ":my_player_id", slot_player_admin_no_panel, 0),
          (create_button_overlay, reg0, "str_admin_panel", 0),
          (assign, "$g_presentation_obj_escape_menu_admin_panel", reg0),
          (overlay_set_color, reg0, 0xFFFFFF),
          (overlay_set_size, reg0, pos2),
          (val_add, ":cur_y", escape_menu_item_height),
        (try_end),
        (create_button_overlay, reg0, "str_admin_tools", 0),
        (assign, "$g_presentation_obj_escape_menu_admin_tools", reg0),
        (overlay_set_color, reg0, 0xFFFFFF),
        (overlay_set_size, reg0, pos2),
        (val_add, ":cur_y", escape_menu_item_height),
        (try_begin),
          (player_slot_eq, ":my_player_id", slot_player_admin_no_all_items, 0),
          (create_button_overlay, reg0, "str_admin_items", 0),
          (assign, "$g_presentation_obj_escape_menu_admin_items", reg0),
          (overlay_set_color, reg0, 0xFFFFFF),
          (overlay_set_size, reg0, pos2),
          (val_add, ":cur_y", escape_menu_item_height),
        (try_end),
      (try_end),
      # Faction-admin entry for faction lords only.
      (try_begin),
        (player_slot_eq, ":my_player_id", slot_player_is_lord, 1),
        (create_button_overlay, reg0, "str_faction_admin", 0),
        (assign, "$g_presentation_obj_escape_menu_faction_admin", reg0),
        (overlay_set_color, reg0, 0xFFFFFF),
        (overlay_set_size, reg0, pos2),
        (val_add, ":cur_y", escape_menu_item_height),
      (else_try),
        (assign, "$g_presentation_obj_escape_menu_faction_admin", -1),
      (try_end),
      (create_button_overlay, reg0, "str_quit", 0),
      (assign, "$g_presentation_obj_escape_menu_quit", reg0),
      (overlay_set_color, reg0, 0xFFFFFF),
      (overlay_set_size, reg0, pos2),
      (val_add, ":cur_y", escape_menu_item_height),
      (create_text_overlay, reg0, "str_choose_an_option", 0),
      (overlay_set_color, reg0, 0xFFFFFF),
      (position_set_x, pos1, 0),
      (position_set_y, pos1, ":cur_y"),
      (overlay_set_position, reg0, pos1),
      (overlay_set_size, reg0, pos2),
      # Lay the created buttons out top-to-bottom by stepping :cur_y back down.
      (position_set_x, pos1, 130),
      (val_sub, ":cur_y", escape_menu_item_height),
      (position_set_y, pos1, ":cur_y"),
      (overlay_set_position, "$g_presentation_obj_escape_menu_spectate", pos1),
      (val_sub, ":cur_y", escape_menu_item_height),
      (position_set_y, pos1, ":cur_y"),
      (overlay_set_position, "$g_presentation_obj_escape_menu_options", pos1),
      (val_sub, ":cur_y", escape_menu_item_height),
      (position_set_y, pos1, ":cur_y"),
      (overlay_set_position, "$g_presentation_obj_escape_menu_controls", pos1),
      (val_sub, ":cur_y", escape_menu_item_height),
      (position_set_y, pos1, ":cur_y"),
      (overlay_set_position, "$g_presentation_obj_escape_menu_show_rules", pos1),
      (val_sub, ":cur_y", escape_menu_item_height),
      (position_set_y, pos1, ":cur_y"),
      (overlay_set_position, "$g_presentation_obj_escape_menu_show_info", pos1),
      (val_sub, ":cur_y", escape_menu_item_height),
      (position_set_y, pos1, ":cur_y"),
      (overlay_set_position, "$g_presentation_obj_escape_menu_request_poll", pos1),
      (try_begin),
        (gt, "$g_presentation_obj_escape_menu_admin_panel", -1),
        (val_sub, ":cur_y", escape_menu_item_height),
        (position_set_y, pos1, ":cur_y"),
        (overlay_set_position, "$g_presentation_obj_escape_menu_admin_panel", pos1),
      (try_end),
      (try_begin),
        (gt, "$g_presentation_obj_escape_menu_admin_tools", -1),
        (val_sub, ":cur_y", escape_menu_item_height),
        (position_set_y, pos1, ":cur_y"),
        (overlay_set_position, "$g_presentation_obj_escape_menu_admin_tools", pos1),
      (try_end),
      (try_begin),
        (gt, "$g_presentation_obj_escape_menu_admin_items", -1),
        (val_sub, ":cur_y", escape_menu_item_height),
        (position_set_y, pos1, ":cur_y"),
        (overlay_set_position, "$g_presentation_obj_escape_menu_admin_items", pos1),
      (try_end),
      (try_begin),
        (gt, "$g_presentation_obj_escape_menu_faction_admin", -1),
        (val_sub, ":cur_y", escape_menu_item_height),
        (position_set_y, pos1, ":cur_y"),
        (overlay_set_position, "$g_presentation_obj_escape_menu_faction_admin", pos1),
      (try_end),
      (val_sub, ":cur_y", escape_menu_item_height),
      (position_set_y, pos1, ":cur_y"),
      (overlay_set_position, "$g_presentation_obj_escape_menu_quit", pos1),
      (presentation_set_duration, 999999),
      ]),
    (ti_on_presentation_event_state_change,
      # Dispatch on which button was pressed; every branch closes the menu
      # before performing its action.
      [(store_trigger_param_1, ":object"),
       (try_begin),
         (eq, ":object", "$g_presentation_obj_escape_menu_spectate"),
         (presentation_set_duration, 0),
         (multiplayer_send_message_to_server, client_event_request_spawn_point),
         (assign, "$g_player_has_spawned_after_connecting", 1),
       (else_try),
         (eq, ":object", "$g_presentation_obj_escape_menu_options"),
         (presentation_set_duration, 0),
         (change_screen_options),
       (else_try),
         (eq, ":object", "$g_presentation_obj_escape_menu_controls"),
         (presentation_set_duration, 0),
         (change_screen_controls),
       (else_try),
         (eq, ":object", "$g_presentation_obj_escape_menu_show_rules"),
         (presentation_set_duration, 0),
         (multiplayer_send_message_to_server, client_event_request_game_rules),
       (else_try),
         (eq, ":object", "$g_presentation_obj_escape_menu_show_info"),
         (presentation_set_duration, 0),
         (call_script, "script_show_welcome_message"),
       (else_try),
         (eq, ":object", "$g_presentation_obj_escape_menu_request_poll"),
         (presentation_set_duration, 0),
         (start_presentation, "prsnt_poll_menu"),
       (else_try),
         (eq, ":object", "$g_presentation_obj_escape_menu_admin_panel"),
         (presentation_set_duration, 0),
         # The extra int 1 distinguishes this request from the plain
         # "show rules" request sent above.
         (multiplayer_send_int_to_server, client_event_request_game_rules, 1),
       (else_try),
         (eq, ":object", "$g_presentation_obj_escape_menu_admin_tools"),
         (presentation_set_duration, 0),
         (start_presentation, "prsnt_admin_menu"),
       (else_try),
         (eq, ":object", "$g_presentation_obj_escape_menu_admin_items"),
         (presentation_set_duration, 0),
         (start_presentation, "prsnt_admin_item_select"),
       (else_try),
         (eq, ":object", "$g_presentation_obj_escape_menu_faction_admin"),
         (presentation_set_duration, 0),
         (start_presentation, "prsnt_faction_admin_menu"),
       (else_try),
         (eq, ":object", "$g_presentation_obj_escape_menu_quit"),
         (presentation_set_duration, 0),
         (finish_mission, 0),
       (try_end),
       ]),
    (ti_on_presentation_run,
      # Escape dismisses the menu after a short delay.
      [(store_trigger_param_1, ":cur_time"),
       (try_begin),
         (key_clicked, key_escape),
         (gt, ":cur_time", 200),
         (presentation_set_duration, 0),
       (try_end),
       ]),
    ]),

  ("poll_menu", prsntf_manual_end_only, 0,
   [(ti_on_presentation_load,
     [(set_fixed_point_multiplier, 1000),
      (create_mesh_overlay, reg0, "mesh_mp_ingame_menu"),
      (position_set_x, pos1, 250),
      (position_set_y, pos1, 80),
      (overlay_set_position, reg0, pos1),
      (position_set_x, pos1, 1000),
      (position_set_y, pos1, 1000),
      (overlay_set_size, reg0, pos1),
      (str_clear, s0),
      (create_text_overlay, reg0, s0, tf_scrollable_style_2),
      (position_set_x, pos1, 285),
      (position_set_y, pos1, 125),
      (overlay_set_position, reg0, pos1),
      (position_set_x, pos1, 405),
      (position_set_y, pos1, 500),
      (overlay_set_area_size, reg0, pos1),
      (set_container_overlay, reg0),
      (init_position, pos2),
      (position_set_x, pos2, 900),
      (position_set_y, pos2, 900),
      (assign, ":cur_y", 10),
      (position_set_x, pos1, 200),
      # reg0 carries the gold cost embedded in each button label
      # ("str_s0__reg0_"); it stays 0 when the game type is "no money".
      (assign, reg0, 0),
      (multiplayer_get_my_player, ":my_player_id"),
      # "Propose faction lord" poll: only offered to members of an unlocked
      # castle faction.
      (try_begin),
        # NOTE(review): player_get_slot is called here with only a destination
        # and a player - compare with the usual
        # (player_get_slot, dest, player, slot) arity; confirm a slot constant
        # was not lost.
        (player_get_slot, ":faction_id", ":my_player_id"),
        (is_between, ":faction_id", castle_factions_begin, factions_end),
        (faction_slot_eq, ":faction_id", slot_faction_is_locked, 0),
        (try_begin),
          (neq, "$g_game_type", "mt_no_money"),
          (assign, reg0, poll_cost_faction_lord),
        (try_end),
        (str_store_string, s0, "str_choose_poll_faction_lord"),
        (create_button_overlay, "$g_presentation_obj_poll_menu_faction_lord", "str_s0__reg0_", tf_center_justify),
        (overlay_set_color, "$g_presentation_obj_poll_menu_faction_lord", 0xFFFFFF),
        (overlay_set_size, "$g_presentation_obj_poll_menu_faction_lord", pos2),
        (position_set_y, pos1, ":cur_y"),
        (overlay_set_position, "$g_presentation_obj_poll_menu_faction_lord", pos1),
        (val_add, ":cur_y", escape_menu_item_height),
      (else_try),
        (assign, "$g_presentation_obj_poll_menu_faction_lord", -1),
      (try_end),
      # Ban poll (only if the server allows ban votes).
      (try_begin),
        (eq, "$g_ban_voteable", 1),
        (try_begin),
          (neq, "$g_game_type", "mt_no_money"),
          (assign, reg0, poll_cost_ban_player),
        (try_end),
        (str_store_string, s0, "str_choose_poll_ban"),
        (create_button_overlay, "$g_presentation_obj_poll_menu_ban", "str_s0__reg0_", tf_center_justify),
        (overlay_set_color, "$g_presentation_obj_poll_menu_ban", 0xFFFFFF),
        (overlay_set_size, "$g_presentation_obj_poll_menu_ban", pos2),
        (position_set_y, pos1, ":cur_y"),
        (overlay_set_position, "$g_presentation_obj_poll_menu_ban", pos1),
        (val_add, ":cur_y", escape_menu_item_height),
      (else_try),
        (assign, "$g_presentation_obj_poll_menu_ban", -1),
      (try_end),
      # Kick poll (only if the server allows kick votes).
      (try_begin),
        (eq, "$g_kick_voteable", 1),
        (try_begin),
          (neq, "$g_game_type", "mt_no_money"),
          (assign, reg0, poll_cost_kick_player),
        (try_end),
        (str_store_string, s0, "str_choose_poll_kick"),
        (create_button_overlay, "$g_presentation_obj_poll_menu_kick", "str_s0__reg0_", tf_center_justify),
        (overlay_set_color, "$g_presentation_obj_poll_menu_kick", 0xFFFFFF),
        (overlay_set_size, "$g_presentation_obj_poll_menu_kick", pos2),
        (position_set_y, pos1, ":cur_y"),
        (overlay_set_position, "$g_presentation_obj_poll_menu_kick", pos1),
        (val_add, ":cur_y", escape_menu_item_height),
      (else_try),
        (assign, "$g_presentation_obj_poll_menu_kick", -1),
      (try_end),
      # Change-scene poll (only if the server allows scene votes).
      (try_begin),
        (eq, "$g_scenes_voteable", 1),
        (try_begin),
          (neq, "$g_game_type", "mt_no_money"),
          (assign, reg0, poll_cost_change_scene),
        (try_end),
        (str_store_string, s0, "str_choose_poll_scene"),
        (create_button_overlay, "$g_presentation_obj_poll_menu_scene", "str_s0__reg0_", tf_center_justify),
        (overlay_set_color, "$g_presentation_obj_poll_menu_scene", 0xFFFFFF),
        (overlay_set_size, "$g_presentation_obj_poll_menu_scene", pos2),
        (position_set_y, pos1, ":cur_y"),
        (overlay_set_position, "$g_presentation_obj_poll_menu_scene", pos1),
        (val_add, ":cur_y", escape_menu_item_height),
      (else_try),
        (assign, "$g_presentation_obj_poll_menu_scene", -1),
      (try_end),
      (create_text_overlay, reg0, "str_choose_a_poll_type", 0),
      (overlay_set_color, reg0, 0xFFFFFF),
      (position_set_x, pos1, 0),
      (position_set_y, pos1, ":cur_y"),
      (overlay_set_position, reg0, pos1),
      (overlay_set_size, reg0, pos2),
      (presentation_set_duration, 999999),
      ]),
    (ti_on_presentation_event_state_change,
      # Selecting a poll type hands off to the scene list or the player list,
      # parameterized through the $g_list_players_* globals documented below.
      [(store_trigger_param_1, ":object"),
       (try_begin),
         (eq, ":object", "$g_presentation_obj_poll_menu_scene"),
         (presentation_set_duration, 0),
         (start_presentation, "prsnt_list_scenes"),
       (else_try),
         (eq, ":object", "$g_presentation_obj_poll_menu_kick"),
         (presentation_set_duration, 0),
         (assign, "$g_list_players_action_string_id", "str_kick"),
         (assign, "$g_list_players_event", client_event_request_poll),
         (assign, "$g_list_players_event_value", poll_type_kick_player),
         (start_presentation, "prsnt_list_players"),
       (else_try),
         (eq, ":object", "$g_presentation_obj_poll_menu_ban"),
         (presentation_set_duration, 0),
         (assign, "$g_list_players_action_string_id", "str_ban_temp"),
         (assign, "$g_list_players_event", client_event_request_poll),
         (assign, "$g_list_players_event_value", poll_type_ban_player),
         (start_presentation, "prsnt_list_players"),
       (else_try),
         (eq, ":object", "$g_presentation_obj_poll_menu_faction_lord"),
         (presentation_set_duration, 0),
         (assign, "$g_list_players_action_string_id", "str_propose_as_lord"),
         (assign, "$g_list_players_event", client_event_request_poll),
         (assign, "$g_list_players_event_value", poll_type_faction_lord),
         (start_presentation, "prsnt_list_players"),
       (try_end),
       ]),
    (ti_on_presentation_run,
      # Escape dismisses the poll menu after a short delay.
      [(store_trigger_param_1, ":cur_time"),
       (try_begin),
         (key_clicked, key_escape),
         (gt, ":cur_time", 200),
         (presentation_set_duration, 0),
       (try_end),
       ]),
    ]),

  # $g_list_players_action_string_id: string for ending "Choose a player to ", describing the action
  # $g_list_players_event: network event number to
send to the server with the player id # $g_list_players_event_value: extra value to send with the network event above # $g_list_players_keep_open: 1 = don't end the presentation after selecting a player # $g_list_players_return_presentation: the presentation to return to after player selection or pressing escape ("list_players", prsntf_manual_end_only, 0, [(ti_on_presentation_load, [(set_fixed_point_multiplier, 1000), (create_mesh_overlay, reg0, "mesh_mp_ingame_menu"), (position_set_x, pos1, 250), (position_set_y, pos1, 80), (overlay_set_position, reg0, pos1), (position_set_x, pos1, 1000), (position_set_y, pos1, 1000), (overlay_set_size, reg0, pos1), (str_clear, s0), (create_text_overlay, reg0, s0, tf_scrollable_style_2), (position_set_x, pos1, 285), (position_set_y, pos1, 125), (overlay_set_position, reg0, pos1), (position_set_x, pos1, 405), (position_set_y, pos1, 500), (overlay_set_area_size, reg0, pos1), (set_container_overlay, reg0), (multiplayer_get_my_player, ":my_player_id"), (player_get_slot, "$g_list_players_faction_id", ":my_player_id", slot_player_faction_id), (try_begin), (eq, "$g_list_players_event", client_event_request_poll), (eq, "$g_list_players_event_value", poll_type_faction_lord), (try_begin), (player_is_admin, ":my_player_id"), (player_slot_eq, ":my_player_id", slot_player_admin_no_factions, 0), (assign, ":my_player_id", 0), # only add the requesting player to a lord poll list if an admin with permission (try_end), (else_try), (eq, "$g_list_players_event", client_event_faction_admin_action), (else_try), (assign, "$g_list_players_faction_id", -1), # show players from all factions for admin tools (this_or_next|eq, "$g_list_players_event_value", admin_action_fade_player_out), (eq, "$g_list_players_event_value", admin_action_freeze_player), (assign, ":my_player_id", 0), # only add the requesting player to the list for fade out and freeze tools (try_end), (val_max, "$g_list_players_action_string_id", 0), (str_store_string, s0, 
"$g_list_players_action_string_id"), (create_text_overlay, ":prompt_overlay_id", "str_choose_a_player_to_s0", 0), (overlay_set_color, ":prompt_overlay_id", 0xFFFFFF), (position_set_x, pos2, 900), (position_set_y, pos2, 900), (overlay_set_size, ":prompt_overlay_id", pos2), (position_set_x, pos2, 750), (position_set_y, pos2, 750), (assign, ":cur_y", 5), (assign, ":overlay_id", -1), (get_max_players, ":max_players"), (try_begin), # loop over all factions (eq, "$g_list_players_faction_id", -1), (assign, ":factions_end", factions_end), (else_try), # only one iteration of the loop, for the targeted faction (store_add, ":factions_end", factions_begin, 1), (try_end), (try_for_range, ":current_faction_id", factions_begin, ":factions_end"), (faction_slot_eq, ":current_faction_id", slot_faction_is_active, 1), (try_for_range, ":player_id", 1, ":max_players"), # loop over factions one by one, grouping their members together in the list (player_is_active, ":player_id"), (this_or_next|neq, "$g_list_players_faction_id", -1), (player_slot_eq, ":player_id", slot_player_faction_id, ":current_faction_id"), (try_begin), (neq, ":player_id", ":my_player_id"), (this_or_next|eq, "$g_list_players_faction_id", -1), (player_slot_eq, ":player_id", slot_player_faction_id, "$g_list_players_faction_id"), (str_store_player_username, s0, ":player_id"), (create_button_overlay, ":overlay_id", s0, 0), (overlay_set_size, ":overlay_id", pos2), (player_set_slot, ":player_id", slot_player_list_button_id, ":overlay_id"), # save the associated overlay id, for checking that the player hasn't disconnected and the id reused (val_add, ":cur_y", player_list_item_height), (else_try), (player_set_slot, ":player_id", slot_player_list_button_id, -1), (try_end), (try_end), (try_end), (position_set_x, pos1, 0), (position_set_y, pos1, ":cur_y"), (overlay_set_position, ":prompt_overlay_id", pos1), (val_sub, ":cur_y", 5), (val_add, ":prompt_overlay_id", 1), (store_add, ":overlay_id_end", ":overlay_id", 1), 
# NOTE(review): the reposition loop below assumes overlay ids were allocated sequentially, prompt first, then one button per listed player — it walks ":prompt_overlay_id"+1 .. ":overlay_id" top-down, subtracting player_list_item_height per entry.
(try_for_range, ":overlay_id", ":prompt_overlay_id", ":overlay_id_end"), (val_sub, ":cur_y", player_list_item_height), (position_set_y, pos1, ":cur_y"), (overlay_set_position, ":overlay_id", pos1), (try_end), (presentation_set_duration, 999999), ]), (ti_on_presentation_event_state_change, [(store_trigger_param_1, ":object"), (get_max_players, ":num_players"), (try_for_range, ":player_id", 1, ":num_players"), (player_is_active, ":player_id"), (player_slot_eq, ":player_id", slot_player_list_button_id, ":object"), # ensure that the player id represents the same one selected in the list (try_begin), (is_between, "$g_list_players_event", 0, 128), (multiplayer_send_2_int_to_server, "$g_list_players_event", "$g_list_players_event_value", ":player_id"), (try_end), (assign, ":num_players", 0), (try_begin), (eq, "$g_list_players_keep_open", 0), # target the selected player for other use by other functions (assign, "$g_target_player_id", ":player_id"), (assign, "$g_target_player_overlay_id", ":object"), (try_begin), (gt, "$g_list_players_return_presentation", 0), (neg|is_presentation_active, "$g_list_players_return_presentation"), (start_presentation, "$g_list_players_return_presentation"), (try_end), (assign, "$g_list_players_return_presentation", 0), (presentation_set_duration, 0), (try_end), (try_end), ]), (ti_on_presentation_run, [(store_trigger_param_1, ":cur_time"), (try_begin), (key_clicked, key_escape), (gt, ":cur_time", 200), (assign, "$g_target_player_id", 0), (assign, "$g_target_player_overlay_id", 0), (try_begin), (gt, "$g_list_players_return_presentation", 0), (neg|is_presentation_active, "$g_list_players_return_presentation"), (start_presentation, "$g_list_players_return_presentation"), (try_end), (assign, "$g_list_players_return_presentation", 0), (assign, "$g_list_players_keep_open", 0), (presentation_set_duration, 0), (else_try), # continuously update list entry colors (get_max_players, ":max_players"), (try_for_range, ":player_id", 1, ":max_players"), 
# Per-frame color refresh for each listed player's button: white by default, red (0xFF3333) when a faction-admin toggle is currently off for that player, or the player's faction color when the list spans all factions.
(player_is_active, ":player_id"), (player_get_slot, ":overlay_id", ":player_id", slot_player_list_button_id), (gt, ":overlay_id", -1), (assign, ":color", 0xFFFFFF), (try_begin), # for player slot toggling, update the player name color with the current setting (eq, "$g_list_players_event", client_event_faction_admin_action), (try_begin), (eq, "$g_list_players_event_value", faction_admin_action_toggle_player_door_key), (player_slot_eq, ":player_id", slot_player_has_faction_door_key, 1), (else_try), (eq, "$g_list_players_event_value", faction_admin_action_toggle_player_money_key), (player_slot_eq, ":player_id", slot_player_has_faction_money_key, 1), (else_try), (eq, "$g_list_players_event_value", faction_admin_action_toggle_player_item_key), (player_slot_eq, ":player_id", slot_player_has_faction_item_key, 1), (else_try), (eq, "$g_list_players_event_value", faction_admin_action_toggle_player_announce), (player_slot_eq, ":player_id", slot_player_can_faction_announce, 1), (else_try), (eq, "$g_list_players_event_value", faction_admin_action_mute_player), (player_slot_eq, ":player_id", slot_player_faction_chat_muted, 0), (else_try), (assign, ":color", 0xFF3333), (try_end), (else_try), # when including multiple factions, set to the player faction's color (eq, "$g_list_players_faction_id", -1), (player_get_slot, ":faction_id", ":player_id", slot_player_faction_id), (is_between, ":faction_id", factions_begin, factions_end), (faction_get_color, ":color", ":faction_id"), (try_end), (overlay_set_color, ":overlay_id", ":color"), (try_end), (try_end), ]), ]), ("list_scenes", prsntf_manual_end_only, 0, [(ti_on_presentation_load, [(set_fixed_point_multiplier, 1000), (create_mesh_overlay, reg0, "mesh_mp_ingame_menu"), (position_set_x, pos1, 250), (position_set_y, pos1, 80), (overlay_set_position, reg0, pos1), (position_set_x, pos1, 1000), (position_set_y, pos1, 1000), (overlay_set_size, reg0, pos1), (str_clear, s0), (create_text_overlay, reg0, s0, tf_scrollable_style_2), 
# "list_scenes": one button per scene (scenes_begin..scenes_end); button overlay ids are sequential from $g_presentation_obj_list_scenes_begin, which the event trigger below maps back to a scene id for a change-scene poll request.
(position_set_x, pos1, 285), (position_set_y, pos1, 125), (overlay_set_position, reg0, pos1), (position_set_x, pos1, 405), (position_set_y, pos1, 500), (overlay_set_area_size, reg0, pos1), (set_container_overlay, reg0), (init_position, pos2), (position_set_x, pos2, 900), (position_set_y, pos2, 900), (store_sub, ":num_scenes", scenes_end, scenes_begin), (store_mul, ":cur_y", ":num_scenes", escape_menu_item_height), (val_add, ":cur_y", 10), (create_text_overlay, reg0, "str_choose_a_scene", 0), (overlay_set_color, reg0, 0xFFFFFF), (position_set_x, pos1, 0), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (overlay_set_size, reg0, pos2), (val_sub, ":cur_y", escape_menu_item_height), (store_add, "$g_presentation_obj_list_scenes_begin", reg0, 1), (position_set_x, pos1, 130), (assign, ":scene_string_id", scene_names_begin), (try_for_range, ":unused", scenes_begin, scenes_end), (create_button_overlay, ":overlay_id", ":scene_string_id", tf_center_justify), (overlay_set_color, ":overlay_id", 0xFFFFFF), (position_set_y, pos1, ":cur_y"), (overlay_set_position, ":overlay_id", pos1), (overlay_set_size, ":overlay_id", pos2), (val_sub, ":cur_y", escape_menu_item_height), (val_add, ":scene_string_id", 1), (try_end), (presentation_set_duration, 999999), ]), (ti_on_presentation_event_state_change, [(store_trigger_param_1, ":object"), (try_begin), (ge, ":object", "$g_presentation_obj_list_scenes_begin"), (store_sub, ":scene_id", ":object", "$g_presentation_obj_list_scenes_begin"), (val_add, ":scene_id", scenes_begin), (lt, ":scene_id", scenes_end), (multiplayer_send_2_int_to_server, client_event_request_poll, poll_type_change_scene, ":scene_id"), (presentation_set_duration, 0), (try_end), ]), (ti_on_presentation_run, [(store_trigger_param_1, ":cur_time"), (try_begin), (key_clicked, key_escape), (gt, ":cur_time", 200), (presentation_set_duration, 0), (try_end), ]), ]), # $g_list_factions_event: network event number to send to the server with the faction id # 
$g_list_factions_event_value: extra value to send with the network event above # $g_list_factions_return_presentation: the presentation to return to after faction selection or pressing escape ("list_factions", prsntf_manual_end_only, 0, [(ti_on_presentation_load, [(set_fixed_point_multiplier, 1000), (create_mesh_overlay, reg1, "mesh_mp_ingame_menu"), (position_set_x, pos1, 250), (position_set_y, pos1, 80), (overlay_set_position, reg1, pos1), (position_set_x, pos1, 1000), (position_set_y, pos1, 1000), (overlay_set_size, reg1, pos1), (str_clear, s0), (create_text_overlay, reg1, s0, tf_scrollable_style_2), (position_set_x, pos1, 285), (position_set_y, pos1, 125), (overlay_set_position, reg1, pos1), (position_set_x, pos1, 405), (position_set_y, pos1, 500), (overlay_set_area_size, reg1, pos1), (set_container_overlay, reg1), (assign, ":num_factions", 0), (multiplayer_get_my_player, ":my_player_id"), (player_get_slot, ":my_faction_id", ":my_player_id", slot_player_faction_id), (try_for_range, ":faction_id", castle_factions_begin, factions_end), (faction_set_slot, ":faction_id", slot_faction_list_button_id, -1), (faction_slot_eq, ":faction_id", slot_faction_is_active, 1), (neq, ":faction_id", ":my_faction_id"), (store_add, ":relation_slot", ":faction_id", slot_faction_relations_begin), (assign, ":show", 1), (try_begin), (eq, "$g_list_factions_event_value", faction_admin_action_set_relation_hostile), (faction_slot_ge, ":my_faction_id", ":relation_slot", 1), (else_try), (eq, "$g_list_factions_event_value", faction_admin_action_set_relation_peaceful), (neg|faction_slot_ge, ":my_faction_id", ":relation_slot", 1), (else_try), (assign, ":show", 0), (try_end), (eq, ":show", 1), (faction_set_slot, ":faction_id", slot_faction_list_button_id, 1), (val_add, ":num_factions", 1), (try_end), (store_mul, ":cur_y", ":num_factions", faction_menu_item_height), (val_add, ":cur_y", 10), (position_set_x, pos11, 900), (position_set_y, pos11, 900), (create_text_overlay, reg1, 
"str_choose_a_faction", 0), (overlay_set_color, reg1, 0xFFFFFF), (position_set_x, pos1, 0), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg1, pos1), (overlay_set_size, reg1, pos11), (val_sub, ":cur_y", (faction_menu_item_height / 2) + 10), (position_set_x, pos12, 50), (position_set_y, pos12, 50), (store_add, ":banner_y", ":cur_y", faction_menu_item_height / 2), (position_set_x, pos2, 25), (position_set_x, pos1, 50), (try_for_range, ":faction_id", castle_factions_begin, factions_end), (faction_slot_ge, ":faction_id", slot_faction_list_button_id, 1), (faction_get_slot, ":banner_mesh", ":faction_id", slot_faction_banner_mesh), (create_image_button_overlay, reg1, ":banner_mesh", ":banner_mesh"), (position_set_y, pos2, ":banner_y"), (overlay_set_position, reg1, pos2), (overlay_set_size, reg1, pos12), (val_sub, ":banner_y", faction_menu_item_height), (str_store_faction_name, s0, ":faction_id"), (create_button_overlay, reg1, s0), (faction_get_color, ":color", ":faction_id"), (overlay_set_color, reg1, ":color"), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg1, pos1), (overlay_set_size, reg1, pos11), (val_sub, ":cur_y", faction_menu_item_height), (faction_set_slot, ":faction_id", slot_faction_list_button_id, reg1), (try_end), (presentation_set_duration, 999999), ]), (ti_on_presentation_event_state_change, [(store_trigger_param_1, ":overlay_id"), (store_add, ":other_overlay_id", ":overlay_id", 1), (assign, ":loop_end", factions_end), (try_for_range, ":faction_id", castle_factions_begin, ":loop_end"), (this_or_next|faction_slot_eq, ":faction_id", slot_faction_list_button_id, ":overlay_id"), (faction_slot_eq, ":faction_id", slot_faction_list_button_id, ":other_overlay_id"), (presentation_set_duration, 0), (assign, ":loop_end", 0), (try_begin), (is_between, "$g_list_factions_event", 0, 128), (multiplayer_send_2_int_to_server, "$g_list_factions_event", "$g_list_factions_event_value", ":faction_id"), (try_end), (try_begin), (gt, 
"$g_list_factions_return_presentation", 0), (neg|is_presentation_active, "$g_list_factions_return_presentation"), (start_presentation, "$g_list_factions_return_presentation"), (try_end), (assign, "$g_list_factions_return_presentation", 0), (try_end), ]), (ti_on_presentation_run, [(store_trigger_param_1, ":cur_time"), (try_begin), (key_clicked, key_escape), (gt, ":cur_time", 200), (try_begin), (gt, "$g_list_factions_return_presentation", 0), (neg|is_presentation_active, "$g_list_factions_return_presentation"), (start_presentation, "$g_list_factions_return_presentation"), (try_end), (assign, "$g_list_factions_return_presentation", 0), (presentation_set_duration, 0), (try_end), ]), ]), ("show_poll", prsntf_read_only|prsntf_manual_end_only, 0, [(ti_on_presentation_load, [(set_fixed_point_multiplier, 1000), (create_mesh_overlay, reg1, "mesh_white_plane"), (overlay_set_color, reg1, 0x110000), (overlay_set_alpha, reg1, 0x44), (position_set_x, pos1, 10), (position_set_y, pos1, 10), (overlay_set_position, reg1, pos1), (position_set_x, pos10, 40500), (position_set_y, pos10, 2500), (overlay_set_size, reg1, pos10), (position_set_x, pos10, 850), (position_set_y, pos10, 850), (try_begin), (player_is_active, "$g_poll_requester_player_id"), (str_store_player_username, s1, "$g_poll_requester_player_id"), (else_try), (str_store_string, s1, "str_departed_player"), (try_end), (create_text_overlay, reg1, "str_poll_requester_keys", tf_center_justify|tf_with_outline), (overlay_set_color, reg1, 0xFFFFFF), (position_set_x, pos1, 350), (position_set_y, pos1, 15), (overlay_set_position, reg1, pos1), (overlay_set_size, reg1, pos10), (try_begin), (this_or_next|eq, "$g_poll_type", poll_type_kick_player), (this_or_next|eq, "$g_poll_type", poll_type_ban_player), (eq, "$g_poll_type", poll_type_faction_lord), (try_begin), (player_is_active, "$g_poll_value_1"), (str_store_player_username, s1, "$g_poll_value_1"), (else_try), (str_store_string, s1, "str_departed_player"), (multiplayer_get_my_player, 
"$g_poll_value_1"), (try_end), (try_end), (try_begin), (eq, "$g_poll_type", poll_type_change_scene), (store_sub, ":string_id", "$g_poll_value_1", scenes_begin), (val_add, ":string_id", scene_names_begin), (str_store_string, s1, ":string_id"), (assign, ":string_id", "str_poll_change_scene"), (else_try), (eq, "$g_poll_type", poll_type_kick_player), (assign, ":string_id", "str_poll_kick_player"), (else_try), (eq, "$g_poll_type", poll_type_ban_player), (assign, ":string_id", "str_poll_ban_player"), (else_try), (eq, "$g_poll_type", poll_type_faction_lord), (player_get_slot, ":faction_id", "$g_poll_value_1", slot_player_faction_id), (str_store_faction_name, s2, ":faction_id"), (assign, ":string_id", "str_poll_faction_lord"), (try_end), (create_text_overlay, reg1, ":string_id", tf_center_justify|tf_with_outline), (overlay_set_color, reg1, 0xFFFFFF), (position_set_x, pos1, 415), (position_set_y, pos1, 35), (overlay_set_position, reg1, pos1), (overlay_set_size, reg1, pos10), (store_mission_timer_a, ":current_time"), (store_sub, "$g_poll_displayed_seconds", "$g_poll_end_time", ":current_time"), (assign, reg0, "$g_poll_displayed_seconds"), (create_text_overlay, "$g_presentation_obj_poll_seconds", "str_poll_time_left", tf_right_align|tf_single_line|tf_with_outline), (overlay_set_color, "$g_presentation_obj_poll_seconds", 0xFFFFFF), (position_set_x, pos1, 860), (position_set_y, pos1, 15), (overlay_set_position, "$g_presentation_obj_poll_seconds", pos1), (overlay_set_size, "$g_presentation_obj_poll_seconds", pos10), (omit_key_once, key_f7), (omit_key_once, key_f8), (omit_key_once, key_f9), (assign, "$g_hide_poll", 0), (presentation_set_duration, poll_time_duration * 100 + 200), ]), (ti_on_presentation_run, [(store_trigger_param_1, ":current_time"), (try_begin), (gt, ":current_time", 100), (assign, ":vote", -9999), (try_begin), (key_clicked, key_f7), (assign, ":vote", poll_vote_abstain), (else_try), (key_is_down, key_right_shift), (multiplayer_get_my_player, ":player_id"), 
(player_is_admin, ":player_id"), (player_slot_eq, ":player_id", slot_player_admin_no_override_poll, 0), (try_begin), (key_clicked, key_f9), (assign, ":vote", poll_vote_admin_yes), (else_try), (key_clicked, key_f8), (assign, ":vote", poll_vote_admin_no), (try_end), (else_try), (key_clicked, key_f9), (assign, ":vote", poll_vote_yes), (else_try), (key_clicked, key_f8), (assign, ":vote", poll_vote_no), (try_end), (neq, ":vote", -9999), (multiplayer_send_int_to_server, client_event_poll_vote, ":vote"), (clear_omitted_keys), (presentation_set_duration, 0), (try_end), (store_mission_timer_a, ":mission_timer"), (store_sub, reg0, "$g_poll_end_time", ":mission_timer"), (try_begin), (this_or_next|eq, "$g_hide_poll", 1), (lt, reg0, 0), (clear_omitted_keys), (presentation_set_duration, 0), (else_try), (neq, reg0, "$g_poll_displayed_seconds"), (overlay_set_text, "$g_presentation_obj_poll_seconds", "str_poll_time_left"), (assign, "$g_poll_displayed_seconds", reg0), (try_end), ]), ]), ("admin_menu", prsntf_manual_end_only, 0, [(ti_on_presentation_load, [(set_fixed_point_multiplier, 1000), (multiplayer_get_my_player, ":my_player_id"), (player_is_admin, ":my_player_id"), (create_mesh_overlay, reg0, "mesh_mp_ingame_menu"), (position_set_x, pos1, 250), (position_set_y, pos1, 80), (overlay_set_position, reg0, pos1), (position_set_x, pos1, 1000), (position_set_y, pos1, 1000), (overlay_set_size, reg0, pos1), (str_clear, s0), (create_text_overlay, reg0, s0, tf_scrollable_style_2), (position_set_x, pos1, 285), (position_set_y, pos1, 125), (overlay_set_position, reg0, pos1), (position_set_x, pos1, 405), (position_set_y, pos1, 500), (overlay_set_area_size, reg0, pos1), (set_container_overlay, reg0), (init_position, pos2), (position_set_x, pos2, 900), (position_set_y, pos2, 900), (assign, ":cur_y", 10), (position_set_x, pos1, 130), (try_begin), (player_slot_eq, ":my_player_id", slot_player_admin_no_factions, 0), (player_get_slot, ":faction_id", ":my_player_id"), (is_between, ":faction_id", 
castle_factions_begin, factions_end), (try_begin), (faction_slot_eq, ":faction_id", slot_faction_is_locked, 0), (create_button_overlay, reg0, "str_lock_current_faction"), (else_try), (create_button_overlay, reg0, "str_unlock_current_faction"), (try_end), (assign, "$g_presentation_obj_admin_menu_lock_faction", reg0), (overlay_set_color, reg0, 0xFFFFFF), (overlay_set_size, reg0, pos2), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (val_add, ":cur_y", escape_menu_item_height), (else_try), (assign, "$g_presentation_obj_admin_menu_lock_faction", -1), (try_end), (try_begin), (player_slot_eq, ":my_player_id", slot_player_admin_no_ships, 0), (create_button_overlay, reg0, "str_reset_sunken_ships"), (assign, "$g_presentation_obj_admin_menu_reset_ships", reg0), (overlay_set_color, reg0, 0xFFFFFF), (overlay_set_size, reg0, pos2), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (val_add, ":cur_y", escape_menu_item_height), (create_button_overlay, reg0, "str_teleport_to_ships"), (assign, "$g_presentation_obj_admin_menu_teleport_to_ships", reg0), (overlay_set_color, reg0, 0xFFFFFF), (overlay_set_size, reg0, pos2), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (val_add, ":cur_y", escape_menu_item_height), (else_try), (assign, "$g_presentation_obj_admin_menu_reset_ships", -1), (assign, "$g_presentation_obj_admin_menu_teleport_to_ships", -1), (try_end), (try_begin), (player_slot_eq, ":my_player_id", slot_player_admin_no_animals, 0), (create_button_overlay, reg0, "str_remove_stray_horses"), (assign, "$g_presentation_obj_admin_menu_remove_stray_horses", reg0), (overlay_set_color, reg0, 0xFFFFFF), (overlay_set_size, reg0, pos2), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (val_add, ":cur_y", escape_menu_item_height), (else_try), (assign, "$g_presentation_obj_admin_menu_remove_stray_horses", -1), (try_end), (try_begin), (player_slot_eq, ":my_player_id", slot_player_admin_no_admin_items, 0), 
(create_button_overlay, reg0, "str_remove_admin_horses"), (assign, "$g_presentation_obj_admin_menu_remove_horses", reg0), (overlay_set_color, reg0, 0xFFFFFF), (overlay_set_size, reg0, pos2), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (val_add, ":cur_y", escape_menu_item_height), (create_button_overlay, reg0, "str_spawn_admin_horse"), (assign, "$g_presentation_obj_admin_menu_admin_horse", reg0), (overlay_set_color, reg0, 0xFFFFFF), (overlay_set_size, reg0, pos2), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (val_add, ":cur_y", escape_menu_item_height), (else_try), (assign, "$g_presentation_obj_admin_menu_remove_horses", -1), (assign, "$g_presentation_obj_admin_menu_admin_horse", -1), (try_end), (try_begin), (player_slot_eq, ":my_player_id", slot_player_admin_no_godlike_troop, 0), (create_button_overlay, reg0, "str_become_godlike"), (assign, "$g_presentation_obj_admin_menu_become_godlike", reg0), (overlay_set_color, reg0, 0xFFFFFF), (overlay_set_size, reg0, pos2), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (val_add, ":cur_y", escape_menu_item_height), (else_try), (assign, "$g_presentation_obj_admin_menu_become_godlike", -1), (try_end), (try_begin), (player_slot_eq, ":my_player_id", slot_player_admin_no_heal_self, 0), (create_button_overlay, reg0, "str_refill_health"), (assign, "$g_presentation_obj_admin_menu_refill_health", reg0), (overlay_set_color, reg0, 0xFFFFFF), (overlay_set_size, reg0, pos2), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (val_add, ":cur_y", escape_menu_item_height), (else_try), (assign, "$g_presentation_obj_admin_menu_refill_health", -1), (try_end), (try_begin), (player_slot_eq, ":my_player_id", slot_player_admin_no_admin_items, 0), (create_button_overlay, reg0, "str_become_invisible"), (assign, "$g_presentation_obj_admin_menu_become_invisible", reg0), (overlay_set_color, reg0, 0xFFFFFF), (overlay_set_size, reg0, pos2), (position_set_y, pos1, 
":cur_y"), (overlay_set_position, reg0, pos1), (val_add, ":cur_y", escape_menu_item_height), (create_button_overlay, reg0, "str_equip_admin_armor"), (assign, "$g_presentation_obj_admin_menu_admin_armor", reg0), (overlay_set_color, reg0, 0xFFFFFF), (overlay_set_size, reg0, pos2), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (val_add, ":cur_y", escape_menu_item_height), (else_try), (assign, "$g_presentation_obj_admin_menu_become_invisible", -1), (assign, "$g_presentation_obj_admin_menu_admin_armor", -1), (try_end), (try_begin), (player_slot_eq, ":my_player_id", slot_player_admin_no_teleport_self, 0), (create_button_overlay, reg0, "str_teleport_forwards"), (assign, "$g_presentation_obj_admin_menu_teleport_forwards", reg0), (overlay_set_color, reg0, 0xFFFFFF), (overlay_set_size, reg0, pos2), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (val_add, ":cur_y", escape_menu_item_height), (create_button_overlay, reg0, "str_teleport_behind_player"), (assign, "$g_presentation_obj_admin_menu_teleport_behind_player", reg0), (overlay_set_color, reg0, 0xFFFFFF), (overlay_set_size, reg0, pos2), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (val_add, ":cur_y", escape_menu_item_height), (create_button_overlay, reg0, "str_teleport_to_player"), (assign, "$g_presentation_obj_admin_menu_teleport_to_player", reg0), (overlay_set_color, reg0, 0xFFFFFF), (overlay_set_size, reg0, pos2), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (val_add, ":cur_y", escape_menu_item_height), (else_try), (assign, "$g_presentation_obj_admin_menu_teleport_forwards", -1), (assign, "$g_presentation_obj_admin_menu_teleport_behind_player", -1), (assign, "$g_presentation_obj_admin_menu_teleport_to_player", -1), (try_end), (try_begin), (player_slot_eq, ":my_player_id", slot_player_admin_no_freeze, 0), (create_button_overlay, reg0, "str_freeze_player"), (assign, "$g_presentation_obj_admin_menu_freeze_player", reg0), 
(overlay_set_color, reg0, 0xFFFFFF), (overlay_set_size, reg0, pos2), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (val_add, ":cur_y", escape_menu_item_height), (else_try), (assign, "$g_presentation_obj_admin_menu_freeze_player", -1), (try_end), (try_begin), (player_slot_eq, ":my_player_id", slot_player_admin_no_kill_fade, 0), (create_button_overlay, reg0, "str_fade_player_out"), (assign, "$g_presentation_obj_admin_menu_fade_player_out", reg0), (overlay_set_color, reg0, 0xFFFFFF), (overlay_set_size, reg0, pos2), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (val_add, ":cur_y", escape_menu_item_height), (create_button_overlay, reg0, "str_kill_player"), (assign, "$g_presentation_obj_admin_menu_kill_player", reg0), (overlay_set_color, reg0, 0xFFFFFF), (overlay_set_size, reg0, pos2), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (val_add, ":cur_y", escape_menu_item_height), (else_try), (assign, "$g_presentation_obj_admin_menu_fade_player_out", -1), (assign, "$g_presentation_obj_admin_menu_kill_player", -1), (try_end), (try_begin), (player_slot_eq, ":my_player_id", slot_player_admin_no_mute, 0), (create_button_overlay, reg0, "str_mute_player"), (assign, "$g_presentation_obj_admin_menu_mute_player", reg0), (overlay_set_color, reg0, 0xFFFFFF), (overlay_set_size, reg0, pos2), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (val_add, ":cur_y", escape_menu_item_height), (else_try), (assign, "$g_presentation_obj_admin_menu_mute_player", -1), (try_end), (try_begin), (player_slot_eq, ":my_player_id", slot_player_admin_no_permanent_ban, 0), (create_button_overlay, reg0, "str_ban_player_perm"), (assign, "$g_presentation_obj_admin_menu_ban_player_perm", reg0), (overlay_set_color, reg0, 0xFFFFFF), (overlay_set_size, reg0, pos2), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (val_add, ":cur_y", escape_menu_item_height), (else_try), (assign, 
"$g_presentation_obj_admin_menu_ban_player_perm", -1), (try_end), (try_begin), #(player_slot_eq, ":my_player_id", slot_player_admin_no_temporary_ban, 0), (create_button_overlay, reg0, "str_ban_player_temp", 0), (assign, "$g_presentation_obj_admin_menu_ban_player_temp", reg0), (overlay_set_color, reg0, 0xFFFFFF), (overlay_set_size, reg0, pos2), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (val_add, ":cur_y", escape_menu_item_height), (else_try), (assign, "$g_presentation_obj_admin_menu_ban_player_temp", -1), (try_end), (try_begin), (player_slot_eq, ":my_player_id", slot_player_admin_no_kick, 0), (create_button_overlay, reg0, "str_kick_player", 0), (assign, "$g_presentation_obj_admin_menu_kick_player", reg0), (overlay_set_color, reg0, 0xFFFFFF), (overlay_set_size, reg0, pos2), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (val_add, ":cur_y", escape_menu_item_height), (else_try), (assign, "$g_presentation_obj_admin_menu_kick_player", -1), (try_end), (try_begin), (player_is_active, "$g_target_player_id"), (player_slot_eq, "$g_target_player_id", slot_player_list_button_id, -1), (str_store_player_username, s1, "$g_target_player_id"), (str_store_string, s0, "str_choose_an_option_targeting_s1"), (else_try), (str_store_string, s0, "str_choose_an_option"), (try_end), (create_text_overlay, reg0, s0, 0), (overlay_set_color, reg0, 0xFFFFFF), (overlay_set_size, reg0, pos2), (position_set_x, pos1, 0), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (presentation_set_duration, 999999), ]), (ti_on_presentation_event_state_change, [(store_trigger_param_1, ":object"), (try_begin), (eq, ":object", "$g_presentation_obj_admin_menu_kick_player"), (assign, ":action", admin_action_kick_player), (assign, "$g_list_players_action_string_id", "str_kick"), #(else_try), #(eq, ":object", "$g_presentation_obj_admin_menu_ban_player_temp"), #(assign, ":action", admin_action_ban_player_temp), #(assign, 
"$g_list_players_action_string_id", "str_ban_temp"), (else_try), (eq, ":object", "$g_presentation_obj_admin_menu_ban_player_perm"), (assign, ":action", admin_action_ban_player_perm), (assign, "$g_list_players_action_string_id", "str_ban_perm"), (else_try), (eq, ":object", "$g_presentation_obj_admin_menu_mute_player"), (assign, ":action", admin_action_mute_player), (assign, "$g_list_players_action_string_id", "str_mute"), (else_try), (eq, ":object", "$g_presentation_obj_admin_menu_kill_player"), (assign, ":action", admin_action_kill_player), (assign, "$g_list_players_action_string_id", "str_kill"), (else_try), (eq, ":object", "$g_presentation_obj_admin_menu_fade_player_out"), (assign, ":action", admin_action_fade_player_out), (assign, "$g_list_players_action_string_id", "str_fade_out"), (else_try), (eq, ":object", "$g_presentation_obj_admin_menu_freeze_player"), (assign, ":action", admin_action_freeze_player), (assign, "$g_list_players_action_string_id", "str_freeze"), (else_try), (eq, ":object", "$g_presentation_obj_admin_menu_teleport_to_player"), (assign, ":action", admin_action_teleport_to_player), (assign, "$g_list_players_action_string_id", "str_teleport_to"), (else_try), (eq, ":object", "$g_presentation_obj_admin_menu_teleport_behind_player"), (assign, ":action", admin_action_teleport_behind_player), (assign, "$g_list_players_action_string_id", "str_teleport_behind"), (else_try), (assign, ":action", -1), (eq, ":object", "$g_presentation_obj_admin_menu_teleport_forwards"), (multiplayer_send_int_to_server, client_event_admin_action, admin_action_teleport_forwards), (else_try), (eq, ":object", "$g_presentation_obj_admin_menu_admin_armor"), (multiplayer_send_int_to_server, client_event_admin_action, admin_action_get_armor), (else_try), (eq, ":object", "$g_presentation_obj_admin_menu_become_invisible"), (multiplayer_send_int_to_server, client_event_admin_action, admin_action_get_invisible), (else_try), (eq, ":object", 
"$g_presentation_obj_admin_menu_refill_health"), (multiplayer_send_int_to_server, client_event_admin_action, admin_action_refill_health), (else_try), (eq, ":object", "$g_presentation_obj_admin_menu_become_godlike"), (multiplayer_send_int_to_server, client_event_admin_action, admin_action_become_godlike), (else_try), (eq, ":object", "$g_presentation_obj_admin_menu_admin_horse"), (multiplayer_send_int_to_server, client_event_admin_action, admin_action_get_horse), (else_try), (eq, ":object", "$g_presentation_obj_admin_menu_remove_horses"), (multiplayer_send_int_to_server, client_event_admin_action, admin_action_remove_horses), (else_try), (eq, ":object", "$g_presentation_obj_admin_menu_remove_stray_horses"), (multiplayer_send_int_to_server, client_event_admin_action, admin_action_remove_stray_horses), (else_try), (eq, ":object", "$g_presentation_obj_admin_menu_teleport_to_ships"), (multiplayer_send_int_to_server, client_event_admin_action, admin_action_teleport_to_ships), (else_try), (eq, ":object", "$g_presentation_obj_admin_menu_reset_ships"), (multiplayer_send_int_to_server, client_event_admin_action, admin_action_reset_ships), (else_try), (eq, ":object", "$g_presentation_obj_admin_menu_lock_faction"), (multiplayer_send_int_to_server, client_event_admin_action, admin_action_lock_faction), (else_try), (eq, ":object", "$g_presentation_obj_admin_menu_ban_player_temp"), (presentation_set_duration, 0), (start_presentation, "prsnt_temp_db_ban"), (try_end), (gt, ":action", -1), (presentation_set_duration, 0), (try_begin), (player_is_active, "$g_target_player_id"), (player_slot_eq, "$g_target_player_id", slot_player_list_button_id, -1), (multiplayer_send_2_int_to_server, client_event_admin_action, ":action", "$g_target_player_id"), (else_try), (assign, "$g_list_players_event", client_event_admin_action), (assign, "$g_list_players_event_value", ":action"), (assign, "$g_list_players_return_presentation", "prsnt_admin_menu"), (start_presentation, "prsnt_list_players"), 
(try_end), ]), (ti_on_presentation_run, [(store_trigger_param_1, ":cur_time"), (try_begin), (key_clicked, key_escape), (gt, ":cur_time", 200), (presentation_set_duration, 0), (try_end), ]), ]), ("temp_db_ban", prsntf_manual_end_only, 0, [(ti_on_presentation_load, [(set_fixed_point_multiplier, 1000), (multiplayer_get_my_player, ":my_player_id"), (player_is_admin, ":my_player_id"), (create_mesh_overlay, reg0, "mesh_mp_ingame_menu"), (position_set_x, pos1, 250), (position_set_y, pos1, 80), (overlay_set_position, reg0, pos1), (position_set_x, pos1, 1000), (position_set_y, pos1, 1000), (overlay_set_size, reg0, pos1), (str_clear, s0), (create_text_overlay, reg0, s0, tf_scrollable_style_2), (position_set_x, pos1, 285), (position_set_y, pos1, 125), (overlay_set_position, reg0, pos1), (position_set_x, pos1, 405), (position_set_y, pos1, 500), (overlay_set_area_size, reg0, pos1), (set_container_overlay, reg0), (init_position, pos2), (position_set_x, pos2, 900), (position_set_y, pos2, 900), (assign, ":cur_y", 10), (position_set_x, pos1, 130), (try_begin), #(player_slot_eq, ":my_player_id", slot_player_admin_temp_ban_1m, 0), (player_slot_eq, ":my_player_id", slot_player_admin_no_temporary_ban, 0), (create_button_overlay, reg0, "str_admin_temp_1m"), (assign, "$g_presentation_obj_admin_temp_1m", reg0), (overlay_set_color, reg0, 0xFFFFFF), (overlay_set_size, reg0, pos2), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (val_add, ":cur_y", escape_menu_item_height), #(player_slot_eq, ":my_player_id", slot_player_admin_temp_ban_3w, 0), (create_button_overlay, reg0, "str_admin_temp_3w"), (assign, "$g_presentation_obj_admin_temp_3w", reg0), (overlay_set_color, reg0, 0xFFFFFF), (overlay_set_size, reg0, pos2), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (val_add, ":cur_y", escape_menu_item_height), #(player_slot_eq, ":my_player_id", slot_player_admin_temp_ban_2w, 0), (create_button_overlay, reg0, "str_admin_temp_2w"), (assign, 
"$g_presentation_obj_admin_temp_2w", reg0), (overlay_set_color, reg0, 0xFFFFFF), (overlay_set_size, reg0, pos2), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (val_add, ":cur_y", escape_menu_item_height), #(player_slot_eq, ":my_player_id", slot_player_admin_temp_ban_1w, 0), (create_button_overlay, reg0, "str_admin_temp_1w"), (assign, "$g_presentation_obj_admin_temp_1w", reg0), (overlay_set_color, reg0, 0xFFFFFF), (overlay_set_size, reg0, pos2), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (val_add, ":cur_y", escape_menu_item_height), #(player_slot_eq, ":my_player_id", slot_player_admin_temp_ban_3d, 0), (create_button_overlay, reg0, "str_admin_temp_3d"), (assign, "$g_presentation_obj_admin_temp_3d", reg0), (overlay_set_color, reg0, 0xFFFFFF), (overlay_set_size, reg0, pos2), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (val_add, ":cur_y", escape_menu_item_height), #(player_slot_eq, ":my_player_id", slot_player_admin_temp_ban_1d, 0), (create_button_overlay, reg0, "str_admin_temp_1d"), (assign, "$g_presentation_obj_admin_temp_1d", reg0), (overlay_set_color, reg0, 0xFFFFFF), (overlay_set_size, reg0, pos2), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (val_add, ":cur_y", escape_menu_item_height), #(player_slot_eq, ":my_player_id", slot_player_admin_temp_ban_12h, 0), (create_button_overlay, reg0, "str_admin_temp_12h"), (assign, "$g_presentation_obj_admin_temp_12h", reg0), (overlay_set_color, reg0, 0xFFFFFF), (overlay_set_size, reg0, pos2), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (val_add, ":cur_y", escape_menu_item_height), #(player_slot_eq, ":my_player_id", slot_player_admin_temp_ban_3h, 0), (create_button_overlay, reg0, "str_admin_temp_3h"), (assign, "$g_presentation_obj_admin_temp_3h", reg0), (overlay_set_color, reg0, 0xFFFFFF), (overlay_set_size, reg0, pos2), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (val_add, 
":cur_y", escape_menu_item_height), #(player_slot_eq, ":my_player_id", slot_player_admin_temp_ban_1h, 0), (create_button_overlay, reg0, "str_admin_temp_1h"), (assign, "$g_presentation_obj_admin_temp_1h", reg0), (overlay_set_color, reg0, 0xFFFFFF), (overlay_set_size, reg0, pos2), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (val_add, ":cur_y", escape_menu_item_height), (try_end), (try_begin), (player_is_active, "$g_target_player_id"), (player_slot_eq, "$g_target_player_id", slot_player_list_button_id, -1), (str_store_player_username, s1, "$g_target_player_id"), (str_store_string, s0, "str_choose_an_option_targeting_s1"), (else_try), (str_store_string, s0, "str_choose_an_option"), (try_end), (create_text_overlay, reg0, s0, 0), (overlay_set_color, reg0, 0xFFFFFF), (overlay_set_size, reg0, pos2), (position_set_x, pos1, 0), (position_set_y, pos1, ":cur_y"), (overlay_set_position, reg0, pos1), (presentation_set_duration, 999999), ]), (ti_on_presentation_event_state_change, [(store_trigger_param_1, ":object"), (try_begin), (eq, ":object", "$g_presentation_obj_admin_temp_1h"), (assign, ":action", admin_action_temp_ban_1h), (assign, "$g_list_players_action_string_id", "str_ban_temp"), (else_try), (eq, ":object", "$g_presentation_obj_admin_temp_3h"), (assign, ":action", admin_action_temp_ban_3h), (assign, "$g_list_players_action_string_id", "str_ban_temp"), (else_try), (eq, ":object", "$g_presentation_obj_admin_temp_12h"), (assign, ":action", admin_action_temp_ban_12h), (assign, "$g_list_players_action_string_id", "str_ban_temp"), (else_try), (eq, ":object", "$g_presentation_obj_admin_temp_1d"), (assign, ":action", admin_action_temp_ban_1d), (assign, "$g_list_players_action_string_id", "str_ban_temp"), (else_try), (eq, ":object", "$g_presentation_obj_admin_temp_3d"), (assign, ":action", admin_action_temp_ban_3d), (assign, "$g_list_players_action_string_id", "str_ban_temp"), (else_try), (eq, ":object", "$g_presentation_obj_admin_temp_1w"), (assign, 
":action", admin_action_temp_ban_1w), (assign, "$g_list_players_action_string_id", "str_ban_temp"), (else_try), (eq, ":object", "$g_presentation_obj_admin_temp_2w"), (assign, ":action", admin_action_temp_ban_2w), (assign, "$g_list_players_action_string_id", "str_ban_temp"), (else_try), (eq, ":object", "$g_presentation_obj_admin_temp_3w"), (assign, ":action", admin_action_temp_ban_3w), (assign, "$g_list_players_action_string_id", "str_ban_temp"), (else_try), (eq, ":object", "$g_presentation_obj_admin_temp_1m"), (assign, ":action", admin_action_temp_ban_1m), (assign, "$g_list_players_action_string_id", "str_ban_temp"), (try_end), (gt, ":action", -1), (presentation_set_duration, 0), (try_begin), (player_is_active, "$g_target_player_id"), (player_slot_eq, "$g_target_player_id", slot_player_list_button_id, -1), (multiplayer_send_2_int_to_server, client_event_admin_action, ":action", "$g_target_player_id"), (else_try), (assign, "$g_list_players_event", client_event_admin_action), (assign, "$g_list_players_event_value", ":action"), (assign, "$g_list_players_return_presentation", "prsnt_admin_menu"), (start_presentation, "prsnt_list_players"), (try_end), ]), (ti_on_presentation_run, [(store_trigger_param_1, ":cur_time"), (try_begin), (key_clicked, key_escape), (gt, ":cur_time", 200), (presentation_set_duration, 0), (try_end), ]), ]), ("faction_admin_menu", prsntf_manual_end_only, 0, [(ti_on_presentation_load, [(set_fixed_point_multiplier, 1000), (create_mesh_overlay, reg0, "mesh_mp_ingame_menu"), (position_set_x, pos1, 250), (position_set_y, pos1, 80), (overlay_set_position, reg0, pos1), (position_set_x, pos1, 1000), (position_set_y, pos1, 1000), (overlay_set_size, reg0, pos1), (str_clear, s0), (create_text_overlay, reg0, s0, tf_scrollable_style_2), (position_set_x, pos1, 285), (position_set_y, pos1, 125), (overlay_set_position, reg0, pos1), (position_set_x, pos1, 405), (position_set_y, pos1, 500), (overlay_set_area_size, reg0, pos1), (set_container_overlay, reg0), 
(init_position, pos2), (position_set_x, pos2, 900), (position_set_y, pos2, 900), (assign, ":cur_y", 10), (position_set_x, pos1, 200), (create_button_overlay, "$g_presentation_obj_faction_admin_relation_peaceful", "str_offer_faction_peace", tf_center_justify), (overlay_set_color, "$g_presentation_obj_faction_admin_relation_peaceful", 0xFFFFFF), (overlay_set_size, "$g_presentation_obj_faction_admin_relation_peaceful", pos2), (position_set_y, pos1, ":cur_y"), (overlay_set_position, "$g_presentation_obj_faction_admin_relation_peaceful", pos1), (val_add, ":cur_y", escape_menu_item_height), (create_button_overlay, "$g_presentation_obj_faction_admin_relation_hostile", "str_declare_faction_hostile", tf_center_justify), (overlay_set_color, "$
codeparrot/github-code-clean
# -*- coding: utf-8 -*- # Resource object code # # Created by: The Resource Compiler for PyQt4 (Qt v4.8.7) # # WARNING! All changes made in this file will be lost! from PyQt4 import QtCore qt_resource_data = "\ \x00\x00\x00\xd0\ \x51\ \x4c\x61\x62\x65\x6c\x7b\x0d\x0a\x20\x20\x20\x20\x62\x61\x63\x6b\ \x67\x72\x6f\x75\x6e\x64\x2d\x63\x6f\x6c\x6f\x72\x3a\x20\x71\x6c\ \x69\x6e\x65\x61\x72\x67\x72\x61\x64\x69\x65\x6e\x74\x28\x73\x70\ \x72\x65\x61\x64\x3a\x70\x61\x64\x2c\x20\x78\x31\x3a\x30\x2c\x20\ \x79\x31\x3a\x30\x2c\x20\x78\x32\x3a\x30\x2c\x20\x79\x32\x3a\x31\ \x2c\x20\x73\x74\x6f\x70\x3a\x30\x20\x72\x67\x62\x61\x28\x31\x30\ \x2c\x20\x32\x31\x30\x2c\x20\x30\x2c\x20\x32\x35\x35\x29\x2c\x20\ \x73\x74\x6f\x70\x3a\x31\x20\x72\x67\x62\x61\x28\x39\x2c\x20\x31\ \x36\x39\x2c\x20\x30\x2c\x20\x32\x35\x35\x29\x29\x3b\x0d\x0a\x20\ \x20\x20\x20\x62\x6f\x72\x64\x65\x72\x3a\x31\x70\x78\x20\x73\x6f\ \x6c\x69\x64\x20\x72\x67\x62\x28\x30\x2c\x20\x38\x36\x2c\x20\x37\ \x29\x3b\x0d\x0a\x20\x20\x20\x20\x63\x6f\x6c\x6f\x72\x3a\x20\x72\ \x67\x62\x28\x30\x2c\x36\x30\x2c\x20\x30\x29\x3b\x0d\x0a\x7d\ \x00\x00\x00\xd1\ \x51\ \x4c\x61\x62\x65\x6c\x7b\x0d\x0a\x20\x20\x20\x20\x62\x61\x63\x6b\ \x67\x72\x6f\x75\x6e\x64\x2d\x63\x6f\x6c\x6f\x72\x3a\x71\x6c\x69\ \x6e\x65\x61\x72\x67\x72\x61\x64\x69\x65\x6e\x74\x28\x73\x70\x72\ \x65\x61\x64\x3a\x70\x61\x64\x2c\x20\x78\x31\x3a\x30\x2c\x20\x79\ \x31\x3a\x30\x2c\x20\x78\x32\x3a\x30\x2c\x20\x79\x32\x3a\x31\x2c\ \x20\x73\x74\x6f\x70\x3a\x30\x20\x72\x67\x62\x61\x28\x32\x32\x33\ \x2c\x20\x30\x2c\x20\x30\x2c\x20\x32\x35\x35\x29\x2c\x20\x73\x74\ \x6f\x70\x3a\x31\x20\x72\x67\x62\x61\x28\x31\x38\x39\x2c\x20\x30\ \x2c\x20\x30\x2c\x20\x32\x35\x35\x29\x29\x20\x3b\x0d\x0a\x20\x20\ \x20\x20\x62\x6f\x72\x64\x65\x72\x3a\x31\x70\x78\x20\x73\x6f\x6c\ \x69\x64\x20\x72\x67\x62\x28\x31\x35\x36\x2c\x20\x30\x2c\x20\x32\ \x29\x3b\x0d\x0a\x20\x20\x20\x20\x63\x6f\x6c\x6f\x72\x3a\x20\x72\ \x67\x62\x28\x36\x30\x2c\x20\x30\x2c\x20\x30\x29\x3b\x0d\x0a\x7d\ \ \x00\x00\x16\x05\ \x89\ 
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\ \x00\x00\x80\x00\x00\x00\x80\x08\x06\x00\x00\x00\xc3\x3e\x61\xcb\ \x00\x00\x00\x04\x67\x41\x4d\x41\x00\x00\xb1\x8f\x0b\xfc\x61\x05\ \x00\x00\x00\x06\x62\x4b\x47\x44\x00\x00\x00\x00\x00\x00\xf9\x43\ \xbb\x7f\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\ \x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x15\x63\x49\x44\x41\x54\x78\ \xda\xed\x5d\xcb\x6f\x1c\xc5\x9f\xff\xf4\x6b\x9e\xf6\xe0\x8c\x43\ \x62\x3b\x0b\x01\x91\x87\x23\x83\xa3\x68\xa5\xc4\xe1\xb1\x44\x3c\ \xb4\x61\x79\x1c\x90\x90\x80\xdd\x13\x12\xab\x1c\x7e\x17\xa4\x45\ \x68\x0f\x1c\xf6\x06\x7f\xc1\x5e\x22\x2d\x67\x4b\x28\x57\x87\xc4\ \x31\x24\x28\x90\x38\x09\xce\xc3\x44\x06\x05\x92\x80\x33\xb1\x93\ \x78\xfc\xec\x99\xe9\x57\xed\x61\xa6\xda\x35\x9d\xea\xee\x6a\x27\ \xc6\x13\x53\x1f\xa9\x35\x33\x3d\xdd\xd5\xd5\xf5\xf9\x7c\x5f\xd5\ \x3d\x3d\x80\x84\x84\x84\x84\x84\x84\x84\x84\x84\x84\x84\x84\x84\ \x84\x84\x84\x84\x84\x84\x84\x84\x84\x84\x84\x84\x84\x84\x84\x84\ \x84\x84\x84\x84\x84\x84\x84\xc4\x3a\x81\x92\x74\x07\x42\x48\xe2\ \x7d\x56\xf5\x04\x14\x85\xac\x75\x1f\x1e\x65\x08\x93\xd9\x20\x5e\ \x3d\x7f\xfe\xbc\xfa\xdb\x6f\xbf\xc5\xee\x57\x2a\x95\x56\x2c\x94\ \x99\x99\x99\x87\x46\x6a\xa9\x54\x5a\x13\x81\x4c\x4c\x4c\x90\xef\ \xbe\xfb\x6e\xb5\x8e\x2d\xd2\x2e\x11\xd9\x4e\x88\x24\x42\x88\xfa\ \xed\xb7\xdf\xa6\xba\xba\xba\x0c\xdb\xb6\xf5\x3f\xff\xfc\x53\xd7\ \x34\x4d\xa9\x56\xab\x0a\x00\x2c\x2d\x2d\x71\xf7\xab\xd5\x6a\x7e\ \xfb\x8e\xe3\xf8\x9d\xb1\x6d\x3b\xb2\x63\xc1\xf6\x2c\xcb\x8a\x3d\ \x11\xde\x36\xb5\x5a\x2d\x76\x3f\xba\x4d\xb5\x5a\x8d\xdd\xd6\x34\ \x4d\x92\x64\xfd\xe4\xe4\xa4\x77\xfa\xf4\x69\x57\x64\x8c\x13\xc2\ \x0b\x59\xcf\x92\xee\x31\xdb\x85\x6d\x1f\x2f\x80\x91\x91\x11\xbd\ \xab\xab\x2b\xeb\xba\x6e\xde\x30\x8c\xdc\xe2\xe2\x62\x9b\x6d\xdb\ \x46\xad\x56\xd3\x6a\xb5\x9a\xea\xba\xae\x62\xdb\xf6\x72\x0f\x1a\ \x70\x5d\x57\x01\x00\xcf\xf3\xee\x1b\x1c\x42\x08\x61\xbf\xa3\xaf\ \x74\xbd\x7f\x96\x9e\x47\xd8\xfd\xe9\x71\xd8\x75\xae\xeb\x36\x6d\ 
\xc3\x3b\x1e\x6f\x3b\xd7\x75\x09\xfb\x1a\xb5\xae\x56\xab\x21\xb8\ \x0d\xd0\x2c\x6a\xf6\x3d\xfb\x79\x62\x62\xa2\x76\xe2\xc4\x09\x4b\ \x9c\x5b\x21\x90\x90\x75\x04\x75\xb2\xdd\xc6\xe2\x60\x59\x08\x5c\ \x8f\x10\x29\x80\x91\x91\x11\x7d\xcb\x96\x2d\x39\x4d\xd3\x3a\x2c\ \xcb\xda\xe4\xba\x6e\x37\x21\xe4\x05\x45\x51\x7a\x3d\xcf\xd3\x3c\ \xcf\x53\x08\x21\x4d\x79\x01\x43\x22\x21\x84\x28\x8d\x8f\x2c\xb7\ \x41\x9e\xe9\x67\xba\x21\x77\x3d\xf3\x9d\xbf\x5d\x2a\x95\x82\x6d\ \xdb\x64\x76\x76\x16\x77\xef\xde\x25\x33\x33\x33\x98\x9b\x9b\x43\ \xad\x56\x23\x8e\xe3\xa0\x52\xa9\x90\x06\x19\x70\x5d\x17\x96\x65\ \x01\x00\x71\x1c\x07\x84\x10\xb8\xae\x4b\x1c\xc7\xf1\x8f\xe5\x38\ \x0e\x3c\xcf\x03\x21\x84\x34\x5e\xe1\xba\x2e\x51\x55\xd5\x3f\x3e\ \xdb\x79\xea\xc9\x34\x4d\x23\x8a\xa2\xc0\x71\x1c\x28\x8a\x02\xcf\ \xf3\x88\xa2\xd4\x87\xc4\xf3\x3c\xcc\xcf\xcf\x5b\x33\x33\x33\x76\ \xb5\x5a\x85\xeb\x3e\x14\x87\x40\x00\x40\x51\x14\x10\x42\xa0\x28\ \x0a\xba\xbb\xbb\xc9\x81\x03\x07\xc6\xbe\xfe\xfa\xeb\x93\xb7\x6e\ \xdd\x32\x01\x58\x8d\xc5\x6e\x2c\x2e\x4f\x04\xa1\x02\x18\x1c\x1c\ \xd4\x76\xec\xd8\x91\xd1\x34\xad\xa8\x69\xda\x3f\x01\x78\x53\xd3\ \xb4\x7f\xe8\xba\xfe\x18\x1d\x90\x00\x8b\xdc\xf7\x49\x3f\xd3\xf7\ \x86\x61\xa0\x52\xa9\xa0\x5a\xad\xa2\x52\xa9\xc0\xb2\x2c\x58\x96\ \x05\x55\x55\xb1\xb0\xb0\x80\x7b\xf7\xee\xa1\x5a\xad\xc2\x71\x1c\ \x98\xa6\x09\x55\x55\xe1\x79\x1e\x6a\xb5\x1a\x28\xc1\x95\x4a\x05\ \x86\x61\xc0\x71\x1c\xe8\xba\x0e\xd3\x34\xa1\x69\x1a\x25\x12\x94\ \x34\xcf\xab\x7b\x48\xd7\x75\xe1\xba\x2e\x54\x55\x05\x21\x04\xe9\ \x74\x1a\xd5\x6a\x15\x9e\xe7\x41\xd7\x75\xff\xf8\x9e\xe7\xc1\xf3\ \x3c\x28\x8a\x02\xdb\xb6\x91\x4a\xa5\x28\xf1\x70\x5d\x17\x9e\xe7\ \x41\xd3\x34\xd8\xb6\x0d\x4d\xd3\xa0\x28\x0a\x16\x16\x16\x30\x35\ \x35\x85\x86\xe0\x1e\x0a\x32\x99\x0c\xaa\xd5\x2a\x4c\xd3\x84\xeb\ \xba\x78\xf6\xd9\x67\xf1\xe1\x87\x1f\x4e\x1f\x3b\x76\xec\xf3\xc3\ \x87\x0f\x7f\x0b\xc0\x6c\x2c\xb5\x86\x08\x9c\xa0\x08\xf4\xb0\xc6\ \xdb\xda\xda\xf4\x72\xb9\x9c\x2d\x14\x0a\x1b\x74\x5d\xff\xe7\x7c\ 
\x3e\xff\xdf\x86\x61\xa8\xf4\x84\x92\x12\x4f\xdf\xd3\x7d\x2d\xcb\ \xc2\xd2\xd2\x92\x3f\x68\x96\x65\xa1\x52\xa9\xc0\x34\x4d\xcc\xcd\ \xcd\xc1\x75\x5d\xdf\xe5\x57\x2a\x15\xff\xb3\xae\xeb\xd0\x75\xdd\ \x27\x3b\x95\x4a\x21\x9d\x4e\xfb\xed\xa6\xd3\x69\x00\x75\xcb\xcb\ \x66\xb3\x48\xa5\x52\xa0\x82\xb5\x6d\xdb\x17\x83\xaa\xaa\xb0\x6d\ \x1b\xe9\x74\xda\xb7\xdc\x5a\xad\x06\x45\x51\x7c\xf1\x65\x32\x19\ \xea\x05\x90\x4e\xa7\x31\x37\x37\x87\x6c\x36\xeb\x9f\x47\xc3\xba\ \x91\xcb\xe5\xa0\x69\x9a\xdf\xb6\x61\x18\xd0\x34\x0d\xa6\x69\xfa\ \x62\x48\xa5\x52\xb8\x7c\xf9\x32\x4c\xd3\x7c\x68\x02\x68\x6b\x6b\ \x83\xa2\x28\x70\x5d\x17\x67\xce\x9c\xc1\xd0\xd0\x10\xde\x7e\xfb\ \xed\x4d\x6f\xbd\xf5\xd6\xff\x5e\xbc\x78\xf1\x3f\x46\x47\x47\xaf\ \x00\xa0\xd6\x4a\x89\xa7\x9e\x20\x5c\x00\x83\x83\x83\xda\xe2\xe2\ \xa2\xda\xd1\xd1\x91\x26\x84\x3c\xa6\xeb\xfa\xbf\xeb\xba\xae\xd2\ \x81\xe3\x81\x5a\x03\x25\xc6\xb6\x6d\xb8\xae\x0b\xc7\x71\x50\xab\ \xd5\x60\x59\x16\x16\x17\x17\x61\x59\x96\x4f\x7a\xb5\x5a\xf5\x89\ \x73\x5d\xd7\x77\x69\xd4\xfd\x2a\x8a\x02\x55\x55\x7d\x0b\xb3\x6d\ \x1b\xaa\xaa\xfa\x4b\x2a\x95\x02\x00\x68\x9a\x06\x55\x55\xa1\x69\ \x1a\x2c\xcb\xf2\xdb\xd4\x34\x0d\x9a\xa6\x41\xd7\x75\x38\x8e\x83\ \x74\x3a\xed\xf7\x31\x9b\xcd\xc2\xb2\x2c\x68\x9a\x86\x74\x3a\xed\ \x13\xe5\x38\x8e\x4f\xa8\xae\xeb\x4d\x1e\x89\x10\x02\x4d\xd3\x7c\ \x41\xa7\xd3\x69\x10\x42\xd0\xd6\xd6\x06\x4d\xd3\x7c\x21\xeb\x7a\ \x7d\x58\x0b\x85\x02\x80\x7a\x08\xca\xe7\xf3\xd8\xbe\x7d\x3b\x4a\ \xa5\xd2\xc3\x0a\x03\xd0\x75\x1d\xed\xed\xed\xe8\xea\xea\x42\x5f\ \x5f\x1f\x2e\x5f\xbe\x8c\xa5\xa5\x25\x3c\xfd\xf4\xd3\xea\xfb\xef\ \xbf\xff\x9f\xa3\xa3\xa3\xff\xd3\xd8\x94\xcd\x0b\x3c\xd4\x3d\x3f\ \x09\x15\xc0\xf8\xf8\xb8\xf2\xcc\x33\xcf\xa8\x0b\x0b\x0b\xa9\x42\ \xa1\xd0\x76\xfd\xfa\xf5\xdd\x7b\xf7\xee\x85\xa2\x28\xfe\xa2\xeb\ \x3a\x16\x16\x16\x30\x3b\x3b\xeb\xbb\x56\xcb\xb2\x7c\x02\x3d\xcf\ \xf3\x5d\x31\x1d\x5c\x55\x55\x7d\xab\xa2\x31\xdc\x30\x0c\xdf\x9d\ \x52\x50\xeb\x54\x55\xd5\xb7\x34\x42\x08\xaa\xd5\xaa\x4f\x04\x15\ 
\x08\x25\xc3\xf3\x3c\xdf\xdd\xd3\xd7\x6a\xb5\x8a\x5c\x2e\x07\xcb\ \xb2\x90\x4a\xa5\xfc\x30\xa2\xeb\xba\x1f\x0a\x68\xdf\xa8\x28\x6d\ \xdb\x46\x36\x9b\x85\x6d\xdb\x7e\xfb\x86\x61\xc0\x30\x0c\xb8\xae\ \xeb\x7b\x1d\xd3\x34\x91\xcf\xe7\x51\x28\x14\x90\xc9\x64\x7c\x01\ \xd6\x6a\x35\xe4\xf3\x79\x54\x2a\x15\x5f\x08\x34\xbf\x50\x14\x05\ \xb9\x5c\x0e\xe5\x72\xf9\x61\x39\x01\xb4\xb5\xb5\x61\x76\x76\x16\ \x3d\x3d\x3d\xf8\xfc\xf3\xcf\x41\x08\x41\x3e\x9f\xc7\xf4\xf4\xf4\ \x5e\x00\x8f\xa3\xee\xf6\x6b\x58\xce\x07\xdc\x58\x01\x00\x40\xb9\ \x5c\xd6\x32\x99\x4c\x4a\xd3\xb4\xcc\xd1\xa3\x47\xb3\xfb\xf6\xed\ \xf3\x2d\x52\xd3\x34\xcc\xcc\xcc\xc0\x71\x1c\xd8\xb6\x8d\x6a\xb5\ \xea\x5b\x25\xb5\x3c\x2a\x12\x36\x5e\xd2\x41\xa7\x83\x42\x07\x97\ \xc6\x62\x55\x55\xfd\x81\xa2\x96\x46\xf7\x25\x84\x34\x1d\xc3\xf3\ \x3c\x7f\x7b\xea\x81\xa8\xc7\xa1\x16\xe6\x79\x1e\xaa\xd5\x2a\x80\ \x7a\x18\xa1\xa2\xa4\x31\x9f\x0d\x47\xf4\x38\xa6\x69\xfa\x7d\xa1\ \x21\x88\xed\x17\x15\x1f\xcd\x3d\x16\x17\x17\xfd\x30\xe3\xba\x2e\ \x08\x21\xb8\x77\xef\x9e\x1f\xff\x69\x3f\x6d\xdb\xc6\xd2\xd2\x92\ \x1f\xea\xc2\x4a\xe7\x24\xe8\xef\xef\x47\x6f\x6f\x2f\x16\x17\x17\ \x7d\xc3\xec\xec\xec\xc4\xd5\xab\x57\x61\x18\x46\x0e\xc0\x06\x00\ \x4b\x00\x16\x1b\xaf\x3a\x96\x2b\x03\x44\x0a\x00\x00\x6c\xdb\x56\ \x2c\xcb\xd2\x58\xeb\xcc\xe5\x72\x7e\xac\x3c\x77\xee\x9c\x1f\xc7\ \xa9\x95\xd2\xf7\x94\x00\xf6\x3b\xd6\xbd\x33\x59\x76\xd3\xb6\xec\ \x76\xc1\x36\xe9\x77\xec\xba\xb8\xf5\xc1\xbe\xd0\x63\xc6\xb5\x15\ \x76\xec\xe0\xf9\x89\xee\x4f\x3f\xd3\xfd\xfe\xf8\xe3\x0f\x5a\x95\ \x3c\x10\xb6\x6e\xdd\x8a\x2f\xbf\xfc\x12\x4f\x3e\xf9\x24\xb6\x6c\ \xd9\x02\x00\xd8\xb9\x73\x27\xce\x9e\x3d\x0b\x00\xed\x00\xf2\x00\ \x32\x00\x0c\x2c\xe7\x03\x3e\xd4\xb0\x86\x67\x66\x66\xd4\x6a\xb5\ \xaa\x56\x2a\x15\x9d\x76\x9c\xc6\x5c\x45\x51\xf0\xe3\x8f\x3f\xfa\ \xe4\x07\x4f\x9a\x1d\x9c\xe0\x40\x07\x5f\x59\x72\x00\x34\x7d\x1f\ \x6c\x93\xfd\x3e\xac\xfd\xe0\xfa\x20\xf9\xec\x71\x82\xc7\xe2\x1d\ \x3f\xb8\x5e\x84\x7c\x5e\xdb\xc1\xbe\x14\x8b\xc5\x07\x26\x1f\x00\ 
\x6e\xdc\xb8\x81\xcf\x3e\xfb\x0c\xb6\x6d\xfb\xa1\x65\x76\x76\x96\ \x7a\xb7\x1c\x43\xbe\xde\xe0\xbb\x89\x73\xae\x00\xe8\x34\xae\xeb\ \xba\x8a\xe7\x79\x0a\x1d\x00\x1a\xbf\x6f\xdf\xbe\x8d\xc5\xc5\xc5\ \xfb\x08\x8d\x1a\x00\xde\x40\x47\x89\x26\x89\x75\xc7\xf5\x81\x27\ \xbc\x30\x61\xc6\x09\x37\xd8\xf7\x38\x01\x05\xcf\x8f\x7e\x4e\xa7\ \xd3\x7e\x45\xf1\x30\x44\x70\xe1\xc2\x05\xe4\xf3\x79\x18\x86\x81\ \xf6\xf6\x76\xfa\x95\xc1\x2c\x2a\x96\xcb\x7e\xdf\xa5\x87\x7a\x80\ \x5a\xad\xa6\xd4\x6a\x35\xd5\x71\x1c\x3a\x99\xe3\x67\xe3\x77\xef\ \xde\x0d\xb5\x6a\xde\x7b\xde\x67\x51\xab\x89\x5a\x1f\x26\x9a\x28\ \x61\x86\xf5\x93\x25\x38\x68\xf1\x3c\xb1\x05\xdb\x61\x09\x8e\x12\ \x2f\xfb\x99\x21\xea\x81\x71\xe9\xd2\x25\x64\xb3\x59\x04\xca\x74\ \xad\x41\xb6\x82\x65\xeb\x8f\xf7\x00\x54\x00\xec\x60\x00\x68\x2a\ \xaf\xe2\x06\x35\xcc\x3a\x56\x4a\x4c\xd8\x20\x46\x89\x26\x78\x4c\ \x51\xeb\x0e\xdb\x56\x34\xcc\x88\x88\x99\x4e\x2e\xd1\xb0\xba\x4a\ \xa0\xc4\x87\x4e\xf8\xa9\x71\x2d\x90\x3a\xfc\x72\x8c\x3d\x59\x91\ \xc4\x29\x2a\x21\x14\xf5\x22\x51\x83\x99\x24\x64\xf0\xe2\x79\x9c\ \xf7\x12\x0d\x1d\x51\x22\x8f\x3a\xaf\x5c\x2e\xb7\x9a\x02\x60\x85\ \x90\x4c\x00\xec\xd5\x31\x1a\xb3\xe8\x49\x06\xb3\xe8\xb0\x13\x17\ \x19\x94\x30\xd2\x58\xa2\x56\xe2\x69\x44\x42\x86\x48\x9f\x82\x71\ \x3f\xce\x8b\x89\x78\x3d\xb6\x4d\x5a\x06\xff\x05\xe4\x73\xb9\x8e\ \xf5\x00\x34\x09\xa4\x13\x30\x00\xfc\xf9\xf0\x28\xf7\xc7\x2e\x74\ \x7e\x9c\x17\x33\x79\x83\xc7\x23\x2a\x2e\x2f\xe0\x95\x7b\xa2\xa5\ \x23\x4f\x20\xbc\x72\x36\x2e\x94\xc4\x79\x3d\xde\x98\x01\x58\xed\ \x30\x10\x09\xe1\x10\xc0\x0e\x4e\x63\xfd\x8a\x2d\x08\x00\x76\xef\ \xde\x8d\xbd\x7b\xf7\xfa\x93\x45\x22\xae\x9d\xb6\x13\x37\xa8\x71\ \xf9\x01\x4b\x0c\xaf\x4d\x9e\x05\x07\x8f\x15\xdc\x2f\x68\xf9\x22\ \x02\xa6\x0b\x9d\x31\x5c\x0b\x44\x1e\xb9\x71\x0d\x5d\xa1\x39\x00\ \x3b\x5f\x2f\x1a\xbb\xc3\x06\xb7\x58\x2c\xe2\xb5\xd7\x5e\xc3\xc1\ \x83\x07\xf1\xc3\x0f\x3f\xe0\xfb\xef\xbf\x47\xb9\x5c\x8e\x4d\xda\ \xc2\xc8\x11\xf1\x10\x71\x6d\x88\x64\xef\xa2\x39\x89\xe8\x18\xd1\ 
\x64\xb0\x25\x05\xe0\x79\x1e\xa1\x1e\x80\x0d\x01\xa2\x03\x12\x37\ \x78\x40\x7d\x66\xf1\xd5\x57\x5f\xc5\x81\x03\x07\x70\xe9\xd2\x25\ \x8c\x8c\x8c\xe0\xda\xb5\x6b\x42\x84\x8a\x26\x5a\x2b\x21\x58\xf4\ \xd8\xa2\x49\x6d\xd4\x77\x74\x1a\x97\xf5\x2e\x2d\x21\x00\x0a\x42\ \x88\x7f\xa5\x0c\x80\x7f\xbd\x3c\x78\x52\x7d\x7d\x7d\xd8\xbf\x7f\ \x3f\x3a\x3b\x3b\x71\xe7\xce\x1d\x9c\x39\x73\x06\x3f\xfd\xf4\x13\ \x6c\xdb\x06\x21\x04\x9b\x36\x6d\x42\x77\x77\x37\xce\x9f\x3f\x8f\ \x2b\x57\xae\x60\xf3\xe6\xcd\xe8\xeb\xeb\xf3\xaf\xda\xed\xd9\xb3\ \x07\x7b\xf6\xec\xc1\x8d\x1b\x37\x30\x3c\x3c\x8c\x73\xe7\xce\x35\ \x5d\x50\x8a\x2b\xfb\x82\xdf\xd1\xbe\x8b\x4e\x54\xf1\x42\x8d\xa8\ \xd0\x83\xe1\x2e\x2e\x17\x09\xae\xa7\xd7\x12\x5a\x4e\x00\x34\x01\ \x08\xc6\xb7\xe0\x49\xf7\xf7\xf7\xe3\xcd\x37\xdf\xf4\xf7\xdb\xbc\ \x79\x33\xde\x79\xe7\x1d\xbc\xf4\xd2\x4b\x18\x1f\x1f\x47\x2e\x97\ \x43\x7f\x7f\x3f\x46\x46\x46\x40\x08\xc1\xe4\xe4\x24\xbe\xfa\xea\ \x2b\xb4\xb5\xb5\x61\x60\x60\x00\x03\x03\x03\xfe\xf4\xe8\xd6\xad\ \x5b\xf1\xd1\x47\x1f\xe1\xdd\x77\xdf\xc5\xa7\x9f\x7e\x9a\x68\xb0\ \x93\x26\x84\x22\xae\x5b\x84\x40\xde\xd8\x44\x09\x31\xf8\x7e\x95\ \x2b\x81\x95\x0b\x80\x9e\x14\x75\x55\x00\x7c\xab\x64\x4f\x24\x6c\ \x56\x6b\xc3\x86\x0d\x78\xf1\xc5\x17\xef\x6b\x8f\x2e\xf3\xf3\xf3\ \x38\x7a\xf4\x28\x86\x86\x86\x30\x30\x30\x80\xf7\xde\x7b\x0f\x86\ \x61\x00\x00\x3a\x3a\x3a\x22\x13\xb4\x30\x12\xd9\x3e\x47\x79\x87\ \xa0\x80\x82\x44\x89\x96\x8c\x61\x42\x4c\x92\x2f\xb4\xb4\x00\xd8\ \xc1\x02\x70\xdf\x8d\x1b\x74\xfd\xe5\xcb\x97\x31\x32\x32\x82\xf9\ \xf9\x79\xf4\xf6\xf6\xe2\xc0\x81\x03\xd8\xb8\x71\xa3\xbf\xff\xad\ \x5b\xb7\x30\x3a\x3a\x7a\x5f\x36\xbd\x73\xe7\x4e\x3c\xff\xfc\xf3\ \x78\xee\xb9\xe7\xfc\xcb\xc0\x00\xfc\xf2\x51\x34\xbf\x88\x8b\xfd\ \xf4\x1c\xd8\x75\x61\xa5\xa7\x48\xb8\x09\x13\x5b\x5c\x98\xe0\x95\ \xc1\x6b\x05\xa1\x10\xc0\x73\x53\x41\x02\xc6\xc6\xc6\x50\x2e\x97\ \xfd\xf5\x63\x63\x63\x18\x1b\x1b\xc3\xf6\xed\xdb\xd1\xd5\xd5\x85\ \xbb\x77\xef\xe2\xe2\xc5\x8b\xfe\x24\x52\x3e\x9f\xc7\xbe\x7d\xfb\ 
\xb0\x7f\xff\xfe\x26\x91\x00\xf5\xdb\xc2\x4f\x9d\x3a\x85\xe3\xc7\ \x8f\x27\x22\x38\xca\x7d\xb3\x03\x1e\xe5\xfe\x83\x62\xa1\xaf\xbc\ \xf2\x32\xd8\xa6\x48\x22\xc9\xee\xc7\xb6\xdd\xd2\x02\xa0\x1d\x66\ \xef\x74\x0d\xc6\x37\x4a\x7e\xf0\xa4\xaf\x5e\xbd\x8a\xf1\xf1\xf1\ \xfb\x06\xfe\x85\x17\x5e\xc0\x1b\x6f\xbc\xd1\x74\xac\xdb\xb7\x6f\ \x63\x78\x78\x18\xa7\x4f\x9f\xf6\x6f\xc6\x14\x9d\xae\x15\xf1\x10\ \xa2\xeb\xc3\x88\xe5\xb9\xfd\x28\xc1\xf1\x8e\x17\x25\xc4\x96\x13\ \x00\xbd\x07\x9e\x56\x01\xb4\xf3\x74\x1d\xfd\x9c\x34\xf6\xb1\xfb\ \x13\x42\xf0\xf3\xcf\x3f\x63\x78\x78\x18\x57\xae\x5c\x89\xb4\xe8\ \x30\xc2\x78\xf1\xfb\x41\x43\x86\x48\x15\x10\x37\x93\x29\x3a\x36\ \xc1\x90\xd8\x32\x02\xa0\x08\x92\x16\x35\xd8\x71\x59\x37\x1d\x9c\ \x5a\xad\x86\x93\x27\x4f\xe2\xc4\x89\x13\x28\x95\x4a\x91\x44\xc4\ \xad\x63\x07\x3c\x89\x3b\xa6\xfb\x45\x25\x8a\xa2\x21\x83\x1d\x1b\ \x5e\x7e\x11\xe7\x81\x5a\x56\x00\x8e\xe3\x90\x30\x01\xc4\xb9\xe3\ \xb0\xc4\xc7\xf3\x3c\x1c\x3f\x7e\x5c\x78\xb0\x83\x82\x13\xf5\x10\ \x51\x42\x8c\x3a\x6e\x58\x5f\xa2\x26\xa2\xc2\x04\x11\xd7\x17\xde\ \xd8\xfe\x95\x88\xbc\x16\xc0\x86\x00\x4a\x20\x2f\x0f\x10\x21\x84\ \x15\x0d\xcf\xd2\xe2\xdc\x6e\x5c\x0e\x10\x7c\xe5\x1d\x2f\x68\xa9\ \xa2\x16\x2c\x32\x41\x94\x34\xec\xb4\x4a\x08\x88\xbd\x18\x44\xc1\ \xde\x0f\x4f\x33\xf9\xb0\x41\x8d\x8a\x75\xa2\x16\x25\x32\x7b\x27\ \xba\x84\x11\x11\x25\xb6\x60\xa2\xb7\x92\x9c\x80\x27\xc4\x56\x22\ \x1f\x10\x08\x01\x5e\xfd\x7a\x80\x5f\x05\xf0\xdc\x7a\x94\x25\x8a\ \x78\x85\xa8\x7d\xc3\xde\xb3\x03\x2b\x42\x70\xdc\xf6\x61\x5e\xe9\ \x41\x4a\x4f\x11\xc1\xb1\xde\x75\x2d\x20\xe4\x01\x08\x59\xbe\x64\ \x19\xbc\x70\x11\x65\x91\x49\x66\xd4\x44\xdc\x7b\x30\x79\x4b\x52\ \x7d\x84\xb5\xc5\x13\x5f\x5c\xdb\x51\xc2\x0a\xf3\x26\xad\xe6\xfa\ \x29\x84\x27\x82\xe8\x85\x0a\xfa\xe3\x0c\x91\x44\x8b\x1d\x1c\x51\ \xb7\x1b\x36\xa8\xbc\x12\x4b\x34\x94\x84\x11\x26\xea\x9a\x45\xf3\ \x90\x28\x41\x47\x1d\xb7\xa5\x05\x10\xec\x38\x80\xfb\xee\xf0\xe1\ \x59\x10\x25\xea\x41\xdc\x74\x9c\xeb\x8d\x9b\x67\x48\x12\xfb\x93\ 
\x86\x01\x9e\xb8\x83\xfb\xf1\xfa\xc2\x1b\xcf\xb5\x44\xa2\x9b\x42\ \x29\xe8\x65\xe1\x28\x45\xc7\x91\x9c\x24\x99\x8b\xab\x10\x78\x64\ \xf0\x06\x9c\x17\x4a\x78\x1e\x28\x8a\xfc\xb8\x7c\x85\xee\xc7\x13\ \x21\xdb\xe6\x5a\xc6\xfd\x44\x02\xa0\x27\x02\xa0\xe9\x6e\xa0\x38\ \x8b\x0c\xab\xe7\xc3\xe2\xae\x48\xa2\x15\x16\x66\xc2\x12\x30\x1e\ \xf9\x51\xa1\x24\x4a\xd4\x22\x62\xe0\xf5\xa5\x95\x26\x7d\x78\x08\ \x0d\x01\xa6\x69\x12\xcb\xb2\x7c\x99\xb2\x89\x1f\xad\x06\x92\x24\ \x61\x71\xc9\x19\x4f\x4c\x71\x59\x36\x8f\x60\x91\x98\xcc\x12\x15\ \x65\xcd\x71\x7d\x49\x22\x96\xa0\x68\x5b\x05\xa1\x1e\x80\x7d\xf0\ \x11\x9b\x04\xb2\x62\x10\x71\x8d\x2b\xb1\xee\xa8\x58\x9b\x24\x27\ \xe0\xf5\x25\xa9\x68\xa2\x04\x17\x97\x70\xf2\xfa\xd2\x6a\x10\xb9\ \x2d\xfc\xbe\xe7\xd1\x88\xc4\xe1\x28\x8b\x8f\xcb\x09\xc2\xdc\x74\ \x54\x86\x1d\x46\x72\x9c\x27\x8a\xf3\x34\xa2\x21\x43\xd4\x13\xb6\ \x1a\x84\x93\x40\xb6\xfc\x0b\x0e\x06\x4b\x40\x98\xd5\xf0\xea\x61\ \x1e\xc1\x22\x95\x43\x90\x60\x5e\x9b\x71\x09\x24\x2f\x3f\xe1\x09\ \x4b\x24\x21\x8c\x0a\x53\xad\x4c\x3e\x90\xa0\x0c\x64\x4f\x88\x57\ \x06\xc6\xc5\x3e\xde\x60\x88\x96\x63\x61\x49\x5f\x14\xf9\x71\x19\ \x7f\x54\x9b\x61\xde\x85\xd7\x97\xb8\x38\xcf\x1e\xbf\x15\xc1\xf5\ \x00\x13\x13\x13\xc1\xe7\xb8\xf9\x4f\x07\x09\xde\x11\x1c\x46\x7c\ \x50\x38\x71\xb1\x31\x2e\xcb\x0e\xcb\xf8\x45\x26\x9d\xe8\x7e\x51\ \x61\x26\xaa\x8f\x6c\xdb\x49\xfa\xd2\x8a\x49\x9f\x90\x00\xe8\x23\ \x4e\x5d\xd7\x25\xf4\xc2\x0f\xfb\x3c\x1e\xf6\xd7\xc1\x49\xb2\xe3\ \xb0\x04\x4a\x34\x19\x8c\x73\xb1\x71\x82\x10\x09\x05\x51\x93\x47\ \x51\xe5\x1e\xeb\xe2\x5b\x39\xe9\x13\x12\x00\x00\x9c\x3c\x79\xd2\ \x09\x3e\x02\x95\x8a\x21\xf8\xe3\xd0\x28\x92\xc3\x32\xe6\x30\x17\ \x9e\x24\x9b\x8e\x2b\xf7\x44\xbe\x8b\x3b\x66\x5c\xdf\xe3\xf2\x8d\ \x56\x47\xa8\x00\xa6\xa6\xa6\xc8\xe0\xe0\xe0\xc2\xd2\xd2\x92\xed\ \x79\x9e\xff\x70\x28\xba\x88\x24\x3d\x3c\x4b\x08\x23\x88\x12\xca\ \x73\xa3\xbc\xc4\x8c\x57\x9a\x25\x25\x3f\xce\x1b\x45\x65\xfc\xbc\ \xfe\x04\xfb\xf2\x28\x20\xb2\x0a\xb8\x71\xe3\x86\x7b\xf8\xf0\xe1\ 
\x3f\x4d\xd3\x6c\x7a\x02\x27\xef\x77\x01\xa2\x96\x21\x9a\x10\xc6\ \xb9\xe2\xb0\x84\x30\xce\xcb\xc4\x59\x3d\x4f\x78\x71\x02\xe7\x55\ \x34\x8f\x0a\x62\xab\x80\xeb\xd7\xaf\xdb\xa5\x52\xa9\xe9\x31\x6f\ \x22\x16\xcf\x73\xd7\x71\x93\x44\x22\x96\x28\x32\x29\xc3\x23\x38\ \x8c\xa8\xa4\x0b\x4f\x70\x41\x21\x3e\x4a\x10\xba\x16\x40\x9f\x96\ \xcd\x5e\x12\x16\x8d\x81\x22\x09\x62\x98\x57\x88\xdb\x26\xcc\x82\ \xa3\x3c\x0d\xaf\x9f\xa2\x62\x0b\x4b\xfc\x82\x79\xcc\xa3\x04\xe1\ \x5f\x06\xd1\xe4\x8f\x3e\xb2\x35\xcc\x7d\x8b\x0e\x6a\x30\x66\x8a\ \xcc\x21\x84\xb9\xfb\xa8\x9a\x3d\x69\x4e\x20\xe2\xe1\xa2\xca\xc5\ \x47\x0d\x89\xee\x09\xa4\x4f\xe9\xbc\x74\xe9\x12\xee\xdc\xb9\x23\ \xec\xfa\xc3\xc4\x91\x34\x94\xc4\x79\x93\xb8\x10\x23\x92\x63\x88\ \x6c\x1b\x14\xee\xdf\x42\x00\xf4\x71\xaa\x84\x10\x98\xa6\x89\x23\ \x47\x8e\x60\x74\x74\xd4\x7f\x3e\x30\x6f\x30\x44\x67\xe5\xa2\x26\ \x80\x78\xfb\x85\xb5\x1d\x24\x26\xac\x14\x15\xdd\x27\xcc\xd3\x3c\ \x8a\xe5\xde\x03\x0b\x80\x3e\x2b\x97\xc2\xf3\xea\xbf\xff\x3b\x72\ \xe4\x08\x6e\xde\xbc\x79\x1f\x51\xa2\x19\x3f\x2b\x86\x28\x2f\xc1\ \x23\x5d\xa4\x72\x60\x09\x0e\xeb\x4b\x92\xa4\xf0\x51\x2d\xf7\xc2\ \x20\x9c\x03\x64\xb3\xd9\xa6\x3f\x54\xa0\x58\x58\x58\xc0\xf0\xf0\ \x30\x36\x6f\xde\x8c\xdd\xbb\x77\x63\xe3\xc6\x8d\xc2\x19\x7f\x90\ \xe0\x30\x8b\x0c\x92\x1f\x25\x20\x5e\x08\x12\xa9\x2e\xc2\x3c\x54\ \x58\x5f\xd6\xe2\x61\x0e\xab\x01\x61\x01\xd0\xda\x5f\x51\x14\xee\ \x43\x8e\xa7\xa6\xa6\xf0\xcd\x37\xdf\xa0\xbb\xbb\x1b\xbb\x76\xed\ \x42\x67\x67\xa7\x50\x22\x17\x15\x53\xa3\xbe\xe3\x79\x9a\x28\x8f\ \xc0\xb6\x13\xb5\x6d\x5c\x9b\x74\x16\x74\xbd\x20\x51\x15\x40\x1f\ \x0f\x17\xf5\x50\xa3\x52\xa9\x84\x52\xa9\x84\x62\xb1\x88\x6d\xdb\ \xb6\xa1\xbb\xbb\xdb\xdf\x3f\x2e\x01\x4b\x52\x09\x00\x62\xf7\xec\ \xd1\x63\x47\x79\x8e\xa8\x59\x45\x4a\x3a\x35\x80\xf5\x86\x44\x02\ \xa0\x22\x08\xfb\xd7\x10\x16\x33\x33\x33\x38\x7b\xf6\x2c\x52\xa9\ \x14\x9e\x78\xe2\x09\xf4\xf4\xf4\xa0\x50\x28\x08\xcf\xb7\x8b\x86\ \x10\x91\x32\x32\x8e\x78\xde\xf6\xf4\x7f\x07\x1e\xe6\x7f\xfc\xb4\ 
\x22\x84\x05\x40\xff\x85\x43\x54\x00\x14\x96\x65\xe1\xda\xb5\x6b\ \xb8\x76\xed\x1a\xf2\xf9\x3c\xba\xba\xba\xb0\x71\xe3\x46\x14\x0a\ \x85\xd0\x2c\x5e\xa4\x34\x04\xe2\x2f\xe3\xc6\x85\x8c\xe0\x36\xb6\ \x6d\xfb\x7f\x75\xf3\x28\x4e\xea\xac\x04\xc2\x02\xa0\xd3\xc0\xf4\ \x62\xd0\x4a\xb0\xb4\xb4\xe4\x8b\xc1\x30\x0c\x6c\xd8\xb0\x01\x1d\ \x1d\x1d\x28\x14\x0a\xc8\xe5\x72\x42\x33\x6f\x71\xc9\x9b\x48\x15\ \x41\xd7\xd3\x7f\x3c\xa1\x7f\x26\xb5\x1e\x5d\x7c\x1c\x84\x05\x40\ \x2d\x03\xc0\x43\xc9\x80\x6d\xdb\xc6\xf4\xf4\x34\xa6\xa7\xa7\xeb\ \x1d\xd1\x75\xe4\xf3\x79\xe4\xf3\x79\x64\xb3\x59\xa4\xd3\x69\xa4\ \x52\xa9\xa6\x7b\x0f\x80\xf0\x2b\x7f\x00\xbf\xbc\xa4\x31\x9c\x92\ \x4c\x5f\xff\xae\x84\x07\x91\xa8\x0a\x48\xa5\x52\x4d\x16\xf5\x30\ \xe1\x38\x0e\xe6\xe6\xe6\x30\x37\x37\xd7\xb4\x5e\x55\x55\x18\x86\ \xe1\xff\x5d\x1c\xfd\x23\xa9\xc0\x9f\x39\x36\x59\x3c\x2d\x57\xe9\ \x22\x89\x0e\x47\xe2\x67\x94\xfe\xd5\x4f\xb4\xf4\xbc\xfa\xd3\x44\ \xd8\xbf\x6f\x95\x78\x78\x10\xce\xe6\xd8\x5f\x07\x4b\xac\x1f\x08\ \x7b\x80\xb5\x7a\x96\xad\xc4\xea\x42\xd8\x03\xd0\xd8\x4b\x93\x2a\ \x89\xf5\x81\x44\x17\x83\x80\x65\x21\x48\xac\x0f\x08\x0b\x80\xfe\ \x20\x34\xe9\x44\x90\x44\x6b\x23\xd1\x0d\x21\x34\x04\xac\x74\x22\ \x48\xa2\xf5\x20\x2c\x00\xfa\x77\xb1\xeb\xe1\x1a\xb8\xc4\x32\x84\ \x05\x90\xc9\x64\xfc\xc7\xb8\x4b\xac\x1f\x24\xaa\x02\x28\x64\x12\ \xb8\x7e\x20\x3c\x0f\x60\x59\x96\x1f\x02\xe8\xdf\xaa\x4b\x3c\xfa\ \x48\xe4\x01\xe8\xfd\x00\x12\xeb\x07\x89\xe6\x01\x34\x4d\x83\x61\ \x18\x52\x04\xeb\x08\x89\xae\x05\xd0\x6b\xe8\x12\xeb\x07\x89\x26\ \x82\xe8\xdf\xc7\xcb\x24\x70\xfd\x40\x38\x09\x64\xff\x35\xf4\xef\ \x72\xbb\xd4\xdf\x01\x22\x1e\xc0\xff\xcf\x20\x00\x4d\x37\x63\x48\ \x3c\xfa\x10\xf6\x00\xec\xed\x59\x8f\x3d\xf6\xd8\x5a\xf7\x5b\x22\ \x80\x95\x72\x22\x22\x00\x02\x00\x63\x63\x63\x28\x14\x0a\x28\x16\ \x8b\x78\xf9\xe5\x97\xd7\xfa\x7c\x25\x02\x78\xe5\x95\x57\xa0\x28\ \x0a\xca\xe5\x32\x6c\xdb\x66\xf3\x34\x3a\x6f\xcf\x8d\xdb\xc2\xbe\ \xbc\xa7\xa7\x07\x4f\x3d\xf5\x14\xb2\xd9\x2c\xb2\xd9\x2c\x3e\xfe\ 
\xf8\xe3\xb5\x3e\x67\x89\x06\x0e\x1d\x3a\x84\x1d\x3b\x76\x40\x51\ \x14\x14\x8b\x45\x5e\x92\x1e\x7a\xf1\x26\xec\xb2\x9e\xda\xf8\x2e\ \x05\x20\x07\xe0\x1f\x13\x13\x13\xd8\xb5\x6b\x17\x32\x99\x0c\x7e\ \xff\xfd\x77\x0c\x0c\x0c\xa0\xbb\xbb\x1b\xb3\xb3\xb3\xfe\x9d\xbd\ \x12\x7f\x2d\xfa\xfb\xfb\x71\xe8\xd0\x21\x7c\xf0\xc1\x07\x28\x16\ \x8b\x70\x1c\x07\xa5\x52\x09\x17\x2e\x5c\x80\xa6\x69\x18\x1c\x1c\ \x3c\x05\x60\x06\xc0\x2c\x80\x05\x00\x15\x00\x36\x00\x17\x0d\x51\ \x44\x85\x00\x82\xba\xdb\xf0\x7a\x7b\x7b\x17\x86\x86\x86\xda\x77\ \xec\xd8\x81\x4f\x3e\xf9\x04\xa6\x69\xe2\xc2\x85\x0b\xe8\xec\xec\ \xc4\x17\x5f\x7c\x11\xfa\xbf\xc1\x12\xab\x0b\x3a\x37\x53\x2e\x97\ \x31\x3f\x3f\x8f\x62\xb1\x88\x9b\x37\x6f\x62\x76\x76\x16\x86\x61\ \x58\x58\x26\xda\xe7\x12\x81\x50\x10\x26\x00\xc2\xbc\x7a\x86\x61\ \x7c\x0f\xe0\x8d\xc3\x87\x0f\xe3\x97\x5f\x7e\xc1\xc1\x83\x07\x91\ \xc9\x64\x70\xf6\xec\x59\xdc\xbe\x7d\x1b\x8e\xe3\xc8\xd2\x70\x0d\ \xd0\xd1\xd1\x81\x8e\x8e\x0e\x74\x75\x75\xc1\xf3\x3c\x8c\x8f\x8f\ \xc3\xb6\x6d\xe4\x72\x39\x4c\x4d\x4d\x5d\x47\xdd\xda\xe9\xe2\xa1\ \x99\x57\x00\xd1\x1e\x80\xaa\xc5\x29\x16\x8b\xff\xb7\x65\xcb\x96\ \x7f\x9d\x9c\x9c\x54\x87\x86\x86\x70\xea\xd4\x29\xec\xdf\xbf\x1f\ \xdb\xb6\x6d\xc3\xe4\xe4\x24\xee\xdd\xbb\x27\x05\xb0\x06\x68\x6f\ \x6f\x87\x61\x18\xd8\xba\x75\x2b\x0c\xc3\xc0\xfc\xfc\x3c\x52\xa9\ \x14\xfa\xfa\xfa\xc8\x91\x23\x47\x7e\x00\x60\x02\xa8\xa2\x2e\x00\ \x07\x1c\x0f\x10\x36\xa5\xa7\xa2\x2e\x8e\x0c\x80\x76\x00\x8f\xbf\ \xfe\xfa\xeb\xff\xf6\xeb\xaf\xbf\xfe\xd7\xf5\xeb\xd7\x37\x00\xf5\ \xa4\xb0\x58\x2c\xc2\x34\x4d\xee\xcf\xc5\x25\x56\x1f\xf4\x56\xfd\ \xae\xae\x2e\xf4\xf6\xf6\xa2\xa3\xa3\x03\x3d\x3d\x3d\xe6\xb1\x63\ \xc7\x86\x8f\x1d\x3b\xf6\x13\x80\x49\x00\x7f\x02\xb8\x05\xe0\x0e\ \xea\x79\x40\x15\xcb\x62\x88\x14\x80\x8a\x7a\x12\xd8\x06\xa0\x03\ \xc0\xa6\x4c\x26\xf3\x84\x65\x59\xff\xe2\x79\xde\xae\x54\x2a\x95\ \x56\x55\xd5\x20\x84\x68\x84\x10\x25\xa2\x2d\x89\x55\x86\x61\x18\ \xa4\xa7\xa7\x87\x74\x77\x77\xdf\x3a\x79\xf2\xe4\x35\x00\x65\x00\ 
\x77\x01\xdc\x06\x50\x02\x30\x8d\x7a\x22\xb8\x08\xc0\x02\xe3\x09\ \xc2\x48\x53\x70\xbf\x17\xd8\x00\xa0\x13\xc0\xe3\x8d\xf7\xed\x00\ \xb2\xa8\x8b\x44\x63\xf6\x93\xf8\x6b\x41\xe3\xb9\x8b\x3a\xb9\x15\ \xd4\x2d\xbd\x8c\xba\xd5\xdf\x6b\xbc\x0f\x5a\x7f\x64\x15\xc0\x66\ \x8e\x36\xea\xb1\x84\xce\x19\xd8\x00\x96\x00\xe4\x51\x17\x87\x8e\ \x65\x8f\x11\x84\x14\xc4\xea\x20\x58\xd7\xfb\xf9\x1a\xea\x24\x2f\ \x01\x98\x07\x30\xd7\x58\x4c\x34\x27\x82\x42\x49\x20\x41\x5d\x55\ \x0a\x80\x1a\x73\xa0\x1a\xea\xae\x24\x83\xba\xf5\x53\x01\x04\xc9\ \x96\xe4\xaf\x1e\x08\xe7\x33\x15\x80\x85\xba\x08\x2a\xa8\x13\x6f\ \xa2\xce\x99\x03\xa6\xfe\xa7\x88\x23\x89\xc6\x76\xbd\xb1\x18\xa8\ \x93\x9e\x6a\xbc\x8f\xb3\x7e\x29\x82\xd5\x81\x17\xb2\x8e\x8a\xc0\ \x46\x5d\x08\x16\x96\x2b\x00\x07\x01\xeb\x07\xc4\x08\xa2\x44\x52\ \xa2\x75\xd4\x63\xbe\x86\x65\xcb\x97\x64\xaf\x2d\x08\x9a\xc3\xb6\ \xdb\x58\xd8\xd2\xef\x3e\xf2\x81\x64\xa4\xa9\xcc\x3e\x6c\xd2\x17\ \x95\x48\x4a\x51\xac\x0e\xc2\x26\x5d\x58\x92\xd9\x9a\x3f\x74\x92\ \x66\x25\x04\x89\x12\x2b\xc9\x5f\x3d\x88\xfc\x32\x87\x08\x6e\x27\ \x21\x21\x21\x21\x21\x21\x21\x21\x21\x21\x21\x21\x21\x21\x21\x21\ \x21\x21\x21\x21\x21\x21\x21\x21\x21\x21\xb1\x2e\xf1\xff\x54\x46\ \x90\x48\x69\xaa\x7e\x2e\x00\x00\x00\x26\x7a\x54\x58\x74\x43\x6f\ \x6d\x6d\x65\x6e\x74\x00\x00\x78\xda\x73\x2e\x4a\x4d\x2c\x49\x4d\ \x51\x28\xcf\x2c\xc9\x50\x08\xc9\x48\x55\x70\xf7\xf4\x0d\x00\x00\ \x52\xe7\x07\x23\x90\xcb\x65\x70\x00\x00\x00\x00\x49\x45\x4e\x44\ \xae\x42\x60\x82\ \x00\x00\x06\x47\ \x89\ \x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\ \x00\x00\x18\x00\x00\x00\x18\x08\x06\x00\x00\x00\xe0\x77\x3d\xf8\ \x00\x00\x06\x0e\x49\x44\x41\x54\x78\xda\x9d\x55\x69\x6c\x54\x55\ \x18\x3d\xf7\xbe\xfb\xd6\x99\x32\xed\xcc\x74\x19\x96\x16\xda\x40\ \x95\xa6\x16\x5a\x0a\x45\x10\xc2\x0e\x82\x42\x30\x42\x41\x23\x31\ \x8a\x09\x01\x63\x34\x8a\x89\x1b\x25\x48\x8c\xc4\x40\x0c\xa0\x44\ \x90\x04\xff\xb0\x19\x97\x44\x04\xa5\x84\xb0\x34\x2c\x69\xa1\x80\ 
\xd2\x82\x61\x6b\x4b\x4b\xa1\xa5\x2d\x2d\x2d\xd3\x37\xf3\xae\xdf\ \x9b\x51\xac\x50\xfe\xf0\xd2\x9b\xd7\x37\xef\xdd\x73\xbe\xe5\x7c\ \xe7\xb2\xd0\xd2\xc3\xfe\x86\x8d\xe3\x6e\xe3\x11\xd7\x93\xeb\x2e\ \x2e\x96\x9c\xcf\x13\x82\x4f\x96\x8a\x82\x61\x83\xbc\xb8\xd4\xda\ \x8d\x08\x18\x84\x2a\x2a\x0c\x9d\x1d\x10\x42\x29\x29\x9d\x96\xd8\ \xd5\xdb\x7e\x96\xb6\xe4\xa0\x64\x8a\x32\xa5\x61\xc3\xb8\xd2\x9e\ \x2f\xb2\x56\x9d\x5d\xef\x80\x2f\x9b\x51\x94\x8c\x7e\xc9\x16\x82\ \x3e\x15\x89\x9a\x02\x4e\xc0\x49\x1a\x47\x7f\x83\xa1\xaa\xd5\xc6\ \xc9\xa6\x6e\x1c\xbc\x6e\xc3\x14\xd8\x71\x70\x7a\xd2\x82\x87\x08\ \x02\x6f\x1f\x93\x2a\x63\xe0\x76\xf7\x8e\xfa\xf5\xe3\x16\xa4\xbf\ \x7f\xa2\x2f\x18\xff\x6b\xf0\x13\x41\x6b\x68\x4e\x10\x2d\xb6\x44\ \x94\x01\x3a\xe7\xd0\x15\xba\xab\x0a\x54\x01\xa8\x9c\x61\xa0\xa9\ \xa0\xd0\x27\xa0\x33\x89\x6f\x2e\x77\xa2\xe2\x06\x11\x29\x2c\x9d\ \xb2\xa9\xbd\x4f\xe0\x7f\xab\x4c\xa2\x30\x0b\x66\x5d\x0b\x64\xed\ \xcd\x46\x2b\xc1\x4a\xcd\x18\xde\x17\x48\xb2\x08\x54\xc2\x10\x1c\ \x86\xca\x29\x42\x22\x10\x8c\x08\x18\x34\x22\xd3\xe8\x7f\x4e\x24\ \x42\x21\x22\x8b\x63\x4c\x1f\x8e\xf2\x96\x08\xd6\x9e\xe9\x80\x57\ \xfc\x47\xc2\x7c\x4b\x8f\x48\x99\x9f\x89\x04\x9d\x43\x74\x76\x23\ \x25\x41\x83\xf0\xaa\x04\xce\x08\x98\xc0\x08\x58\x50\xc4\x1e\x8a\ \xdc\xe3\x12\xd1\x52\x5d\x22\x85\x83\xfe\xa0\x12\x81\x42\xdf\x06\ \xe8\xdb\x22\x22\xa9\x6a\x8b\xe0\xcb\x73\x1d\x28\x7b\x36\x89\xc5\ \x08\x12\x96\x1c\x96\xb2\x20\x0b\x5e\x45\xc2\x43\x35\x36\xa9\xbe\ \x16\x01\x18\x82\xea\x4d\x25\xc9\x4d\xd1\x31\x36\x4d\x45\xfd\xad\ \x30\x1a\x3a\xba\x51\xd5\x10\x46\x52\xd0\x84\xe6\x51\x30\x28\xd9\ \x43\x3d\x91\x10\x44\xe0\xa2\xf9\x88\x7c\x72\x22\xc3\xba\x0b\x77\ \x71\xee\xa6\x1d\xeb\x09\xb3\xde\x38\x24\xe5\xc8\x4c\x78\x28\x1a\ \x8b\x40\x3d\x1a\x83\x97\x88\x18\x3d\x2f\xcd\xf3\xe1\xfc\x95\x56\ \xec\x2e\x6b\x04\x35\x7c\x9b\xaa\x8b\x53\x9a\xae\x1c\xe2\x82\xcf\ \xe6\x0a\x7b\xd7\xf4\x68\x09\x13\x46\x04\x10\xb4\x34\x30\x29\x69\ \x0f\x43\x3f\xda\x9f\x43\x02\x78\xad\xac\xd5\x2d\x95\xc5\xcc\xc5\ 
\x07\x24\x46\x0d\x86\x49\x31\xb8\xe9\x7b\xdd\x0c\x68\xa9\x54\xe7\ \xa9\x03\x75\x6c\xdd\x79\x11\x8a\xc2\x57\xd6\xae\xce\x2f\x79\x50\ \x21\xf9\x5b\xae\x2e\xa5\x97\x1b\x66\x8e\x49\x85\x5f\xe7\xb1\x52\ \x45\x09\x67\xa2\x8f\x61\x77\x4d\x17\x8e\xd6\x85\xd7\x30\xed\xd5\ \x52\x89\xa2\x21\x30\x49\x49\x96\xea\x2e\x85\x1a\xe9\xaa\x86\xb2\ \x20\xc2\xe7\x06\x9b\xa8\xac\xbc\x89\xca\xea\xb6\xfd\xd7\x56\x0d\ \x9b\xfa\x20\xc9\x88\x6f\x6b\x56\x58\x7d\xd4\x92\x17\x46\xa7\xd2\ \x93\x03\x46\xa9\xa7\xbb\x38\x4e\x14\x9f\x9e\x6e\xaf\x60\xca\xa2\ \xfd\xd2\x19\x99\x0d\x95\x5e\xba\x12\xf4\x99\x1c\x13\x32\x2d\x14\ \x04\x74\x3c\x45\xcb\xa4\xa8\xaa\x9a\xbb\xb0\x61\x4f\x0d\xee\x74\ \xd8\x14\x30\x1b\x70\xe1\x83\xdc\xba\x9e\x24\x79\x5b\xae\xb5\xce\ \x9d\x10\xf2\x25\xd3\x30\xdc\x8b\x3a\xb8\x7e\x27\x4a\xb2\xb7\x51\ \x4e\xb2\x65\x78\x69\x8f\xcc\x29\x1e\x89\xa2\x54\x15\x63\x53\x0d\ \xe4\xf8\x74\x94\xd7\xb5\xe3\xcc\xa5\x76\x9c\xbe\xdc\x8a\xaa\xea\ \xe6\xcb\x96\xa5\xfe\x62\x5a\xda\xde\xab\xab\x0b\xf6\xf5\x36\xad\ \x05\x5b\xae\xee\xf4\x25\xe9\xf3\x22\x34\x30\x9d\x11\xe7\x78\xf9\ \xc2\xbe\xa3\xef\xcf\x41\xe6\xf2\xa3\x72\xdc\xd3\x19\xb1\x87\xb3\ \xd5\x4d\xb1\xfb\xf9\xb3\x37\xb6\x19\x86\xd8\xdb\xba\x75\xca\x4e\ \x3c\xe6\x55\xf4\x53\xd3\xeb\xa6\x2e\xd6\x32\xcc\xdc\x71\x87\x8c\ \x65\x2e\x7e\x2b\x2e\x7d\x5c\xb0\xdc\x4d\x57\xe6\x4b\xc6\x67\x18\ \x5e\x31\x27\x3f\xdd\xf2\x15\xa4\xe9\x28\x0c\x18\xf8\xaa\xba\x23\ \x26\x5f\xf8\x5e\xd9\x9b\x4a\x4a\x1e\x4e\x7a\x5e\xa1\xf6\xb1\x8a\ \x54\x53\x47\x77\x47\x78\xd7\x8d\x8d\xcf\xcc\xef\x0d\x70\xc8\x67\ \xe7\xa6\xdb\x52\x99\x41\x15\x99\x95\x37\x24\x31\xb3\x30\xc3\x83\ \x11\x69\x26\x72\xfd\x2a\x68\x98\x41\xf6\x84\x36\x09\x6c\xa9\x6c\ \x05\xd3\x5f\xfc\x51\xf2\xb4\x14\xf0\x90\x0f\x56\x80\x96\x4f\x83\ \x7a\xcf\x46\x7b\xd9\x85\xb6\xc6\xcd\x93\x12\xff\x67\x80\x25\xa7\ \xfb\x4b\x07\xb5\xfe\xa0\x85\x77\x66\x0d\x40\x6e\xc0\x84\xe3\x48\ \xd4\x87\x81\x86\xb0\x83\x9a\x6e\x57\x45\xee\x94\xc7\x15\xb9\xef\ \x1c\x11\x88\x39\xdf\x97\x47\x0b\x87\x16\x70\xbf\x97\xa4\x09\x9a\ 
\x60\x9a\x07\x53\x45\xe4\xf8\x25\xa0\x3b\x5c\xd2\xb8\x69\xe2\xca\ \x07\x33\x48\xff\xb8\xe2\xf7\x11\x39\xc1\x29\x93\x47\x85\x70\xa2\ \xc9\x46\x1f\xb2\x12\xa1\xc4\x6d\x44\x23\x70\xba\xc1\xa3\x0b\xfc\ \x7a\xaa\x89\x4a\xf4\xfc\xce\xcf\x59\xdf\xd0\x72\x99\x37\x10\xcc\ \x8e\xc6\x5c\x92\x9a\x03\x3d\x12\x85\x53\x71\xd1\xd5\xf5\xb2\x5b\ \x9b\x27\x6d\x7c\x88\xe4\xc3\x8a\x12\x87\xb1\x15\xef\xbd\x9c\x8d\ \x3f\xef\x48\x92\xf8\x3f\xe0\x22\xee\xba\x16\xb9\xc1\xfe\x18\xc1\ \xec\xdd\x26\x6c\xbb\x93\x4d\x23\x65\xd1\x07\x92\x46\x5e\xd0\xcf\ \xba\x41\x11\xd9\x0e\x64\xe5\x65\xb0\x48\xb4\x9d\x0b\xe5\x0b\xc6\ \xc4\xcf\x8e\xc6\xc6\x43\xf2\x7c\xdd\xa3\x2e\x5a\x38\x2d\x03\x23\ \xb3\x03\xf8\xa1\xa6\x93\x6c\x81\xc7\x8c\xcf\x75\x5c\xd7\x05\x3c\ \x86\x82\x03\x15\x37\xe3\x4d\xa6\x2c\xb6\x23\x10\x28\x66\xa3\xb2\ \x81\x48\x84\x48\x5c\x2e\x16\x8b\xca\x34\x54\x88\xe6\x0e\xa4\x70\ \x07\x6a\xe7\x5d\x0c\xcf\x0e\x62\x40\xc8\x42\x7a\x5a\x02\x4e\x36\ \x87\x51\x43\x43\xe5\xda\x8b\x11\xb3\xf3\x78\x16\x6e\xa9\x75\x4a\ \xe3\xc8\x7d\x02\xf7\x9a\xb9\x5d\xb2\xe1\x43\x81\x7e\xd4\x57\x9a\ \x46\x32\xc9\xd8\x72\xa3\x32\x34\x41\xcb\xb5\x6f\x25\x7e\xe8\xb8\ \xbf\x29\x71\x5b\x31\xa9\x99\xae\x03\x6b\x54\x12\xbb\x93\x26\x9d\ \xf6\x84\x92\x0d\xa8\x14\x5c\xd9\xc9\xc6\x1e\x04\xb3\x77\x0d\x80\ \x1d\xa9\x61\xf9\x39\x40\x7f\x3f\x65\x12\x85\x9b\x8a\xcb\xe3\x96\ \x4c\xd3\x5c\xc0\xf8\xc1\x13\x3f\x80\x58\xfc\x30\x72\xa3\xa7\x2c\ \xef\x36\xb6\xa3\xee\x8f\x1b\x20\x51\x35\x66\x0d\x0b\xa5\xe6\xe5\ \x06\x71\xec\x48\x7d\x0f\x82\x7f\x49\x22\xd1\x1a\xa4\xa4\x80\xe5\ \x65\x22\x16\xae\x43\x7d\xa0\x5d\x2c\xe6\xb6\x94\x11\x59\xba\x45\ \xd1\x9b\x54\x63\x93\xde\x1b\xa4\xfb\x96\xea\x06\x84\x9b\x3a\x3a\ \xb9\xa1\x0c\xae\x5d\x53\x54\x3f\xf0\xa3\x8a\xed\x5e\xbf\xa7\x98\ \x53\x86\x0c\xbd\x5d\x6e\x4f\xec\x68\x31\x32\xfa\x83\x85\x28\x9b\ \x10\x95\x8d\x48\x5c\x01\x68\x24\x47\x9d\x3e\x31\xda\xbb\xc0\x5b\ \xda\xe1\xd4\x36\x83\x5b\xda\x86\xc6\xaf\xc7\xbf\xd9\x13\x22\xf3\ \x93\x53\x93\x29\xa8\xfd\xbd\x13\xc4\xb2\x21\x75\x49\xa7\x84\x80\ 
\x27\x51\x16\x05\x50\x04\x78\x30\x09\xea\xed\x36\x70\x3a\xf5\x15\ \x4d\x94\x2a\x42\xd9\xd5\xf6\xdd\x8c\xcd\x8f\x82\xc8\x5a\x71\xda\ \xff\x37\x2e\xd5\xee\x30\x68\x21\x72\xcf\x00\x00\x00\x00\x49\x45\ \x4e\x44\xae\x42\x60\x82\ \x00\x00\x03\x02\ \x89\ \x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\ \x00\x00\x18\x00\x00\x00\x18\x08\x06\x00\x00\x00\xe0\x77\x3d\xf8\ \x00\x00\x02\xc9\x49\x44\x41\x54\x78\xda\xad\x94\x4d\x68\x13\x41\ \x14\xc7\xff\xb3\x49\x8a\xe8\xc1\x63\x41\xea\x07\x22\x6a\x15\x15\ \x44\xa1\xa0\x08\x11\x41\x28\x88\xb5\xc4\x52\xb1\x85\x2a\x1e\x6a\ \x15\xa9\xa0\xe2\xc1\x9b\xe7\x82\x20\x8a\x20\xf4\xe4\x41\x45\x8b\ \xb4\xe0\x49\xad\x9a\x46\x4d\x3f\x63\x69\x8a\x96\x8a\x5a\xa5\xc4\ \x36\xdb\xcd\x47\xa3\x6d\x36\xb3\xe3\xdb\xd9\x6c\x92\x05\xdb\x08\ \xdd\xc0\xf0\x26\x6f\xdf\xfc\x7f\xef\x63\x76\x19\xca\xfc\x0e\x76\ \x6e\x11\x95\xeb\x57\xc1\x5b\xe1\x73\xf8\x73\x59\x1d\xbf\x7e\x2c\ \xa0\xef\xec\x24\x5b\xee\xfc\xb2\x0f\x6d\x40\x7b\x7d\x2d\x36\xaf\ \xad\x72\xf8\xbf\xa7\x63\xe8\x78\xd2\xbd\x72\xc0\xde\x7b\x1b\xc4\ \xf5\xc6\x3a\x44\xe7\x1e\x38\xfc\xc9\x8c\x82\xb7\xa1\xd5\x18\x6e\ \x9d\x72\x01\xd0\x70\x02\x13\xa9\xc7\x0e\xbf\x96\x02\x7a\xdf\xf9\ \xdc\x01\x5c\x0b\xd4\x63\x72\xfe\x91\xfc\x2f\x68\x19\x06\x10\x4f\ \x02\xa1\xb0\x4b\x80\x2b\x04\xf8\x42\x00\x53\x98\xd3\x32\x84\x82\ \x78\x02\xe8\x1f\xf2\xb8\x04\xa8\x3f\x8e\x4f\xa9\x2e\x12\xb6\x2a\ \x30\x21\x71\x0d\x18\x8e\xb8\x04\xb8\x74\xac\x0e\x9f\xe7\x9f\x92\ \x38\x93\x10\xb3\x92\x19\x95\x63\x2c\xfa\x1f\x2d\x32\xaf\x61\x39\ \xc8\xb9\xa3\xb5\x18\x4f\x77\xe5\xdb\x43\x55\x90\x9d\x4b\x70\x9a\ \xc1\x62\xb9\xa3\x60\xfe\xce\x9d\xe2\xc6\x19\x3f\xa6\x13\xdf\x20\ \x98\x42\x22\xcc\x1a\xa4\x14\x52\x20\xb8\x82\x71\x35\x44\xb9\x7b\ \xa5\xb0\x01\x45\x5a\x9d\x02\x16\xb9\xc0\xee\xca\x1a\xf2\x19\x85\ \xca\xe4\x59\x0e\x78\x7d\x1e\x3c\x7c\x1e\x06\x3b\x74\xbf\x5a\x9c\ \x3f\xb5\x15\x91\xe9\x10\x1d\xf5\x58\x41\x82\x41\xa7\x20\x32\x72\ \xcf\x68\xa8\x06\x3d\x2b\x54\x00\x4b\xcc\x10\x02\x9c\x96\x41\x20\ 
\x23\xef\x13\x26\x88\xf6\x0a\x25\x34\xf8\x21\x0d\x76\xe0\x6e\xb5\ \x68\x6b\xda\x41\x80\x3e\xca\xd2\xe7\x08\xe2\x32\x23\x25\x2f\x56\ \x14\xb6\x67\x21\x81\xb0\x5a\x06\x51\xdc\x9b\xf6\x4f\x56\xc1\xd8\ \x40\x12\xac\xe6\xf6\x76\xd1\xd6\xbc\x0b\x23\x3f\x83\x50\x98\xb7\ \x28\x6c\x28\x56\x56\xa5\xc2\x54\x0d\xb7\xb3\xb7\xdb\x91\x9f\x60\ \xce\xb0\x6e\x8c\x95\x9c\x02\xfa\x54\xe1\xe3\x7b\x02\xec\xbf\x65\ \x01\x86\x09\xc0\x94\x8a\x42\x06\xa5\xc2\x5c\xb0\x62\x5b\x4a\x84\ \xed\x58\x94\x08\x1b\xd2\xc1\x91\xe3\x74\x85\x83\x04\xd8\xd7\xb1\ \x4d\x1c\xf1\x6f\x44\x74\x3a\xba\xe4\x4d\xd8\x53\xe9\xc7\x94\x11\ \x74\x08\xc7\x62\x39\xf9\x6c\x4d\xc5\xbf\xcf\x64\xb2\xc0\xc4\x68\ \x06\xac\xfa\xe6\x26\x91\x4e\x65\x97\x14\xd7\x7e\xcf\xe1\x72\x4b\ \x00\x5f\x79\xd0\x91\xf1\x0c\x01\xc2\x2f\xd5\xf2\xd7\xb4\x5c\x40\ \xd5\xd5\x75\xa2\xa5\xe1\x30\x26\x75\xbb\x02\x45\x52\xd4\xd9\xac\ \x04\x64\xee\x2c\xac\xec\x4d\x36\x01\xed\x2d\x27\x11\x4e\x74\x17\ \xfa\x6b\xf6\x5b\x55\x0d\x0c\xbe\x88\xbb\x03\xb8\xd0\x1c\xc0\x60\ \xf2\x59\x7e\x90\x1e\x59\x89\x36\xab\x63\xe8\x95\x4b\x80\xb6\xd3\ \x01\xf4\xa7\x7a\xa4\xb0\x3d\x04\x95\x00\x23\xaf\x5d\x02\xb4\x36\ \x05\x30\xa0\xf5\xc0\xd6\x37\x87\xad\xc5\x75\x44\xde\xb8\x05\xa0\ \x0a\xc2\x89\x1e\xc7\x2d\xd2\x66\x74\x8c\x06\x5d\x02\x5c\x6c\x6e\ \x44\x88\x66\x20\x2b\xc8\xbf\xb9\x09\x02\x44\x7a\x67\xdd\x01\x98\ \x36\x9d\x4d\x16\x5a\x24\x6d\x8e\x4b\x5b\x0e\xf0\x17\xa5\x35\xdf\ \xe4\xdf\x81\x57\x9f\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\ \x82\ \x00\x00\x06\x19\ \x89\ \x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\ \x00\x00\x2e\x00\x00\x00\x2a\x08\x06\x00\x00\x00\xcc\x28\x69\x21\ \x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\ \x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x12\x00\x00\x0b\x12\ \x01\xd2\xdd\x7e\xfc\x00\x00\x00\x18\x74\x45\x58\x74\x53\x6f\x66\ \x74\x77\x61\x72\x65\x00\x41\x64\x6f\x62\x65\x20\x46\x69\x72\x65\ \x77\x6f\x72\x6b\x73\x4f\xb3\x1f\x4e\x00\x00\x04\x11\x74\x45\x58\ 
\x74\x58\x4d\x4c\x3a\x63\x6f\x6d\x2e\x61\x64\x6f\x62\x65\x2e\x78\ \x6d\x70\x00\x3c\x3f\x78\x70\x61\x63\x6b\x65\x74\x20\x62\x65\x67\ \x69\x6e\x3d\x22\x20\x20\x20\x22\x20\x69\x64\x3d\x22\x57\x35\x4d\ \x30\x4d\x70\x43\x65\x68\x69\x48\x7a\x72\x65\x53\x7a\x4e\x54\x63\ \x7a\x6b\x63\x39\x64\x22\x3f\x3e\x0a\x3c\x78\x3a\x78\x6d\x70\x6d\ \x65\x74\x61\x20\x78\x6d\x6c\x6e\x73\x3a\x78\x3d\x22\x61\x64\x6f\ \x62\x65\x3a\x6e\x73\x3a\x6d\x65\x74\x61\x2f\x22\x20\x78\x3a\x78\ \x6d\x70\x74\x6b\x3d\x22\x41\x64\x6f\x62\x65\x20\x58\x4d\x50\x20\ \x43\x6f\x72\x65\x20\x34\x2e\x31\x2d\x63\x30\x33\x34\x20\x34\x36\ \x2e\x32\x37\x32\x39\x37\x36\x2c\x20\x53\x61\x74\x20\x4a\x61\x6e\ \x20\x32\x37\x20\x32\x30\x30\x37\x20\x32\x32\x3a\x33\x37\x3a\x33\ \x37\x20\x20\x20\x20\x20\x20\x20\x20\x22\x3e\x0a\x20\x20\x20\x3c\ \x72\x64\x66\x3a\x52\x44\x46\x20\x78\x6d\x6c\x6e\x73\x3a\x72\x64\ \x66\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x77\x33\ \x2e\x6f\x72\x67\x2f\x31\x39\x39\x39\x2f\x30\x32\x2f\x32\x32\x2d\ \x72\x64\x66\x2d\x73\x79\x6e\x74\x61\x78\x2d\x6e\x73\x23\x22\x3e\ \x0a\x20\x20\x20\x20\x20\x20\x3c\x72\x64\x66\x3a\x44\x65\x73\x63\ \x72\x69\x70\x74\x69\x6f\x6e\x20\x72\x64\x66\x3a\x61\x62\x6f\x75\ \x74\x3d\x22\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ \x20\x78\x6d\x6c\x6e\x73\x3a\x78\x61\x70\x3d\x22\x68\x74\x74\x70\ \x3a\x2f\x2f\x6e\x73\x2e\x61\x64\x6f\x62\x65\x2e\x63\x6f\x6d\x2f\ \x78\x61\x70\x2f\x31\x2e\x30\x2f\x22\x3e\x0a\x20\x20\x20\x20\x20\ \x20\x20\x20\x20\x3c\x78\x61\x70\x3a\x43\x72\x65\x61\x74\x6f\x72\ \x54\x6f\x6f\x6c\x3e\x41\x64\x6f\x62\x65\x20\x46\x69\x72\x65\x77\ \x6f\x72\x6b\x73\x20\x43\x53\x33\x3c\x2f\x78\x61\x70\x3a\x43\x72\ \x65\x61\x74\x6f\x72\x54\x6f\x6f\x6c\x3e\x0a\x20\x20\x20\x20\x20\ \x20\x20\x20\x20\x3c\x78\x61\x70\x3a\x43\x72\x65\x61\x74\x65\x44\ \x61\x74\x65\x3e\x32\x30\x31\x30\x2d\x31\x31\x2d\x31\x38\x54\x32\ \x31\x3a\x33\x31\x3a\x33\x38\x5a\x3c\x2f\x78\x61\x70\x3a\x43\x72\ \x65\x61\x74\x65\x44\x61\x74\x65\x3e\x0a\x20\x20\x20\x20\x20\x20\ 
\x20\x20\x20\x3c\x78\x61\x70\x3a\x4d\x6f\x64\x69\x66\x79\x44\x61\ \x74\x65\x3e\x32\x30\x31\x30\x2d\x31\x31\x2d\x31\x38\x54\x32\x32\ \x3a\x30\x39\x3a\x34\x30\x5a\x3c\x2f\x78\x61\x70\x3a\x4d\x6f\x64\ \x69\x66\x79\x44\x61\x74\x65\x3e\x0a\x20\x20\x20\x20\x20\x20\x3c\ \x2f\x72\x64\x66\x3a\x44\x65\x73\x63\x72\x69\x70\x74\x69\x6f\x6e\ \x3e\x0a\x20\x20\x20\x20\x20\x20\x3c\x72\x64\x66\x3a\x44\x65\x73\ \x63\x72\x69\x70\x74\x69\x6f\x6e\x20\x72\x64\x66\x3a\x61\x62\x6f\ \x75\x74\x3d\x22\x22\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ \x20\x20\x78\x6d\x6c\x6e\x73\x3a\x64\x63\x3d\x22\x68\x74\x74\x70\ \x3a\x2f\x2f\x70\x75\x72\x6c\x2e\x6f\x72\x67\x2f\x64\x63\x2f\x65\ \x6c\x65\x6d\x65\x6e\x74\x73\x2f\x31\x2e\x31\x2f\x22\x3e\x0a\x20\ \x20\x20\x20\x20\x20\x20\x20\x20\x3c\x64\x63\x3a\x66\x6f\x72\x6d\ \x61\x74\x3e\x69\x6d\x61\x67\x65\x2f\x70\x6e\x67\x3c\x2f\x64\x63\ \x3a\x66\x6f\x72\x6d\x61\x74\x3e\x0a\x20\x20\x20\x20\x20\x20\x3c\ \x2f\x72\x64\x66\x3a\x44\x65\x73\x63\x72\x69\x70\x74\x69\x6f\x6e\ \x3e\x0a\x20\x20\x20\x3c\x2f\x72\x64\x66\x3a\x52\x44\x46\x3e\x0a\ \x3c\x2f\x78\x3a\x78\x6d\x70\x6d\x65\x74\x61\x3e\x0a\x20\x20\x20\ \x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ \x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ \x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ \x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ \x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ \x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ \x20\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ \x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ \x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ \x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ \x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ \x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ \x20\x20\x20\x20\x20\x20\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\ 
\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ \x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ \x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ \x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ \x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\ \x20\x20\xd7\xc6\x55\x1a\x00\x00\x01\x7a\x49\x44\x41\x54\x58\x85\ \xd5\xd9\x31\x4a\x03\x41\x14\xc6\xf1\xbf\x5a\x09\x1e\x60\x21\x60\ \x25\x04\x6c\x85\x54\x82\x60\x65\x27\x88\x07\xf0\x00\x1e\xc0\x56\ \x10\x04\x21\x67\xb0\x15\x16\xd2\xa6\xb5\xb0\xb1\x10\x21\x95\xad\ \x20\x04\x84\x05\x41\x50\x84\xc0\x58\x6c\x06\xe2\x26\xb3\x33\xb3\ \xb3\xf2\xf6\x7d\xf0\x41\x52\xed\xaf\xc8\xe4\xed\xcc\x40\xbb\xe9\ \x01\x77\xc0\x1b\xf0\x3e\xff\xdc\x6f\xf9\x19\xad\xa7\x0f\x14\x80\ \xa9\xf4\x13\x38\x12\x74\x79\x93\xb3\x8c\xb6\x9d\x01\x17\x72\xb4\ \xfa\x7c\xe0\x86\xdb\xde\x02\x9b\x52\x40\x57\x7c\x68\xdb\x47\x20\ \x13\x32\xae\x4c\x28\xdc\x00\xaf\xc0\x9e\x0c\x73\x39\x31\x70\x03\ \x7c\x01\xa7\x22\xd2\x4a\x62\xe1\xb6\x97\x12\x58\x9b\x9e\x03\x15\ \xda\x1c\x81\x45\x9b\x01\xe3\x44\xb8\x01\x9e\x81\xed\xd8\x87\x2f\ \x4e\xbc\x54\x40\x4a\xa7\xc0\x20\x14\xed\x9a\x78\x52\xfd\x01\xce\ \x42\xe0\x75\x13\x4f\xb2\xd7\xc0\x46\x1d\x3c\x64\xe2\x49\x75\x0c\ \x6c\xb9\xe0\xd2\x38\x5f\x27\xc0\x8e\x46\xb8\xa1\x5c\x83\x07\x1a\ \xe1\x86\x72\xd1\x9e\x6b\x84\xdb\xde\x00\xac\xcd\xbf\x68\xcb\xb1\ \x56\xf8\x83\x56\xf8\xf7\xba\xb4\xa0\x69\xb4\xc2\x9f\xb4\xfe\x54\ \x4e\x40\xfe\xef\x2d\xb6\x43\xab\x97\x86\x84\x76\x86\xc2\x01\x54\ \x00\x87\x54\x22\x8d\xf2\xf5\x05\xc7\x4b\x96\xda\xd7\xda\x51\x07\ \x80\xab\x3a\xc4\xb3\x91\xd8\x45\xe9\xd6\x0d\xca\xcd\x72\x4e\xb9\ \x59\x95\x44\x4f\x81\xfd\x50\x74\x6a\x06\xc0\x7d\x0b\xe8\x09\x0d\ \x8e\x27\x52\x93\x25\xa2\x45\x0e\x84\x6c\x9a\xa2\xaf\x24\xb0\x8b\ \x89\x05\xab\x3c\xf4\x54\x79\xcc\xdc\xb9\x83\x7d\xb5\x57\x29\xbe\ \x09\xdc\xd9\xcb\x2b\xd7\x04\xee\xfc\x75\x21\xfc\x9d\xc0\x05\xff\ \x78\x41\xfb\x0b\xfa\x0e\x62\x4e\xbe\x1a\xd5\xad\x00\x00\x00\x00\ 
\x49\x45\x4e\x44\xae\x42\x60\x82\ \x00\x00\x02\x4a\ \x89\ \x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\ \x00\x00\x80\x00\x00\x00\x80\x08\x06\x00\x00\x00\xc3\x3e\x61\xcb\ \x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\ \x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\ \x79\x71\xc9\x65\x3c\x00\x00\x01\xec\x49\x44\x41\x54\x78\xda\xec\ \xdc\xdd\x4d\xe3\x50\x14\x85\x51\x83\x78\x4f\x3a\x80\x0e\x28\x21\ \x25\x50\x42\x4a\x48\x09\x94\x40\x09\x2e\xc1\x25\x58\x54\xe0\x12\ \x42\x07\x81\x0a\xc0\x0f\x44\x42\x0a\x12\x3f\xb1\x73\xcf\xf5\x59\ \x4b\xba\xaf\xa3\x51\xf6\x37\x33\x99\x23\xcd\x34\x0d\x00\x00\x00\ \x00\x00\x00\x00\x40\x2c\xfd\xf8\xde\x0b\xbf\xbe\xe6\x0f\xf0\x5a\ \x43\xb9\x09\x40\x00\x08\x00\x01\x20\x00\x04\x80\x00\x10\x00\x02\ \x40\x00\x08\x00\x01\x20\x00\x04\x80\x00\x10\x00\x02\x40\x00\x08\ \x00\x01\x20\x00\x04\x80\x00\x10\x00\x02\x40\x00\x08\x00\x01\x20\ \x00\x04\x80\x00\x10\x00\x02\x40\x00\x08\x00\x01\x20\x00\x04\x80\ \x00\x10\x00\x02\x20\x88\x9b\x09\x7e\x8c\x4d\xc1\x9f\xff\x3a\xc0\ \x67\xb8\x2e\xfc\x19\x3c\x97\xfe\x00\xda\xa6\xfc\xff\xd7\x9b\xf5\ \xb5\x51\x7e\x27\x11\x41\xe2\xf1\x8f\x06\xa3\x5c\xec\x0d\x11\xbf\ \x4f\xac\x44\x70\xb1\xf1\x57\x51\xbf\x54\x8a\xa0\xb2\xf1\xaf\x66\ \x8a\xe0\x25\xc8\x37\xf4\x25\x79\x1d\xdf\xdd\xf8\xde\x6a\xf8\xc9\ \xde\x8f\xef\xe0\x57\xec\x64\xef\xf0\xf9\x99\x56\x45\x04\x89\xc7\ \xff\x7a\x24\x32\xe2\x79\x6f\x53\xfb\x9f\x5d\x5b\x23\xfe\xfb\x6d\ \x97\xf2\x05\x46\x04\x89\xc7\x3f\xda\x19\xf5\xd7\x6f\xb7\xd4\xbf\ \xca\x38\x19\x57\x78\xe2\x15\x81\xf1\x27\xd7\x19\xfb\xe4\x75\x99\ \xae\x5a\x4e\xc6\x15\xdd\xf7\x45\x60\xfc\x59\x23\xd8\x1b\x3f\xb7\ \xac\x27\xe3\xaa\x4f\xbc\x22\x30\xbe\x08\x8c\xef\x64\xec\xc4\x2b\ \x02\xe3\x8b\xc0\xf8\x4e\xc6\x3f\xbc\x27\x73\xe6\x8d\xa0\x35\x63\ \xde\x08\x8c\x3f\x91\x1a\x4f\xc6\x83\xd9\xa6\x3d\x19\x0f\x8d\x13\ \xaf\x08\x8c\x9f\xdb\x6d\xf0\x6b\xe1\xc1\xf8\x79\x4f\xc6\x4e\xbc\ \x89\x23\x30\x7e\x01\x0f\x81\x02\xd8\x98\xa3\x8c\x08\x27\x63\x27\ 
\xde\xc4\x11\x18\x3f\x88\xc7\x02\xe3\xef\x7c\xec\xb1\x5c\xf2\x64\ \xec\xc4\x9b\x38\x02\xe3\x07\xd7\xcf\x38\x7e\xe7\xe3\x8d\x6f\xae\ \x93\xb1\x13\x6f\xe2\x08\x8c\x5f\x69\x04\x7b\xe3\xe7\x76\xee\xc9\ \xd8\x89\x37\x71\x04\xc6\x5f\x58\x04\xc6\x4f\xee\x2f\x27\x63\x27\ \xde\xc4\x11\x18\x3f\x71\x04\xc6\x4f\xe2\xbb\x93\xb1\x7f\xbc\x91\ \x38\x02\xf7\xfd\xc4\x11\x18\x1f\x00\x00\x00\x00\x00\x00\x00\x58\ \x94\x0f\x01\x06\x00\x82\x46\x96\x6e\xb0\x71\x08\xc8\x00\x00\x00\ \x00\x49\x45\x4e\x44\xae\x42\x60\x82\ \x00\x00\x05\xe1\ \x89\ \x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\ \x00\x00\x18\x00\x00\x00\x18\x08\x06\x00\x00\x00\xe0\x77\x3d\xf8\ \x00\x00\x05\xa8\x49\x44\x41\x54\x78\xda\x85\x56\x7b\x4c\x53\x67\ \x14\xff\xdd\x47\x5b\x68\x0b\xa5\x94\x87\x54\x14\x98\xc8\xe6\x40\ \x1e\x22\x73\x80\x0a\x66\xd9\x7c\x6c\x73\xfe\xb1\x21\x5b\x66\x84\ \x85\x2c\x0a\x71\x89\x8b\xc9\x5c\xf6\x8a\x9b\xdb\xc2\x96\x3d\x12\ \xc3\x23\x8e\xcc\x57\xdc\x90\x2d\x99\x43\xe6\x63\x51\x03\xb3\x80\ \xc2\x08\xa0\x12\x03\x32\x3a\xb7\x82\xda\x52\xda\x0a\xf4\x71\x6f\ \xef\xbd\xfb\x7a\xa1\x2a\x32\xf4\x24\xfd\xda\x9c\xef\xde\xf3\xfb\ \xce\xef\xfc\xce\xf9\x4a\xe1\x21\xa6\x5f\x5f\x57\x20\x08\x52\xb1\ \x28\x0a\x6b\x45\x41\x4a\x52\x45\xa8\x65\xbf\xcf\xe9\x06\xcd\x50\ \x66\x9a\x66\xce\x30\x0c\x55\xef\x38\x55\xd6\x32\x57\x0c\xea\xff\ \x9c\x91\x2f\x1e\xcc\xe0\x7d\x7c\xa7\x36\x56\xaf\x48\x4a\x35\x22\ \x31\xc9\x80\xc8\x88\x50\x84\x84\xb2\xf2\xbe\xd7\xe3\xc7\x98\xd3\ \x83\xbf\xcd\x76\x98\xfb\x46\x30\x71\xdb\xc1\x2b\x54\x8a\x9c\xb1\ \x13\x25\xbd\x8f\x04\x30\x6c\x3c\x50\x45\xd1\x6c\xf9\xd3\x1b\xd2\ \xb1\x38\xd9\x80\x50\x35\x0b\x4a\xa2\x20\x8a\xd2\x8c\xe7\x68\x9a\ \x82\x44\x49\xf0\xb8\xfd\xb8\x3e\x68\xc7\xc5\x93\x97\x21\x89\xfe\ \x6a\x7b\x63\x69\xc5\x9c\x00\x86\x8d\x47\x8e\x85\x19\xf5\x45\x1b\ \x5e\x48\x45\x74\x8c\x1a\x12\x09\x0c\x12\x97\xa2\xa6\x1e\xa4\xa6\ \x9f\x96\x24\xd9\x2d\x7f\x07\x36\x28\x02\x64\xb3\xba\x71\xb2\xa9\ \x0f\xe3\x23\x8e\x06\x7b\xe3\x96\xcd\xb3\x00\x62\x5e\xa9\xaf\xd2\ 
\x46\x69\xcb\x8b\x5f\xcd\x82\x4e\xab\x04\xe1\x1c\x1c\xf9\xdc\xe1\ \x24\x58\xc6\x79\x4c\x0a\x22\x24\xff\xf4\xe9\x09\x53\x61\x2c\x8d\ \x28\x0d\x03\xbd\x92\x45\x80\x39\x8a\xa1\xe0\x9a\xe0\x50\xff\x63\ \x37\x26\x46\x27\xaa\xad\x3f\x15\x57\xdc\x05\x88\xdf\xf2\x73\x06\ \xef\x17\x7b\x4a\xcb\x72\x61\x8c\xd2\xc0\xcd\xf9\x61\x76\x05\xa2\ \xb1\x88\x51\x33\xb8\x35\xe9\xc7\x38\xc7\x11\x4a\x28\xf9\x05\x99\ \x2c\xb2\x10\x48\xf8\x45\x20\x96\xd0\x98\x18\xce\x40\x4d\xc0\x46\ \x46\x27\x71\xa0\xae\x1d\x0a\x96\xce\xb4\x1c\x79\xb9\x57\x06\x88\ \x29\x6a\xe0\xd6\x6f\x5a\xaa\xc8\xcf\x8c\x83\xc5\xe5\xc3\x0d\xa7\ \x1f\x12\x0d\x28\xc9\xf2\xf5\xaa\x28\x0c\x38\x39\x1c\xed\xbf\x03\ \x17\xc9\x46\x20\x7e\xfa\x81\xba\xc9\xe5\x21\x4b\x42\x04\x8b\x78\ \x9d\x0a\xad\x3d\x37\x71\xea\xf8\x15\xde\xda\x50\xa4\xa4\x16\x94\ \x34\x16\x28\x34\xca\xe6\xf7\xb7\xaf\xc0\x80\x83\xc3\x98\x57\x9a\ \x0a\x40\xa0\x39\xaf\x1f\x5f\x16\x46\x23\x52\xa3\x94\x4f\x7c\xbc\ \xdf\x81\x5f\x07\x26\xa1\x0a\xa5\x89\x4c\x49\xf1\x89\x33\x58\x7a\ \x49\x0c\x64\x44\x14\x18\x42\x21\x45\xaf\xc4\xde\x9a\x4b\xe0\x27\ \xb9\x42\x2a\xbe\xb4\xb1\x26\x7f\x55\xd2\xb6\xbc\xec\xf9\x18\x74\ \xf0\x60\xa6\x5f\xa1\x08\x1d\x0e\x97\x17\x5f\xac\x99\x87\x68\x52\ \x93\xa0\xb9\xdc\x3c\xf6\x75\xd8\x30\xe4\xe2\xa1\x09\x53\x42\xa1\ \x60\xe4\x6a\x4b\xd3\x48\x02\x81\x4d\xd6\x2b\xd0\xd6\x35\x8c\xd6\ \x0b\xe6\x5a\xca\x58\xd2\x34\xb4\xed\x8d\xac\xa4\x09\x28\xe4\x13\ \x05\x0b\x43\x1a\x08\xb7\x6d\xe3\xd8\xbb\xc6\x88\xd8\xf0\x90\x59\ \xbd\x32\x68\x9b\xc4\xee\xd3\xc3\x30\xe8\x94\xd0\x47\x6a\xe4\xac\ \x25\x69\x2a\x23\x89\x44\xd0\x82\x47\xed\xf7\xdd\x66\x2a\xae\xe4\ \x84\x54\x5e\x91\x0f\x1f\x27\x4c\x29\x64\x4a\x99\x18\xf6\x02\xe7\ \x4d\x16\xb4\x54\xa4\x60\x61\xa4\x7a\xce\x6e\x3f\xda\x75\x0b\x95\ \x67\x47\xb0\x3a\x2d\x0a\x7a\x83\x86\xf4\xcc\x14\x90\x4a\xc9\xa0\ \xba\xaa\x15\xd4\xa2\xb7\xcf\x49\x6f\x6e\xcd\x86\xcf\xe7\x07\x39\ \x34\x1c\x5e\x11\xd7\x46\xc9\x6f\x96\x42\x4f\x87\x05\xa6\xb7\x96\ \x60\x01\x51\xd6\xc3\x4d\xc4\xce\x5f\xcc\xb8\x6e\xf5\x20\x6f\x99\ 
\x91\xd0\x44\x00\x54\x2c\xf6\x1f\xea\x02\xf5\xc4\xee\x56\x69\xfb\ \xeb\xe9\x10\x7c\x02\xfa\x9c\x3c\xc6\xdc\x22\x18\x92\x2f\x4b\xb8\ \x6d\x3d\x3f\x08\xd3\xae\x34\x2c\x34\x3c\x0a\x00\xd8\xfc\x5d\x2f\ \xda\xfa\x9c\x58\xb7\x29\x15\x4f\x86\x2b\xc0\xa8\x18\x54\x1d\xee\ \x01\x95\xb0\xeb\x82\xf4\x41\x59\x36\xba\x6f\xbb\x61\xf7\x88\x60\ \xa7\x35\xc8\x92\x14\xcf\x36\x5d\x85\xe9\x9d\x65\x48\x88\xd2\xce\ \x19\xb8\xf2\xf7\x41\x7c\x7c\xa8\x1f\x69\x2b\x12\x91\x9a\x61\x84\ \x8f\x28\x4f\x4f\x54\x96\x15\xab\xc6\x27\x75\x24\x83\x94\xdd\x6d\ \x43\xeb\x9f\x7f\x3c\xc9\x15\x90\xe7\x7d\x02\x57\x10\x80\xc6\x86\ \x6e\xb4\xbd\xb7\x1c\x89\xb1\x61\xb3\x02\xb7\x0e\x8c\x62\xdd\x1e\ \x13\xe6\x27\x1b\x91\xbf\x26\x99\xa8\x8f\xcc\xab\x40\xb7\x13\x9a\ \x45\xa2\x57\x1d\x91\xeb\xa9\xdf\xfa\xcd\x54\xea\x47\x9d\x35\x29\ \x19\xf3\xb6\xc5\xc5\x68\x67\x0c\x34\x25\xe1\xb0\xe1\x60\x07\xda\ \x3f\xcc\x41\xe2\xbc\xf0\xbb\x7e\xcb\xd8\x04\x9e\xfd\xf4\x22\x9c\ \x1e\x09\xf9\xcf\x2d\xc1\xfc\x38\x1d\x78\x5e\x90\x0b\x1b\xb4\xc0\ \x20\xbc\x69\x9d\xc0\x40\xef\xad\x5a\x2a\xe3\xb3\xee\x02\x4d\x44\ \x48\x73\x6e\x4e\x3c\x38\x5e\xb8\x07\x10\xa2\xc0\xe1\xda\x16\x74\ \xec\x59\x49\x00\xc2\x64\xf1\xbd\xf4\x55\x3b\x4e\x9b\x86\xb1\x7a\ \xd3\x32\x64\xa6\x1b\x49\x60\x5e\x9e\x59\x0f\x9a\x92\xd4\xaf\xbd\ \xd3\x82\x49\xa7\xb7\x50\x1e\x15\xe9\x9f\xf7\x70\x2b\x72\xe3\x15\ \xf2\x90\x9b\xce\x42\x45\x00\xf6\x7f\xdb\x8c\x3f\x2b\xf3\xd0\xd4\ \x63\xc5\x8e\x4a\x13\x96\xaf\xcb\x42\x41\x61\xb2\x7c\x42\x51\x10\ \x20\xcd\x8e\x2d\xef\x05\x86\xde\xa5\x76\x0b\x7f\xf9\xdd\x4c\xa5\ \x0c\x90\xfd\xcd\xd5\x0c\x46\xc9\xf4\x3c\x93\x17\x2f\xf7\xbb\xac\ \x63\x42\xd1\x81\xda\x3f\x60\x1d\xf8\x17\x11\x8b\x17\x60\x73\x69\ \x2e\x74\xc4\xc7\xf3\xe2\x0c\x3a\xee\xb7\x40\xf7\x07\x3a\xee\x5c\ \x9b\x05\x02\x27\x64\x76\xed\x4c\xeb\xbd\x3b\xae\x73\xf6\x5d\xab\ \x8a\x88\x0e\x2d\x2f\x58\x1a\x03\x9f\x4c\x15\x05\x81\x04\xb2\x39\ \x3c\x48\x30\xea\xc0\xf9\xf8\x59\x97\xce\xcc\xe0\x24\x6b\x42\x4d\ \xcb\x15\x2b\x9c\x36\x4f\x75\xe7\x8e\x25\xf7\xc6\x75\xd0\x9e\xaa\ 
\x19\x38\x66\x30\x84\x16\xe5\xa7\x1a\xc8\xf0\x92\xe4\xe1\x35\x35\ \x9e\x25\xcc\x71\x68\x79\x9f\x0e\x8c\x71\x42\x4d\x6b\x9f\x1d\x76\ \xbb\xa7\xa1\x63\x7b\xca\xec\x0b\x27\x68\xb9\x75\x43\x55\x8c\x12\ \xe5\xb9\x69\xd1\x88\x26\x73\xde\x1f\x00\x92\x66\x03\xc8\x6c\x90\ \x85\x25\x81\x6d\xe4\xda\x6c\xbf\x6a\x23\xb4\xa0\xba\xbd\xec\xb1\ \xb9\xaf\xcc\xa0\xad\x3c\x74\x23\xc3\x2f\x8a\x9d\x7a\x9d\x4a\xb1\ \xc8\xa8\x46\x8c\x96\x85\x9a\x74\x66\x90\xfb\x00\xd7\x6e\xd2\xf9\ \xd6\x09\x3f\xfe\x1a\x71\x93\xa9\xeb\xe3\x59\x9a\xce\x31\x6d\x4d\ \x78\xf4\xa5\x7f\xbf\x15\xfe\xf0\x4f\x81\x28\x52\xc5\x02\x4d\xad\ \x25\x94\x25\x85\x84\x30\xb2\xdf\xeb\x15\x02\x94\x98\x19\x51\x3a\ \x43\xd3\x52\x7d\xf3\x6b\x0b\xe7\xfc\xdb\xf2\x1f\xf9\xfb\x63\xb7\ \x6a\x50\xe2\x8e\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\ \ \x00\x00\x61\x49\ \x89\ \x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\ \x00\x00\x80\x00\x00\x00\x80\x08\x06\x00\x00\x00\xc3\x3e\x61\xcb\ \x00\x00\x20\x00\x49\x44\x41\x54\x78\x9c\xec\xbd\x79\x9c\x5d\xc7\ \x55\xef\xfb\xad\xaa\x3d\x9d\xb1\x4f\x8f\xea\x41\x43\x4b\xb2\x2c\ \x79\x90\xa7\x90\xd8\xb1\xe3\x58\x71\x92\x1b\x63\x93\x6b\x03\xc9\ \x4d\x3e\x90\x3c\x32\xf1\x02\x01\x92\x7c\x80\x4b\x80\xf7\x80\x1b\ \xee\x85\xc7\x94\x10\x20\x18\x6e\x02\x49\x78\x01\x42\x70\x06\x67\ \x32\x9e\x87\x78\x9e\x6d\xc9\x96\x25\xcb\x9a\xd5\xea\x56\xcf\xdd\ \xa7\xcf\xb8\x87\x7a\x7f\x54\xed\x7d\x4e\xcb\xb2\x9c\x84\xbc\x0b\ \x97\xa4\xf4\xd9\xea\x73\xf6\xbc\x6b\xad\xf5\x5b\xbf\xb5\x6a\xd5\ \x3e\x42\x6b\xcd\x0f\xdb\x0f\x6e\x93\xff\xd6\x37\xf0\xc3\xf6\x6f\ \xdb\x7e\xa8\x00\x3f\xe0\xed\x87\x0a\xf0\x03\xde\x7e\xa8\x00\x3f\ \xe0\xed\x87\x0a\xf0\x03\xde\x9c\xef\xd7\x89\x84\x10\xdf\xaf\x53\ \xad\x6a\x57\x5d\x7d\x6d\x05\xb8\x00\x40\x29\x75\x81\xeb\xba\x15\ \x29\x04\x89\xd6\x84\x61\x48\x1c\xc7\x87\x80\x43\x00\x37\xdf\xf4\ \xb5\xbb\xff\x7f\xb9\x89\xff\x20\xed\x54\x11\x9f\xf8\x7e\x85\x81\ \xdf\x0f\x05\xb8\xea\xea\x6b\x2b\x52\x88\x1d\x95\x4a\xcf\x15\x83\ 
\x83\x43\x17\xf4\xf7\x55\x2e\x18\x1a\x1e\xae\xac\x1d\x1b\x65\x68\ \x70\x88\x4a\xa5\x07\xdf\xf7\x10\x42\xa1\xd1\x84\xed\x16\x4b\xd5\ \x2a\x33\x27\x66\x99\x98\x3c\xce\xf1\x63\x13\x4c\xcf\xce\x1e\x9a\ \x9b\x9d\x7b\x6a\x71\x71\xf1\xe9\x44\xeb\xbb\x7f\xa8\x14\x9d\xf6\ \xef\x52\x01\xae\xba\xfa\xda\x4a\x10\x04\xd7\xad\x1b\x1b\xbb\x76\ \xc3\x86\x75\xd7\xbd\xea\x55\xaf\xe2\x9c\x73\xce\xa6\xaf\xaf\x92\ \xed\x13\xc7\x31\xf5\x5a\x8d\x66\xb3\x49\x3b\x0c\x21\x49\x00\x70\ \x5d\x17\x2f\x08\x28\x14\x0a\xb8\xae\x9b\xed\x5f\xad\xd5\x78\x7e\ \xcf\x5e\x1e\x7a\xf8\x11\x9e\x79\x76\xcf\xe2\xd4\xd4\xd4\x8d\xcd\ \x66\xf3\x6b\x37\xdf\xf4\xb5\x1b\xff\x75\x4f\xf9\xbf\x77\xfb\x77\ \xa5\x00\x57\x5d\x7d\xed\x05\xa3\xc3\xc3\x1f\xda\x72\xe6\x96\x77\ \x5d\x75\xd5\x7f\xe2\x82\xf3\xb7\x03\xd0\x6e\xb6\x39\x7e\xfc\x18\ \x53\x53\x53\xcc\x4c\xcf\x30\x3f\xbf\xc0\xf2\xf2\x12\xd5\x95\x2a\ \xcd\x46\x93\x30\x8c\xd0\x49\x82\x90\x46\x01\x82\x20\x20\x5f\x28\ \x50\x2e\xf5\xd0\xd7\xd7\x4b\xff\xc0\x20\xc3\xc3\x43\x8c\x8e\xae\ \xa5\x50\x2a\x00\xb0\x6f\xdf\x7e\x6e\xbd\xf5\x36\x1e\x7b\xe2\xa9\ \x43\xd3\x27\x4e\xfc\x5d\xa2\xf5\xe7\x6e\xbe\xe9\x6b\x87\xbe\x2f\ \x0f\xfe\xbf\x51\xfb\x77\xa1\x00\x57\x5f\x73\xdd\x8e\x75\xeb\xc6\ \x7e\xe7\xb2\xd7\xbc\x66\xc7\xb5\x6f\xbe\x86\x72\xb9\x44\xbd\x56\ \x63\xdf\xbe\x7d\x1c\x3c\x70\x80\xc3\x47\x8e\x30\x71\xf4\x18\x4f\ \x3d\xf5\xd4\xd4\x9e\xbd\x7b\xe6\x8f\x4e\x4c\xcc\x01\x55\x60\xd9\ \xfe\x4d\x80\x93\x6f\xba\x1f\xf0\xfb\xfb\xfa\x47\xc7\xc7\x37\x94\ \x2f\xba\xe0\xc2\x91\xad\x67\x6e\x2d\xad\xdf\xb0\x9e\xf1\x8d\xe3\ \x6c\xd9\xb2\x85\x4a\x5f\x1f\xed\x76\xc8\x2d\xb7\xdc\xc6\xb7\x6e\ \xba\x99\xa3\x47\x8f\x7e\x2e\xd1\xfa\xa3\x3f\x48\x8a\xf0\x6f\xaa\ \x00\x57\x5d\x7d\xed\xf8\xf0\xf0\xf0\x9f\x5e\x79\xe5\x6b\xaf\x7b\ \xdb\x5b\xdf\x82\xe7\x79\x1c\x3d\x72\x84\x67\x9f\x79\x86\xbd\x7b\ \xf6\xf0\xec\xb3\xcf\xb6\xef\xb9\xe7\x9e\x23\xcf\xef\xdf\x7f\x18\ \x38\x08\x34\x80\x16\x10\x02\x4d\x20\xa6\x23\xfc\x74\x01\x10\x80\ \x07\x28\xc0\x05\x02\xfb\xb9\x02\xac\x7d\xd3\xeb\x5f\xbf\xe9\x75\ 
\x6f\x78\xc3\xc6\x6d\xdb\xb6\x71\xf6\x59\xe7\xb0\x65\xeb\x16\x00\ \xbe\xf5\xad\x9b\xf9\xea\xd7\xbe\xb6\x38\x31\x31\xf9\x67\xc0\x27\ \x6e\xbe\xe9\x6b\x8b\xdf\x97\x8e\xf8\x77\xdc\xfe\xcd\x14\xe0\x27\ \x7e\xf2\xed\xff\xed\xd2\x4b\x2f\xf9\xd0\xbb\xde\xf5\xce\x4a\x7f\ \x5f\x1f\x87\x0e\x1c\xe0\x89\x27\x9f\x64\xd7\xd3\x4f\x73\xc7\x1d\ \x77\x4e\xdd\xfb\xc0\xfd\xcf\x02\xfb\x80\x15\xa0\x0e\x2c\x61\x14\ \xa0\x69\xbf\x47\xf6\x7b\xaa\x00\x89\x3d\xb5\xc2\x44\x32\x12\xf0\ \xed\x12\x00\x05\x20\x67\xff\x06\x40\xa1\x52\x2e\x9f\xf3\xce\x77\ \xbc\xf3\x82\x4b\x2e\x7d\x75\xf9\xa2\x0b\x2f\x64\xdb\xd9\x67\xd3\ \x6e\x87\x7c\xf6\xef\x3e\xcf\x6d\xb7\xdd\x71\xa8\x56\xab\xbd\xfb\ \x3f\x3a\x61\xfc\x5f\xae\x00\x57\x5d\x7d\xed\xf8\xba\x75\x63\x5f\ \x7d\xe7\x3b\x7f\xfa\x82\xcb\x2f\xbb\x94\xb9\x99\x19\x1e\x7a\xe8\ \x11\x1e\x7b\xf4\x61\xfe\xf9\x8b\x37\xec\xdf\xfd\xfc\x9e\x9d\xc0\ \x61\x60\xc1\x2e\x55\x60\xd6\xfe\xad\xd9\xa5\xc9\x6a\x04\xe8\x46\ \x01\x30\x56\xef\x62\x84\xed\xd9\xbf\x05\xa0\x08\xf4\x00\x25\xbb\ \x94\x01\x4f\x22\xcf\xfb\x85\x0f\xfc\xdc\xab\xaf\x78\xed\x8e\xf2\ \xab\x5e\x7d\x31\xeb\xd6\xaf\x67\xf7\x9e\xbd\x5c\x7f\xfd\x5f\xb3\ \x7f\xff\xc1\x4f\x00\x1f\xfd\x8f\x8a\x06\xa7\x55\x00\x21\x84\xd0\ \xff\x0a\x6d\x38\x59\x01\xde\x7c\xed\x4f\x5c\x77\xd1\x85\x17\x7e\ \xf6\xc3\x1f\xfa\x60\xa5\x52\x29\xf3\xe8\xc3\x0f\xf3\xc0\xfd\xf7\ \x73\xe3\x8d\x5f\x9f\xba\xfb\xde\x7b\x1e\x03\xf6\x60\x84\x3d\x07\ \x4c\xda\xcf\x4b\x18\xa1\x87\x5d\x4b\xcc\x8b\xe0\x3f\xd0\x90\xb7\ \x57\xaa\x03\xcd\xf4\xe2\x92\x0e\x1a\x94\x81\x5e\x8c\x12\xf4\xd8\ \xcf\xe9\x92\x0b\x5c\xef\xfc\xdf\xfc\xcd\xdf\x78\xdd\x6b\x77\xec\ \xf0\x5f\x73\xd9\x65\x20\x15\x7f\xf1\xc9\xbf\xe2\xce\xbb\xee\x7a\ \x2a\x0c\xa3\x1f\xff\x8f\xc8\x0d\xba\xc5\x9b\xca\x5b\x68\xad\x11\ \x42\x48\x80\x1d\x3b\x76\x9c\x73\xc9\x25\x97\xac\x8d\xe3\x58\x0b\ \x21\xd0\x5a\xaf\x3a\xe8\xe4\xef\x2f\xd5\xa6\x4e\xcc\x5d\xf7\xda\ \xcb\x2f\x7f\xff\xfb\xdf\xff\x5e\x96\x17\x17\xb9\xf3\xce\x3b\xb9\ \xfd\xb6\x3b\xa2\xbf\xfd\xdc\x67\x5e\x28\x95\x4a\x7b\x7c\xdf\x9f\ 
\xcf\xe7\xf3\xf3\xa5\x52\x69\xb6\x52\xa9\x2c\xe6\x72\xb9\xa6\xef\ \xfb\x6d\xdf\xf7\xa3\x74\x71\x1c\x27\x59\xb7\x6e\x9d\xb7\x79\xf3\ \xe6\xc1\xb5\x6b\xd7\x8e\xc6\x71\x1c\xf5\xf6\xf6\x0e\x45\x51\x14\ \x25\x49\x12\xc5\x71\x1c\x85\x61\x18\xd6\x6a\xb5\x6a\xb5\x5a\xad\ \x2d\x2f\x2f\xd7\x4f\x9c\x38\xb1\x32\x35\x75\xa2\xae\xd1\x24\x71\ \x42\x14\x85\xb2\x5e\xaf\xbb\xd3\xd3\xd3\xe5\xa5\xa5\xa5\x72\xad\ \x56\x2b\xd7\x6a\xb5\x9e\x66\xb3\xd9\xd3\x6e\xb7\x0b\x51\x14\x15\ \xb6\x9e\x79\xe6\xb9\x3f\xff\xfe\x9f\xdf\xb2\xe3\xca\x1d\x8c\x8e\ \x8d\x71\xe3\x8d\xdf\xe0\xef\xff\xe1\x0b\xb5\x62\x31\xf7\x6b\xfd\ \x7d\x95\x03\xdf\x4f\x01\x08\x21\x10\x42\x20\xe5\xf7\x37\x01\x9b\ \x1a\xdf\xc9\x46\x78\xaa\xf5\x8e\xe3\x88\x07\x1e\x78\x60\xe2\xf6\ \xdb\x6f\x7f\x16\x48\x84\xd6\x1a\xd7\x75\xbd\x28\x8a\x92\xeb\xaf\ \xbf\xfe\xf3\xef\x7d\xef\x7b\xdf\x1e\x45\x51\x76\xe0\x29\x61\xe3\ \x34\xfe\xfe\x53\x7f\xf3\x59\xd6\x8e\x8d\xf1\x93\x3f\x71\x2d\x87\ \x0e\x1c\xe0\x8e\xdb\x6e\xe3\xd6\xdb\x6e\xc3\xf1\x7d\xce\x38\xe3\ \x0c\xc6\xc6\xc6\x18\x1f\x1f\x67\x70\x70\x90\x7c\x3e\x4f\x3e\x9f\ \xc7\xf3\x3c\x3c\xcf\x33\x71\xbd\xe7\x9d\xb2\x93\x4e\xbe\x8f\xd3\ \x29\x62\xba\xad\xf3\x17\xc2\xb0\xcd\xdc\xdc\x1c\x33\x33\x33\xcc\ \xcc\xcc\x70\xe2\xc4\x09\xa6\xa7\xa7\xa9\x56\xab\xec\xde\xbd\x9b\ \xd7\x5c\x7a\x29\x6f\x7c\xe3\x9b\xd8\x7e\xfe\x76\xee\xbf\xff\x21\ \xfe\xe2\x93\xd7\xf3\x33\xff\xc7\x4f\x73\xe5\x95\x3b\x5e\xf2\xfc\ \xa7\x6b\xdf\xcb\x3e\x27\x1b\xdb\xcb\x3d\xd7\x77\x7b\x1e\xad\x35\ \x41\x10\xf0\xc5\x2f\x7e\xf1\x4b\xef\x79\xcf\x7b\x7e\xca\x71\x1c\ \xe1\x00\x28\xa5\x64\x14\x45\x81\x94\xd2\xf7\x3c\xef\x65\x35\xf4\ \xa5\x14\xe0\xaf\xff\xe7\xdf\xb0\x65\xcb\x66\xae\xbe\xea\x4d\x3c\ \xbb\x6b\x17\x37\xdf\x7c\x33\xb7\xdc\x72\x0b\xe7\x5f\x78\x21\xeb\ \xd6\xad\x63\xcb\x96\x2d\x8c\x8e\x8e\x52\x2c\x16\xc9\xe7\xf3\xe4\ \x72\x39\x82\x20\xc0\xf7\xfd\xd3\x9e\xf7\xfb\xd1\x82\xc0\xa7\x54\ \x2a\x31\x36\x36\xc6\xf4\xf4\x34\x53\x53\x53\x1c\x3f\x7e\x9c\xc9\ \xc9\x49\x72\xb9\x1c\x07\x0f\x1f\xe6\x86\x7f\xfe\x67\x6a\xb5\x2a\ 
\x97\x5d\x76\x29\xb9\x42\x8e\x3f\xfe\xe3\x3f\xc5\x71\x5d\xde\xf8\ \x86\x2b\xbf\xe3\xeb\x7c\x2f\xc2\x79\xa9\x75\x2f\x85\xbe\x2f\xf5\ \x37\xb1\x09\xb2\xb4\x1f\x4f\xb5\x9f\xe7\x79\x38\x8e\xe3\x03\x05\ \xa5\x54\xab\x7b\x2c\x40\xa6\x1c\x20\x3d\x11\x80\x92\xa7\x10\x8a\ \x71\x1b\xab\x56\xfd\xe5\x5f\x7d\x8a\x4d\x1b\x37\x71\xf5\x55\x6f\ \xe2\xa9\x27\x9e\xe0\x9b\xdf\xfa\x16\xb7\xde\x7a\x2b\xaf\x7b\xdd\ \xeb\x38\x73\xeb\x99\x6c\xde\xb4\x99\xde\xde\x0a\x85\x42\x61\x95\ \xf0\x85\x90\xb0\xea\x5c\x2f\x65\x39\xdf\xc9\x3e\x27\xef\xf7\xe2\ \xe6\xba\x2e\x63\x63\x63\xf4\xf5\xf5\x51\x2e\x97\xe9\xed\xed\xe5\ \xf0\xe1\xc3\xf8\xbe\xcf\xec\xec\x2c\x5f\xba\xe1\x4b\x84\x61\xc8\ \xe5\x57\x5c\xc1\xaf\xfe\xca\x87\xf9\xc3\x3f\xfa\x13\x80\x97\x51\ \x82\x6e\xdf\x9a\xae\xd2\x20\x3a\x77\xd3
codeparrot/github-code-clean
""" handhRL Hulks and Horrors: The Roguelike Copyright 2014 by John S. Berry III This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ import math import textwrap import shelve import os import operator import random import hhmessage import libtcodpy as libtcod import hhtable SCREEN_WIDTH = 80 SCREEN_HEIGHT = 50 MAP_WIDTH = 80 MAP_HEIGHT = 43 LIMIT_FPS = 20 ROOM_MAX_SIZE = 10 ROOM_MIN_SIZE = 6 MAX_ROOMS = 30 FOV_ALGO = 0 FOV_LIGHT_WALLS = True TORCH_RADIUS = 10 BAR_WIDTH = 20 PANEL_HEIGHT = 7 PANEL_Y = SCREEN_HEIGHT - PANEL_HEIGHT MSG_X = BAR_WIDTH + 2 MSG_WIDTH = SCREEN_WIDTH - BAR_WIDTH - 2 MSG_HEIGHT = PANEL_HEIGHT - 1 INVENTORY_WIDTH = 50 HEAL_AMOUNT = [1, 4] LIGHTNING_DAMAGE = [2, 12] LIGHTNING_RANGE = 5 CONFUSE_NUM_TURNS = 10 CONFUSE_RANGE = 8 FIREBALL_DAMAGE = [1, 6] FIREBALL_RADIUS = 3 LEVEL_UP_BASE = 300 LEVEL_UP_FACTOR = 200 color_dark_wall = libtcod.Color(128, 128, 128) color_light_wall = libtcod.Color(130, 110, 50) color_dark_ground = libtcod.Color(192, 192, 192) color_light_ground = libtcod.Color(200, 180, 50) class Tile: # a tile of the map and its properties def __init__(self, blocked, block_sight=None): self.blocked = blocked # all tiles start unexplored self.explored = False # by default, if a tile is blocked, it also blocks sight if block_sight is None: block_sight = blocked self.block_sight = block_sight class Rect: # a rectangle on the map. 
    # used to characterize a room
    def __init__(self, x, y, w, h):
        # store opposite corners: (x1, y1) top-left, (x2, y2) bottom-right
        self.x1 = x
        self.y1 = y
        self.x2 = x + w
        self.y2 = y + h

    def center(self):
        # midpoint of the rectangle ('/' is integer division under Python 2)
        center_x = (self.x1 + self.x2) / 2
        center_y = (self.y1 + self.y2) / 2
        return center_x, center_y

    def intersect(self, other):
        # return true if rectangle intersects with another one
        return self.x1 <= other.x2 and self.x2 >= other.x1 and self.y1 <= other.y2 and self.y2 >= other.y1


class Object:
    # this is a generic object: the player, a monster, an item, the stairs...
    # it's always represented by a character on the screen.
    # Optional components (fighter/ai/item/equipment/placeable) add behavior;
    # each component gets a back-reference via its .owner attribute.
    def __init__(self, x, y, char, name, color, blocks=False, always_visible=False, fighter=None, ai=None,
                 item=None, equipment=None, placeable=None, seen_player=False, killed_by=None):
        self.x = x
        self.y = y
        self.char = char
        self.name = name
        self.color = color
        self.blocks = blocks
        self.always_visible = always_visible
        self.fighter = fighter
        if self.fighter:
            self.fighter.owner = self
        self.ai = ai
        if self.ai:
            self.ai.owner = self
        self.item = item
        if self.item:
            self.item.owner = self
        self.equipment = equipment
        if self.equipment:
            # let the equipment component know who owns it
            self.equipment.owner = self

            # there must be an item component for the equipment component to work properly
            self.item = Item()
            self.item.owner = self
        self.placeable = placeable
        if self.placeable:
            self.placeable.owner = self
        self.seen_player = seen_player
        self.killed_by = killed_by

    def move(self, dx, dy):
        # move by the given amount, unless the destination tile is blocked
        if not is_blocked(self.x + dx, self.y + dy):
            self.x += dx
            self.y += dy

    def draw(self):
        # set the color and then draw the character that represents this object at its position;
        # always_visible objects are also drawn on explored tiles outside the FOV
        if libtcod.map_is_in_fov(fov_map, self.x, self.y) or (self.always_visible and map[self.x][self.y].explored):
            libtcod.console_set_default_foreground(con, self.color)
            libtcod.console_put_char(con, self.x, self.y, self.char, libtcod.BKGND_NONE)

    def clear(self):
        # erase the character that represents this object
        libtcod.console_put_char(con, self.x, self.y, ' ', libtcod.BKGND_NONE)

    def move_towards(self, target_x, target_y):
        # create and compute a path for the object to the target
        path = libtcod.path_new_using_map(fov_map)
        libtcod.path_compute(path, self.x, self.y, target_x, target_y)

        # get the target coords of the next spot on the path
        mx, my, = libtcod.path_walk(path, True)
        if mx is not None:
            dx = mx - self.x
            dy = my - self.y
            self.move(dx, dy)
            libtcod.path_delete(path)
        else:
            # no walkable next step: free the path and stay put
            libtcod.path_delete(path)
            return

    def distance(self, x, y):
        # return the distance to some coordinates
        return math.sqrt((x - self.x) ** 2 + (y - self.y) ** 2)

    def distance_to(self, other):
        # return the distance to another object
        dx = other.x - self.x
        dy = other.y - self.y
        return math.sqrt(dx ** 2 + dy ** 2)

    def send_to_back(self):
        # make this object be drawn first, so all others appear above it if they're in the same tile
        global objects
        objects.remove(self)
        objects.insert(0, self)


class Item:
    # an item that can be picked up and used.
    def __init__(self, reusable=False, uses=1, use_function=None):
        # use_function is a component object with a .use() method (Heal, Buff, ...)
        self.use_function = use_function
        self.reusable = reusable
        self.uses = uses
        if self.use_function:
            self.use_function.owner = self

    def use(self, *args):
        # just call the use_function if it is defined
        if self.use_function is None and not self.owner.equipment:
            message('The ' + self.owner.name + ' cannot be used.')
        elif self.owner.equipment:
            # special case: if object has equipment component, the use option is to equip/dequip
            self.owner.equipment.toggle_equip()
            return
        elif not self.reusable:
            if self.use_function.use(*args) != 'cancelled':
                inventory.remove(self.owner)  # destroy after use unless cancelled
        else:
            # reusable item: decrement its charge count instead
            if self.use_function.use(*args) != 'cancelled':
                self.uses -= 1
                if self.uses < 1:
                    inventory.remove(self.owner)

    def pick_up(self):
        # add to the player's inventory and remove from the map
        # (26 slots: one per inventory menu letter a-z)
        if len(inventory) >= 26:
            message('Your inventory is full, cannot pick up ' + self.owner.name + '.', libtcod.red)
        else:
            inventory.append(self.owner)
            objects.remove(self.owner)
message('You picked up a ' + self.owner.name + '!', libtcod.green) # special case: automatically equip, if corresponding slot is unused equipment = self.owner.equipment if equipment and get_equipped_in_slot(equipment.slot) is None: equipment.equip() def drop(self): # special case: if equipped item, remove before dropping if self.owner.equipment: self.owner.equipment.dequip() # add to the map and remove from inventory. also, place at player coords objects.append(self.owner) inventory.remove(self.owner) self.owner.x = player.x self.owner.y = player.y message('You dropped a ' + self.owner.name + '.', libtcod.yellow) class Equipment: # an object that can be equipped, yielding bonuses. automatically adds the item component. def __init__(self, slot, to_hit_bonus=0, damage_bonus=0, damage_roll=None, armor_bonus=0, max_hp_bonus=0, ranged=False, ammo=None): self.to_hit_bonus = to_hit_bonus self.damage_bonus = damage_bonus self.damage_roll = damage_roll self.armor_bonus = armor_bonus self.max_hp_bonus = max_hp_bonus self.slot = slot self.is_equipped = False self.ranged = ranged self.ammo = ammo def toggle_equip(self): # toggle equip/dequip state if self.is_equipped: self.dequip() else: self.equip() def equip(self): # if the slot is already being used, dequip whatever is there first old_equipment = get_equipped_in_slot(self.slot) if old_equipment is not None: old_equipment.dequip() # equip an object and show a message about it self.is_equipped = True message('Equipped ' + self.owner.name + ' on ' + self.slot + '.', libtcod.light_green) def dequip(self): # dequip object and show a message about it. if not self.is_equipped: return self.is_equipped = False message('Dequipped ' + self.owner.name + ' from ' + self.slot + '.', libtcod.light_yellow) class Placeable: # a class for 'placeables', interactive world objects that may be usable. 
    def __init__(self, reusable=False, used=False, use_class=None):
        # use_class is a component object with a .use() method (Terminal, RestPod, ...)
        self.reusable = reusable
        self.used = used
        self.use_class = use_class
        if self.use_class:
            self.use_class.owner = self

    def use(self, *args):
        # interact with the object
        # just call the use_function if it is defined
        if self.use_class is None:
            message('The ' + self.owner.name + ' cannot be used.')
        # NOTE(review): no early return above — when use_class is None control
        # still falls through to the branch below and self.use_class.use(...)
        # would raise AttributeError; likely an 'elif'/'return' was intended.
        if self.used and not self.reusable:
            message('You have already used that object!')
        else:
            if self.use_class.use(*args) != 'cancelled':
                self.used = True  # disable after use unless cancelled


class Fighter:
    # combat-related properties and methods (monster, player, npc)
    def __init__(self, hp, armor_class, to_hit, damage, damage_roll, xp, damage_resistance=0, kills=0,
                 death_function=None):
        # 'base_' stats are intrinsic; the matching properties below add
        # bonuses from currently equipped items
        self.base_max_hp = hp
        self.hp = hp
        self.base_armor_class = armor_class
        self.base_to_hit = to_hit
        self.base_damage = damage
        self.base_roll = damage_roll
        self.xp = xp
        self.damage_resistance = damage_resistance
        self.kills = kills
        # death_function is called with the owner Object when hp drops to 0
        self.death_function = death_function

    @property
    def to_hit(self):
        # base to-hit plus the bonuses of all equipped items
        bonus = sum(equipment.to_hit_bonus for equipment in get_all_equipped(self.owner))
        return self.base_to_hit + bonus

    @property
    def armor_class(self):
        # return actual defense, by summing up the bonuses from all equipped items
        # (total equipment bonus is floored at -12)
        bonus = sum(equipment.armor_bonus for equipment in get_all_equipped(self.owner))
        if bonus < -12:
            bonus = -12
        return self.base_armor_class + bonus

    @property
    def damage(self):
        # return actual damage bonus, plus any special bonuses
        bonus = sum(equipment.damage_bonus for equipment in get_all_equipped(self.owner))
        return self.base_damage + bonus

    @property
    def damage_roll(self):
        # return current damage roll or roll from equipment
        # (first equipped item with a damage_roll wins)
        for equipment in get_all_equipped(self.owner):
            if equipment.damage_roll:
                return equipment.damage_roll
        return self.base_roll

    @property
    def max_hp(self):
        # return actual max_hp, by summing up the bonuses from all equipped items
        bonus = sum(equipment.max_hp_bonus for equipment in get_all_equipped(self.owner))
        return self.base_max_hp + bonus

    def take_damage(self, damage, killed_by):
        # apply damage if possible
        if damage > 0:
            self.hp -= damage

            # check for death. if there's a death function, call it, and update 'killed_by' to name of attacker
            if self.hp <= 0:
                function = self.death_function
                if function is not None:
                    self.owner.killed_by = killed_by
                    function(self.owner)
                if self.owner != player:  # yield xp to player
                    player.fighter.xp += self.xp
                    player.fighter.kills += 1

    def heal(self, amount):
        # heal by the given amount, without going over max_hp
        self.hp += amount
        if self.hp > self.max_hp:
            self.hp = self.max_hp

    def attack(self, target):
        # melee/equipped-weapon attack against another fighter Object
        # first check for to hit target, capped at 2 to 20
        to_hit_target = self.to_hit + target.fighter.armor_class + 5
        if to_hit_target > 20:
            to_hit_target = 20
        elif to_hit_target == 1:
            # NOTE(review): only catches exactly 1; values <= 0 pass through
            # unchanged despite the "capped at 2 to 20" comment — confirm.
            to_hit_target = 2

        # check of the target is attacking with a gun
        # NOTE(review): despite the comment this inspects the *attacker's*
        # (self.owner's) equipment, not the target's — confirm intent.
        has_gun = False
        for i in get_all_equipped(self.owner):
            if i.is_equipped and i.ranged:
                has_gun = True
                gun = i

        # check if gun has ammo
        if has_gun:
            if gun.ammo > 0:
                gun.ammo -= 1
            else:
                message("You don't have any ammo!")
                return

        # use the right pronoun
        if target.ai is not None:
            pronoun = 'the '
        else:
            pronoun = ''

        # roll to hit (1d20 under the target number misses)
        if hhtable.rolldice(1, 20) >= to_hit_target:
            message(self.owner.name.title() + ' misses ' + pronoun + target.name + '.')
            return

        # now roll for damage (curr. using OD&D style)
        damage = (hhtable.rolldice(*self.damage_roll) + self.damage) - target.fighter.damage_resistance

        if damage > 0:
            # make the target take some damage
            message(self.owner.name.title() + ' hits ' + pronoun + target.name + ' for ' + str(damage) +
                    ' hit points.', libtcod.yellow)
            target.fighter.take_damage(damage, self.owner.name)
        else:
            message(self.owner.name.title() + ' hits ' + pronoun + target.name + ' but it has no effect!',
                    libtcod.grey)

    def shoot(self):
        # ranged attack: requires an equipped ranged weapon with ammo,
        # then asks the player to click a target
        # first check if the character is equipped with a ranged weapon
        has_gun = False
        for i in get_all_equipped(self.owner):
            if i.is_equipped and i.ranged:
                has_gun = True
                gun = i
        if not has_gun:
            message("You're not carrying a gun!", libtcod.red)
            return

        # check if the gun has ammo
        if gun.ammo is None or gun.ammo < 1:
            message("You're out of ammo in that gun!", libtcod.red)
            return

        # target a monster
        message('Left-click on a target monster, or right-click to cancel.')
        target = target_monster()
        if not target:
            return

        # calculate to-hit (same cap caveat as attack() above)
        to_hit_target = self.to_hit + target.fighter.armor_class + 5
        if to_hit_target > 20:
            to_hit_target = 20
        elif to_hit_target == 1:
            to_hit_target = 2

        # deduct ammo
        gun.ammo -= 1

        # roll to hit
        if hhtable.rolldice(1, 20) >= to_hit_target:
            message(self.owner.name.title() + ' misses the ' + target.name + '.')
            return

        # now roll for damage (curr. using OD&D style)
        damage = (hhtable.rolldice(*self.damage_roll) + gun.damage_bonus) - target.fighter.damage_resistance

        if damage > 0:
            # make the target take some damage
            message(self.owner.name.title() + ' hits the ' + target.name + ' for ' + str(damage) +
                    ' hit points.', libtcod.yellow)
            target.fighter.take_damage(damage, self.owner.name)
        else:
            message(self.owner.name.title() + ' hits the ' + target.name + ' but it has no effect!',
                    libtcod.grey)


class BasicMonster:
    # AI for a basic monster
    def __init__(self):
        pass

    def take_turn(self):
        # a basic monster takes its turn.
        # If you can see it, it can see you
        monster = self.owner
        if libtcod.map_is_in_fov(fov_map, monster.x, monster.y):
            # once seen, the monster keeps hunting even outside the FOV
            monster.seen_player = True

        if monster.seen_player:
            # move towards the player if far away
            if 2 <= monster.distance_to(player) <= 10:
                monster.move_towards(player.x, player.y)

            # close enough, attack!
            elif player.fighter.hp > 0:
                monster.fighter.attack(player)


class FriendlyMonster:
    # AI for an ally that follows the player and fights other monsters
    def __init__(self, max_range=10):
        self.max_range = max_range

    def take_turn(self):
        # a monster that protects the player and attacks other monsters
        monster = self.owner
        # closest_monster is given (self, player) to exclude from targeting
        enemy = closest_monster(self.max_range, (monster, player))
        if enemy is not None:
            message(self.owner.name + ' is targeting ' + enemy.name)
            if 2 <= monster.distance_to(enemy) <= self.max_range:
                monster.move_towards(enemy.x, enemy.y)
            elif enemy.fighter.hp > 0:
                monster.fighter.attack(enemy)
        else:
            # nothing to fight: follow the player
            monster.move_towards(player.x, player.y)


class ConfusedMonster:
    # AI for a temporarily confused monster (reverts to normal AI after a while)
    def __init__(self, old_ai, num_turns=CONFUSE_NUM_TURNS):
        self.old_ai = old_ai
        self.num_turns = num_turns

    def take_turn(self):
        if self.num_turns > 0:  # still confused
            # move in random direction and decrease confuse duration
            self.owner.move(libtcod.random_get_int(0, -1, 1), libtcod.random_get_int(0, -1, 1))
            self.num_turns -= 1
        else:
            # restore previous AI
            self.owner.ai = self.old_ai
            message('The ' + self.owner.name + ' is no longer confused!', libtcod.red)


class Heal:
    # generic process for healing items
    def __init__(self, dice=HEAL_AMOUNT, max_boost=False, heal_all=False):
        self.dice = dice
        # NOTE(review): max_boost is stored but never read in this class — confirm.
        self.max_boost = max_boost
        self.heal_all = heal_all

    def use(self):
        # heal the player
        if player.fighter.hp == player.fighter.max_hp:
            message('You are already at full health.', libtcod.red)
            return 'cancelled'
        if self.heal_all:
            # full heal: restore up to the current max
            heal_roll = player.fighter.max_hp
        else:
            heal_roll = hhtable.rolldice(*self.dice)
        message('Your pain subsides, for now. You restore ' + str(heal_roll) + ' hit points.', libtcod.light_violet)
        player.fighter.heal(heal_roll)


class Buff:
    # generic process for items which permanently improve stats
    def __init__(self, max_hp=0, to_hit=0, damage=0, ac=0, xp=0, dr=0, desc=None):
        self.max_hp = max_hp
        self.to_hit = to_hit
        self.damage = damage
        self.ac = ac
        self.xp = xp
        self.dr = dr
        # optional custom message shown instead of the generic one
        self.desc = desc

    def use(self):
        # apply all bonuses from the item (permanently mutates base stats)
        player.fighter.base_max_hp += self.max_hp
        player.fighter.base_to_hit += self.to_hit
        player.fighter.base_damage += self.damage
        player.fighter.base_armor_class += self.ac
        player.fighter.xp += self.xp
        player.fighter.damage_resistance += self.dr
        if self.desc is None:
            message('A rush flows through you, and you feel improved!')
        else:
            message(self.desc)


class RandomDamage:
    # generic process for items that damage a random target
    def __init__(self, damage=LIGHTNING_DAMAGE, attack_range=LIGHTNING_RANGE):
        self.damage = damage
        self.attack_range = attack_range

    def use(self):
        # find closest enemy inside max range and damage it
        monster = closest_monster(self.attack_range, [player])
        if monster is None:  # no enemy found within range
            message('No enemy is within arc range.')
            return 'cancelled'

        # zap it!
        damage = hhtable.rolldice(*self.damage)
        message('A bolt of electricity arcs into the ' + monster.name + ' with a loud ZZZAP! The damage is ' + str(
            damage) + ' hit points.', libtcod.light_blue)
        monster.fighter.take_damage(damage, 'electrical discharge')


class Grenade:
    # generic grenade throw function
    def __init__(self, damage=FIREBALL_DAMAGE, radius=FIREBALL_RADIUS, radius_damage=FIREBALL_DAMAGE, kills=False,
                 kills_radius=False):
        # kills / kills_radius: when True the blast deals the victim's full hp
        self.damage = damage
        self.radius = radius
        self.radius_damage = radius_damage
        self.kills = kills
        self.kills_radius = kills_radius

    def use(self):
        # ask the player for a target tile to throw a 'fireball' at (ie.
        # grenade, AOE, etc)
        message('Left-click a target tile, or right-click to cancel.', libtcod.light_cyan)
        (x, y) = target_tile()
        if x is None:
            return 'cancelled'
        message('The device explodes, striking everything within ' + str(self.radius) + ' tiles!', libtcod.orange)

        for obj in objects:  # damage every fighter in range, including the player
            if obj.distance(x, y) == 0 and obj.fighter:
                # direct hit: use the (possibly lethal) ground-zero damage
                if not self.kills:
                    damage_rolled = hhtable.rolldice(*self.damage)
                else:
                    damage_rolled = obj.fighter.hp
                message(obj.name.capitalize() + ' is at ground zero! Takes ' + str(damage_rolled) + ' hit points.',
                        libtcod.orange)
                obj.fighter.take_damage(damage_rolled, 'own grenade')
            elif obj.distance(x, y) <= self.radius and obj.fighter:
                # splash damage inside the blast radius
                if not self.kills_radius:
                    damage_rolled = hhtable.rolldice(*self.radius_damage)
                else:
                    damage_rolled = obj.fighter.hp
                message(obj.name.capitalize() + ' takes blast damage for ' + str(damage_rolled) + ' hit points.',
                        libtcod.orange)
                obj.fighter.take_damage(damage_rolled, 'own grenade')


class Confuse:
    # generic class for confusion items
    def __init__(self, duration=CONFUSE_NUM_TURNS, attackrange=CONFUSE_RANGE):
        self.duration = duration
        self.attackrange = attackrange

    def use(self):
        # ask for target and confuse it
        message('Left-click an enemy to confuse it, or right-click to cancel.', libtcod.light_cyan)
        monster = target_monster(self.attackrange)
        if monster is None:
            return 'cancelled'

        # swap in the confused AI, remembering the old one for later restore
        old_ai = monster.ai
        monster.ai = ConfusedMonster(old_ai, num_turns=self.duration)
        monster.ai.owner = monster  # tell the new component who owns it
        message('The eyes of the ' + monster.name + ' look vacant, as he starts to stumble around!',
                libtcod.light_green)


class Detector:
    # generic class for a device that detects monster presences
    def __init__(self, detect_range=None):
        # detect_range=None means map-wide detection
        self.detect_range = detect_range

    def use(self):
        # flag all monsters within range as always_visible (or all monsters on map if detect_range=None)
        message('The machine goes "Ping!"')
        for obj in objects:
            if obj.fighter and self.detect_range is None:
                obj.always_visible = True
            elif obj.fighter and obj.distance(player.x, player.y) <= self.detect_range:
                obj.always_visible = True


class Summon:
    # summon a friendly monster
    def __init__(self, name, hitdice, color):
        self.name = name
        self.hitdice = hitdice
        self.color = color

    def use(self):
        # spawn the ally at the player's position
        x = player.x
        y = player.y
        summon = get_monster_from_hitdice(x, y, self.name, self.hitdice, self.color, friendly=True)
        objects.append(summon)


class Terminal:
    # a placeable computer terminal that prints a flavor message
    # NOTE(review): the 'type' parameter shadows the builtin of the same name.
    def __init__(self, type=None):
        self.type = type
        if self.type is None:
            self.type = random.choice(['log', 'hint'])

    def use(self):
        # get a random creepy message
        if self.type == 'log':
            hhmessage.creep_log()
        if self.type == 'hint':
            hhmessage.hint_message()


class RestPod:
    # a placeable that heals the player when used
    def __init__(self, heal_amount=(1, 6), heal_bonus=0):
        self.heal_bonus = heal_bonus
        self.heal_amount = heal_amount

    def use(self):
        # heal the player
        if player.fighter.hp == player.fighter.max_hp:
            message('You are already at full health.', libtcod.red)
            return 'cancelled'
        heal_roll = hhtable.rolldice(*self.heal_amount) + self.heal_bonus
        message('You relax inside the metal cocoon. You restore ' + str(heal_roll) + ' hit points.',
                libtcod.light_violet)
        player.fighter.heal(heal_roll)


class Teleporter:
    # a placeable that moves the player to another dungeon level
    def __init__(self, new_level=None):
        self.new_level = new_level
        if self.new_level is None:
            # random destination level 1-12
            self.new_level = libtcod.random_get_int(0, 1, 12)

    def use(self):
        global dungeon_level
        message('You feel a sudden jolt and find yourself staring at a completely different room.', libtcod.red)
        dungeon_level = self.new_level
        make_map()
        initialize_fov()


def main_menu(firstrun=False):
    # The main game menu.
    img = hhmessage.generate_starpic()

    while not libtcod.console_is_window_closed():
        # show the background image, at twice the regular resolution
        libtcod.image_blit_2x(img, 0, 0, 0)

        # show the game title and credits!
        libtcod.console_set_default_foreground(0, libtcod.light_yellow)
        libtcod.console_print_ex(0, SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2 - 4, libtcod.BKGND_NONE, libtcod.CENTER,
                                 'HULKS AND HORRORS\nThe Roguelike')
        libtcod.console_print_ex(0, SCREEN_WIDTH / 2, SCREEN_HEIGHT - 2, libtcod.BKGND_NONE, libtcod.CENTER,
                                 '(c) 2014 by John \'jarcane\' Berry')

        # Change menu options to match state of 'savegame'
        if os.path.isfile('savegame'):
            newopt = 'Overwrite current save'
        else:
            newopt = 'Play a new game'

        # show options and wait for the player's choice
        choice = menu('', [newopt, 'Continue last save', 'Display high scores', 'Quit'], 26)

        # NOTE(review): the first two branches are 'if'/'if' rather than
        # 'if'/'elif'; benign only because 'choice' is unchanged in between.
        if choice == 0:  # new game
            new_game(firstrun)
            firstrun = False
            play_game()
        if choice == 1:  # continue last save
            try:
                load_game()
            except:  # NOTE(review): bare except hides unrelated load errors
                msgbox('\n No saved game to load.\n', 24)
                continue
            play_game()
        elif choice == 2:  # high scores
            try:
                show_scores()
            except:  # NOTE(review): bare except — assumes missing score file
                msgbox('\n No high scores yet!\n', 24)
                continue
        elif choice == 3:  # quit
            break


def new_game(firstrun=False):
    # set up a fresh game: player object, first map, empty inventory/log
    global player, inventory, game_msgs, game_state, dungeon_level

    # play intro sequence if starting up
    if firstrun:
        hhmessage.intro_sequence()

    # create Player object
    # Assume Soldier class with 10 STR, 10 DEX, 10 CON
    fighter_component = Fighter(hp=hhtable.rolldice(3, 6) + hhtable.rolldice(1, 10), armor_class=10, to_hit=1,
                                damage=1, damage_roll=[1, 3], xp=0, death_function=player_death)
    player = Object(0, 0, chr(1), get_text_entry('What is your name, Ensign?', hhmessage.generate_screen()),
                    libtcod.white, blocks=True, fighter=fighter_component)

    player.level = 1

    # generate map
    dungeon_level = 1
    make_map()
    initialize_fov()

    game_state = 'playing'
    inventory = []

    # create the list of game messages and their colors, starts empty
    game_msgs = []

    # a warm welcoming message!
    message('You awaken in a damp cave beneath the surface of Gamma Crionis IV. The ground rumbles beneath you.',
            libtcod.red)

    # initial equipment: a knife
    equipment_component = Equipment(slot='right hand', damage_roll=[1, 4])
    obj = Object(0, 0, '-', 'combat knife', libtcod.sky, equipment=equipment_component)
    inventory.append(obj)
    equipment_component.equip()
    obj.always_visible = True


def initialize_fov():
    # (re)build the libtcod FOV map from the current level's tiles
    global fov_recompute, fov_map
    fov_recompute = True

    libtcod.console_clear(con)  # unexplored areas start black

    # create the FOV map according to the generated map
    fov_map = libtcod.map_new(MAP_WIDTH, MAP_HEIGHT)
    for y in range(MAP_HEIGHT):
        for x in range(MAP_WIDTH):
            libtcod.map_set_properties(fov_map, x, y, not map[x][y].block_sight, not map[x][y].blocked)


def play_game():
    # main gameplay loop: render, read input, then let monsters act
    player_action = None

    mouse = libtcod.Mouse()
    key = libtcod.Key()

    while not libtcod.console_is_window_closed():
        # render the screen
        libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS | libtcod.EVENT_MOUSE, key, mouse)
        render_all()

        libtcod.console_flush()
        check_level_up()

        # erase all objects at old locations before they move
        for object in objects:
            object.clear()

        # handle keys and exit game if needed
        player_action = handle_keys(key, mouse)
        if game_state == 'dead':
            # dead players forfeit their save; missing file ends the session too
            try:
                os.remove('savegame')
            except:
                break
        elif player_action == 'exit':
            save_game()
            break

        # let monsters take their turn
        if game_state == 'playing' and player_action != 'didnt-take-turn':
            for object in objects:
                if object.ai:
                    object.ai.take_turn()


def save_game():
    # open a new empty shelve (possibly rewriting old one) to write the game data
    # (player/stairs are stored as indexes into 'objects' to avoid duplicates)
    file = shelve.open('savegame', 'n')
    file['map'] = map
    file['objects'] = objects
    file['player_index'] = objects.index(player)
    file['inventory'] = inventory
    file['game_msgs'] = game_msgs
    file['game_state'] = game_state
    file['stairs_index'] = objects.index(stairs)
    file['dungeon_level'] = dungeon_level
    file.close()


def load_game():
    # open the previous saved shelve and load the game data
    global map, objects, player, inventory, game_msgs, game_state, stairs, dungeon_level

    file = shelve.open('savegame', 'r')
    map = file['map']
    objects = file['objects']
    player = objects[file['player_index']]  # get index of player in objects list and access it
    inventory = file['inventory']
    game_msgs = file['game_msgs']
    game_state = file['game_state']
    stairs = objects[file['stairs_index']]
    dungeon_level = file['dungeon_level']
    file.close()

    initialize_fov()


def new_score(player):
    # generate a new score from player and dungeon_level, save it to file, then ask to display it.
    score = player.fighter.kills * player.level * dungeon_level
    score_data = [score, player.name.title(), player.killed_by, str(dungeon_level)]
    scores = shelve.open('scorefile', 'c', writeback=True)
    if 'scores' in scores:
        # NOTE(review): local name 'list' shadows the builtin
        list = scores['scores']
        list.append(score_data)
        scores['scores'] = list
    else:
        new_list = [score_data]
        scores['scores'] = new_list
    scores.close()
    choice = menu('Game Over\n', ['See your score', 'Return to main menu'], 22)
    if choice == 0:
        show_scores()


def show_scores():
    # load the score file, sort the list by score, then display
    score_file = shelve.open('scorefile', 'r')
    scores = score_file['scores']
    scores.sort(key=operator.itemgetter(0), reverse=True)
    score_list = ['High Scores']
    c = 0
    # NOTE(review): iterates by counter 'c' (loop variable 'i' unused) and the
    # 'c > 10' break admits 11 entries — confirm intended list length.
    for i in scores:
        n_score = '{0: >3}'.format(str(c + 1)) + '. ' + '{0: >5}'.format(str(scores[c][0])) + ' ' + scores[c][1]
        n_score += ', killed by ' + scores[c][2] + ' on level ' + scores[c][3]
        score_list.append(n_score)
        c += 1
        if c > 10:
            break
    score_file.close()
    hhmessage.show_text_log(score_list, hhmessage.generate_starpic(), delay=False, center_first_line=True)


def end_game():
    # victory/ending sequence, then delete the save and return to the menu
    ending = [
        '*INITIATE COMM SEQUENCE EMERGENCY ALPHA-0x1*',
        'Calling Guild Post Alpha Ceti.',
        'Come in Guild Post Alpha Ceti.',
        'This is the last survivor of the Ark-1.',
        'Requesting immediate evacuation.',
        'Please respond.',
        'Can anyone hear me?',
        '... Is there anybody out there?',
        '...',
        '*silence*'
    ]
    hhmessage.show_text_log(ending, hhmessage.generate_starpic())
    os.remove('savegame')
    main_menu()


def handle_keys(key, mouse):
    # dispatch one player input event; returns 'exit', 'didnt-take-turn',
    # or None (a turn-consuming action was performed)
    if key.vk == libtcod.KEY_ENTER and key.lalt:
        # Alt+Enter: toggle fullscreen
        libtcod.console_set_fullscreen(not libtcod.console_is_fullscreen())
    elif key.vk == libtcod.KEY_ESCAPE:
        return 'exit'  # exit game

    # movement keys (arrows, numpad, and home/end/page keys for diagonals)
    if game_state == 'playing':
        if key.vk == libtcod.KEY_UP or key.vk == libtcod.KEY_KP8:
            player_move_or_attack(0, -1)
        elif key.vk == libtcod.KEY_DOWN or key.vk == libtcod.KEY_KP2:
            player_move_or_attack(0, 1)
        elif key.vk == libtcod.KEY_LEFT or key.vk == libtcod.KEY_KP4:
            player_move_or_attack(-1, 0)
        elif key.vk == libtcod.KEY_RIGHT or key.vk == libtcod.KEY_KP6:
            player_move_or_attack(1, 0)
        elif key.vk == libtcod.KEY_HOME or key.vk == libtcod.KEY_KP7:
            player_move_or_attack(-1, -1)
        elif key.vk == libtcod.KEY_PAGEUP or key.vk == libtcod.KEY_KP9:
            player_move_or_attack(1, -1)
        elif key.vk == libtcod.KEY_END or key.vk == libtcod.KEY_KP1:
            player_move_or_attack(-1, 1)
        elif key.vk == libtcod.KEY_PAGEDOWN or key.vk == libtcod.KEY_KP3:
            player_move_or_attack(1, 1)
        elif key.vk == libtcod.KEY_KP5 or key.vk == libtcod.KEY_SPACE:
            pass  # do nothing ie wait for the monster to come to you
        else:
            # test for other keys
            key_char = chr(key.c)

            if key_char == 'a':
                # report remaining ammo for the equipped ranged weapon
                has_gun = False
                for i in get_all_equipped(player):
                    if i.is_equipped and i.ranged:
                        has_gun = True
                        gun = i
                if has_gun:
                    message(gun.owner.name.capitalize() + ' has ' + str(gun.ammo) + ' shots remaining.')

            if key_char == 's':
                # shoot at someone
                player.fighter.shoot()
                # remove the target from the map until the next redraw
                for object in objects:
                    object.clear()
                return

            if key_char == 'g':
                # pick up an item
                for object in objects:
                    if object.x == player.x and object.y == player.y and object.item:
                        object.item.pick_up()
                        break

            if key_char == 'u':
                # use a placeable if present
                for object in objects:
                    if object.x == player.x and object.y == player.y and object.placeable:
                        object.placeable.use()
                        break

            if key_char == 'i':
                # show the inventory
                chosen_item = inventory_menu('Press the key next to an item to use it, or any other to cancel.\n')
                if chosen_item is not None:
                    chosen_item.use()

            if key_char == 'd':
                # show inventory, if an item is selected, drop it
                chosen_item = inventory_menu('Press the key next to an item to drop it, or any other to cancel.\n')
                if chosen_item is not None:
                    chosen_item.drop()

            if key_char == '<':
                # go down stairs, if the player is on them
                if stairs.x == player.x and stairs.y == player.y:
                    next_level()

            if key_char == 'c':
                # show character information
                level_up_xp = LEVEL_UP_BASE + (player.level * LEVEL_UP_FACTOR)
                # damage_roll may have an optional third element ('highest' die);
                # the bare except covers the 2-element case
                try:
                    highest = 'H' + str(player.fighter.damage_roll[2])
                except:
                    highest = ''
                hhmessage.show_text_log([
                    'Character Information',
                    'Name: ' + player.name,
                    'Level: ' + str(player.level),
                    'Experience: ' + str(player.fighter.xp),
                    'Experience to level up: ' + str(level_up_xp),
                    'Maximum HP: ' + str(player.fighter.max_hp),
                    'AC: ' + str(player.fighter.armor_class),
                    'DR: ' + str(player.fighter.damage_resistance),
                    'To-hit: +' + str(player.fighter.to_hit),
                    'Damage Bonus: +' + str(player.fighter.damage),
                    'Damage Roll: ' + str(player.fighter.damage_roll[0]) + 'd' + str(
                        player.fighter.damage_roll[1]) + highest,
                ], hhmessage.generate_screen(), delay=False)

            if key_char == 'h' or key_char == '?':
                hhmessage.help_screen()

            return 'didnt-take-turn'


def target_tile(max_range=None):
    # return the position of a tile left-clicked in player FOV (optionally in a range)
    # or return (None,None) if right clicked
    key = libtcod.Key()
    mouse = libtcod.Mouse()

    while True:
        # render the screen.
this raises the inventory and shows the names of objects under the mouse libtcod.console_flush() libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS | libtcod.EVENT_MOUSE, key, mouse) render_all() (x, y) = (mouse.cx, mouse.cy) if (mouse.lbutton_pressed and libtcod.map_is_in_fov(fov_map, x, y) and ( max_range is None or player.distance(x, y) <= max_range)): return x, y if mouse.rbutton_pressed or key.vk == libtcod.KEY_ESCAPE: return None, None # cancel on ESC or right clicked def target_monster(max_range=None): # returns a clicked monster inside FOV up to a range, or None if right-clicked while True: (x, y) = target_tile(max_range) if x is None: # player cancelled return None # return first clicked monster, otherwise keep looping for obj in objects: if obj.x == x and obj.y == y and obj.fighter and obj != player: return obj def get_names_under_mouse(): key = libtcod.Key() mouse = libtcod.Mouse() libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS | libtcod.EVENT_MOUSE, key, mouse) # return a string with the names of all objects under the mouse (x, y) = (mouse.cx, mouse.cy) # create a list with the names of all objects at the mouse's coordinates within FOV names = [obj.name for obj in objects if obj.x == x and obj.y == y and libtcod.map_is_in_fov(fov_map, obj.x, obj.y)] if names: names = ', '.join(names) # join the names, seperated by commas names = 'Under mouse: ' + names else: names = '' return names.title() def get_names_under_player(): names = [obj.name for obj in objects if obj.x == player.x and obj.y == player.y and obj.name != player.name] if names: names = ', '.join(names) # join the names, seperated by commas names = 'Under player: ' + names else: names = '' return names.title() def get_text_entry(header, img): timer = 0 command = "" cursor = 0 x = SCREEN_HEIGHT / 3 y = (SCREEN_HEIGHT / 4) + 2 libtcod.image_blit_2x(img, 0, 0, 0) libtcod.console_set_default_foreground(0, libtcod.green) libtcod.console_print_ex(0, SCREEN_WIDTH / 4, SCREEN_HEIGHT / 4, 
libtcod.BKGND_NONE, libtcod.LEFT, header) while not libtcod.console_is_window_closed(): key = libtcod.console_check_for_keypress(libtcod.KEY_PRESSED) timer += 1 if timer % (LIMIT_FPS // 4) == 0: if timer % (LIMIT_FPS // 2) == 0: timer = 0 libtcod.console_set_char(0, cursor + x, y, "_") libtcod.console_set_char_foreground(0, cursor + x, y, libtcod.white) else: libtcod.console_set_char(0, cursor + x, y, " ") libtcod.console_set_char_foreground(0, cursor + x, y, libtcod.white) if key.vk == libtcod.KEY_BACKSPACE and cursor > 0: libtcod.console_set_char(0, cursor + x, y, " ") libtcod.console_set_char_foreground(0, cursor + x, y, libtcod.white) command = command[:-1] cursor -= 1 elif key.vk == libtcod.KEY_ENTER: break elif key.vk == libtcod.KEY_ESCAPE: command = "" break elif key.c > 0: letter = chr(key.c) libtcod.console_set_char(0, cursor + x, y, letter) # print new character at appropriate position on screen libtcod.console_set_char_foreground(0, cursor + x, y, libtcod.white) # make it white or something command += letter # add to the string cursor += 1 libtcod.console_flush() return command def player_move_or_attack(dx, dy): global fov_recompute global objects # the coordinates the player is moving to/attacking x = player.x + dx y = player.y + dy # try to find an attackable object there target = None for object in objects: friendly = isinstance(object.ai, FriendlyMonster) if object.fighter and not friendly and object.x == x and object.y == y: target = object break # attack if target found, move otherwise if target is not None: player.fighter.attack(target) else: player.move(dx, dy) fov_recompute = True def create_room(room): global map # go through the tiles in the rectangle and make them passable for x in range(room.x1 + 1, room.x2): for y in range(room.y1 + 1, room.y2): map[x][y].blocked = False map[x][y].block_sight = False def random_choice(chances_dict): # choose one option from dictionary of chances, returning its key chances = chances_dict.values() strings = 
chances_dict.keys() return strings[random_choice_index(chances)] def random_choice_index(chances): # choose one option from a list of chances and return its index dice = libtcod.random_get_int(0, 1, sum(chances)) # go through all chances, keep sum so far running_sum = 0 choice = 0 for w in chances: running_sum += w # see if the dice landed in the part that corresponds to this choice if dice <= running_sum: return choice choice += 1 def from_dungeon_level(table): # returns a value that depends on level. the table specifies what value occurs after each level, default is 0 for (value, level) in reversed(table): if dungeon_level >= level: return value return 0 def from_player_level(table): # returns a value dependent on level. Table specifies what value occurs after each level, default is 0 for (value, level) in reversed(table): if player.level >= level: return value return 0 def get_monster_from_hitdice(x, y, name, hitdice, color, friendly=False): # generate monster object from number of hit dice # get tuple components num = hitdice[0] sides = hitdice[1] # determine to-hit from num and sides if sides == 12: to_hit = num elif 11 >= sides >= 8: to_hit = num / 2 else: to_hit = num / 3 # if sides >= 10, make letter a capital if sides >= 10: letter = name[0].capitalize() else: letter = name[0] # get number of damage dice from hitdice, making sure it's at least 1. 
if num / 2 < 1: roll = (1, sides) else: roll = (num / 2, sides) fighter_component = Fighter(hp=hhtable.rolldice(*hitdice), armor_class=10 - num, to_hit=to_hit, damage=0, damage_roll=roll, xp=num * sides * 5, death_function=monster_death) if friendly: ai_component = FriendlyMonster() else: ai_component = BasicMonster() monster = Object(x, y, letter, name, color, blocks=(not friendly), fighter=fighter_component, ai=ai_component) return monster def get_item(x, y): choice = random.choice(['heal', 'grenade', 'misc']) if choice == 'heal': # create a healing item heal_item = hhtable.make_heal_item() heal_component = Heal(dice=heal_item['roll'], heal_all=heal_item['heal_all']) item_component = Item(reusable=heal_item['reuse'], uses=heal_item['uses'], use_function=heal_component) item = Object(x, y, '!', heal_item['name'], libtcod.violet, item=item_component) elif choice == 'grenade': # create a grenade grenade = hhtable.make_grenade() grenade_component = Grenade(damage=grenade['damage'], radius=grenade['radius'], radius_damage=grenade['radius_damage'], kills=grenade['kills'], kills_radius=grenade['kills_radius']) item_component = Item(use_function=grenade_component) item = Object(x, y, '*', grenade['name'], libtcod.light_yellow, item=item_component) elif choice == 'misc': subchoice = random.choice(['confuse', 'buff', 'random_damage', 'detector', 'summon', 'vector']) if subchoice == 'random_damage': # create an arc lightning device random_damage_component = RandomDamage() item_component = Item(use_function=random_damage_component) item = Object(x, y, '#', 'Tesla arc device', libtcod.light_yellow, item=item_component) elif subchoice == 'confuse': # create a confuse item confuse_component = Confuse() item_component = Item(use_function=confuse_component) item = Object(x, y, '#', 'neural scrambler', libtcod.light_yellow, item=item_component) elif subchoice == 'buff': # create a buff item buff = hhtable.make_buff() buff_component = Buff(*buff['args']) item_component = 
Item(use_function=buff_component) item = Object(x, y, chr(167), buff['name'], libtcod.dark_magenta, item=item_component) elif subchoice == 'detector': # create a motion tracker detector_component = Detector(detect_range=10) item_component = Item(reusable=True, uses=hhtable.rolldice(1, 3), use_function=detector_component) item = Object(x, y, '#', 'motion tracker', libtcod.light_yellow, item=item_component) elif subchoice == 'summon': # create a friendly summonable monster summon_component = Summon(name='TED-3', hitdice=(4, 6), color=libtcod.sepia) item_component = Item(use_function=summon_component) item = Object(x, y, chr(12), 'TED-3', libtcod.sepia, item=item_component) elif subchoice == 'vector': # create the vector-jet harness harness = Equipment('back',armor_bonus=-1) item = Object(x, y, '%', 'vector-jet harness', libtcod.black, equipment=harness) return item def get_weapon(x, y): weapon = hhtable.make_weapon() equipment_component = Equipment(slot='right hand', damage_roll=weapon['damage'], to_hit_bonus=weapon['bonus'], damage_bonus=weapon['bonus'], ranged=weapon['gun'], ammo=weapon['ammo']) item = Object(x, y, weapon['char'], weapon['name'], libtcod.brass, equipment=equipment_component) return item def get_armor(x, y): armor = hhtable.make_armor() if armor['char'] == '[': armor_slot = 'shield' else: armor_slot = 'armor' equipment_component = Equipment(slot=armor_slot, armor_bonus=armor['ac'], damage_bonus=armor['str_bonus'], to_hit_bonus=armor['dex_bonus']) item = Object(x, y, armor['char'], armor['name'], libtcod.dark_gray, equipment=equipment_component) return item def get_placeable(x, y): type = random.choice(['terminal', 'restpod', 'teleporter']) if type == 'terminal': terminal = Terminal() placeable = Placeable(use_class=terminal) obj = Object(x, y, chr(127), 'terminal', libtcod.silver, placeable=placeable) elif type == 'restpod': restpod = RestPod(heal_bonus=dungeon_level) placeable = Placeable(use_class=restpod) obj = Object(x, y, chr(239), 'rest pod', 
libtcod.purple, placeable=placeable) elif type == 'teleporter': teleport = Teleporter() placeable = Placeable(use_class=teleport) obj = Object(x, y, chr(23), 'teleporter', libtcod.dark_blue, placeable=placeable) return obj def place_objects(room): # maximum number of monsters per room max_monsters = from_dungeon_level([[2, 1], [3, 4], [4, 6], [5, 8]]) # monster table # key = name # dict entries: # key[0]: dungeon level appearing # key[1]: list[name, hitdice tuple, color] monster_table = hhtable.make_monster_table(dungeon_level) # max number of items per room max_items = from_dungeon_level([[1, 1], [2, 4]]) # chance of each item # functions the same as the monster chances (weighted values, availability by level) # future revisions should break this down by type instead of individual item, resolving specific items in the # sub entries below. item_chances = {'item': 4, 'armor': 3, 'weapon': 3, 'placeable': 2} # choose random number of monsters num_monsters = libtcod.random_get_int(0, 0, max_monsters) for i in range(num_monsters): # choose random spot for this monster x = libtcod.random_get_int(0, room.x1 + 1, room.x2 - 1) y = libtcod.random_get_int(0, room.y1 + 1, room.y2 - 1) # only place it if the tile is not blocked if not is_blocked(x, y): # pick a monster, then check if it's valid for this dungeon level choice = random.choice(monster_table.keys()) monster = get_monster_from_hitdice(x, y, *monster_table[choice][1]) objects.append(monster) # choose a random number of items num_items = libtcod.random_get_int(0, 0, max_items) for i in range(num_items): # choose a random spot for the item x = libtcod.random_get_int(0, room.x1 + 1, room.x2 - 1) y = libtcod.random_get_int(0, room.y1 + 1, room.y2 - 1) # only place it if the tile is not blocked if not is_blocked(x, y): choice = random_choice(item_chances) if choice == 'item': item = get_item(x, y) elif choice == 'armor': item = get_armor(x, y) elif choice == 'weapon': item = get_weapon(x, y) elif choice == 'placeable': 
item = get_placeable(x, y) objects.append(item) item.send_to_back() # items appear below other objects def make_map(): global map, objects, stairs # the list of objects with just the player objects = [player] # fill map with "unblocked" tiles map = [[Tile(True) for y in range(MAP_HEIGHT)] for x in range(MAP_WIDTH)] # create two rooms rooms = [] num_rooms = 0 for r in range(MAX_ROOMS): # random width and height w = libtcod.random_get_int(0, ROOM_MIN_SIZE, ROOM_MAX_SIZE) h = libtcod.random_get_int(0, ROOM_MIN_SIZE, ROOM_MAX_SIZE) # random position without leaving map x = libtcod.random_get_int(0, 0, MAP_WIDTH - w - 1) y = libtcod.random_get_int(0, 0, MAP_HEIGHT - h - 1) # "Rect" class makes rectangles easier to work with new_room = Rect(x, y, w, h) # run through the other rooms and see if they intersect with this one failed = False for other_room in rooms: if new_room.intersect(other_room): failed = True break if not failed: # this means there are no intersections so the room is valid # "paint" it to the map's tiles' create_room(new_room) place_objects(new_room) # center coordinates of new_room, will be useful later (new_x, new_y) = new_room.center() # print "room number" onto room (optional, not included in sample code) # remove later if issues arise, but I think it looks cool and H&H-y # room_no = Object(new_x,new_y,chr(65+num_rooms), 'room number', libtcod.white, blocks=False) # objects.insert(0,room_no) if num_rooms == 0: # this is the first room, where the player starts at player.x = new_x player.y = new_y else: # all rooms after the first: # connect it to the previous room with a tunnel # center coordinates of previous room (prev_x, prev_y) = rooms[num_rooms - 1].center() if libtcod.random_get_int(0, 0, 1) == 1: # first move horizontally then vertically create_h_tunnel(prev_x, new_x, prev_y) create_v_tunnel(prev_y, new_y, new_x) else: # first move vertically then horizontally create_v_tunnel(prev_y, new_y, prev_x) create_h_tunnel(prev_x, new_x, new_y) # 
finally, append the new room to the list rooms.append(new_room) num_rooms += 1 # create stairs at the center of the last room stairs = Object(new_x, new_y, '<', 'stairs', libtcod.white, always_visible=True) objects.append(stairs) stairs.send_to_back() # so it draws below monsters def next_level(): global dungeon_level if dungeon_level >= 13: message('At last, you find an escape to the surface. You crawl up the narrow passage in search of rescue.', libtcod.yellow) end_game() # advance to the next level message('You take a moment to rest, and recover your strength.', libtcod.yellow) player.fighter.heal(player.fighter.max_hp / 2) # heal player by 50% message('After a rare moment of peace, you descend further into the cave.', libtcod.red) dungeon_level += 1 make_map() initialize_fov() def menu(header, options, width): if len(options) > 26: raise ValueError('Cannot have a menu with more than 26 options.') # calculate total height for the header (after auto wrap) and one line per option header_height = libtcod.console_get_height_rect(con, 0, 0, width, SCREEN_HEIGHT, header) if header == '': header_height = 0 height = len(options) + header_height # create an off-screen console that represents the menu's window window = libtcod.console_new(width, height) # print the header with auto-wrap libtcod.console_set_default_foreground(window, libtcod.white) libtcod.console_print_rect_ex(window, 0, 0, width, height, libtcod.BKGND_NONE, libtcod.LEFT, header) # print all the options y = header_height letter_index = ord('a') for option_text in options: text = '(' + chr(letter_index) + ')' + option_text libtcod.console_print_ex(window, 0, y, libtcod.BKGND_NONE, libtcod.LEFT, text) y += 1 letter_index += 1 # blit the contents of window to root console x = SCREEN_WIDTH / 2 - width / 2 y = SCREEN_HEIGHT / 2 - height / 2 libtcod.console_blit(window, 0, 0, width, height, 0, x, y, 1.0, 0.7) # present the root console to the player and wait for keypress libtcod.console_flush() input_valid = 
False while not input_valid: key = libtcod.console_wait_for_keypress(True) if key.pressed: key = libtcod.console_wait_for_keypress(False) if not key.pressed: input_valid = True if key.vk == libtcod.KEY_ENTER and key.lalt: # special case, have to check for alt+enter for fullscreen libtcod.console_set_fullscreen(not libtcod.console_is_fullscreen()) # convert the ASCII code to an index; if it corresponds to an option, return it index = key.c - ord('a') if 0 <= index < len(options): return index return None def msgbox(text, width=50): menu(text, [], width) # use menu() as a sort of 'message box' def inventory_menu(header): # show a menu of each item in the inventory as an option if len(inventory) == 0: options = ['Inventory is empty.'] else: options = [] for item in inventory: text = item.name # show additional information if equipped if item.equipment and item.equipment.is_equipped: text = text + ' (on ' + item.equipment.slot + ')' options.append(text) index = menu(header, options, INVENTORY_WIDTH) # if an item was chosen, return it if index is None or len(inventory) == 0: return None return inventory[index].item def render_all(): global color_light_wall global color_light_ground global fov_recompute if fov_recompute: # recompute FOV if needed fov_recompute = False libtcod.map_compute_fov(fov_map, player.x, player.y, TORCH_RADIUS, FOV_LIGHT_WALLS, FOV_ALGO) # go through all tiles, and set their background color for y in range(MAP_HEIGHT): for x in range(MAP_WIDTH): visible = libtcod.map_is_in_fov(fov_map, x, y) wall = map[x][y].block_sight if not visible: # if it's not visible right now, the player can only see it if it's explored if map[x][y].explored: # it's out of the player FOV if wall: libtcod.console_set_char_background(con, x, y, color_dark_wall, libtcod.BKGND_SET) else: libtcod.console_set_char_background(con, x, y, color_dark_ground, libtcod.BKGND_SET) else: # it's visible if wall: libtcod.console_set_char_background(con, x, y, color_light_wall, 
libtcod.BKGND_SET) else: libtcod.console_set_char_background(con, x, y, color_light_ground, libtcod.BKGND_SET) map[x][y].explored = True # draw all objects in the list for object in objects: if object != player: object.draw() player.draw() # blit con to root console libtcod.console_blit(con, 0, 0, MAP_WIDTH, MAP_HEIGHT, 0, 0, 0) # prepare to render the GUI panel libtcod.console_set_default_background(panel, libtcod.black) libtcod.console_clear(panel) # print the game messages, one line at a time y = 1 for (line, color) in game_msgs: libtcod.console_set_default_foreground(panel, color) libtcod.console_print_ex(panel, MSG_X, y, libtcod.BKGND_NONE, libtcod.LEFT, line) y += 1 # show the player's stats level_up_xp = LEVEL_UP_BASE + (player.level * LEVEL_UP_FACTOR) render_bar(1, 1, BAR_WIDTH, 'HP', player.fighter.hp, player.fighter.max_hp, libtcod.light_red, libtcod.darker_red) render_bar(1, 2, BAR_WIDTH, 'XP', player.fighter.xp, level_up_xp, libtcod.dark_green, libtcod.grey) libtcod.console_print_ex(panel, 1, 4, libtcod.BKGND_NONE, libtcod.LEFT, 'Exp. level ' + str(player.level)) libtcod.console_print_ex(panel, 1, 5, libtcod.BKGND_NONE, libtcod.LEFT, 'Cave level ' + str(dungeon_level)) libtcod.console_print_ex(panel, 1, 6, libtcod.BKGND_NONE, libtcod.LEFT, 'Kills: ' + str(player.fighter.kills)) # display names of objects under mouse libtcod.console_set_default_foreground(panel, libtcod.light_gray) libtcod.console_print_ex(panel, 1, 0, libtcod.BKGND_NONE, libtcod.LEFT, get_names_under_mouse()) # display names of objects under player on right side of panel libtcod.console_print_ex(panel, SCREEN_WIDTH - 2, 0, libtcod.BKGND_NONE, libtcod.RIGHT, get_names_under_player()) # blit the contents of "panel" to root console libtcod.console_blit(panel, 0, 0, SCREEN_WIDTH, PANEL_HEIGHT, 0, 0, PANEL_Y) def render_bar(x, y, total_width, name, value, maximum, bar_color, back_color): # render a bar (HP, XP, etc). 
first calculate width of bar bar_width = int(float(value) / maximum * total_width) # render background first libtcod.console_set_default_background(panel, back_color) libtcod.console_rect(panel, x, y, total_width, 1, False, libtcod.BKGND_SCREEN) # now render the bar on top libtcod.console_set_default_background(panel, bar_color) if bar_width > 0: libtcod.console_rect(panel, x, y, bar_width, 1, False, libtcod.BKGND_SCREEN) # finally, some centered text with the values libtcod.console_set_default_foreground(panel, libtcod.white) libtcod.console_print_ex(panel, x + total_width / 2, y, libtcod.BKGND_NONE, libtcod.CENTER, name + ': ' + str(value) + '/' + str(maximum)) def message(new_msg, color=libtcod.white): # split the message if necessary, among multiple lines new_msg_lines = textwrap.wrap(new_msg, MSG_WIDTH) for line in new_msg_lines: # if the bugger is full, remove the first line to make room for the new one. if len(game_msgs) == MSG_HEIGHT: del game_msgs[0] # add the new line as a tuple, with the text and color game_msgs.append((line, color)) def create_h_tunnel(x1, x2, y): global map for x in range(min(x1, x2), max(x1, x2) + 1): map[x][y].blocked = False map[x][y].block_sight = False def create_v_tunnel(y1, y2, x): global map # vertical tunnel for y in range(min(y1, y2), max(y1, y2) + 1): map[x][y].blocked = False map[x][y].block_sight = False def is_blocked(x, y): global map global objects # first test the map tile if map[x][y].blocked: return True # now check for blocking objects for object in objects: if object.blocks and object.x == x and object.y == y: return True return False def closest_monster(max_range, exclusions): # find closest enemy, up to a max range and in player FOV # exclusions argument MUST BE A LIST closest_enemy = None closest_dist = max_range + 1 # start with slightly more than max range for obj in objects: if obj.fighter and obj not in exclusions and libtcod.map_is_in_fov(fov_map, obj.x, obj.y): # calculate distance between this object and 
the player dist = player.distance_to(obj) if dist < closest_dist: # it's closer so remember it closest_enemy = obj closest_dist = dist return closest_enemy def player_death(player): # the game ended! global game_state message('You died!', libtcod.red) render_all() game_state = 'dead' # for added effect, transform player into a corpse! player.char = '%' player.color = libtcod.white new_score(player) def monster_death(monster): # transform it into a nasty corpse! it doesn't block, can't be # attacked, and doesn't move message(monster.name.title() + ' is dead! You gain ' + str(monster.fighter.xp) + ' experience points.', libtcod.orange) monster.char = '%' monster.color = libtcod.dark_red monster.blocks = False monster.fighter = None monster.ai = None monster.name = 'remains of ' + monster.name monster.send_to_back() def get_equipped_in_slot(slot): for obj in inventory: if obj.equipment and obj.equipment.slot == slot and obj.equipment.is_equipped: return obj.equipment return None def get_all_equipped(obj): if obj == player: equipped_list = [] for item in inventory: if item.equipment and item.equipment.is_equipped: equipped_list.append(item.equipment) return equipped_list else: return [] # other objects have no equipment def check_level_up(): # see if the player's experience is enough to level-up level_up_xp = LEVEL_UP_BASE + (player.level * LEVEL_UP_FACTOR) if player.fighter.xp >= level_up_xp: # it is! *ding* level up player.level += 1 player.fighter.xp -= level_up_xp message('Your battle experience has hardened you further. You reached level ' + str(player.level) + '!', libtcod.yellow) render_all() # re-render console so that message plays before menu # check player level, roll 1d10 for new HP if 6 or less, or just +3 (see H&H rulebook) if player.level <= 6: hit_die = hhtable.rolldice(1, 10) else: hit_die = 3 player.fighter.max_hp += hit_die player.fighter.hp += hit_die # after level six, to_hit and damage only improve on even levels. 
if player.level <= 6 or player.level % 2 == 0: player.fighter.base_to_hit += 1 player.fighter.base_damage += 1 # ############################################ # Initialization & Main Loop # ############################################ libtcod.console_set_custom_font('terminal16x16_gs_ro.png', libtcod.FONT_TYPE_GREYSCALE | libtcod.FONT_LAYOUT_ASCII_INROW) libtcod.console_init_root(SCREEN_WIDTH, SCREEN_HEIGHT, 'Hulks and Horrors', False) libtcod.sys_set_fps(LIMIT_FPS) panel = libtcod.console_new(SCREEN_WIDTH, PANEL_HEIGHT) con = libtcod.console_new(MAP_WIDTH, MAP_HEIGHT) main_menu(firstrun=True)
codeparrot/github-code-clean
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (C) 2009-2014: # Gabes Jean, naparuba@gmail.com # Gerhard Lausser, Gerhard.Lausser@consol.de # Gregory Starck, g.starck@gmail.com # Hartmut Goebel, h.goebel@goebel-consult.de # # This file is part of Shinken. # # Shinken is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Shinken is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with Shinken. If not, see <http://www.gnu.org/licenses/>. import time import os import cStringIO import tempfile import traceback import cPickle import threading from Queue import Empty from shinken.external_command import ExternalCommand from shinken.check import Check from shinken.notification import Notification from shinken.eventhandler import EventHandler from shinken.brok import Brok from shinken.downtime import Downtime from shinken.contactdowntime import ContactDowntime from shinken.comment import Comment from shinken.acknowledge import Acknowledge from shinken.log import logger from shinken.util import nighty_five_percent, get_memory from shinken.load import Load from shinken.http_client import HTTPClient, HTTPExceptions from shinken.stats import statsmgr from shinken.misc.common import DICT_MODATTR class Scheduler(object): """Please Add a Docstring to describe the class here""" def __init__(self, scheduler_daemon): ''' :type scheduler_daemon: shinken.daemons.schedulerdaemon.Shinken ''' self.sched_daemon = scheduler_daemon # When set to false by us, we die and arbiter launch a new Scheduler self.must_run = True # protect 
this uniq list self.waiting_results_lock = threading.RLock() self.waiting_results = [] # satellites returns us results # and to not wait for them, we put them here and # use them later # Every N seconds we call functions like consume, del zombies # etc. All of theses functions are in recurrent_works with the # every tick to run. So must be an integer > 0 # The order is important, so make key an int. # TODO: at load, change value by configuration one (like reaper time, etc) self.recurrent_works = { 0: ('update_downtimes_and_comments', self.update_downtimes_and_comments, 1), 1: ('schedule', self.schedule, 1), # just schedule 2: ('consume_results', self.consume_results, 1), # incorporate checks and dependencies # now get the news actions (checks, notif) raised 3: ('get_new_actions', self.get_new_actions, 1), 4: ('get_new_broks', self.get_new_broks, 1), # and broks 5: ('scatter_master_notifications', self.scatter_master_notifications, 1), 6: ('delete_zombie_checks', self.delete_zombie_checks, 1), 7: ('delete_zombie_actions', self.delete_zombie_actions, 1), # 3: (self.delete_unwanted_notifications, 1), 8: ('check_freshness', self.check_freshness, 10), 9: ('clean_caches', self.clean_caches, 1), 10: ('update_retention_file', self.update_retention_file, 3600), 11: ('check_orphaned', self.check_orphaned, 60), # For NagVis like tools: update our status every 10s 12: ('get_and_register_update_program_status_brok', self.get_and_register_update_program_status_brok, 10), # Check for system time change. And AFTER get new checks # so they are changed too. 
13: ('check_for_system_time_change', self.sched_daemon.check_for_system_time_change, 1), # launch if need all internal checks 14: ('manage_internal_checks', self.manage_internal_checks, 1), # clean some times possible overridden Queues, to do not explode in memory usage # every 1/4 of hour 15: ('clean_queues', self.clean_queues, 1), # Look for new business_impact change by modulation every minute 16: ('update_business_values', self.update_business_values, 60), # Reset the topology change flag if need 17: ('reset_topology_change_flag', self.reset_topology_change_flag, 1), 18: ('check_for_expire_acknowledge', self.check_for_expire_acknowledge, 1), 19: ('send_broks_to_modules', self.send_broks_to_modules, 1), 20: ('get_objects_from_from_queues', self.get_objects_from_from_queues, 1), } # stats part self.nb_checks_send = 0 self.nb_actions_send = 0 self.nb_broks_send = 0 self.nb_check_received = 0 # Log init logger.load_obj(self) self.instance_id = 0 # Temporary set. Will be erase later # Ours queues self.checks = {} self.actions = {} self.downtimes = {} self.contact_downtimes = {} self.comments = {} self.broks = [] # Some flags self.has_full_broks = False # have a initial_broks in broks queue? 
self.need_dump_memory = False # set by signal 1 self.need_objects_dump = False # set by signal 2 # And a dummy push flavor self.push_flavor = 0 # Now fake initialize for our satellites self.brokers = {} self.pollers = {} self.reactionners = {} def reset(self): self.must_run = True with self.waiting_results_lock: del self.waiting_results[:] for o in self.checks, self.actions, self.downtimes,\ self.contact_downtimes, self.comments,\ self.brokers: o.clear() del self.broks[:] def iter_hosts_and_services(self): for what in (self.hosts, self.services): for elt in what: yield elt # Load conf for future use # we are in_test if the data are from an arbiter object like, # so only for tests def load_conf(self, conf, in_test=False): self.program_start = int(time.time()) self.conf = conf self.hostgroups = conf.hostgroups self.services = conf.services # We need reversed list for search in the retention # file read self.services.optimize_service_search(conf.hosts) self.hosts = conf.hosts self.notificationways = conf.notificationways self.checkmodulations = conf.checkmodulations self.macromodulations = conf.macromodulations self.contacts = conf.contacts self.contactgroups = conf.contactgroups self.servicegroups = conf.servicegroups self.timeperiods = conf.timeperiods self.commands = conf.commands self.triggers = conf.triggers self.triggers.compile() self.triggers.load_objects(self) if not in_test: # Commands in the host/services/contacts are not real one # we must relink them t0 = time.time() self.conf.late_linkify() logger.debug("Late command relink in %d", time.time() - t0) # self.status_file = StatusFile(self) # External status file # From Arbiter. 
Use for Broker to differentiate schedulers self.instance_id = conf.instance_id # Tag our hosts with our instance_id for h in self.hosts: h.instance_id = conf.instance_id for s in self.services: s.instance_id = conf.instance_id # self for instance_name self.instance_name = conf.instance_name # and push flavor self.push_flavor = conf.push_flavor # Now we can update our 'ticks' for special calls # like the retention one, etc self.update_recurrent_works_tick('update_retention_file', self.conf.retention_update_interval * 60) self.update_recurrent_works_tick('clean_queues', self.conf.cleaning_queues_interval) # Update the 'tick' for a function call in our # recurrent work def update_recurrent_works_tick(self, f_name, new_tick): for i in self.recurrent_works: (name, f, old_tick) = self.recurrent_works[i] if name == f_name: logger.debug("Changing the tick to %d for the function %s", new_tick, name) self.recurrent_works[i] = (name, f, new_tick) # Load the pollers from our app master def load_satellites(self, pollers, reactionners): self.pollers = pollers self.reactionners = reactionners # Oh... Arbiter want us to die... To launch a new Scheduler # "Mais qu'a-t-il de plus que je n'ais pas?" # "But.. On which point it is better than me?" 
def die(self): self.must_run = False def dump_objects(self): d = tempfile.gettempdir() p = os.path.join(d, 'scheduler-obj-dump-%d' % time.time()) logger.info('Opening the DUMP FILE %s', p) try: f = open(p, 'w') f.write('Scheduler DUMP at %d\n' % time.time()) for c in self.checks.values(): s = 'CHECK: %s:%s:%s:%s:%s:%s\n' % \ (c.id, c.status, c.t_to_go, c.poller_tag, c.command, c.worker) f.write(s) for a in self.actions.values(): s = '%s: %s:%s:%s:%s:%s:%s\n' % \ (a.__class__.my_type.upper(), a.id, a.status, a.t_to_go, a.reactionner_tag, a.command, a.worker) f.write(s) for b in self.broks: s = 'BROK: %s:%s\n' % (b.id, b.type) f.write(s) f.close() except Exception, exp: logger.error("Error in writing the dump file %s : %s", p, str(exp)) def dump_config(self): d = tempfile.gettempdir() p = os.path.join(d, 'scheduler-conf-dump-%d' % time.time()) logger.info('Opening the DUMP FILE %s', p) try: f = open(p, 'w') f.write('Scheduler config DUMP at %d\n' % time.time()) self.conf.dump(f) f.close() except Exception, exp: logger.error("Error in writing the dump file %s : %s", p, str(exp)) # Load the external command def load_external_command(self, e): self.external_command = e # We've got activity in the fifo, we get and run commands def run_external_commands(self, cmds): for command in cmds: self.run_external_command(command) def run_external_command(self, command): logger.debug("scheduler resolves command '%s'", command) ext_cmd = ExternalCommand(command) self.external_command.resolve_command(ext_cmd) # Add_Brok is a bit more complex than the others, because # on starting, the broks are put in a global queue : self.broks # then when the first broker connect, it will generate initial_broks # in it's own queue (so bname != None). 
# and when in "normal" run, we just need to put the brok to all queues def add_Brok(self, brok, bname=None): # For brok, we TAG brok with our instance_id brok.instance_id = self.instance_id # Maybe it's just for one broker if bname: self.brokers[bname]['broks'].append(brok) else: # If there are known brokers, give it to them if len(self.brokers) > 0: # Or maybe it's for all for bname in self.brokers: self.brokers[bname]['broks'].append(brok) else: # no brokers? maybe at startup for logs # we will put in global queue, that the first broker # connection will get all self.broks.append(brok) def add_Notification(self, notif): self.actions[notif.id] = notif # A notification ask for a brok if notif.contact is not None: b = notif.get_initial_status_brok() self.add(b) def add_Check(self, c): self.checks[c.id] = c # A new check means the host/service changes its next_check # need to be refreshed b = c.ref.get_next_schedule_brok() self.add(b) def add_EventHandler(self, action): # print "Add an event Handler", elt.id self.actions[action.id] = action def add_Downtime(self, dt): self.downtimes[dt.id] = dt if dt.extra_comment: self.add_Comment(dt.extra_comment) def add_ContactDowntime(self, contact_dt): self.contact_downtimes[contact_dt.id] = contact_dt def add_Comment(self, comment): self.comments[comment.id] = comment b = comment.ref.get_update_status_brok() self.add(b) # Ok one of our modules send us a command? just run it! def add_ExternalCommand(self, ext_cmd): self.external_command.resolve_command(ext_cmd) # Schedulers have some queues. 
We can simplify call by adding # elements into the proper queue just by looking at their type # Brok -> self.broks # Check -> self.checks # Notification -> self.actions # Downtime -> self.downtimes # ContactDowntime -> self.contact_downtimes def add(self, elt): f = self.__add_actions.get(elt.__class__, None) if f: # print("found action for %s: %s" % (elt.__class__.__name__, f.__name__)) f(self, elt) __add_actions = { Check: add_Check, Brok: add_Brok, Notification: add_Notification, EventHandler: add_EventHandler, Downtime: add_Downtime, ContactDowntime: add_ContactDowntime, Comment: add_Comment, ExternalCommand: add_ExternalCommand, } # We call the function of modules that got the # hook function # TODO: find a way to merge this and the version in daemon.py def hook_point(self, hook_name): for inst in self.sched_daemon.modules_manager.instances: full_hook_name = 'hook_' + hook_name logger.debug("hook_point: %s: %s %s", inst.get_name(), str(hasattr(inst, full_hook_name)), hook_name) if hasattr(inst, full_hook_name): f = getattr(inst, full_hook_name) try: f(self) except Exception, exp: logger.error("The instance %s raise an exception %s." "I disable it and set it to restart it later", inst.get_name(), str(exp)) output = cStringIO.StringIO() traceback.print_exc(file=output) logger.error("Exception trace follows: %s", output.getvalue()) output.close() self.sched_daemon.modules_manager.set_to_restart(inst) # Ours queues may explode if no one ask us for elements # It's very dangerous: you can crash your server... 
and it's a bad thing :) # So we 'just' keep last elements: 5 of max is a good overhead def clean_queues(self): # if we set the interval at 0, we bail out if self.conf.cleaning_queues_interval == 0: return max_checks = 5 * (len(self.hosts) + len(self.services)) max_broks = 5 * (len(self.hosts) + len(self.services)) max_actions = 5 * len(self.contacts) * (len(self.hosts) + len(self.services)) # For checks, it's not very simple: # For checks, they may be referred to their host/service # We do not just del them in the check list, but also in their service/host # We want id of lower than max_id - 2*max_checks if len(self.checks) > max_checks: # keys does not ensure sorted keys. Max is slow but we have no other way. id_max = max(self.checks.keys()) to_del_checks = [c for c in self.checks.values() if c.id < id_max - max_checks] nb_checks_drops = len(to_del_checks) if nb_checks_drops > 0: logger.info("I have to del some checks (%d)..., sorry", nb_checks_drops) for c in to_del_checks: i = c.id elt = c.ref # First remove the link in host/service elt.remove_in_progress_check(c) # Then in dependent checks (I depend on, or check # depend on me) for dependent_checks in c.depend_on_me: dependent_checks.depend_on.remove(c.id) for c_temp in c.depend_on: c_temp.depen_on_me.remove(c) del self.checks[i] # Final Bye bye ... 
else: nb_checks_drops = 0 # For broks and actions, it's more simple # or brosk, manage global but also all brokers queue b_lists = [self.broks] for (bname, e) in self.brokers.iteritems(): b_lists.append(e['broks']) nb_broks_drops = 0 for broks in b_lists: if len(broks) > max_broks: count = len(broks) - max_broks del broks[-count:] nb_broks_drops += count if len(self.actions) > max_actions: id_max = max(self.actions.keys()) id_to_del_actions = [i for i in self.actions if i < id_max - max_actions] nb_actions_drops = len(id_to_del_actions) for i in id_to_del_actions: # Remember to delete reference of notification in service/host a = self.actions[i] if a.is_a == 'notification': a.ref.remove_in_progress_notification(a) del self.actions[i] else: nb_actions_drops = 0 statsmgr.incr("scheduler.checks.dropped", nb_checks_drops, "queue") statsmgr.incr("scheduler.broks.dropped", nb_broks_drops, "queue") statsmgr.incr("scheduler.actions.dropped", nb_actions_drops, "queue") if nb_checks_drops != 0 or nb_broks_drops != 0 or nb_actions_drops != 0: logger.warning("We drop %d checks, %d broks and %d actions", nb_checks_drops, nb_broks_drops, nb_actions_drops) # For tunning purpose we use caches but we do not want them to explode # So we clean them def clean_caches(self): for tp in self.timeperiods: tp.clean_cache() # Ask item (host or service) an update_status # and add it to our broks queue def get_and_register_status_brok(self, item): b = item.get_update_status_brok() self.add(b) # Ask item (host or service) a check_result_brok # and add it to our broks queue def get_and_register_check_result_brok(self, item): b = item.get_check_result_brok() self.add(b) # We do not want this downtime id def del_downtime(self, dt_id): if dt_id in self.downtimes: self.downtimes[dt_id].ref.del_downtime(dt_id) del self.downtimes[dt_id] # We do not want this downtime id def del_contact_downtime(self, dt_id): if dt_id in self.contact_downtimes: self.contact_downtimes[dt_id].ref.del_downtime(dt_id) del 
self.contact_downtimes[dt_id] # We do not want this comment id def del_comment(self, c_id): if c_id in self.comments: self.comments[c_id].ref.del_comment(c_id) del self.comments[c_id] # We are looking for outdated acks, and if so, remove them def check_for_expire_acknowledge(self): for elt in self.iter_hosts_and_services(): elt.check_for_expire_acknowledge() # We update all business_impact to look at new modulation # start for impacts, and so update broks status and # problems value too def update_business_values(self): for elt in self.iter_hosts_and_services(): if not elt.is_problem: was = elt.business_impact elt.update_business_impact_value() new = elt.business_impact # Ok, the business_impact change, we can update the broks if new != was: # print "The elements", i.get_name(), "change it's business_impact value" self.get_and_register_status_brok(elt) # When all impacts and classic elements are updated, # we can update problems (their value depend on impacts, so # they must be done after) for elt in self.iter_hosts_and_services(): # We first update impacts and classic elements if elt.is_problem: was = elt.business_impact elt.update_business_impact_value() new = elt.business_impact # Maybe one of the impacts change it's business_impact to a high value # and so ask for the problem to raise too if new != was: # print "The elements", i.get_name(), # print "change it's business_impact value from", was, "to", new self.get_and_register_status_brok(elt) # Each second we search for master notification that are scatterisable and we do the job # we take the sons and we put them into our actions queue def scatter_master_notifications(self): now = time.time() for a in self.actions.values(): # We only want notifications if a.is_a != 'notification': continue if a.status == 'scheduled' and a.is_launchable(now): if not a.contact: # This is a "master" notification created by create_notifications. # It wont sent itself because it has no contact. 
# We use it to create "child" notifications (for the contacts and # notification_commands) which are executed in the reactionner. item = a.ref childnotifications = [] if not item.notification_is_blocked_by_item(a.type, now): # If it is possible to send notifications # of this type at the current time, then create # a single notification for each contact of this item. childnotifications = item.scatter_notification(a) for c in childnotifications: c.status = 'scheduled' self.add(c) # this will send a brok # If we have notification_interval then schedule # the next notification (problems only) if a.type == 'PROBLEM': # Update the ref notif number after raise the one of the notification if len(childnotifications) != 0: # notif_nb of the master notification # was already current_notification_number+1. # If notifications were sent, # then host/service-counter will also be incremented item.current_notification_number = a.notif_nb if item.notification_interval != 0 and a.t_to_go is not None: # We must continue to send notifications. # Just leave it in the actions list and set it to "scheduled" # and it will be found again later # Ask the service/host to compute the next notif time. It can be just # a.t_to_go + item.notification_interval*item.__class__.interval_length # or maybe before because we have an # escalation that need to raise up before a.t_to_go = item.get_next_notification_time(a) a.notif_nb = item.current_notification_number + 1 a.status = 'scheduled' else: # Wipe out this master notification. One problem notification is enough. item.remove_in_progress_notification(a) self.actions[a.id].status = 'zombie' else: # Wipe out this master notification. # We don't repeat recover/downtime/flap/etc... 
item.remove_in_progress_notification(a) self.actions[a.id].status = 'zombie' # Called by poller to get checks # Can get checks and actions (notifications and co) def get_to_run_checks(self, do_checks=False, do_actions=False, poller_tags=['None'], reactionner_tags=['None'], worker_name='none', module_types=['fork'], max_actions=None ): res = [] now = time.time() # As priority attribute may not exist on objects loaded from retention # backend, we ensure that filtering does not break def get_prio(o): return getattr(o, "priority", o.properties["priority"].default) # If poller want to do checks if do_checks: for c in sorted(self.checks.itervalues(), key=get_prio): if max_actions is not None and len(res) >= max_actions: break # If the command is untagged, and the poller too, or if both are tagged # with same name, go for it # if do_check, call for poller, and so poller_tags by default is ['None'] # by default poller_tag is 'None' and poller_tags is ['None'] # and same for module_type, the default is the 'fork' type if c.poller_tag in poller_tags and c.module_type in module_types: # must be ok to launch, and not an internal one (business rules based) if c.status == 'scheduled' and c.is_launchable(now) and not c.internal: c.status = 'inpoller' c.worker = worker_name # We do not send c, because it is a link (c.ref) to # host/service and poller do not need it. It only # need a shell with id, command and defaults # parameters. 
It's the goal of copy_shell res.append(c.copy_shell()) # If reactionner want to notify too if do_actions: for a in sorted(self.actions.itervalues(), key=get_prio): if max_actions is not None and len(res) >= max_actions: break is_master = (a.is_a == 'notification' and not a.contact) if not is_master: # if do_action, call the reactionner, # and so reactionner_tags by default is ['None'] # by default reactionner_tag is 'None' and reactionner_tags is ['None'] too # So if not the good one, loop for next :) if a.reactionner_tag not in reactionner_tags: continue # same for module_type if a.module_type not in module_types: continue # And now look for can launch or not :) if a.status == 'scheduled' and a.is_launchable(now): if not is_master: # This is for child notifications and eventhandlers a.status = 'inpoller' a.worker = worker_name new_a = a.copy_shell() res.append(new_a) return res # Called by poller and reactionner to send result def put_results(self, c): if c.is_a == 'notification': # We will only see childnotifications here try: timeout = False if c.status == 'timeout': # Unfortunately the remove_in_progress_notification # sets the status to zombie, so we need to save it here. 
timeout = True execution_time = c.execution_time # Add protection for strange charset if isinstance(c.output, str): c.output = c.output.decode('utf8', 'ignore') self.actions[c.id].get_return_from(c) item = self.actions[c.id].ref item.remove_in_progress_notification(c) self.actions[c.id].status = 'zombie' item.last_notification = c.check_time # And we ask the item to update it's state self.get_and_register_status_brok(item) # If we' ve got a problem with the notification, raise a Warning log if timeout: logger.warning("Contact %s %s notification command '%s ' " "timed out after %d seconds", self.actions[c.id].contact.contact_name, self.actions[c.id].ref.__class__.my_type, self.actions[c.id].command, int(execution_time)) elif c.exit_status != 0: logger.warning("The notification command '%s' raised an error " "(exit code=%d): '%s'", c.command, c.exit_status, c.output) except KeyError, exp: # bad number for notif, not that bad logger.warning('put_results:: get unknown notification : %s ', str(exp)) except AttributeError, exp: # bad object, drop it logger.warning('put_results:: get bad notification : %s ', str(exp)) elif c.is_a == 'check': try: if c.status == 'timeout': c.output = "(%s Check Timed Out)" %\ self.checks[c.id].ref.__class__.my_type.capitalize() c.long_output = c.output c.exit_status = self.conf.timeout_exit_status self.checks[c.id].get_return_from(c) self.checks[c.id].status = 'waitconsume' except KeyError, exp: pass elif c.is_a == 'eventhandler': try: old_action = self.actions[c.id] old_action.status = 'zombie' except KeyError: # cannot find old action return if c.status == 'timeout': _type = 'event handler' if c.is_snapshot: _type = 'snapshot' logger.warning("%s %s command '%s ' timed out after %d seconds" % (self.actions[c.id].ref.__class__.my_type.capitalize(), _type, self.actions[c.id].command, int(c.execution_time))) # If it's a snapshot we should get the output an export it if c.is_snapshot: old_action.get_return_from(c) b = 
old_action.ref.get_snapshot_brok(old_action.output, old_action.exit_status) self.add(b) else: logger.error("The received result type in unknown! %s", str(c.is_a)) # Get the good tabs for links regarding to the kind. If unknown, return None def get_links_from_type(self, type): t = {'poller': self.pollers, 'reactionner': self.reactionners} if type in t: return t[type] return None # Check if we do not connect to often to this def is_connection_try_too_close(self, elt): now = time.time() last_connection = elt['last_connection'] if now - last_connection < 5: return True return False # initialize or re-initialize connection with a poller # or a reactionner def pynag_con_init(self, id, type='poller'): # Get good links tab for looping.. links = self.get_links_from_type(type) if links is None: logger.debug("Unknown '%s' type for connection!", type) return # We want only to initiate connections to the passive # pollers and reactionners passive = links[id]['passive'] if not passive: return # If we try to connect too much, we slow down our tests if self.is_connection_try_too_close(links[id]): return # Ok, we can now update it links[id]['last_connection'] = time.time() logger.debug("Init connection with %s", links[id]['uri']) uri = links[id]['uri'] try: links[id]['con'] = HTTPClient(uri=uri, strong_ssl=links[id]['hard_ssl_name_check']) con = links[id]['con'] except HTTPExceptions, exp: logger.warning("Connection problem to the %s %s: %s", type, links[id]['name'], str(exp)) links[id]['con'] = None return try: # initial ping must be quick con.get('ping') except HTTPExceptions, exp: logger.warning("Connection problem to the %s %s: %s", type, links[id]['name'], str(exp)) links[id]['con'] = None return except KeyError, exp: logger.warning("The %s '%s' is not initialized: %s", type, links[id]['name'], str(exp)) links[id]['con'] = None return logger.info("Connection OK to the %s %s", type, links[id]['name']) # We should push actions to our passives satellites def 
push_actions_to_passives_satellites(self): # We loop for our passive pollers or reactionners for p in filter(lambda p: p['passive'], self.pollers.values()): logger.debug("I will send actions to the poller %s", str(p)) con = p['con'] poller_tags = p['poller_tags'] if con is not None: # get actions lst = self.get_to_run_checks(True, False, poller_tags, worker_name=p['name']) try: # initial ping must be quick logger.debug("Sending %s actions", len(lst)) con.post('push_actions', {'actions': lst, 'sched_id': self.instance_id}) self.nb_checks_send += len(lst) except HTTPExceptions, exp: logger.warning("Connection problem to the %s %s: %s", type, p['name'], str(exp)) p['con'] = None return except KeyError, exp: logger.warning("The %s '%s' is not initialized: %s", type, p['name'], str(exp)) p['con'] = None return else: # no connection? try to reconnect self.pynag_con_init(p['instance_id'], type='poller') # TODO:factorize # We loop for our passive reactionners for p in filter(lambda p: p['passive'], self.reactionners.values()): logger.debug("I will send actions to the reactionner %s", str(p)) con = p['con'] reactionner_tags = p['reactionner_tags'] if con is not None: # get actions lst = self.get_to_run_checks(False, True, reactionner_tags=reactionner_tags, worker_name=p['name']) try: # initial ping must be quick logger.debug("Sending %d actions", len(lst)) con.post('push_actions', {'actions': lst, 'sched_id': self.instance_id}) self.nb_checks_send += len(lst) except HTTPExceptions, exp: logger.warning("Connection problem to the %s %s: %s", type, p['name'], str(exp)) p['con'] = None return except KeyError, exp: logger.warning("The %s '%s' is not initialized: %s", type, p['name'], str(exp)) p['con'] = None return else: # no connection? 
try to reconnect self.pynag_con_init(p['instance_id'], type='reactionner') # We should get returns from satellites def get_actions_from_passives_satellites(self): # We loop for our passive pollers for p in [p for p in self.pollers.values() if p['passive']]: logger.debug("I will get actions from the poller %s", str(p)) con = p['con'] poller_tags = p['poller_tags'] if con is not None: try: # initial ping must be quick # Before ask a call that can be long, do a simple ping to be sure it is alive con.get('ping') results = con.get('get_returns', {'sched_id': self.instance_id}, wait='long') try: results = str(results) except UnicodeEncodeError: # ascii not working, switch to utf8 so # if not eally utf8 will be a real problem results = results.encode("utf8", 'ignore') # and data will be invalid, socatch by the pickle. # now go the cpickle pass, and catch possible errors from it try: results = cPickle.loads(results) except Exception, exp: logger.error('Cannot load passive results from satellite %s : %s', p['name'], str(exp)) continue nb_received = len(results) self.nb_check_received += nb_received logger.debug("Received %d passive results", nb_received) for result in results: result.set_type_passive() with self.waiting_results_lock: self.waiting_results.extend(results) except HTTPExceptions, exp: logger.warning("Connection problem to the %s %s: %s", type, p['name'], str(exp)) p['con'] = None continue except KeyError, exp: logger.warning("The %s '%s' is not initialized: %s", type, p['name'], str(exp)) p['con'] = None continue else: # no connection, try reinit self.pynag_con_init(p['instance_id'], type='poller') # We loop for our passive reactionners for p in [p for p in self.reactionners.values() if p['passive']]: logger.debug("I will get actions from the reactionner %s", str(p)) con = p['con'] reactionner_tags = p['reactionner_tags'] if con is not None: try: # initial ping must be quick # Before ask a call that can be long, do a simple ping to be sure it is alive 
con.get('ping') results = con.get('get_returns', {'sched_id': self.instance_id}, wait='long') results = cPickle.loads(str(results)) nb_received = len(results) self.nb_check_received += nb_received logger.debug("Received %d passive results", nb_received) for result in results: result.set_type_passive() with self.waiting_results_lock: self.waiting_results.extend(results) except HTTPExceptions, exp: logger.warning("Connection problem to the %s %s: %s", type, p['name'], str(exp)) p['con'] = None return except KeyError, exp: logger.warning("The %s '%s' is not initialized: %s", type, p['name'], str(exp)) p['con'] = None return else: # no connection, try reinit self.pynag_con_init(p['instance_id'], type='reactionner') # Some checks are purely internal, like business based one # simply ask their ref to manage it when it's ok to run def manage_internal_checks(self): now = time.time() for c in self.checks.values(): # must be ok to launch, and not an internal one (business rules based) if c.internal and c.status == 'scheduled' and c.is_launchable(now): c.ref.manage_internal_check(self.hosts, self.services, c) # it manage it, now just ask to consume it # like for all checks c.status = 'waitconsume' # Call by brokers to have broks # We give them, and clean them! def get_broks(self, bname, broks_batch=0): res = [] if broks_batch > 0: count = len(self.broks) else: count = min(broks_batch, len(self.broks)) res.extend(self.broks[:count]) del self.broks[:count] # If we are here, we are sure the broker entry exists if broks_batch > 0: count = len(self.brokers[bname]['broks']) else: count = min(broks_batch, len(self.brokers[bname]['broks'])) count -= len(res) res.extend(self.brokers[bname]['broks'][:count]) del self.brokers[bname]['broks'][:count] return res # An element can have its topology changed by an external command # if so a brok will be generated with this flag. No need to reset all of # them. 
    def reset_topology_change_flag(self):
        """Clear the topology_change flag on every host and service."""
        for i in self.hosts:
            i.topology_change = False
        for i in self.services:
            i.topology_change = False

    # Update the retention file and give all te data in
    # a dict so the read function can pickup what it wants
    # For now compression is not used, but it can be added easily
    # just uncomment :)
    def update_retention_file(self, forced=False):
        """Fire the 'save_retention' hook unless retention is disabled.

        forced=True (used at shutdown) bypasses the interval==0 check.
        """
        # If we set the update to 0, we do not want of this
        # if we do not forced (like at stopping)
        if self.conf.retention_update_interval == 0 and not forced:
            return
        self.hook_point('save_retention')

    # Load the retention file and get status from it. It does not get all checks in progress
    # for the moment, just the status and the notifications.
    def retention_load(self):
        self.hook_point('load_retention')

    # Helper function for module, will give the host and service
    # data
    def get_retention_data(self):
        """Build the dict a retention module will persist.

        Returns {'hosts': {host_name: props}, 'services': {(host, desc): props}}
        where props are the retention-flagged running_properties and
        properties, passed through retention_preparation when defined.
        """
        # We create an all_data dict with list of useful retention data dicts
        # of our hosts and services
        all_data = {'hosts': {}, 'services': {}}
        for h in self.hosts:
            d = {}
            running_properties = h.__class__.running_properties
            for prop, entry in running_properties.items():
                if entry.retention:
                    v = getattr(h, prop)
                    # Maybe we should "prepare" the data before saving it
                    # like get only names instead of the whole objects
                    f = entry.retention_preparation
                    if f:
                        v = f(h, v)
                    d[prop] = v
            # and some properties are also like this, like
            # active checks enabled or not
            properties = h.__class__.properties
            for prop, entry in properties.items():
                if entry.retention:
                    v = getattr(h, prop)
                    # Maybe we should "prepare" the data before saving it
                    # like get only names instead of the whole objects
                    f = entry.retention_preparation
                    if f:
                        v = f(h, v)
                    d[prop] = v
            all_data['hosts'][h.host_name] = d

        # Same for services
        for s in self.services:
            d = {}
            running_properties = s.__class__.running_properties
            for prop, entry in running_properties.items():
                if entry.retention:
                    v = getattr(s, prop)
                    # Maybe we should "prepare" the data before saving it
                    # like get only names instead of the whole objects
                    f = entry.retention_preparation
                    if f:
                        v = f(s, v)
                    d[prop] = v

            # We consider the service ONLY if it has modified attributes.
            # If not, then no non-running attributes will be saved for this service.
            if s.modified_attributes > 0:
                # Same for properties, like active checks enabled or not
                properties = s.__class__.properties
                for prop, entry in properties.items():
                    # We save the value only if the attribute
                    # is selected for retention AND has been modified.
                    if entry.retention and \
                            not (prop in DICT_MODATTR and
                                 not DICT_MODATTR[prop].value & s.modified_attributes):
                        v = getattr(s, prop)
                        # Maybe we should "prepare" the data before saving it
                        # like get only names instead of the whole objects
                        f = entry.retention_preparation
                        if f:
                            v = f(s, v)
                        d[prop] = v
            all_data['services'][(s.host.host_name, s.service_description)] = d
        return all_data

    # Get back our broks from a retention module :)
    def restore_retention_data(self, data):
        """Apply previously-saved retention data onto live hosts/services.

        Restores retention-flagged properties, relinks notifications,
        downtimes, comments and acknowledgements, and bumps the class-level
        id counters (Downtime.id, Comment.id, Acknowledge.id) so newly
        created objects will not collide with restored ones.
        """
        # Now load interesting properties in hosts/services
        # Tagging retention=False prop that not be directly load
        # Items will be with theirs status, but not in checking, so
        # a new check will be launched like with a normal beginning (random distributed
        # scheduling)

        ret_hosts = data['hosts']
        for ret_h_name in ret_hosts:
            # We take the dict of our value to load
            d = data['hosts'][ret_h_name]
            h = self.hosts.find_by_name(ret_h_name)
            if h is not None:
                # First manage all running properties
                running_properties = h.__class__.running_properties
                for prop, entry in running_properties.items():
                    if entry.retention:
                        # Maybe the saved one was not with this value, so
                        # we just bypass this
                        if prop in d:
                            setattr(h, prop, d[prop])
                # Ok, some are in properties too (like active check enabled
                # or not. Will OVERRIDE THE CONFIGURATION VALUE!
                properties = h.__class__.properties
                for prop, entry in properties.items():
                    if entry.retention:
                        # Maybe the saved one was not with this value, so
                        # we just bypass this
                        if prop in d:
                            setattr(h, prop, d[prop])
                # Now manage all linked objects load from previous run
                for a in h.notifications_in_progress.values():
                    a.ref = h
                    self.add(a)
                    # Also raises the action id, so do not overlap ids
                    a.assume_at_least_id(a.id)
                h.update_in_checking()
                # And also add downtimes and comments
                for dt in h.downtimes:
                    dt.ref = h
                    if hasattr(dt, 'extra_comment'):
                        dt.extra_comment.ref = h
                    else:
                        dt.extra_comment = None
                    # raises the downtime id to do not overlap
                    Downtime.id = max(Downtime.id, dt.id + 1)
                    self.add(dt)
                for c in h.comments:
                    c.ref = h
                    self.add(c)
                    # raises comment id to do not overlap ids
                    Comment.id = max(Comment.id, c.id + 1)
                if h.acknowledgement is not None:
                    h.acknowledgement.ref = h
                    # Raises the id of future ack so we don't overwrite
                    # these one
                    Acknowledge.id = max(Acknowledge.id, h.acknowledgement.id + 1)
                # Relink the notified_contacts as a set() of true contacts objects
                # it it was load from the retention, it's now a list of contacts
                # names
                if 'notified_contacts' in d:
                    new_notified_contacts = set()
                    for cname in h.notified_contacts:
                        c = self.contacts.find_by_name(cname)
                        # Maybe the contact is gone. Skip it
                        if c:
                            new_notified_contacts.add(c)
                    h.notified_contacts = new_notified_contacts

        # SAme for services
        ret_services = data['services']
        for (ret_s_h_name, ret_s_desc) in ret_services:
            # We take our dict to load
            d = data['services'][(ret_s_h_name, ret_s_desc)]
            s = self.services.find_srv_by_name_and_hostname(ret_s_h_name, ret_s_desc)
            if s is not None:
                # Load the major values from running properties
                running_properties = s.__class__.running_properties
                for prop, entry in running_properties.items():
                    if entry.retention:
                        # Maybe the saved one was not with this value, so
                        # we just bypass this
                        if prop in d:
                            setattr(s, prop, d[prop])
                # And some others from properties dict too
                properties = s.__class__.properties
                for prop, entry in properties.items():
                    if entry.retention:
                        # Maybe the saved one was not with this value, so
                        # we just bypass this
                        if prop in d:
                            setattr(s, prop, d[prop])
                # Ok now manage all linked objects
                for a in s.notifications_in_progress.values():
                    a.ref = s
                    self.add(a)
                    # Also raises the action id, so do not overlap id
                    a.assume_at_least_id(a.id)
                s.update_in_checking()
                # And also add downtimes and comments
                for dt in s.downtimes:
                    dt.ref = s
                    if hasattr(dt, 'extra_comment'):
                        dt.extra_comment.ref = s
                    else:
                        dt.extra_comment = None
                    # raises the downtime id to do not overlap
                    Downtime.id = max(Downtime.id, dt.id + 1)
                    self.add(dt)
                for c in s.comments:
                    c.ref = s
                    self.add(c)
                    # raises comment id to do not overlap ids
                    Comment.id = max(Comment.id, c.id + 1)
                if s.acknowledgement is not None:
                    s.acknowledgement.ref = s
                    # Raises the id of future ack so we don't overwrite
                    # these one
                    Acknowledge.id = max(Acknowledge.id, s.acknowledgement.id + 1)
                # Relink the notified_contacts as a set() of true contacts objects
                # it it was load from the retention, it's now a list of contacts
                # names
                if 'notified_contacts' in d:
                    new_notified_contacts = set()
                    for cname in s.notified_contacts:
                        c = self.contacts.find_by_name(cname)
                        # Maybe the contact is gone. Skip it
                        if c:
                            new_notified_contacts.add(c)
                    s.notified_contacts = new_notified_contacts

    # Fill the self.broks with broks of self (process id, and co)
    # broks of service and hosts (initial status)
    def fill_initial_broks(self, bname, with_logs=False):
        """Queue the full initial brok set for broker bname.

        Starts with a 'clean_all_my_instance_id' brok, then the program
        status, then (unless conf.skip_initial_broks) an initial_status
        brok per object, and ends with 'initial_broks_done'.
        """
        # First a Brok for delete all from my instance_id
        b = Brok('clean_all_my_instance_id', {'instance_id': self.instance_id})
        self.add_Brok(b, bname)

        # first the program status
        b = self.get_program_status_brok()
        self.add_Brok(b, bname)

        #  We can't call initial_status from all this types
        #  The order is important, service need host...
        initial_status_types = (self.timeperiods, self.commands,
                                self.contacts, self.contactgroups,
                                self.hosts, self.hostgroups,
                                self.services, self.servicegroups)

        self.conf.skip_initial_broks = getattr(self.conf, 'skip_initial_broks', False)
        logger.debug("Skipping initial broks? %s", str(self.conf.skip_initial_broks))
        if not self.conf.skip_initial_broks:
            for tab in initial_status_types:
                for i in tab:
                    b = i.get_initial_status_brok()
                    self.add_Brok(b, bname)

        # Only raises the all logs at the scheduler startup
        if with_logs:
            # Ask for INITIAL logs for services and hosts
            for i in self.hosts:
                i.raise_initial_state()
            for i in self.services:
                i.raise_initial_state()

        # Add a brok to say that we finished all initial_pass
        b = Brok('initial_broks_done', {'instance_id': self.instance_id})
        self.add_Brok(b, bname)

        # We now have all full broks
        self.has_full_broks = True

        logger.info("[%s] Created %d initial Broks for broker %s",
                    self.instance_name, len(self.brokers[bname]['broks']), bname)

    # Crate a brok with program status info
    def get_and_register_program_status_brok(self):
        b = self.get_program_status_brok()
        self.add(b)

    # Crate a brok with program status info
    def get_and_register_update_program_status_brok(self):
        b = self.get_program_status_brok()
        b.type = 'update_program_status'
        self.add(b)

    # Get a brok with program status
    # TODO: GET REAL VALUES
    def get_program_status_brok(self):
        """Return a 'program_status' Brok describing this scheduler.

        Several fields are placeholders (see TODO above): daemon_mode,
        last_command_check and last_log_rotation are not real values.
        """
        now = int(time.time())
        data = {"is_running": 1,
                "instance_id": self.instance_id,
                "instance_name": self.instance_name,
                "last_alive": now,
                "interval_length": self.conf.interval_length,
                "program_start": self.program_start,
                "pid": os.getpid(),
                "daemon_mode": 1,
                "last_command_check": now,
                "last_log_rotation": now,
                "notifications_enabled": self.conf.enable_notifications,
                "active_service_checks_enabled": self.conf.execute_service_checks,
                "passive_service_checks_enabled": self.conf.accept_passive_service_checks,
                "active_host_checks_enabled": self.conf.execute_host_checks,
                "passive_host_checks_enabled": self.conf.accept_passive_host_checks,
                "event_handlers_enabled": self.conf.enable_event_handlers,
                "flap_detection_enabled": self.conf.enable_flap_detection,
                "failure_prediction_enabled": 0,
                "process_performance_data": self.conf.process_performance_data,
                "obsess_over_hosts": self.conf.obsess_over_hosts,
                "obsess_over_services": self.conf.obsess_over_services,
                "modified_host_attributes": 0,
                "modified_service_attributes": 0,
                "global_host_event_handler": self.conf.global_host_event_handler,
                'global_service_event_handler': self.conf.global_service_event_handler,
                'check_external_commands': self.conf.check_external_commands,
                'check_service_freshness': self.conf.check_service_freshness,
                'check_host_freshness': self.conf.check_host_freshness,
                'command_file': self.conf.command_file
                }
        b = Brok('program_status', data)
        return b

    # Called every 1sec to consume every result in services or hosts
    # with these results, they are OK, CRITICAL, UP/DOWN, etc...
    def consume_results(self):
        """Take every waiting check/action result and let the owning
        host or service digest it. The three passes below are
        order-dependent: direct consumption first, then dependency
        resolution, then consumption of checks that were waiting on
        those dependencies. Called every second by the main loop.
        """
        # All results are in self.waiting_results: grab them all under
        # the lock, then release it before the (long) processing below
        with self.waiting_results_lock:
            waiting_results = self.waiting_results
            self.waiting_results = []

        for c in waiting_results:
            self.put_results(c)

        # Then we consume them
        for c in self.checks.values():
            if c.status == 'waitconsume':
                item = c.ref
                item.consume_result(c)

        # All 'finished' checks (no more dep) raise checks they depends on
        for c in self.checks.values():
            if c.status == 'havetoresolvedep':
                for dependent_checks in c.depend_on_me:
                    # Ok, now dependent will no more wait c
                    dependent_checks.depend_on.remove(c.id)
                # REMOVE OLD DEP CHECK -> zombie
                c.status = 'zombie'

        # Now, reinteger dep checks: once all its dependencies are
        # resolved, a waiting check can finally be consumed itself
        for c in self.checks.values():
            if c.status == 'waitdep' and len(c.depend_on) == 0:
                item = c.ref
                item.consume_result(c)

    # Called every 1sec to delete all checks in a zombie state
    # zombie = not useful anymore
    def delete_zombie_checks(self):
        # Collect the ids first: we must not mutate self.checks
        # while iterating over it
        id_to_del = []
        for c in self.checks.values():
            if c.status == 'zombie':
                id_to_del.append(c.id)
        # a little pat on the back and off you go, thanks...
        # *pat pat* GFTO, thks :)
        for id in id_to_del:
            del self.checks[id]  # ZANKUSEN!

    # Called every 1sec to delete all actions in a zombie state
    # zombie = not useful anymore
    def delete_zombie_actions(self):
        # Same two-phase delete as delete_zombie_checks, but for actions
        id_to_del = []
        for a in self.actions.values():
            if a.status == 'zombie':
                id_to_del.append(a.id)
        # a little pat on the back and off you go, thanks...
        # *pat pat* GFTO, thks :)
        for id in id_to_del:
            del self.actions[id]  # ZANKUSEN!
    # Check for downtimes start and stop, and register
    # them if needed
    def update_downtimes_and_comments(self):
        """Manage the downtime/comment life cycle: register new comments,
        open maintenance-period downtimes, activate contact downtimes,
        drop everything flagged for deletion, start/stop fixed downtimes,
        and finally queue all the resulting update-status broks.
        """
        broks = []
        now = time.time()

        # Look for in objects comments, and look if we already got them
        for elt in self.iter_hosts_and_services():
            for c in elt.comments:
                if c.id not in self.comments:
                    self.comments[c.id] = c

        # Check maintenance periods
        for elt in self.iter_hosts_and_services():
            if elt.maintenance_period is None:
                continue

            if elt.in_maintenance is None:
                if elt.maintenance_period.is_time_valid(now):
                    start_dt = elt.maintenance_period.get_next_valid_time_from_t(now)
                    end_dt = elt.maintenance_period.get_next_invalid_time_from_t(start_dt + 1) - 1
                    dt = Downtime(elt, start_dt, end_dt, 1, 0, 0,
                                  "system",
                                  "this downtime was automatically scheduled "
                                  "through a maintenance_period")
                    elt.add_downtime(dt)
                    self.add(dt)
                    self.get_and_register_status_brok(elt)
                    elt.in_maintenance = dt.id
            else:
                if elt.in_maintenance not in self.downtimes:
                    # the main downtimes has expired or was manually deleted
                    elt.in_maintenance = None

        # Check the validity of contact downtimes
        for elt in self.contacts:
            for dt in elt.downtimes:
                dt.check_activation()

        # A loop where those downtimes are removed
        # which were marked for deletion (mostly by dt.exit())
        for dt in self.downtimes.values():
            if dt.can_be_deleted is True:
                ref = dt.ref
                self.del_downtime(dt.id)
                broks.append(ref.get_update_status_brok())

        # Same for contact downtimes:
        for dt in self.contact_downtimes.values():
            if dt.can_be_deleted is True:
                ref = dt.ref
                self.del_contact_downtime(dt.id)
                broks.append(ref.get_update_status_brok())

        # Downtimes are usually accompanied by a comment.
        # An exiting downtime also invalidates its comment.
        for c in self.comments.values():
            if c.can_be_deleted is True:
                ref = c.ref
                self.del_comment(c.id)
                broks.append(ref.get_update_status_brok())

        # Check start and stop times
        for dt in self.downtimes.values():
            if dt.real_end_time < now:
                # this one has expired
                broks.extend(dt.exit())  # returns downtimestop notifications
            elif now >= dt.start_time and dt.fixed and not dt.is_in_effect:
                # this one has to start now
                broks.extend(dt.enter())  # returns downtimestart notifications
                broks.append(dt.ref.get_update_status_brok())

        for b in broks:
            self.add(b)

    # Main schedule function to make the regular scheduling
    def schedule(self):
        # ask for service and hosts their next check
        for elt in self.iter_hosts_and_services():
            elt.schedule()

    # Main actions reaper function: it gets all new checks,
    # notifications and event handlers from hosts and services
    def get_new_actions(self):
        self.hook_point('get_new_actions')
        # ask for service and hosts their next check
        for elt in self.iter_hosts_and_services():
            for a in elt.actions:
                self.add(a)
            # We take all, we can clear it
            elt.actions = []

    # Similar as above, but for broks
    def get_new_broks(self):
        # ask for service and hosts their broks waiting
        # be eaten
        for elt in self.iter_hosts_and_services():
            for b in elt.broks:
                self.add(b)
            # We take all, we can clear it
            elt.broks = []

    # Raises checks for no fresh states for services and hosts
    def check_freshness(self):
        for elt in self.iter_hosts_and_services():
            c = elt.do_check_freshness()
            if c is not None:
                self.add(c)

    # Check for orphaned checks: checks that never returns back
    # so if inpoller and t_to_go < now - 300s: pb!
# Warn only one time for each "worker" # XXX I think we should make "time_to_orphanage" configurable # each action type, each for notification, event_handler & check # I think it will be a little more useful that way, not sure tho def check_orphaned(self): worker_names = {} now = int(time.time()) for c in self.checks.values(): time_to_orphanage = c.ref.get_time_to_orphanage() if time_to_orphanage: if c.status == 'inpoller' and c.t_to_go < now - time_to_orphanage: c.status = 'scheduled' if c.worker not in worker_names: worker_names[c.worker] = {"checks": 1} continue if "checks" not in worker_names[c.worker]: worker_names[c.worker]["checks"] = 1 continue worker_names[c.worker]["checks"] += 1 for a in self.actions.values(): time_to_orphanage = a.ref.get_time_to_orphanage() if time_to_orphanage: if a.status == 'inpoller' and a.t_to_go < now - time_to_orphanage: a.status = 'scheduled' if a.worker not in worker_names: worker_names[a.worker] = {"actions": 1} continue if "actions" not in worker_names[a.worker]: worker_names[a.worker]["actions"] = 1 continue worker_names[a.worker]["actions"] += 1 reenabled = {"checks": 0, "actions": 0} for w in worker_names: for _type in worker_names[w]: reenabled[_type] += worker_names[w][_type] logger.warning("%d %s never came back for the satellite " "'%s'. 
I reenable them for polling", worker_names[w][_type], _type, w) for _type in reenabled: count = reenabled[_type] if count: statsmgr.incr("scheduler.%s.reenabled" % _type, count, "queue") # Each loop we are going to send our broks to our modules (if need) def send_broks_to_modules(self): t0 = time.time() nb_sent = 0 for mod in self.sched_daemon.modules_manager.get_external_instances(): logger.debug("Look for sending to module %s", mod.get_name()) q = mod.to_q to_send = [b for b in self.broks if not getattr(b, 'sent_to_sched_externals', False) and mod.want_brok(b)] q.put(to_send) nb_sent += len(to_send) # No more need to send them for b in self.broks: b.sent_to_sched_externals = True logger.debug("Time to send %s broks (after %d secs)", nb_sent, time.time() - t0) # special one for scheduler ; see Daemon.get_objects_from_from_queues() def get_objects_from_from_queues(self): ''' Same behavior than Daemon.get_objects_from_from_queues(). ''' return self.sched_daemon.get_objects_from_from_queues() # Gets internal metrics for both statsd and def get_internal_metrics(self): # Queues metrics = [ ('core.scheduler.mem', get_memory(), 'system'), ('core.scheduler.checks.queue', len(self.checks), 'queue'), ('core.scheduler.actions.queue', len(self.actions), 'queue'), ('core.scheduler.broks.queue', len(self.broks), 'queue'), ('core.scheduler.downtimes.queue', len(self.downtimes), 'queue'), ('core.scheduler.comments.queue', len(self.comments), 'queue'), ] # Queues for s in ("scheduled", "inpoller", "zombie", "timeout", "waitconsume", "waitdep", "havetoresolvedep"): count = len([c for c in self.checks.values() if c.status == s]) metrics.append(('core.scheduler.checks.%s' % s, count, 'queue')) # Latency latencies = [s.latency for s in self.services] lat_avg, lat_min, lat_max = nighty_five_percent(latencies) if lat_min: metrics.append(('core.scheduler.latency.min', lat_min, 'queue')) metrics.append(('core.scheduler.latency.avg', lat_avg, 'queue')) 
metrics.append(('core.scheduler.latency.max', lat_max, 'queue')) # Objects for t in ("contacts", "contactgroups", "hosts", "hostgroups", "services", "servicegroups", "commands"): count = len(getattr(self, t)) metrics.append(('core.scheduler.%s' % t, count, 'object')) return metrics # stats threads is asking us a main structure for stats def get_stats_struct(self): now = int(time.time()) res = self.sched_daemon.get_stats_struct() instance_name = getattr(self, "instance_name", "") res.update({'name': instance_name, 'type': 'scheduler'}) # Get a overview of the latencies with just # a 95 percentile view, but lso min/max values latencies = [s.latency for s in self.services] lat_avg, lat_min, lat_max = nighty_five_percent(latencies) res['latency'] = (0.0, 0.0, 0.0) if lat_avg: res['latency'] = {'avg': lat_avg, 'min': lat_min, 'max': lat_max} # Managed objects res["objects"] = {} for t in ("contacts", "contactgroups", "hosts", "hostgroups", "services", "servicegroups", "commands"): res["objects"][t] = len(getattr(self, t)) # metrics specific metrics = res['metrics'] for metric in self.get_internal_metrics(): name, value, mtype = metric metrics.append(name, value, now, mtype) all_commands = {} # compute some stats for elt in self.iter_hosts_and_services(): last_cmd = elt.last_check_command if not last_cmd: continue interval = elt.check_interval if interval == 0: interval = 1 cmd = os.path.split(last_cmd.split(' ', 1)[0])[1] u_time = elt.u_time s_time = elt.s_time old_u_time, old_s_time = all_commands.get(cmd, (0.0, 0.0)) old_u_time += u_time / interval old_s_time += s_time / interval all_commands[cmd] = (old_u_time, old_s_time) # now sort it p = [] for (c, e) in all_commands.iteritems(): u_time, s_time = e p.append({'cmd': c, 'u_time': u_time, 's_time': s_time}) def p_sort(e1, e2): if e1['u_time'] > e2['u_time']: return 1 if e1['u_time'] < e2['u_time']: return -1 return 0 p.sort(p_sort) # takethe first 10 ones for the put res['commands'] = p[:10] return res # Main 
function
    def run(self):
        """Main scheduler loop.

        Load the retention data, start the external modules, do the
        first scheduling pass, connect to passive satellites, then loop
        while must_run: answer daemon requests, run the recurrent works
        (schedule, consume results, delete zombies...) on their tick
        period, exchange with passive satellites and log statistics.
        On exit, save the retention data ourselves.
        """
        # Then we see if we've got info in the retention file
        self.retention_load()

        # Finally start the external modules now we got our data
        self.hook_point('pre_scheduler_mod_start')
        self.sched_daemon.modules_manager.start_external_instances(late_start=True)

        # Ok, now all is initialized, we can make the initial broks
        logger.info("[%s] First scheduling launched", self.instance_name)
        self.schedule()
        logger.info("[%s] First scheduling done", self.instance_name)

        # Now connect to the passive satellites if needed
        for p_id in self.pollers:
            self.pynag_con_init(p_id, type='poller')

        for r_id in self.reactionners:
            self.pynag_con_init(r_id, type='reactionner')

        # Ticks are for recurrent function call like consume
        # del zombies etc
        ticks = 0
        timeout = 1.0  # For the select

        gogogo = time.time()

        # We must reset it if we received a new conf from the Arbiter.
        # Otherwise, the stat check average won't be correct
        self.nb_check_received = 0

        self.load_one_min = Load(initial_value=1)
        logger.debug("First loop at %d", time.time())
        while self.must_run:
            # Before answer to brokers, we send our broks to modules
            # Ok, go to send our broks to our external modules
            # self.send_broks_to_modules()

            # Serve pending daemon requests; keep serving until the
            # 1 second budget is fully consumed
            elapsed, _, _ = self.sched_daemon.handleRequests(timeout)
            if elapsed:
                timeout -= elapsed
                if timeout > 0:
                    continue

            self.load_one_min.update_load(self.sched_daemon.sleep_time)

            # load of the scheduler is the percent of time it is waiting
            l = min(100, 100.0 - self.load_one_min.get_load() * 100)
            logger.debug("Load: (sleep) %.2f (average: %.2f) -> %d%%",
                         self.sched_daemon.sleep_time,
                         self.load_one_min.get_load(), l)
            self.sched_daemon.sleep_time = 0.0

            # Timeout or time over
            timeout = 1.0
            ticks += 1

            # Do recurrent works like schedule, consume
            # delete_zombie_checks
            for i in self.recurrent_works:
                (name, f, nb_ticks) = self.recurrent_works[i]
                # A 0 in the tick will just disable it
                if nb_ticks != 0:
                    if ticks % nb_ticks == 0:
                        # Call it and save the time spent in it
                        _t = time.time()
                        f()
                        statsmgr.timing('loop.scheduler.%s' % name,
                                        time.time() - _t, 'perf')

            # DBG: push actions to passives?
            self.push_actions_to_passives_satellites()
            self.get_actions_from_passives_satellites()

            # stats
            nb_scheduled = len([c for c in self.checks.values() if c.status == 'scheduled'])
            nb_inpoller = len([c for c in self.checks.values() if c.status == 'inpoller'])
            nb_zombies = len([c for c in self.checks.values() if c.status == 'zombie'])
            nb_notifications = len(self.actions)

            logger.debug("Checks: total %s, scheduled %s,"
                         "inpoller %s, zombies %s, notifications %s",
                         len(self.checks), nb_scheduled, nb_inpoller,
                         nb_zombies, nb_notifications)

            # Get a overview of the latencies with just
            # a 95 percentile view, but also min/max values
            latencies = [s.latency for s in self.services]
            lat_avg, lat_min, lat_max = nighty_five_percent(latencies)
            if lat_avg is not None:
                logger.debug("Latency (avg/min/max): %.2f/%.2f/%.2f",
                             lat_avg, lat_min, lat_max)

            now = time.time()

            if self.nb_checks_send != 0:
                logger.debug("Nb checks/notifications/event send: %s", self.nb_checks_send)
            self.nb_checks_send = 0
            if self.nb_broks_send != 0:
                logger.debug("Nb Broks send: %s", self.nb_broks_send)
            self.nb_broks_send = 0

            time_elapsed = now - gogogo
            logger.debug("Check average = %d checks/s",
                         int(self.nb_check_received / time_elapsed))

            if self.need_dump_memory:
                self.sched_daemon.dump_memory()
                self.need_dump_memory = False

            if self.need_objects_dump:
                logger.debug('I need to dump my objects!')
                self.dump_objects()
                self.dump_config()
                self.need_objects_dump = False

            # Checks if memory consumption did not exceed the allowed threshold
            self.sched_daemon.check_memory_usage()

        # WE must save the retention at the quit BY OURSELF
        # because our daemon will not be able to do it for us
        self.update_retention_file(True)
codeparrot/github-code-clean
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

# pylint: disable=g-short-docstring-punctuation
"""Sparse Tensor Representation.

See the [Sparse Ops](https://tensorflow.org/api_guides/python/sparse_ops)
guide.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
import numbers

import numpy as np

from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_sparse_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export


def _convert_to_sparse_tensor(sp_input):
  """Convert `sp_input` to `SparseTensor` and return it.

  Args:
    sp_input: `SparseTensor` or `SparseTensorValue`.

  Returns:
    `sp_input` converted to `SparseTensor`.

  Raises:
    TypeError: if `sp_input` is neither `SparseTensor` nor
      `SparseTensorValue`.
  """
  if isinstance(sp_input, sparse_tensor.SparseTensorValue):
    return sparse_tensor.SparseTensor.from_value(sp_input)
  if not isinstance(sp_input, sparse_tensor.SparseTensor):
    raise TypeError("Input must be a SparseTensor.")
  return sp_input


def _convert_to_sparse_tensors(sp_inputs):
  """Convert `sp_inputs` to `SparseTensor` objects and return them.

  Args:
    sp_inputs: `list` or `tuple` of `SparseTensor` or `SparseTensorValue`
      objects.

  Returns:
    `sp_inputs` converted to `SparseTensor` objects.

  Raises:
    TypeError: if `sp_inputs` is not a list or tuple, or if any item is
      neither `SparseTensor` nor `SparseTensorValue`.
  """
  if isinstance(sp_inputs, list):
    return [_convert_to_sparse_tensor(sp_input) for sp_input in sp_inputs]
  if isinstance(sp_inputs, tuple):
    # NOTE: for a tuple input this returns a generator, not a tuple.
    return (_convert_to_sparse_tensor(sp_input) for sp_input in sp_inputs)
  raise TypeError("Inputs must be a list or tuple.")


def _make_int64_tensor(value, name):
  # Accept a Python integer or a Tensor; always return an int64 Tensor.
  if isinstance(value, compat.integral_types):
    return ops.convert_to_tensor(value, name=name, dtype=dtypes.int64)
  if not isinstance(value, ops.Tensor):
    raise TypeError("{} must be an integer value".format(name))
  if value.dtype == dtypes.int64:
    return value
  return math_ops.cast(value, dtypes.int64)


@tf_export("sparse.expand_dims")
def sparse_expand_dims(sp_input, axis=None, name=None):
  """Inserts a dimension of 1 into a tensor's shape.

  Given a tensor `sp_input`, this operation inserts a dimension of 1 at the
  dimension index `axis` of `sp_input`'s shape. The dimension index `axis`
  starts at zero; if you specify a negative number for `axis` it is counted
  backwards from the end.

  Args:
    sp_input: A `SparseTensor`.
    axis: 0-D (scalar). Specifies the dimension index at which to expand the
      shape of `input`. Must be in the range `[-rank(sp_input) - 1,
      rank(sp_input)]`.
    name: The name of the output `SparseTensor`.

  Returns:
    A `SparseTensor` with the same data as `sp_input`, but its shape has an
    additional dimension of size 1 added.
  """
  rank = sp_input.dense_shape.get_shape()[0]
  # A None axis means "append a trailing dimension".
  axis = -1 if axis is None else axis

  with ops.name_scope(name, default_name="expand_dims", values=[sp_input]):
    if isinstance(axis, compat.integral_types):
      axis = ops.convert_to_tensor(axis, name="axis", dtype=dtypes.int32)
    elif not isinstance(axis, ops.Tensor):
      raise TypeError("axis must be an integer value in range [-rank(sp_input)"
                      " - 1, rank(sp_input)]")

    # Convert axis to a positive value if it is negative.
    axis = array_ops.where(axis >= 0, axis, axis + rank + 1)

    # Create the new column of indices for the sparse tensor by slicing
    # the indices and inserting a new column of indices for the new dimension.
    column_size = array_ops.shape(sp_input.indices)[0]
    new_index = array_ops.zeros([column_size, 1], dtype=dtypes.int64)
    indices_before = array_ops.slice(sp_input.indices, [0, 0], [-1, axis])
    indices_after = array_ops.slice(sp_input.indices, [0, axis], [-1, -1])
    indices = array_ops.concat(
        [indices_before, new_index, indices_after], axis=1)

    # Create the new dense shape by splicing the tensor [1] in the correct
    # dimension of the existing shape.
    shape_before = array_ops.slice(sp_input.dense_shape, [0], [axis])
    shape_after = array_ops.slice(sp_input.dense_shape, [axis], [-1])
    new_shape = ops.convert_to_tensor([1], name="new_shape", dtype=dtypes.int64)
    shape = array_ops.concat([shape_before, new_shape, shape_after], axis=0)

    # Create the output sparse tensor.
    return sparse_tensor.SparseTensor(
        indices=indices, values=sp_input.values, dense_shape=shape)


@tf_export("sparse.eye")
def sparse_eye(num_rows,
               num_columns=None,
               dtype=dtypes.float32,
               name=None):
  """Creates a two-dimensional sparse tensor with ones along the diagonal.

  Args:
    num_rows: Non-negative integer or `int32` scalar `tensor` giving the number
      of rows in the resulting matrix.
    num_columns: Optional non-negative integer or `int32` scalar `tensor` giving
      the number of columns in the resulting matrix. Defaults to `num_rows`.
    dtype: The type of element in the resulting `Tensor`.
    name: A name for this `Op`. Defaults to "eye".

  Returns:
    A `SparseTensor` of shape [num_rows, num_columns] with ones along the
    diagonal.
  """
  with ops.name_scope(name, default_name="eye", values=[num_rows, num_columns]):
    num_rows = _make_int64_tensor(num_rows, "num_rows")
    num_columns = num_rows if num_columns is None else _make_int64_tensor(
        num_columns, "num_columns")

    # Create the sparse tensor: only the diagonal entries are materialized.
    diag_size = math_ops.minimum(num_rows, num_columns)
    diag_range = math_ops.range(diag_size, dtype=dtypes.int64)

    return sparse_tensor.SparseTensor(
        indices=array_ops.stack([diag_range, diag_range], axis=1),
        values=array_ops.ones(diag_size, dtype=dtype),
        dense_shape=[num_rows, num_columns])


# pylint: disable=protected-access
@tf_export("sparse.concat", "sparse_concat")
@deprecation.deprecated_endpoints("sparse_concat")
@deprecation.deprecated_args(
    None, "concat_dim is deprecated, use axis instead", "concat_dim")
def sparse_concat(axis,
                  sp_inputs,
                  name=None,
                  expand_nonconcat_dim=False,
                  concat_dim=None):
  """Concatenates a list of `SparseTensor` along the specified dimension.

  Concatenation is with respect to the dense versions of each sparse input.
  It is assumed that each inputs is a `SparseTensor` whose elements are ordered
  along increasing dimension number.

  If expand_nonconcat_dim is False, all inputs' shapes must match, except for
  the concat dimension. If expand_nonconcat_dim is True, then inputs' shapes are
  allowed to vary among all inputs.

  The `indices`, `values`, and `shapes` lists must have the same length.

  If expand_nonconcat_dim is False, then the output shape is identical to the
  inputs', except along the concat dimension, where it is the sum of the inputs'
  sizes along that dimension.

  If expand_nonconcat_dim is True, then the output shape along the non-concat
  dimensions will be expand to be the largest among all inputs, and it is the
  sum of the inputs sizes along the concat dimension.

  The output elements will be resorted to preserve the sort order along
  increasing dimension number.

  This op runs in `O(M log M)` time, where `M` is the total number of non-empty
  values across all inputs. This is due to the need for an internal sort in
  order to concatenate efficiently across an arbitrary dimension.

  For example, if `axis = 1` and the inputs are

      sp_inputs[0]: shape = [2, 3]
      [0, 2]: "a"
      [1, 0]: "b"
      [1, 1]: "c"

      sp_inputs[1]: shape = [2, 4]
      [0, 1]: "d"
      [0, 2]: "e"

  then the output will be

      shape = [2, 7]
      [0, 2]: "a"
      [0, 4]: "d"
      [0, 5]: "e"
      [1, 0]: "b"
      [1, 1]: "c"

  Graphically this is equivalent to doing

      [    a] concat [  d e  ] = [    a   d e  ]
      [b c  ]        [       ]   [b c          ]

  Another example, if 'axis = 1' and the inputs are

      sp_inputs[0]: shape = [3, 3]
      [0, 2]: "a"
      [1, 0]: "b"
      [2, 1]: "c"

      sp_inputs[1]: shape = [2, 4]
      [0, 1]: "d"
      [0, 2]: "e"

  if expand_nonconcat_dim = False, this will result in an error. But if
  expand_nonconcat_dim = True, this will result in:

      shape = [3, 7]
      [0, 2]: "a"
      [0, 4]: "d"
      [0, 5]: "e"
      [1, 0]: "b"
      [2, 1]: "c"

  Graphically this is equivalent to doing

      [    a] concat [  d e  ] = [    a   d e  ]
      [b    ]        [       ]   [b            ]
      [  c  ]                    [  c          ]

  Args:
    axis: Dimension to concatenate along. Must be in range [-rank, rank),
      where rank is the number of dimensions in each input `SparseTensor`.
    sp_inputs: List of `SparseTensor` to concatenate.
    name: A name prefix for the returned tensors (optional).
    expand_nonconcat_dim: Whether to allow the expansion in the non-concat
      dimensions. Defaulted to False.
    concat_dim: The old (deprecated) name for axis.

  Returns:
    A `SparseTensor` with the concatenated output.

  Raises:
    TypeError: If `sp_inputs` is not a list of `SparseTensor`.
  """
  axis = deprecation.deprecated_argument_lookup("axis", axis, "concat_dim",
                                                concat_dim)
  sp_inputs = _convert_to_sparse_tensors(sp_inputs)

  if len(sp_inputs) == 1:  # Degenerate case of one tensor.
    return sp_inputs[0]

  inds = [sp_input.indices for sp_input in sp_inputs]
  vals = [sp_input.values for sp_input in sp_inputs]
  shapes = [sp_input.dense_shape for sp_input in sp_inputs]

  if expand_nonconcat_dim:
    # Grow every non-concat dimension to the largest size seen among
    # the inputs; the concat dimension keeps each input's own size.
    max_shape = math_ops.reduce_max(
        array_ops.concat(
            [array_ops.reshape(shape, [1, -1]) for shape in shapes], 0), 0)
    shapes = [
        array_ops.concat([
            max_shape[:axis],
            shape[-1:] if axis == -1 else shape[axis:axis + 1],
            [] if axis == -1 else max_shape[axis + 1:]
        ], 0) for shape in shapes
    ]

  output_ind, output_val, output_shape = (
      gen_sparse_ops.sparse_concat(inds, vals, shapes, axis, name=name))

  return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)


@tf_export("sparse.add", v1=["sparse.add", "sparse_add"])
@deprecation.deprecated_endpoints("sparse_add")
def sparse_add(a, b, thresh=0):
  """Adds two tensors, at least one of each is a `SparseTensor`.

  If one `SparseTensor` and one `Tensor` are passed in, returns a `Tensor`.
  If both arguments are `SparseTensor`s, this returns a `SparseTensor`.
  The order of arguments does not matter.  Use vanilla `tf.add()` for adding
  two dense `Tensor`s.

  The shapes of the two operands must match: broadcasting is not supported.

  The indices of any input `SparseTensor` are assumed ordered in standard
  lexicographic order.  If this is not the case, before this step run
  `SparseReorder` to restore index ordering.

  If both arguments are sparse, we perform "clipping" as follows.  By default,
  if two values sum to zero at some index, the output `SparseTensor` would
  still include that particular location in its index, storing a zero in the
  corresponding value slot.  To override this, callers can specify `thresh`,
  indicating that if the sum has a magnitude strictly smaller than `thresh`,
  its corresponding value and index would then not be included.  In particular,
  `thresh == 0.0` (default) means everything is kept and actual thresholding
  happens only for a positive value.

  For example, suppose the logical sum of two sparse operands is (densified):

      [       2]
      [.1     0]
      [ 6   -.2]

  Then,

  * `thresh == 0` (the default): all 5 index/value pairs will be returned.
  * `thresh == 0.11`: only .1 and 0 will vanish, and the remaining three
      index/value pairs will be returned.
  * `thresh == 0.21`: .1, 0, and -.2 will vanish.

  Args:
    a: The first operand; `SparseTensor` or `Tensor`.
    b: The second operand; `SparseTensor` or `Tensor`. At least one operand
      must be sparse.
    thresh: A 0-D `Tensor`.  The magnitude threshold that determines if an
      output value/index pair takes space.  Its dtype should match that of the
      values if they are real; if the latter are complex64/complex128, then the
      dtype should be float32/float64, correspondingly.

  Returns:
    A `SparseTensor` or a `Tensor`, representing the sum.

  Raises:
    TypeError: If both `a` and `b` are `Tensor`s.  Use `tf.add()` instead.
  """
  sparse_classes = (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)
  if not any(isinstance(inp, sparse_classes) for inp in [a, b]):
    raise TypeError("At least one input should be SparseTensor; do you mean to"
                    " use tf.add()?")

  if all(isinstance(inp, sparse_classes) for inp in [a, b]):
    a = _convert_to_sparse_tensor(a)
    b = _convert_to_sparse_tensor(b)
    thresh = ops.convert_to_tensor(
        thresh, dtype=a.values.dtype.real_dtype.base_dtype, name="thresh")
    output_ind, output_val, output_shape = (
        gen_sparse_ops.sparse_add(a.indices, a.values, a.dense_shape,
                                  b.indices, b.values, b.dense_shape, thresh))

    # Attempt to get output_shape statically.
    a.get_shape().assert_is_compatible_with(b.get_shape())
    static_shape = array_ops.broadcast_static_shape(a.get_shape(),
                                                    b.get_shape())
    if static_shape.is_fully_defined():
      output_shape = static_shape.as_list()

    return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)
  else:
    # swap to make `a` the SparseTensor.
    if isinstance(b, sparse_classes):
      a, b = b, a
    return gen_sparse_ops.sparse_tensor_dense_add(a.indices, a.values,
                                                  a.dense_shape, b)


@tf_export("sparse.cross")
def sparse_cross(inputs, name=None):
  """Generates sparse cross from a list of sparse and dense tensors.

  For example, if the inputs are

      * inputs[0]: SparseTensor with shape = [2, 2]
        [0, 0]: "a"
        [1, 0]: "b"
        [1, 1]: "c"
      * inputs[1]: SparseTensor with shape = [2, 1]
        [0, 0]: "d"
        [1, 0]: "e"
      * inputs[2]: Tensor [["f"], ["g"]]

  then the output will be:

      shape = [2, 2]
      [0, 0]: "a_X_d_X_f"
      [1, 0]: "b_X_e_X_g"
      [1, 1]: "c_X_e_X_g"

  Args:
    inputs: An iterable of `Tensor` or `SparseTensor`.
    name: Optional name for the op.

  Returns:
    A `SparseTensor` of type `string`.
  """
  return _sparse_cross_internal(inputs=inputs, hashed_output=False, name=name)


_sparse_cross = sparse_cross


@tf_export("sparse.cross_hashed")
def sparse_cross_hashed(inputs, num_buckets=0, hash_key=None, name=None):
  """Generates hashed sparse cross from a list of sparse and dense tensors.

  For example, if the inputs are

      * inputs[0]: SparseTensor with shape = [2, 2]
        [0, 0]: "a"
        [1, 0]: "b"
        [1, 1]: "c"
      * inputs[1]: SparseTensor with shape = [2, 1]
        [0, 0]: "d"
        [1, 0]: "e"
      * inputs[2]: Tensor [["f"], ["g"]]

  then the output will be:

      shape = [2, 2]
      [0, 0]: FingerprintCat64(
                  Fingerprint64("f"), FingerprintCat64(
                      Fingerprint64("d"), Fingerprint64("a")))
      [1, 0]: FingerprintCat64(
                  Fingerprint64("g"), FingerprintCat64(
                      Fingerprint64("e"), Fingerprint64("b")))
      [1, 1]: FingerprintCat64(
                  Fingerprint64("g"), FingerprintCat64(
                      Fingerprint64("e"), Fingerprint64("c")))

  Args:
    inputs: An iterable of `Tensor` or `SparseTensor`.
    num_buckets: An `int` that is `>= 0`.
      output = hashed_value%num_buckets if num_buckets > 0 else hashed_value.
    hash_key: Integer hash_key that will be used by the `FingerprintCat64`
      function. If not given, will use a default key.
    name: Optional name for the op.

  Returns:
    A `SparseTensor` of type `int64`.
  """
  return _sparse_cross_internal(
      inputs=inputs,
      hashed_output=True,
      num_buckets=num_buckets,
      hash_key=hash_key,
      name=name)


_sparse_cross_hashed = sparse_cross_hashed

_DEFAULT_HASH_KEY = 0xDECAFCAFFE


def _sparse_cross_internal(inputs,
                           hashed_output=False,
                           num_buckets=0,
                           hash_key=None,
                           name=None):
  """See gen_sparse_ops.sparse_cross."""
  if not isinstance(inputs, list):
    raise TypeError("Inputs must be a list")
  if not all(
      isinstance(i, sparse_tensor.SparseTensor) or isinstance(i, ops.Tensor)
      for i in inputs):
    raise TypeError("All inputs must be SparseTensors")

  sparse_inputs = [
      i for i in inputs if isinstance(i, sparse_tensor.SparseTensor)
  ]
  dense_inputs = [
      i for i in inputs if not isinstance(i, sparse_tensor.SparseTensor)
  ]

  indices = [sp_input.indices for sp_input in sparse_inputs]
  values = [sp_input.values for sp_input in sparse_inputs]
  shapes = [sp_input.dense_shape for sp_input in sparse_inputs]
  out_type = dtypes.int64 if hashed_output else dtypes.string

  # If any input is non-string, everything is fed to the kernel as int64.
  internal_type = dtypes.string
  for i in range(len(values)):
    if values[i].dtype != dtypes.string:
      values[i] = math_ops.to_int64(values[i])
      internal_type = dtypes.int64
  for i in range(len(dense_inputs)):
    if dense_inputs[i].dtype != dtypes.string:
      dense_inputs[i] = math_ops.to_int64(dense_inputs[i])
      internal_type = dtypes.int64

  indices_out, values_out, shape_out = gen_sparse_ops.sparse_cross(
      indices=indices,
      values=values,
      shapes=shapes,
      dense_inputs=dense_inputs,
      hashed_output=hashed_output,
      num_buckets=num_buckets,
      hash_key=hash_key or _DEFAULT_HASH_KEY,
      out_type=out_type,
      internal_type=internal_type,
      name=name)

  return sparse_tensor.SparseTensor(indices_out, values_out, shape_out)


def sparse_dense_cwise_add(sp_t, dense_t):
  """Adds up a SparseTensor and a dense Tensor, using these special rules:

  (1) Broadcasts the dense side to have the same shape as the sparse side, if
      eligible;
  (2) Then, only the dense values pointed to by the indices of the SparseTensor
      participate in the cwise addition.

  By the rules, the result is a logical SparseTensor with exactly the same
  indices and shape, but possibly with different non-zero values.  The output
  of this Op is the resultant non-zero values.

  Args:
    sp_t: the SparseTensor operand.
    dense_t: the dense Tensor operand; must have the same dtype and a
      broadcast-compatible shape as `sp_t`.

  Returns:
    output: the SparseTensor output.
  """
  result = gen_sparse_ops.sparse_dense_cwise_add(sp_t.indices, sp_t.values,
                                                 sp_t.dense_shape, dense_t)
  return sparse_tensor.SparseTensor(sp_t.indices, result, sp_t.dense_shape)


@tf_export("sparse.reorder", v1=["sparse.reorder", "sparse_reorder"])
@deprecation.deprecated_endpoints("sparse_reorder")
def sparse_reorder(sp_input, name=None):
  """Reorders a `SparseTensor` into the canonical, row-major ordering.

  Note that by convention, all sparse ops preserve the canonical ordering
  along increasing dimension number. The only time ordering can be violated
  is during manual manipulation of the indices and values to add entries.

  Reordering does not affect the shape of the `SparseTensor`.

  For example, if `sp_input` has shape `[4, 5]` and `indices` / `values`:

      [0, 3]: b
      [0, 1]: a
      [3, 1]: d
      [2, 0]: c

  then the output will be a `SparseTensor` of shape `[4, 5]` and
  `indices` / `values`:

      [0, 1]: a
      [0, 3]: b
      [2, 0]: c
      [3, 1]: d

  Args:
    sp_input: The input `SparseTensor`.
    name: A name prefix for the returned tensors (optional)

  Returns:
    A `SparseTensor` with the same shape and non-empty values, but in
    canonical ordering.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
""" sp_input = _convert_to_sparse_tensor(sp_input) reordered_ind, reordered_val = ( gen_sparse_ops.sparse_reorder( sp_input.indices, sp_input.values, sp_input.dense_shape, name=name)) if sp_input.get_shape().is_fully_defined(): dense_shape = sp_input.get_shape().as_list() else: dense_shape = array_ops.identity(sp_input.dense_shape) return sparse_tensor.SparseTensor(reordered_ind, reordered_val, dense_shape) @tf_export("sparse.reshape", v1=["sparse.reshape", "sparse_reshape"]) @deprecation.deprecated_endpoints("sparse_reshape") def sparse_reshape(sp_input, shape, name=None): """Reshapes a `SparseTensor` to represent values in a new dense shape. This operation has the same semantics as `reshape` on the represented dense tensor. The indices of non-empty values in `sp_input` are recomputed based on the new dense shape, and a new `SparseTensor` is returned containing the new indices and new shape. The order of non-empty values in `sp_input` is unchanged. If one component of `shape` is the special value -1, the size of that dimension is computed so that the total dense size remains constant. At most one component of `shape` can be -1. The number of dense elements implied by `shape` must be the same as the number of dense elements originally represented by `sp_input`. For example, if `sp_input` has shape `[2, 3, 6]` and `indices` / `values`: [0, 0, 0]: a [0, 0, 1]: b [0, 1, 0]: c [1, 0, 0]: d [1, 2, 3]: e and `shape` is `[9, -1]`, then the output will be a `SparseTensor` of shape `[9, 4]` and `indices` / `values`: [0, 0]: a [0, 1]: b [1, 2]: c [4, 2]: d [8, 1]: e Args: sp_input: The input `SparseTensor`. shape: A 1-D (vector) int64 `Tensor` specifying the new dense shape of the represented `SparseTensor`. name: A name prefix for the returned tensors (optional) Returns: A `SparseTensor` with the same non-empty values but with indices calculated by the new dense shape. Raises: TypeError: If `sp_input` is not a `SparseTensor`. 
ValueError: If argument `shape` requests a `SparseTensor` with a different number of elements than `sp_input`. ValueError: If `shape` has more than one inferred (== -1) dimension. """ sp_input = _convert_to_sparse_tensor(sp_input) shape = math_ops.cast(shape, dtype=dtypes.int64) with ops.name_scope(name, "SparseReshape", [sp_input]) as name: reshaped_ind, reshaped_shape = gen_sparse_ops.sparse_reshape( sp_input.indices, sp_input.dense_shape, shape, name=name) reshaped_shape_const = tensor_util.constant_value(shape) if (reshaped_shape_const is not None and sp_input.get_shape().is_fully_defined()): num_implied = sum((dim == -1) for dim in reshaped_shape_const) if num_implied > 1: raise ValueError("At most one dimension can be inferred (-1). Found: %s" % reshaped_shape_const) original_reshaped_shape = list(reshaped_shape_const) # Copy. in_shape_size = np.prod(sp_input.get_shape().as_list()) if num_implied: implied_idx = original_reshaped_shape.index(-1) non_implied_idx = ( original_reshaped_shape[:implied_idx] + original_reshaped_shape[implied_idx + 1:]) reshaped_shape_const[implied_idx] = ( in_shape_size // np.prod(non_implied_idx)) reshaped_size = np.prod(reshaped_shape_const) if reshaped_size != in_shape_size: raise ValueError("Cannot reshape a tensor with %d elements to shape %s " "(%d elements)." 
% (in_shape_size, original_reshaped_shape, reshaped_size)) reshaped_shape = reshaped_shape_const return sparse_tensor.SparseTensor(reshaped_ind, array_ops.identity(sp_input.values), reshaped_shape) # TODO(aselle): Remove keyword required once for 1.0 final class KeywordRequired(object): def __repr__(self): # This is needed to make documentation without fully qualified module paths return "KeywordRequired()" @tf_export("sparse.split", "sparse_split") @deprecation.deprecated_endpoints("sparse_split") @deprecation.deprecated_args( None, "split_dim is deprecated, use axis instead", "split_dim") def sparse_split(keyword_required=KeywordRequired(), sp_input=None, num_split=None, axis=None, name=None, split_dim=None): """Split a `SparseTensor` into `num_split` tensors along `axis`. If the `sp_input.dense_shape[axis]` is not an integer multiple of `num_split` each slice starting from 0:`shape[axis] % num_split` gets extra one dimension. For example, if `axis = 1` and `num_split = 2` and the input is: input_tensor = shape = [2, 7] [ a d e ] [b c ] Graphically the output tensors are: output_tensor[0] = [ a ] [b c ] output_tensor[1] = [ d e ] [ ] Args: keyword_required: Python 2 standin for * (temporary for argument reorder) sp_input: The `SparseTensor` to split. num_split: A Python integer. The number of ways to split. axis: A 0-D `int32` `Tensor`. The dimension along which to split. name: A name for the operation (optional). split_dim: Deprecated old name for axis. Returns: `num_split` `SparseTensor` objects resulting from splitting `value`. Raises: TypeError: If `sp_input` is not a `SparseTensor`. ValueError: If the deprecated `split_dim` and `axis` are both non None. 
""" if not isinstance(keyword_required, KeywordRequired): raise ValueError("Keyword arguments are required for this function.") if sp_input is None: raise ValueError("sp_input is required") if num_split is None: raise ValueError("num_split is required") if axis is None: raise ValueError("axis is required") axis = deprecation.deprecated_argument_lookup("axis", axis, "split_dim", split_dim) sp_input = _convert_to_sparse_tensor(sp_input) output_inds, output_vals, output_shapes = ( gen_sparse_ops.sparse_split( axis, sp_input.indices, sp_input.values, sp_input.dense_shape, num_split, name=name)) sparse_tensors = [] for i in range(0, num_split): sparse_tensors.append( sparse_tensor.SparseTensor(output_inds[i], output_vals[i], output_shapes[i])) return sparse_tensors @tf_export("sparse.slice", v1=["sparse.slice", "sparse_slice"]) @deprecation.deprecated_endpoints("sparse_slice") def sparse_slice(sp_input, start, size, name=None): """Slice a `SparseTensor` based on the `start` and `size. For example, if the input is input_tensor = shape = [2, 7] [ a d e ] [b c ] Graphically the output tensors are: sparse.slice([0, 0], [2, 4]) = shape = [2, 4] [ a ] [b c ] sparse.slice([0, 4], [2, 3]) = shape = [2, 3] [ d e ] [ ] Args: sp_input: The `SparseTensor` to split. start: 1-D. tensor represents the start of the slice. size: 1-D. tensor represents the size of the slice. name: A name for the operation (optional). Returns: A `SparseTensor` objects resulting from splicing. Raises: TypeError: If `sp_input` is not a `SparseTensor`. 
""" sp_input = _convert_to_sparse_tensor(sp_input) start = ops.convert_to_tensor(start, dtypes.int64) size = ops.convert_to_tensor(size, dtypes.int64) with ops.name_scope(name, "SparseSlice", [sp_input]) as name: output_indices, output_values, output_shape = gen_sparse_ops.sparse_slice( sp_input.indices, sp_input.values, sp_input.dense_shape, start, size, name=name) return sparse_tensor.SparseTensor(output_indices, output_values, output_shape) @tf_export("sparse_to_dense") @deprecation.deprecated( None, "Create a `tf.sparse.SparseTensor` and use `tf.sparse.to_dense` instead.") def sparse_to_dense(sparse_indices, output_shape, sparse_values, default_value=0, validate_indices=True, name=None): """Converts a sparse representation into a dense tensor. Builds an array `dense` with shape `output_shape` such that ```python # If sparse_indices is scalar dense[i] = (i == sparse_indices ? sparse_values : default_value) # If sparse_indices is a vector, then for each i dense[sparse_indices[i]] = sparse_values[i] # If sparse_indices is an n by d matrix, then for each i in [0, n) dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i] ``` All other values in `dense` are set to `default_value`. If `sparse_values` is a scalar, all sparse indices are set to this single value. Indices should be sorted in lexicographic order, and indices must not contain any repeats. If `validate_indices` is True, these properties are checked during execution. Args: sparse_indices: A 0-D, 1-D, or 2-D `Tensor` of type `int32` or `int64`. `sparse_indices[i]` contains the complete index where `sparse_values[i]` will be placed. output_shape: A 1-D `Tensor` of the same type as `sparse_indices`. Shape of the dense output tensor. sparse_values: A 0-D or 1-D `Tensor`. Values corresponding to each row of `sparse_indices`, or a scalar value to be used for all sparse indices. default_value: A 0-D `Tensor` of the same type as `sparse_values`. 
Value to set for indices not specified in `sparse_indices`. Defaults to zero. validate_indices: A boolean value. If True, indices are checked to make sure they are sorted in lexicographic order and that there are no repeats. name: A name for the operation (optional). Returns: Dense `Tensor` of shape `output_shape`. Has the same type as `sparse_values`. """ return gen_sparse_ops.sparse_to_dense( sparse_indices, output_shape, sparse_values, default_value=default_value, validate_indices=validate_indices, name=name) @tf_export("sparse.reduce_max", "sparse_reduce_max") @deprecation.deprecated_endpoints("sparse_reduce_max") @deprecation.deprecated_args( None, "keep_dims is deprecated, use keepdims instead", "keep_dims") def sparse_reduce_max(sp_input, axis=None, keepdims=None, reduction_axes=None, keep_dims=None): """Computes the max of elements across dimensions of a SparseTensor. This Op takes a SparseTensor and is the sparse counterpart to `tf.reduce_max()`. In particular, this Op also returns a dense `Tensor` instead of a sparse one. Note: A gradient is not defined for this function, so it can't be used in training models that need gradient descent. Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in `reduction_axes`. If `keepdims` is true, the reduced dimensions are retained with length 1. If `reduction_axes` has no entries, all dimensions are reduced, and a tensor with a single element is returned. Additionally, the axes can be negative, similar to the indexing rules in Python. The values not defined in `sp_input` don't participate in the reduce max, as opposed to be implicitly assumed 0 -- hence it can return negative values for sparse `reduction_axes`. But, in case there are no values in `reduction_axes`, it will reduce to 0. See second example below. For example: ```python # 'x' represents [[1, ?, 2] # [?, 3, ?]] # where ? is implicitly-zero. 
  tf.sparse.reduce_max(x) ==> 3
  tf.sparse.reduce_max(x, 0) ==> [1, 3, 2]
  tf.sparse.reduce_max(x, 1) ==> [2, 3]  # Can also use -1 as the axis.
  tf.sparse.reduce_max(x, 1, keepdims=True) ==> [[2], [3]]
  tf.sparse.reduce_max(x, [0, 1]) ==> 3

  # 'y' represents [[-7, ?]
  #                 [ 4, 3]
  #                 [ ?, ?]]
  # where ? is implicitly-zero.
  tf.sparse.reduce_max(y, 1) ==> [-7, 4, 0]
  ```

  Args:
    sp_input: The SparseTensor to reduce. Should have numeric type.
    axis: The dimensions to reduce; list or scalar. If `None` (the
      default), reduces all dimensions.
    keepdims: If true, retain reduced dimensions with length 1.
    reduction_axes: Deprecated name of axis.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced Tensor.
  """
  keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
                                                    "keep_dims", keep_dims)
  if keepdims is None:
    keepdims = False

  return gen_sparse_ops.sparse_reduce_max(
      sp_input.indices, sp_input.values, sp_input.dense_shape,
      math_ops._ReductionDims(sp_input, axis, reduction_axes), keepdims)


@tf_export("sparse.reduce_max_sparse", "sparse_reduce_max_sparse")
@deprecation.deprecated_endpoints("sparse_reduce_max_sparse")
@deprecation.deprecated_args(
    None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
def sparse_reduce_max_sparse(sp_input,
                             axis=None,
                             keepdims=None,
                             reduction_axes=None,
                             keep_dims=None):
  """Computes the max of elements across dimensions of a SparseTensor.

  This Op takes a SparseTensor and is the sparse counterpart to
  `tf.reduce_max()`.  In contrast to SparseReduceMax, this Op returns a
  SparseTensor.

  Note: A gradient is not defined for this function, so it can't be used
  in training models that need gradient descent.

  Reduces `sp_input` along the dimensions given in `reduction_axes`.  Unless
  `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in
  `reduction_axes`. If `keepdims` is true, the reduced dimensions are retained
  with length 1.

  If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
  with a single element is returned.
Additionally, the axes can be negative, which are interpreted according to the indexing rules in Python. Args: sp_input: The SparseTensor to reduce. Should have numeric type. axis: The dimensions to reduce; list or scalar. If `None` (the default), reduces all dimensions. keepdims: If true, retain reduced dimensions with length 1. reduction_axes: Deprecated name of axis. keep_dims: Deprecated alias for `keepdims`. Returns: The reduced SparseTensor. """ keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims, "keep_dims", keep_dims) if keepdims is None: keepdims = False output_ind, output_val, output_shape = ( gen_sparse_ops.sparse_reduce_max_sparse( sp_input.indices, sp_input.values, sp_input.dense_shape, math_ops._ReductionDims(sp_input, axis, reduction_axes), keepdims)) return sparse_tensor.SparseTensor(output_ind, output_val, output_shape) @tf_export("sparse.reduce_sum", "sparse_reduce_sum") @deprecation.deprecated_endpoints("sparse_reduce_sum") @deprecation.deprecated_args( None, "keep_dims is deprecated, use keepdims instead", "keep_dims") def sparse_reduce_sum(sp_input, axis=None, keepdims=None, reduction_axes=None, keep_dims=None): """Computes the sum of elements across dimensions of a SparseTensor. This Op takes a SparseTensor and is the sparse counterpart to `tf.reduce_sum()`. In particular, this Op also returns a dense `Tensor` instead of a sparse one. Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in `reduction_axes`. If `keepdims` is true, the reduced dimensions are retained with length 1. If `reduction_axes` has no entries, all dimensions are reduced, and a tensor with a single element is returned. Additionally, the axes can be negative, similar to the indexing rules in Python. For example: ```python # 'x' represents [[1, ?, 1] # [?, 1, ?]] # where ? is implicitly-zero. 
tf.sparse.reduce_sum(x) ==> 3 tf.sparse.reduce_sum(x, 0) ==> [1, 1, 1] tf.sparse.reduce_sum(x, 1) ==> [2, 1] # Can also use -1 as the axis. tf.sparse.reduce_sum(x, 1, keepdims=True) ==> [[2], [1]] tf.sparse.reduce_sum(x, [0, 1]) ==> 3 ``` Args: sp_input: The SparseTensor to reduce. Should have numeric type. axis: The dimensions to reduce; list or scalar. If `None` (the default), reduces all dimensions. keepdims: If true, retain reduced dimensions with length 1. reduction_axes: Deprecated name of axis. keep_dims: Deprecated alias for `keepdims`. Returns: The reduced Tensor. """ keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims, "keep_dims", keep_dims) if keepdims is None: keepdims = False return gen_sparse_ops.sparse_reduce_sum( sp_input.indices, sp_input.values, sp_input.dense_shape, math_ops._ReductionDims(sp_input, axis, reduction_axes), keepdims) @tf_export("sparse.reduce_sum_sparse", "sparse_reduce_sum_sparse") @deprecation.deprecated_endpoints("sparse_reduce_sum_sparse") @deprecation.deprecated_args( None, "keep_dims is deprecated, use keepdims instead", "keep_dims") def sparse_reduce_sum_sparse(sp_input, axis=None, keepdims=None, reduction_axes=None, keep_dims=None): """Computes the sum of elements across dimensions of a SparseTensor. This Op takes a SparseTensor and is the sparse counterpart to `tf.reduce_sum()`. In contrast to SparseReduceSum, this Op returns a SparseTensor. Note: A gradient is not defined for this function, so it can't be used in training models that need gradient descent. Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in `reduction_axes`. If `keepdims` is true, the reduced dimensions are retained with length 1. If `reduction_axes` has no entries, all dimensions are reduced, and a tensor with a single element is returned. 
Additionally, the axes can be negative, which are interpreted according to the indexing rules in Python. Args: sp_input: The SparseTensor to reduce. Should have numeric type. axis: The dimensions to reduce; list or scalar. If `None` (the default), reduces all dimensions. keepdims: If true, retain reduced dimensions with length 1. reduction_axes: Deprecated name of axis. keep_dims: Deprecated alias for `keepdims`. Returns: The reduced SparseTensor. """ keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims, "keep_dims", keep_dims) if keepdims is None: keepdims = False output_ind, output_val, output_shape = ( gen_sparse_ops.sparse_reduce_sum_sparse( sp_input.indices, sp_input.values, sp_input.dense_shape, math_ops._ReductionDims(sp_input, axis, reduction_axes), keepdims)) return sparse_tensor.SparseTensor(output_ind, output_val, output_shape) @tf_export("sparse.to_dense", v1=["sparse.to_dense", "sparse_tensor_to_dense"]) @deprecation.deprecated_endpoints("sparse_tensor_to_dense") def sparse_tensor_to_dense(sp_input, default_value=0, validate_indices=True, name=None): """Converts a `SparseTensor` into a dense tensor. This op is a convenience wrapper around `sparse_to_dense` for `SparseTensor`s. For example, if `sp_input` has shape `[3, 5]` and non-empty string values: [0, 1]: a [0, 3]: b [2, 0]: c and `default_value` is `x`, then the output will be a dense `[3, 5]` string tensor with values: [[x a x b x] [x x x x x] [c x x x x]] Indices must be without repeats. This is only tested if validate_indices is True. Args: sp_input: The input `SparseTensor`. default_value: Scalar value to set for indices not specified in `sp_input`. Defaults to zero. validate_indices: A boolean value. If `True`, indices are checked to make sure they are sorted in lexicographic order and that there are no repeats. name: A name prefix for the returned tensors (optional). 
Returns: A dense tensor with shape `sp_input.dense_shape` and values specified by the non-empty values in `sp_input`. Indices not in `sp_input` are assigned `default_value`. Raises: TypeError: If `sp_input` is not a `SparseTensor`. """ sp_input = _convert_to_sparse_tensor(sp_input) return sparse_to_dense( sp_input.indices, sp_input.dense_shape, sp_input.values, default_value=default_value, validate_indices=validate_indices, name=name) @tf_export( "sparse.to_indicator", v1=["sparse.to_indicator", "sparse_to_indicator"]) @deprecation.deprecated_endpoints("sparse_to_indicator") def sparse_to_indicator(sp_input, vocab_size, name=None): """Converts a `SparseTensor` of ids into a dense bool indicator tensor. The last dimension of `sp_input.indices` is discarded and replaced with the values of `sp_input`. If `sp_input.dense_shape = [D0, D1, ..., Dn, K]`, then `output.shape = [D0, D1, ..., Dn, vocab_size]`, where output[d_0, d_1, ..., d_n, sp_input[d_0, d_1, ..., d_n, k]] = True and False elsewhere in `output`. For example, if `sp_input.dense_shape = [2, 3, 4]` with non-empty values: [0, 0, 0]: 0 [0, 1, 0]: 10 [1, 0, 3]: 103 [1, 1, 2]: 150 [1, 1, 3]: 149 [1, 1, 4]: 150 [1, 2, 1]: 121 and `vocab_size = 200`, then the output will be a `[2, 3, 200]` dense bool tensor with False everywhere except at positions (0, 0, 0), (0, 1, 10), (1, 0, 103), (1, 1, 149), (1, 1, 150), (1, 2, 121). Note that repeats are allowed in the input SparseTensor. This op is useful for converting `SparseTensor`s into dense formats for compatibility with ops that expect dense tensors. The input `SparseTensor` must be in row-major order. Args: sp_input: A `SparseTensor` with `values` property of type `int32` or `int64`. vocab_size: A scalar int64 Tensor (or Python int) containing the new size of the last dimension, `all(0 <= sp_input.values < vocab_size)`. name: A name prefix for the returned tensors (optional) Returns: A dense bool indicator tensor representing the indices with specified value. 
Raises: TypeError: If `sp_input` is not a `SparseTensor`. """ sp_input = _convert_to_sparse_tensor(sp_input) with ops.name_scope(name, "SparseToIndicator", [sp_input]) as name: num_entries = array_ops.shape(sp_input.indices)[0] new_values = array_ops.fill(array_ops.expand_dims(num_entries, 0), True) sp_values = sparse_tensor.SparseTensor(sp_input.indices, new_values, sp_input.dense_shape) sp_new = sparse_merge(sp_input, sp_values, vocab_size, name) # validate_indices may be False because we allow duplicates in new_indices: # repeated indices are allowed when creating an indicator matrix. return sparse_tensor_to_dense( sp_new, default_value=False, validate_indices=False, name=name) @tf_export("sparse.merge", v1=["sparse.merge", "sparse_merge"]) @deprecation.deprecated_endpoints("sparse_merge") def sparse_merge(sp_ids, sp_values, vocab_size, name=None, already_sorted=False): """Combines a batch of feature ids and values into a single `SparseTensor`. The most common use case for this function occurs when feature ids and their corresponding values are stored in `Example` protos on disk. `parse_example` will return a batch of ids and a batch of values, and this function joins them into a single logical `SparseTensor` for use in functions such as `sparse_tensor_dense_matmul`, `sparse_to_dense`, etc. The `SparseTensor` returned by this function has the following properties: - `indices` is equivalent to `sp_ids.indices` with the last dimension discarded and replaced with `sp_ids.values`. - `values` is simply `sp_values.values`. - If `sp_ids.dense_shape = [D0, D1, ..., Dn, K]`, then `output.shape = [D0, D1, ..., Dn, vocab_size]`. 
For example, consider the following feature vectors: ```python vector1 = [-3, 0, 0, 0, 0, 0] vector2 = [ 0, 1, 0, 4, 1, 0] vector3 = [ 5, 0, 0, 9, 0, 0] ``` These might be stored sparsely in the following Example protos by storing only the feature ids (column number if the vectors are treated as a matrix) of the non-zero elements and the corresponding values: ```python examples = [Example(features={ "ids": Feature(int64_list=Int64List(value=[0])), "values": Feature(float_list=FloatList(value=[-3]))}), Example(features={ "ids": Feature(int64_list=Int64List(value=[1, 4, 3])), "values": Feature(float_list=FloatList(value=[1, 1, 4]))}), Example(features={ "ids": Feature(int64_list=Int64List(value=[0, 3])), "values": Feature(float_list=FloatList(value=[5, 9]))})] ``` The result of calling parse_example on these examples will produce a dictionary with entries for "ids" and "values". Passing those two objects to this function along with vocab_size=6, will produce a `SparseTensor` that sparsely represents all three instances. Namely, the `indices` property will contain the coordinates of the non-zero entries in the feature matrix (the first dimension is the row number in the matrix, i.e., the index within the batch, and the second dimension is the column number, i.e., the feature id); `values` will contain the actual values. `shape` will be the shape of the original matrix, i.e., (3, 6). For our example above, the output will be equal to: ```python SparseTensor(indices=[[0, 0], [1, 1], [1, 3], [1, 4], [2, 0], [2, 3]], values=[-3, 1, 4, 1, 5, 9], dense_shape=[3, 6]) ``` This method generalizes to higher-dimensions by simply providing a list for both the sp_ids as well as the vocab_size. In this case the resulting `SparseTensor` has the following properties: - `indices` is equivalent to `sp_ids[0].indices` with the last dimension discarded and concatenated with `sp_ids[0].values, sp_ids[1].values, ...`. - `values` is simply `sp_values.values`. 
  - If `sp_ids.dense_shape = [D0, D1, ..., Dn, K]`, then
      `output.shape = [D0, D1, ..., Dn] + vocab_size`.

  Args:
    sp_ids: A single `SparseTensor` with `values` property of type `int32`
      or `int64` or a Python list of such `SparseTensor`s or a list thereof.
    sp_values: A `SparseTensor` of any type.
    vocab_size: A scalar `int64` Tensor (or Python int) containing the new size
      of the last dimension, `all(0 <= sp_ids.values < vocab_size)`.
      Or a list thereof with `all(0 <= sp_ids[i].values < vocab_size[i])` for
      all `i`.
    name: A name prefix for the returned tensors (optional)
    already_sorted: A boolean to specify whether the per-batch values in
      `sp_values` are already sorted. If so skip sorting, False by default
      (optional).

  Returns:
    A `SparseTensor` compactly representing a batch of feature ids and values,
    useful for passing to functions that expect such a `SparseTensor`.

  Raises:
    TypeError: If `sp_values` is not a `SparseTensor`. Or if `sp_ids` is neither
      a `SparseTensor` nor a list thereof. Or if `vocab_size` is not a
      `Tensor` or a Python int and `sp_ids` is a `SparseTensor`. Or if
      `vocab_size` is not a `Tensor` or Python int, or a list thereof, and
      `sp_ids` is a list.
    ValueError: If `sp_ids` and `vocab_size` are lists of different lengths.
  """
  if isinstance(sp_ids, sparse_tensor.SparseTensorValue) or isinstance(
      sp_ids, sparse_tensor.SparseTensor):
    # Single-tensor case: wrap both arguments into singleton lists so the
    # merge logic below can treat all inputs uniformly.
    sp_ids = [sp_ids]
    if not (isinstance(vocab_size, ops.Tensor) or
            isinstance(vocab_size, numbers.Integral)):
      raise TypeError("vocab_size has to be a Tensor or Python int. Found %s" %
                      type(vocab_size))
    vocab_size = [vocab_size]
  else:
    # NOTE(review): `collections.Iterable` is deprecated (removed in
    # Python 3.10); this should migrate to `collections.abc.Iterable`.
    if not isinstance(sp_ids, collections.Iterable):
      raise TypeError("sp_ids has to be a SparseTensor or list thereof. "
                      "Found %s" % type(sp_ids))
    if not isinstance(vocab_size, collections.Iterable):
      raise TypeError("vocab_size has to be a list of Tensors or Python ints. 
" "Found %s" % type(vocab_size)) for dim in vocab_size: if not (isinstance(dim, ops.Tensor) or isinstance(dim, numbers.Integral)): raise TypeError( "vocab_size has to be a list of Tensors or Python ints. Found %s" % type(dim)) if len(sp_ids) != len(vocab_size): raise ValueError("sp_ids and vocab_size have to have equal lengths.") with ops.name_scope(name, "SparseMerge", [sp_ids, sp_values]): sp_ids = [_convert_to_sparse_tensor(sp_ids_dim) for sp_ids_dim in sp_ids] sp_values = _convert_to_sparse_tensor(sp_values) ids = [] for sp_ids_dim in sp_ids: ids_dim = sp_ids_dim.values if sp_ids_dim.dtype != dtypes.int64: ids_dim = math_ops.cast(ids_dim, dtypes.int64) ids += [array_ops.expand_dims(ids_dim, axis=1)] vocab_size = [math_ops.cast(x, dtypes.int64) for x in vocab_size] # Slice off the last dimension of indices, then tack on the ids indices_columns_to_preserve = sp_ids[0].indices[:, :-1] new_indices = array_ops.concat([indices_columns_to_preserve] + ids, 1) new_values = sp_values.values new_shape = array_ops.concat([sp_ids[0].dense_shape[:-1], vocab_size], 0) result = sparse_tensor.SparseTensor(new_indices, new_values, new_shape) if already_sorted: return result sorted_result = sparse_reorder(result) return sparse_tensor.SparseTensor( sorted_result.indices, sorted_result.values, new_shape) @tf_export("sparse.retain", v1=["sparse.retain", "sparse_retain"]) @deprecation.deprecated_endpoints("sparse_retain") def sparse_retain(sp_input, to_retain): """Retains specified non-empty values within a `SparseTensor`. For example, if `sp_input` has shape `[4, 5]` and 4 non-empty string values: [0, 1]: a [0, 3]: b [2, 0]: c [3, 1]: d and `to_retain = [True, False, False, True]`, then the output will be a `SparseTensor` of shape `[4, 5]` with 2 non-empty values: [0, 1]: a [3, 1]: d Args: sp_input: The input `SparseTensor` with `N` non-empty elements. to_retain: A bool vector of length `N` with `M` true values. 
Returns: A `SparseTensor` with the same shape as the input and `M` non-empty elements corresponding to the true positions in `to_retain`. Raises: TypeError: If `sp_input` is not a `SparseTensor`. """ sp_input = _convert_to_sparse_tensor(sp_input) to_retain = ops.convert_to_tensor(to_retain) # Shape checking, if shape is known at graph construction time retain_shape = to_retain.get_shape() retain_shape.assert_has_rank(1) sp_input.values.get_shape()[0].merge_with(retain_shape[0]) where_true = array_ops.reshape(array_ops.where(to_retain), [-1]) new_indices = array_ops.gather(sp_input.indices, where_true) new_values = array_ops.gather(sp_input.values, where_true) return sparse_tensor.SparseTensor(new_indices, new_values, array_ops.identity(sp_input.dense_shape)) @tf_export( "sparse.reset_shape", v1=["sparse.reset_shape", "sparse_reset_shape"]) @deprecation.deprecated_endpoints("sparse_reset_shape") def sparse_reset_shape(sp_input, new_shape=None): """Resets the shape of a `SparseTensor` with indices and values unchanged. If `new_shape` is None, returns a copy of `sp_input` with its shape reset to the tight bounding box of `sp_input`. This will be a shape consisting of all zeros if sp_input has no values. If `new_shape` is provided, then it must be larger or equal in all dimensions compared to the shape of `sp_input`. When this condition is met, the returned SparseTensor will have its shape reset to `new_shape` and its indices and values unchanged from that of `sp_input.` For example: Consider a `sp_input` with shape [2, 3, 5]: [0, 0, 1]: a [0, 1, 0]: b [0, 2, 2]: c [1, 0, 3]: d - It is an error to set `new_shape` as [3, 7] since this represents a rank-2 tensor while `sp_input` is rank-3. This is either a ValueError during graph construction (if both shapes are known) or an OpError during run time. - Setting `new_shape` as [2, 3, 6] will be fine as this shape is larger or equal in every dimension compared to the original shape [2, 3, 5]. 
- On the other hand, setting new_shape as [2, 3, 4] is also an error: The third dimension is smaller than the original shape [2, 3, 5] (and an `InvalidArgumentError` will be raised). - If `new_shape` is None, the returned SparseTensor will have a shape [2, 3, 4], which is the tight bounding box of `sp_input`. Args: sp_input: The input `SparseTensor`. new_shape: None or a vector representing the new shape for the returned `SparseTensor`. Returns: A `SparseTensor` indices and values unchanged from `input_sp`. Its shape is `new_shape` if that is set. Otherwise it is the tight bounding box of `input_sp` Raises: TypeError: If `sp_input` is not a `SparseTensor`. ValueError: If `new_shape` represents a tensor with a different rank from that of `sp_input` (if shapes are known when graph is constructed). ValueError: If `new_shape` is determined during graph build to have dimension sizes that are too small. OpError: - If `new_shape` has dimension sizes that are too small. - If shapes are not known during graph construction time, and during run time it is found out that the ranks do not match. """ sp_input = _convert_to_sparse_tensor(sp_input) in_indices = array_ops.identity(sp_input.indices) in_values = array_ops.identity(sp_input.values) in_shape = array_ops.identity(sp_input.dense_shape) if new_shape is None: dim_low_bound = math_ops.reduce_max(in_indices, axis=0) output_shape_tensor = math_ops.maximum( array_ops.constant(0, dtype=dtypes.int64), math_ops.add(dim_low_bound, array_ops.ones_like(in_shape))) else: output_shape_tensor = ops.convert_to_tensor(new_shape) output_shape_tensor.get_shape().assert_has_rank(1) output_shape_tensor = math_ops.cast(output_shape_tensor, dtypes.int64) # For cases when shape is known during graph construction, this catches the # error before the sparse_tensor.SparseTensor catches it. 
    # (tail of sparse_reset_shape, continued from above)
    # Static check: the requested rank must match the input's rank.
    output_shape_tensor.get_shape()[0].merge_with(in_shape.get_shape()[0])

    output_shape_tensor_const = tensor_util.constant_value(output_shape_tensor)
    # For cases where all shapes are known during graph construction
    if (output_shape_tensor_const is not None and
        sp_input.get_shape().is_fully_defined()):
      in_shape_const = np.array(sp_input.get_shape().as_list())
      # Every requested dimension must be able to hold the input dimension.
      if not np.all(in_shape_const <= output_shape_tensor_const):
        raise ValueError(
            "Requested new_shape should have dimension sizes >= sp_input.shape."
            " Found new_shape (%s), sp_input.shape (%s)." %
            (in_shape_const, output_shape_tensor_const))
      output_shape_tensor = output_shape_tensor_const
    else:
      # For cases where shape is not known during graph construction.
      # Defer both checks (rank equality, elementwise upper bound) to run time
      # by attaching assert ops as control dependencies of the output shape.
      output_shape_tensor = control_flow_ops.with_dependencies([
          check_ops.assert_equal(
              array_ops.shape(in_shape), array_ops.shape(output_shape_tensor))
      ], output_shape_tensor)
      output_shape_tensor = control_flow_ops.with_dependencies(
          [check_ops.assert_less_equal(in_shape, output_shape_tensor)],
          output_shape_tensor)

  # Indices and values pass through untouched; only the dense shape changes.
  return sparse_tensor.SparseTensor(in_indices, in_values, output_shape_tensor)


@tf_export(
    "sparse.fill_empty_rows",
    v1=["sparse.fill_empty_rows", "sparse_fill_empty_rows"])
@deprecation.deprecated_endpoints("sparse_fill_empty_rows")
def sparse_fill_empty_rows(sp_input, default_value, name=None):
  """Fills empty rows in the input 2-D `SparseTensor` with a default value.

  This op adds entries with the specified `default_value` at index
  `[row, 0]` for any row in the input that does not already have a value.

  For example, suppose `sp_input` has shape `[5, 6]` and non-empty values:

      [0, 1]: a
      [0, 3]: b
      [2, 0]: c
      [3, 1]: d

  Rows 1 and 4 are empty, so the output will be of shape `[5, 6]` with
  values:

      [0, 1]: a
      [0, 3]: b
      [1, 0]: default_value
      [2, 0]: c
      [3, 1]: d
      [4, 0]: default_value

  Note that the input may have empty columns at the end, with no effect on
  this op.

  The output `SparseTensor` will be in row-major order and will have the
  same shape as the input.

  This op also returns an indicator vector such that

      empty_row_indicator[i] = True iff row i was an empty row.

  Args:
    sp_input: A `SparseTensor` with shape `[N, M]`.
    default_value: The value to fill for empty rows, with the same type as
      `sp_input.`
    name: A name prefix for the returned tensors (optional)

  Returns:
    sp_ordered_output: A `SparseTensor` with shape `[N, M]`, and with all empty
      rows filled in with `default_value`.
    empty_row_indicator: A bool vector of length `N` indicating whether each
      input row was empty.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  sp_input = _convert_to_sparse_tensor(sp_input)
  with ops.name_scope(name, "SparseFillEmptyRows", [sp_input]):
    # The fill value must match the values' dtype for the kernel call below.
    default_value = ops.convert_to_tensor(
        default_value, dtype=sp_input.values.dtype)
    # The kernel also produces a reverse index map (used by the gradient);
    # it is intentionally discarded here.
    (output_indices, output_values, empty_row_indicator,
     unused_reverse_index_map) = gen_sparse_ops.sparse_fill_empty_rows(
         indices=sp_input.indices,
         values=sp_input.values,
         dense_shape=sp_input.dense_shape,
         default_value=default_value)
    # Dense shape is unchanged: only previously-empty rows gained an entry.
    return (sparse_tensor.SparseTensor(
        indices=output_indices,
        values=output_values,
        dense_shape=sp_input.dense_shape), empty_row_indicator)


@tf_export(
    "io.serialize_sparse",
    v1=["io.serialize_sparse", "serialize_sparse"])
@deprecation.deprecated_endpoints("serialize_sparse")
def serialize_sparse(sp_input, name=None, out_type=dtypes.string):
  """Serialize a `SparseTensor` into a 3-vector (1-D `Tensor`) object.

  Args:
    sp_input: The input `SparseTensor`.
    name: A name prefix for the returned tensors (optional).
    out_type: The `dtype` to use for serialization.

  Returns:
    A 3-vector (1-D `Tensor`), with each column representing the serialized
    `SparseTensor`'s indices, values, and shape (respectively).

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
""" sp_input = _convert_to_sparse_tensor(sp_input) return gen_sparse_ops.serialize_sparse( sp_input.indices, sp_input.values, sp_input.dense_shape, name=name, out_type=out_type) @tf_export( "io.serialize_many_sparse", v1=["io.serialize_many_sparse", "serialize_many_sparse"]) @deprecation.deprecated_endpoints("serialize_many_sparse") def serialize_many_sparse(sp_input, name=None, out_type=dtypes.string): """Serialize `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor`. The `SparseTensor` must have rank `R` greater than 1, and the first dimension is treated as the minibatch dimension. Elements of the `SparseTensor` must be sorted in increasing order of this first dimension. The serialized `SparseTensor` objects going into each row of the output `Tensor` will have rank `R-1`. The minibatch size `N` is extracted from `sparse_shape[0]`. Args: sp_input: The input rank `R` `SparseTensor`. name: A name prefix for the returned tensors (optional). out_type: The `dtype` to use for serialization. Returns: A matrix (2-D `Tensor`) with `N` rows and `3` columns. Each column represents serialized `SparseTensor`'s indices, values, and shape (respectively). Raises: TypeError: If `sp_input` is not a `SparseTensor`. """ sp_input = _convert_to_sparse_tensor(sp_input) return gen_sparse_ops.serialize_many_sparse( sp_input.indices, sp_input.values, sp_input.dense_shape, name=name, out_type=out_type) def deserialize_sparse(serialized_sparse, dtype, rank=None, name=None): """Deserialize `SparseTensor` objects. The input `serialized_sparse` must have the shape `[?, ?, ..., ?, 3]` where the last dimension stores serialized `SparseTensor` objects and the other N dimensions (N >= 0) correspond to a batch. The ranks of the original `SparseTensor` objects must all match. When the final `SparseTensor` is created, its rank is the rank of the incoming `SparseTensor` objects plus N; the sparse tensors have been concatenated along new dimensions, one for each batch. 
The output `SparseTensor` object's shape values for the original dimensions are the max across the input `SparseTensor` objects' shape values for the corresponding dimensions. The new dimensions match the size of the batch. The input `SparseTensor` objects' indices are assumed ordered in standard lexicographic order. If this is not the case, after this step run `SparseReorder` to restore index ordering. For example, if the serialized input is a `[2 x 3]` matrix representing two original `SparseTensor` objects: index = [ 0] [10] [20] values = [1, 2, 3] shape = [50] and index = [ 2] [10] values = [4, 5] shape = [30] then the final deserialized `SparseTensor` will be: index = [0 0] [0 10] [0 20] [1 2] [1 10] values = [1, 2, 3, 4, 5] shape = [2 50] Args: serialized_sparse: The serialized `SparseTensor` objects. The last dimension must have 3 columns. dtype: The `dtype` of the serialized `SparseTensor` objects. rank: (optional) Python int, the rank of the `SparseTensor` objects. name: A name prefix for the returned tensors (optional). Returns: A `SparseTensor` representing the deserialized `SparseTensor` objects. """ output_indices, output_values, output_shape = ( gen_sparse_ops.deserialize_sparse(serialized_sparse, dtype, name=name)) # Feed rank data back in, if available output_indices.set_shape([None, rank]) output_shape.set_shape([rank]) return sparse_tensor.SparseTensor(output_indices, output_values, output_shape) @tf_export( "io.deserialize_many_sparse", v1=["io.deserialize_many_sparse", "deserialize_many_sparse"]) @deprecation.deprecated_endpoints("deserialize_many_sparse") def deserialize_many_sparse(serialized_sparse, dtype, rank=None, name=None): """Deserialize and concatenate `SparseTensors` from a serialized minibatch. The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where `N` is the minibatch size and the rows correspond to packed outputs of `serialize_sparse`. The ranks of the original `SparseTensor` objects must all match. 
  When the final `SparseTensor` is created, it has rank one
  higher than the ranks of the incoming `SparseTensor` objects (they have been
  concatenated along a new row dimension).

  The output `SparseTensor` object's shape values for all dimensions but the
  first are the max across the input `SparseTensor` objects' shape values
  for the corresponding dimensions.  Its first shape value is `N`, the
  minibatch size.

  The input `SparseTensor` objects' indices are assumed ordered in
  standard lexicographic order.  If this is not the case, after this
  step run `sparse.reorder` to restore index ordering.

  For example, if the serialized input is a `[2, 3]` matrix representing two
  original `SparseTensor` objects:

      index = [ 0]
              [10]
              [20]
      values = [1, 2, 3]
      shape = [50]

  and

      index = [ 2]
              [10]
      values = [4, 5]
      shape = [30]

  then the final deserialized `SparseTensor` will be:

      index = [0  0]
              [0 10]
              [0 20]
              [1  2]
              [1 10]
      values = [1, 2, 3, 4, 5]
      shape = [2 50]

  Args:
    serialized_sparse: 2-D `Tensor` of type `string` of shape `[N, 3]`.
      The serialized and packed `SparseTensor` objects.
    dtype: The `dtype` of the serialized `SparseTensor` objects.
    rank: (optional) Python int, the rank of the `SparseTensor` objects.
    name: A name prefix for the returned tensors (optional)

  Returns:
    A `SparseTensor` representing the deserialized `SparseTensor`s,
    concatenated along the `SparseTensor`s' first dimension.

    All of the serialized `SparseTensor`s must have had the same rank and type.
  """
  output_indices, output_values, output_shape = (
      gen_sparse_ops.deserialize_many_sparse(
          serialized_sparse, dtype, name=name))

  # Feed rank data back in, if available
  # (set_shape accepts None entries, so this is a no-op when rank is None).
  output_indices.set_shape([None, rank])
  output_shape.set_shape([rank])

  return sparse_tensor.SparseTensor(output_indices, output_values,
                                    output_shape)


@tf_export("sparse.matmul", v1=["sparse.matmul", "sparse_tensor_dense_matmul"])
@deprecation.deprecated_endpoints("sparse_tensor_dense_matmul")
def sparse_tensor_dense_matmul(sp_a,
                               b,
                               adjoint_a=False,
                               adjoint_b=False,
                               name=None):
  # pylint: disable=line-too-long
  """Multiply SparseTensor (of rank 2) "A" by dense matrix "B".

  No validity checking is performed on the indices of `A`.  However, the
  following input format is recommended for optimal behavior:

  * If `adjoint_a == false`: `A` should be sorted in lexicographically
    increasing order.  Use `sparse.reorder` if you're not sure.
  * If `adjoint_a == true`: `A` should be sorted in order of increasing
    dimension 1 (i.e., "column major" order instead of "row major" order).

  Using `tf.nn.embedding_lookup_sparse` for sparse multiplication:

  It's not obvious but you can consider `embedding_lookup_sparse` as another
  sparse and dense multiplication. In some situations, you may prefer to use
  `embedding_lookup_sparse` even though you're not dealing with embeddings.

  There are two questions to ask in the decision process: Do you need gradients
  computed as sparse too? Is your sparse data represented as two
  `SparseTensor`s: ids and values? There is more explanation about data format
  below. If you answer any of these questions as yes, consider using
  `tf.nn.embedding_lookup_sparse`.
Following explains differences between the expected SparseTensors: For example if dense form of your sparse data has shape `[3, 5]` and values: [[ a ] [b c] [ d ]] `SparseTensor` format expected by `sparse_tensor_dense_matmul`: `sp_a` (indices, values): [0, 1]: a [1, 0]: b [1, 4]: c [2, 2]: d `SparseTensor` format expected by `embedding_lookup_sparse`: `sp_ids` `sp_weights` [0, 0]: 1 [0, 0]: a [1, 0]: 0 [1, 0]: b [1, 1]: 4 [1, 1]: c [2, 0]: 2 [2, 0]: d Deciding when to use `sparse_tensor_dense_matmul` vs. `matmul`(a_is_sparse=True): There are a number of questions to ask in the decision process, including: * Will the SparseTensor `A` fit in memory if densified? * Is the column count of the product large (>> 1)? * Is the density of `A` larger than approximately 15%? If the answer to several of these questions is yes, consider converting the `SparseTensor` to a dense one and using `tf.matmul` with `a_is_sparse=True`. This operation tends to perform well when `A` is more sparse, if the column size of the product is small (e.g. matrix-vector multiplication), if `sp_a.dense_shape` takes on large values. Below is a rough speed comparison between `sparse_tensor_dense_matmul`, labeled 'sparse', and `matmul`(a_is_sparse=True), labeled 'dense'. For purposes of the comparison, the time spent converting from a `SparseTensor` to a dense `Tensor` is not included, so it is overly conservative with respect to the time ratio. 
Benchmark system: CPU: Intel Ivybridge with HyperThreading (6 cores) dL1:32KB dL2:256KB dL3:12MB GPU: NVidia Tesla k40c Compiled with: `-c opt --config=cuda --copt=-mavx` ``` tensorflow/python/sparse_tensor_dense_matmul_op_test --benchmarks A sparse [m, k] with % nonzero values between 1% and 80% B dense [k, n] % nnz n gpu m k dt(dense) dt(sparse) dt(sparse)/dt(dense) 0.01 1 True 100 100 0.000221166 0.00010154 0.459112 0.01 1 True 100 1000 0.00033858 0.000109275 0.322745 0.01 1 True 1000 100 0.000310557 9.85661e-05 0.317385 0.01 1 True 1000 1000 0.0008721 0.000100875 0.115669 0.01 1 False 100 100 0.000208085 0.000107603 0.51711 0.01 1 False 100 1000 0.000327112 9.51118e-05 0.290762 0.01 1 False 1000 100 0.000308222 0.00010345 0.335635 0.01 1 False 1000 1000 0.000865721 0.000101397 0.117124 0.01 10 True 100 100 0.000218522 0.000105537 0.482958 0.01 10 True 100 1000 0.000340882 0.000111641 0.327506 0.01 10 True 1000 100 0.000315472 0.000117376 0.372064 0.01 10 True 1000 1000 0.000905493 0.000123263 0.136128 0.01 10 False 100 100 0.000221529 9.82571e-05 0.44354 0.01 10 False 100 1000 0.000330552 0.000112615 0.340687 0.01 10 False 1000 100 0.000341277 0.000114097 0.334324 0.01 10 False 1000 1000 0.000819944 0.000120982 0.147549 0.01 25 True 100 100 0.000207806 0.000105977 0.509981 0.01 25 True 100 1000 0.000322879 0.00012921 0.400181 0.01 25 True 1000 100 0.00038262 0.00014158 0.370035 0.01 25 True 1000 1000 0.000865438 0.000202083 0.233504 0.01 25 False 100 100 0.000209401 0.000104696 0.499979 0.01 25 False 100 1000 0.000321161 0.000130737 0.407076 0.01 25 False 1000 100 0.000377012 0.000136801 0.362856 0.01 25 False 1000 1000 0.000861125 0.00020272 0.235413 0.2 1 True 100 100 0.000206952 9.69219e-05 0.46833 0.2 1 True 100 1000 0.000348674 0.000147475 0.422959 0.2 1 True 1000 100 0.000336908 0.00010122 0.300439 0.2 1 True 1000 1000 0.001022 0.000203274 0.198898 0.2 1 False 100 100 0.000207532 9.5412e-05 0.459746 0.2 1 False 100 1000 0.000356127 0.000146824 0.41228 0.2 
1 False 1000 100 0.000322664 0.000100918 0.312764 0.2 1 False 1000 1000 0.000998987 0.000203442 0.203648 0.2 10 True 100 100 0.000211692 0.000109903 0.519165 0.2 10 True 100 1000 0.000372819 0.000164321 0.440753 0.2 10 True 1000 100 0.000338651 0.000144806 0.427596 0.2 10 True 1000 1000 0.00108312 0.000758876 0.70064 0.2 10 False 100 100 0.000215727 0.000110502 0.512231 0.2 10 False 100 1000 0.000375419 0.0001613 0.429653 0.2 10 False 1000 100 0.000336999 0.000145628 0.432132 0.2 10 False 1000 1000 0.00110502 0.000762043 0.689618 0.2 25 True 100 100 0.000218705 0.000129913 0.594009 0.2 25 True 100 1000 0.000394794 0.00029428 0.745402 0.2 25 True 1000 100 0.000404483 0.0002693 0.665788 0.2 25 True 1000 1000 0.0012002 0.00194494 1.62052 0.2 25 False 100 100 0.000221494 0.0001306 0.589632 0.2 25 False 100 1000 0.000396436 0.000297204 0.74969 0.2 25 False 1000 100 0.000409346 0.000270068 0.659754 0.2 25 False 1000 1000 0.00121051 0.00193737 1.60046 0.5 1 True 100 100 0.000214981 9.82111e-05 0.456836 0.5 1 True 100 1000 0.000415328 0.000223073 0.537101 0.5 1 True 1000 100 0.000358324 0.00011269 0.314492 0.5 1 True 1000 1000 0.00137612 0.000437401 0.317851 0.5 1 False 100 100 0.000224196 0.000101423 0.452386 0.5 1 False 100 1000 0.000400987 0.000223286 0.556841 0.5 1 False 1000 100 0.000368825 0.00011224 0.304318 0.5 1 False 1000 1000 0.00136036 0.000429369 0.31563 0.5 10 True 100 100 0.000222125 0.000112308 0.505608 0.5 10 True 100 1000 0.000461088 0.00032357 0.701753 0.5 10 True 1000 100 0.000394624 0.000225497 0.571422 0.5 10 True 1000 1000 0.00158027 0.00190898 1.20801 0.5 10 False 100 100 0.000232083 0.000114978 0.495418 0.5 10 False 100 1000 0.000454574 0.000324632 0.714146 0.5 10 False 1000 100 0.000379097 0.000227768 0.600817 0.5 10 False 1000 1000 0.00160292 0.00190168 1.18638 0.5 25 True 100 100 0.00023429 0.000151703 0.647501 0.5 25 True 100 1000 0.000497462 0.000598873 1.20386 0.5 25 True 1000 100 0.000460778 0.000557038 1.20891 0.5 25 True 1000 1000 
0.00170036 0.00467336 2.74845 0.5 25 False 100 100 0.000228981 0.000155334 0.678371 0.5 25 False 100 1000 0.000496139 0.000620789 1.25124 0.5 25 False 1000 100 0.00045473 0.000551528 1.21287 0.5 25 False 1000 1000 0.00171793 0.00467152 2.71927 0.8 1 True 100 100 0.000222037 0.000105301 0.47425 0.8 1 True 100 1000 0.000410804 0.000329327 0.801664 0.8 1 True 1000 100 0.000349735 0.000131225 0.375212 0.8 1 True 1000 1000 0.00139219 0.000677065 0.48633 0.8 1 False 100 100 0.000214079 0.000107486 0.502085 0.8 1 False 100 1000 0.000413746 0.000323244 0.781261 0.8 1 False 1000 100 0.000348983 0.000131983 0.378193 0.8 1 False 1000 1000 0.00136296 0.000685325 0.50282 0.8 10 True 100 100 0.000229159 0.00011825 0.516017 0.8 10 True 100 1000 0.000498845 0.000532618 1.0677 0.8 10 True 1000 100 0.000383126 0.00029935 0.781336 0.8 10 True 1000 1000 0.00162866 0.00307312 1.88689 0.8 10 False 100 100 0.000230783 0.000124958 0.541452 0.8 10 False 100 1000 0.000493393 0.000550654 1.11606 0.8 10 False 1000 100 0.000377167 0.000298581 0.791642 0.8 10 False 1000 1000 0.00165795 0.00305103 1.84024 0.8 25 True 100 100 0.000233496 0.000175241 0.75051 0.8 25 True 100 1000 0.00055654 0.00102658 1.84458 0.8 25 True 1000 100 0.000463814 0.000783267 1.68875 0.8 25 True 1000 1000 0.00186905 0.00755344 4.04132 0.8 25 False 100 100 0.000240243 0.000175047 0.728625 0.8 25 False 100 1000 0.000578102 0.00104499 1.80763 0.8 25 False 1000 100 0.000485113 0.000776849 1.60138 0.8 25 False 1000 1000 0.00211448 0.00752736 3.55992 ``` Args: sp_a: SparseTensor A, of rank 2. b: A dense Matrix with the same dtype as sp_a. adjoint_a: Use the adjoint of A in the matrix multiply. If A is complex, this is transpose(conj(A)). Otherwise it's transpose(A). adjoint_b: Use the adjoint of B in the matrix multiply. If B is complex, this is transpose(conj(B)). Otherwise it's transpose(B). 
    name: A name prefix for the returned tensors (optional)

  Returns:
    A dense matrix (pseudo-code in dense np.matrix notation):
      `A = A.H if adjoint_a else A`
      `B = B.H if adjoint_b else B`
      `return A*B`
  """
  # pylint: enable=line-too-long
  sp_a = _convert_to_sparse_tensor(sp_a)
  with ops.name_scope(name, "SparseTensorDenseMatMul",
                      [sp_a.indices, sp_a.values, b]) as name:
    b = ops.convert_to_tensor(b, name="b")
    return gen_sparse_ops.sparse_tensor_dense_mat_mul(
        a_indices=sp_a.indices,
        a_values=sp_a.values,
        a_shape=sp_a.dense_shape,
        b=b,
        adjoint_a=adjoint_a,
        adjoint_b=adjoint_b)


@tf_export("sparse.softmax", v1=["sparse.softmax", "sparse_softmax"])
@deprecation.deprecated_endpoints("sparse_softmax")
def sparse_softmax(sp_input, name=None):
  """Applies softmax to a batched N-D `SparseTensor`.

  The inputs represent an N-D SparseTensor with logical shape `[..., B, C]`
  (where `N >= 2`), and with indices sorted in the canonical lexicographic
  order.

  This op is equivalent to applying the normal `tf.nn.softmax()` to each
  innermost logical submatrix with shape `[B, C]`, but with the catch that *the
  implicitly zero elements do not participate*.  Specifically, the algorithm is
  equivalent to:

    (1) Applies `tf.nn.softmax()` to a densified view of each innermost
        submatrix with shape `[B, C]`, along the size-C dimension;
    (2) Masks out the original implicitly-zero locations;
    (3) Renormalizes the remaining elements.

  Hence, the `SparseTensor` result has exactly the same non-zero indices and
  shape.

  Example:

  ```python
  # First batch:
  # [?   e.]
  # [1.  ? ]
  # Second batch:
  # [e   ? ]
  # [e   e ]
  shape = [2, 2, 2]  # 3-D SparseTensor
  values = np.asarray([[[0., np.e], [1., 0.]], [[np.e, 0.], [np.e, np.e]]])
  indices = np.vstack(np.where(values)).astype(np.int64).T

  result = tf.sparse_softmax(tf.SparseTensor(indices, values, shape))
  # ...returning a 3-D SparseTensor, equivalent to:
  # [?   1.]     [1    ?]
  # [1.  ? ] and [.5  .5]
  # where ? means implicitly zero.
  ```

  Args:
    sp_input: N-D `SparseTensor`, where `N >= 2`.
    name: optional name of the operation.

  Returns:
    output: N-D `SparseTensor` representing the results.
  """
  with ops.name_scope(name, "SparseSoftmax",
                      [sp_input.indices, sp_input.values]) as name:
    # The kernel returns only new values; indices and dense shape are reused
    # unchanged since softmax does not move or add nonzero entries.
    out_vals = gen_sparse_ops.sparse_softmax(sp_input.indices, sp_input.values,
                                             sp_input.dense_shape)
    return sparse_tensor.SparseTensor(sp_input.indices, out_vals,
                                      sp_input.dense_shape)


@tf_export("sparse.maximum", v1=["sparse.maximum", "sparse_maximum"])
@deprecation.deprecated_endpoints("sparse_maximum")
def sparse_maximum(sp_a, sp_b, name=None):
  """Returns the element-wise max of two SparseTensors.

  Assumes the two SparseTensors have the same shape, i.e., no broadcasting.

  Example:

  ```python
  sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7])
  sp_one = sparse_tensor.SparseTensor([[1]], [1], [7])
  res = tf.sparse_maximum(sp_zero, sp_one).eval()
  # "res" should be equal to SparseTensor([[0], [1]], [0, 1], [7]).
  ```

  Args:
    sp_a: a `SparseTensor` operand whose dtype is real, and indices
      lexicographically ordered.
    sp_b: the other `SparseTensor` operand with the same requirements (and the
      same shape).
    name: optional name of the operation.

  Returns:
    output: the output SparseTensor.
  """
  with ops.name_scope(
      name, "SparseSparseMaximum",
      [sp_a.indices, sp_a.values, sp_b.indices, sp_b.values]) as name:
    out_indices, out_values = gen_sparse_ops.sparse_sparse_maximum(
        sp_a.indices,
        sp_a.values,
        sp_a.dense_shape,
        sp_b.indices,
        sp_b.values,
        sp_b.dense_shape,
        name=name)
  # Both operands share the same shape by contract, so sp_a's shape is used.
  return sparse_tensor.SparseTensor(out_indices, out_values, sp_a.dense_shape)


@tf_export("sparse.minimum", v1=["sparse.minimum", "sparse_minimum"])
@deprecation.deprecated_endpoints("sparse_minimum")
def sparse_minimum(sp_a, sp_b, name=None):
  """Returns the element-wise min of two SparseTensors.

  Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
  Example:

  ```python
  sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7])
  sp_one = sparse_tensor.SparseTensor([[1]], [1], [7])
  res = tf.sparse_minimum(sp_zero, sp_one).eval()
  # "res" should be equal to SparseTensor([[0], [1]], [0, 0], [7]).
  ```

  Args:
    sp_a: a `SparseTensor` operand whose dtype is real, and indices
      lexicographically ordered.
    sp_b: the other `SparseTensor` operand with the same requirements (and the
      same shape).
    name: optional name of the operation.

  Returns:
    output: the output SparseTensor.
  """
  with ops.name_scope(
      name, "SparseSparseMinimum",
      [sp_a.indices, sp_a.values, sp_b.indices, sp_b.values]) as name:
    out_indices, out_values = gen_sparse_ops.sparse_sparse_minimum(
        sp_a.indices,
        sp_a.values,
        sp_a.dense_shape,
        sp_b.indices,
        sp_b.values,
        sp_b.dense_shape,
        name=name)
  # Both operands share the same shape by contract, so sp_a's shape is used.
  return sparse_tensor.SparseTensor(out_indices, out_values, sp_a.dense_shape)


@tf_export("sparse.transpose", v1=["sparse.transpose", "sparse_transpose"])
@deprecation.deprecated_endpoints("sparse_transpose")
def sparse_transpose(sp_input, perm=None, name=None):
  """Transposes a `SparseTensor`

  The returned tensor's dimension i will correspond to the input dimension
  `perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is
  the rank of the input tensor. Hence by default, this operation performs a
  regular matrix transpose on 2-D input Tensors.

  For example, if `sp_input` has shape `[4, 5]` and `indices` / `values`:

      [0, 3]: b
      [0, 1]: a
      [3, 1]: d
      [2, 0]: c

  then the output will be a `SparseTensor` of shape `[5, 4]` and
  `indices` / `values`:

      [0, 2]: c
      [1, 0]: a
      [1, 3]: d
      [3, 0]: b

  Args:
    sp_input: The input `SparseTensor`.
    perm: A permutation of the dimensions of `sp_input`.
    name: A name prefix for the returned tensors (optional)

  Returns:
    A transposed `SparseTensor`.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  with ops.name_scope(name, "SparseTranspose", [sp_input]) as name:
    if perm is None:
      # Default permutation reverses the dimensions: (n-1, ..., 1, 0).
      rank = array_ops.rank(sp_input)
      perm = (rank - 1) - math_ops.range(0, rank, 1)

    indices = sp_input.indices
    # Permute the columns of the [nnz, rank] indices matrix according to perm.
    transposed_indices = array_ops.transpose(
        array_ops.gather(array_ops.transpose(indices), perm))

    perm_ = tensor_util.constant_value(ops.convert_to_tensor(perm))
    if perm_ is not None and sp_input.get_shape().is_fully_defined():
      # Static path: compute the permuted dense shape as a Python list.
      old_shape_ = sp_input.get_shape().as_list()
      transposed_dense_shape = list(old_shape_)  # Copy.
      for i, p in enumerate(perm_):
        transposed_dense_shape[i] = old_shape_[p]
    else:
      # Dynamic path: permute the dense shape tensor at run time.
      dense_shape = sp_input.dense_shape
      transposed_dense_shape = array_ops.gather(dense_shape, perm)
    transposed_st = sparse_tensor.SparseTensor(
        transposed_indices, sp_input.values, transposed_dense_shape)
    # Permuting index columns breaks lexicographic ordering; restore it.
    transposed_st = sparse_reorder(transposed_st)
    return transposed_st


def _add_sparse_to_tensors_map(sp_input,
                               container=None,
                               shared_name=None,
                               name=None):
  """Add a `SparseTensor` to a `SparseTensorsMap` and return its handle.

  Args:
    sp_input: The input `SparseTensor`.
    container: The container for the underlying `SparseTensorsMap` (optional).
    shared_name: The shared name for the underlying `SparseTensorsMap`
      (optional, defaults to the name of the newly created op).
    name: A name prefix for the returned tensors (optional).

  Returns:
    A string 1-vector (1D `Tensor`), with the single element representing the
    a unique handle to a `SparseTensor` stored by the `SparseTensorMap`
    underlying this op.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  sp_input = _convert_to_sparse_tensor(sp_input)

  return gen_sparse_ops.add_sparse_to_tensors_map(
      sp_input.indices,
      sp_input.values,
      sp_input.dense_shape,
      container=container,
      shared_name=shared_name,
      name=name)


def _add_many_sparse_to_tensors_map(sp_input,
                                    container=None,
                                    shared_name=None,
                                    name=None):
  """Add a minibatch `SparseTensor` to a `SparseTensorsMap`, return `N` handles.
The `SparseTensor` must have rank `R` greater than 1, and the first dimension is treated as the minibatch dimension. Elements of the `SparseTensor` must be sorted in increasing order of this first dimension. The serialized `SparseTensor` objects going into each row of the output `Tensor` will have rank `R-1`. The minibatch size `N` is extracted from `sparse_shape[0]`. Args: sp_input: The input rank `R` `SparseTensor`. container: The container for the underlying `SparseTensorsMap` (optional). shared_name: The shared name for the underlying `SparseTensorsMap` (optional, defaults to the name of the newly created op). name: A name prefix for the returned tensors (optional). Returns: A string matrix (2-D `Tensor`) with `N` rows and `1` column. Each row represents a unique handle to a `SparseTensor` stored by the `SparseTensorMap` underlying this op. Raises: TypeError: If `sp_input` is not a `SparseTensor`. """ sp_input = _convert_to_sparse_tensor(sp_input) return gen_sparse_ops.add_many_sparse_to_tensors_map( sp_input.indices, sp_input.values, sp_input.dense_shape, container=container, shared_name=shared_name, name=name) def _take_many_sparse_from_tensors_map(sparse_map_op, sparse_handles, rank=None, name=None): """Read `SparseTensors` from a `SparseTensorsMap` and concatenate them. The input `sparse_handles` must be a string matrix of shape `[N, 1]` where `N` is the minibatch size and the rows correspond to packed outputs of `add_sparse_to_tensors_map`. The ranks of the original `SparseTensor` objects must all match. When the final `SparseTensor` is created, it has rank one higher than the ranks of the incoming `SparseTensor` objects (they have been concatenated along a new row dimension). The output `SparseTensor` object's shape values for all dimensions but the first are the max across the input `SparseTensor` objects' shape values for the corresponding dimensions. Its first shape value is `N`, the minibatch size. 
The input `SparseTensor` objects' indices are assumed ordered in standard lexicographic order. If this is not the case, after this step run `sparse.reorder` to restore index ordering. For example, if the serialized input is a `[2, 3]` matrix representing two original `SparseTensor` objects: index = [ 0] [10] [20] values = [1, 2, 3] shape = [50] and index = [ 2] [10] values = [4, 5] shape = [30] then the final deserialized `SparseTensor` will be: index = [0 0] [0 10] [0 20] [1 2] [1 10] values = [1, 2, 3, 4, 5] shape = [2 50] Args: sparse_map_op: The `Operation` that created the original handles. Usually this is, e.g., `add_sparse_to_tensors_map(...).op`. sparse_handles: 2-D `Tensor` of type `string` of shape `[N, 1]`. The serialized and packed `SparseTensor` objects. rank: (optional) Python int, the rank of the `SparseTensor` objects. name: A name prefix for the returned tensors (optional) Returns: A `SparseTensor` representing the deserialized `SparseTensor`s, concatenated along the `SparseTensor`s' first dimension. All of the serialized `SparseTensor`s must have had the same rank and type. """ if not isinstance(sparse_map_op, ops.Operation): raise TypeError("sparse_map_op be an Operation") if sparse_map_op.type not in ("AddSparseToTensorsMap", "AddManySparseToTensorsMap"): raise TypeError( "sparse_map_op must be one of AddSparseToTensorsMap or " "AddSparseToTensorsMap. Instead, found `%s`." % sparse_map_op.type) with ops.colocate_with(sparse_map_op): shared_name = sparse_map_op.get_attr("shared_name") or sparse_map_op.name output_indices, output_values, output_shape = ( gen_sparse_ops.take_many_sparse_from_tensors_map( sparse_handles, dtype=sparse_map_op.get_attr("T"), container=sparse_map_op.get_attr("container"), shared_name=shared_name, name=name)) # Feed rank data back in, if available output_indices.set_shape([None, rank]) output_shape.set_shape([rank]) return sparse_tensor.SparseTensor(output_indices, output_values, output_shape)
codeparrot/github-code-clean
"""Tests for UIAWrapper""" from __future__ import print_function from __future__ import unicode_literals import time import os import sys import collections import unittest import mock import six sys.path.append(".") from pywinauto.windows.application import Application # noqa: E402 from pywinauto.base_application import WindowSpecification # noqa: E402 from pywinauto.sysinfo import is_x64_Python, UIA_support # noqa: E402 from pywinauto.timings import Timings, wait_until # noqa: E402 from pywinauto.actionlogger import ActionLogger # noqa: E402 from pywinauto import Desktop from pywinauto import mouse # noqa: E402 from pywinauto import WindowNotFoundError # noqa: E402 if UIA_support: import comtypes import pywinauto.windows.uia_defines as uia_defs import pywinauto.controls.uia_controls as uia_ctls from pywinauto.controls.uiawrapper import UIAWrapper from pywinauto.windows.uia_element_info import UIAElementInfo wpf_samples_folder = os.path.join( os.path.dirname(__file__), r"..\..\apps\WPF_samples") if is_x64_Python(): wpf_samples_folder = os.path.join(wpf_samples_folder, 'x64') wpf_app_1 = os.path.join(wpf_samples_folder, u"WpfApplication1.exe") mfc_samples_folder = os.path.join( os.path.dirname(__file__), r"..\..\apps\MFC_samples") if is_x64_Python(): mfc_samples_folder = os.path.join(mfc_samples_folder, 'x64') mfc_app_rebar_test = os.path.join(mfc_samples_folder, u"RebarTest.exe") winforms_folder = os.path.join( os.path.dirname(__file__), r"..\..\apps\WinForms_samples") if is_x64_Python(): winforms_folder = os.path.join(winforms_folder, 'x64') winfoms_app_grid = os.path.join(winforms_folder, u"DataGridView_TestApp.exe") if sys.version_info[:2] >= (3, 6): qt_python_folder = os.path.join( os.path.dirname(__file__), r"..\..\apps\Qt_Python_samples") qt_py_combobox_app = sys.executable + ' ' + os.path.join(qt_python_folder, u"qt5_combobox.py") if UIA_support: def _set_timings(): """Setup timings for UIA related tests""" Timings.defaults() Timings.window_find_timeout = 
20 class UIAWrapperTests(unittest.TestCase): """Unit tests for the UIAWrapper class""" def setUp(self): """Set some data and ensure the application is in the state we want""" _set_timings() mouse.move((-500, 500)) # remove the mouse from the screen to avoid side effects # start the application self.app = Application(backend='uia') self.app = self.app.start(wpf_app_1) self.dlg = self.app.WPFSampleApplication def test_get_active_uia(self): focused_element = self.dlg.get_active() self.assertTrue(type(focused_element) is UIAWrapper or issubclass(type(focused_element), UIAWrapper)) def tearDown(self): """Close the application after tests""" self.app.kill() def test_issue_296(self): """Test handling of disappeared descendants""" wrp = self.dlg.find() with mock.patch.object(wrp.element_info._element, 'FindAll') as mock_findall: mock_findall.side_effect = ValueError("Mocked value error") self.assertEqual([], wrp.descendants()) mock_findall.side_effect = comtypes.COMError(-2147220991, "Mocked COM error", ()) self.assertEqual([], wrp.descendants()) def test_issue_278(self): """Test that statement menu = app.MainWindow.Menu works for 'uia' backend""" menu_spec = self.dlg.Menu self.assertTrue(isinstance(menu_spec, WindowSpecification)) # Also check the app binding self.assertTrue(menu_spec.app, self.app) def test_find_nontop_ctl_by_class_name_and_title(self): """Test getting a non-top control by a class name and a title""" # Look up for a non-top button control with 'Apply' caption self.dlg.wait('ready') caption = 'Apply' wins = self.app.windows(top_level_only=False, class_name='Button', name=caption) # Verify the number of found wrappers self.assertEqual(len(wins), 1) # Verify the caption of the found wrapper self.assertEqual(wins[0].texts()[0], caption) def test_find_top_win_by_class_name_and_title(self): """Test getting a top window by a class name and a title""" # Since the top_level_only is True by default # we don't specify it as a criteria argument 
self.dlg.wait('ready') caption = 'WPF Sample Application' wins = self.app.windows(class_name='Window', name=caption) # Verify the number of found wrappers self.assertEqual(len(wins), 1) # Verify the caption of the found wrapper self.assertEqual(wins[0].texts()[0], caption) def test_class(self): """Test getting the classname of the dialog""" button = self.dlg.by(class_name="Button", name="OK").find() self.assertEqual(button.class_name(), "Button") def test_window_text(self): """Test getting the window Text of the dialog""" label = self.dlg.TestLabel.find() self.assertEqual(label.window_text(), u"TestLabel") self.assertEqual(label.can_be_label, True) def test_control_id(self): """Test getting control ID""" button = self.dlg.by(class_name="Button", name="OK").find() self.assertEqual(button.control_id(), None) def test_runtime_id(self): """Test getting runtime ID""" button = self.dlg.by(class_name="Button", name="OK").find() self.assertNotEqual(button.__hash__(), 0) orig = button.element_info._element.GetRuntimeId exception_err = comtypes.COMError(-2147220991, 'An event was unable to invoke any of the subscribers', ()) button.element_info._element.GetRuntimeId = mock.Mock(side_effect=exception_err) self.assertEqual(button.__hash__(), 0) button.element_info._element.GetRuntimeId = orig # restore the original method def test_automation_id(self): """Test getting automation ID""" alpha_toolbar = self.dlg.by(name="Alpha", control_type="ToolBar") button = alpha_toolbar.by(control_type="Button", auto_id="OverflowButton").find() self.assertEqual(button.automation_id(), "OverflowButton") def test_access_key(self): """Test find element by access key""" file_menu = self.dlg.by(access_key="Alt+F").find() self.assertEqual("MenuItem", file_menu.element_info.control_type) self.assertEqual("File", file_menu.element_info.name) def test_legacy_shortcut(self): """Test find element by keyboard shortcut value from legacy pattern""" file_menu = self.dlg.by(legacy_shortcut="Alt+F").find() 
self.assertEqual("MenuItem", file_menu.element_info.control_type) self.assertEqual("File", file_menu.element_info.name) def test_value(self): """Test find element by value""" edit = self.dlg.by(auto_id="edit1").find() edit.set_edit_text("Test string") edit_by_value = self.dlg.by(value="Test string").find() self.assertEqual("edit1", edit_by_value.element_info.auto_id) def test_legacy_value(self): """Test find element by value from legacy pattern""" edit = self.dlg.by(auto_id="edit1").find() edit.set_edit_text("Test string") edit_by_value = self.dlg.by(legacy_value="Test string").find() self.assertEqual("edit1", edit_by_value.element_info.auto_id) def test_legacy_action(self): """Test find element by default action name from legacy pattern""" combobox = self.dlg.by(legacy_action="Expand").find() self.assertEqual("ComboBox", combobox.element_info.control_type) self.assertEqual(2, combobox.item_count()) def test_legacy_descr(self): """Test find element by description from legacy pattern""" close_button = self.dlg.by(legacy_descr="Closes the window").find() self.assertEqual("Button", close_button.element_info.control_type) self.assertEqual("Close", close_button.element_info.legacy_name) def test_legacy_help_not_available(self): """Test return empty string if LegacyIAccessible.Help value is not available""" close_button = self.dlg.by(control_type="TitleBar").find() self.assertEqual("", close_button.element_info.legacy_help) def test_is_visible(self): """Test is_visible method of a control""" button = self.dlg.by(class_name="Button", name="OK").find() self.assertEqual(button.is_visible(), True) def test_is_enabled(self): """Test is_enabled method of a control""" button = self.dlg.by(class_name="Button", name="OK").find() self.assertEqual(button.is_enabled(), True) def test_process_id(self): """Test process_id method of a control""" button = self.dlg.by(class_name="Button", name="OK").find() self.assertEqual(button.process_id(), self.dlg.process_id()) 
self.assertNotEqual(button.process_id(), 0) def test_is_dialog(self): """Test is_dialog method of a control""" button = self.dlg.by(class_name="Button", name="OK").find() self.assertEqual(button.is_dialog(), False) self.assertEqual(self.dlg.is_dialog(), True) def test_move_window(self): """Test move_window without any parameters""" # move_window with default parameters prevRect = self.dlg.rectangle() self.dlg.move_window() self.assertEqual(prevRect, self.dlg.rectangle()) # move_window call for a not supported control button = self.dlg.by(class_name="Button", name="OK") self.assertRaises(AttributeError, button.move_window) # Make RECT stub to avoid import win32structures Rect = collections.namedtuple('Rect', 'left top right bottom') prev_rect = self.dlg.rectangle() new_rect = Rect._make([i + 5 for i in prev_rect]) self.dlg.move_window( new_rect.left, new_rect.top, new_rect.right - new_rect.left, new_rect.bottom - new_rect.top ) time.sleep(0.1) logger = ActionLogger() logger.log("prev_rect = %s", prev_rect) logger.log("new_rect = %s", new_rect) logger.log("self.dlg.rectangle() = %s", self.dlg.rectangle()) self.assertEqual(self.dlg.rectangle(), new_rect) self.dlg.move_window(prev_rect) self.assertEqual(self.dlg.rectangle(), prev_rect) def test_close(self): """Test close method of a control""" wrp = self.dlg.find() # mock a failure in get_elem_interface() method only for 'Window' param orig_get_elem_interface = uia_defs.get_elem_interface with mock.patch.object(uia_defs, 'get_elem_interface') as mock_get_iface: def side_effect(elm_info, ptrn_name): if ptrn_name == "Window": raise uia_defs.NoPatternInterfaceError() else: return orig_get_elem_interface(elm_info, ptrn_name) mock_get_iface.side_effect=side_effect # also mock a failure in type_keys() method with mock.patch.object(UIAWrapper, 'type_keys') as mock_type_keys: exception_err = comtypes.COMError(-2147220991, 'An event was unable to invoke any of the subscribers', ()) mock_type_keys.side_effect = exception_err 
self.assertRaises(WindowNotFoundError, self.dlg.close) self.dlg.close() self.assertEqual(self.dlg.exists(), False) def test_parent(self): """Test getting a parent of a control""" button = self.dlg.Alpha.find() self.assertEqual(button.parent(), self.dlg.find()) def test_top_level_parent(self): """Test getting a top-level parent of a control""" button = self.dlg.by(class_name="Button", name="OK").find() self.assertEqual(button.top_level_parent(), self.dlg.find()) def test_texts(self): """Test getting texts of a control""" self.assertEqual(self.dlg.texts(), ['WPF Sample Application']) def test_children(self): """Test getting children of a control""" button = self.dlg.by(class_name="Button", name="OK").find() self.assertEqual(len(button.children()), 1) self.assertEqual(button.children()[0].class_name(), "TextBlock") def test_children_generator(self): """Test iterating children of a control""" button = self.dlg.by(class_name="Button", name="OK").find() children = [child for child in button.iter_children()] self.assertEqual(len(children), 1) self.assertEqual(children[0].class_name(), "TextBlock") def test_descendants(self): """Test iterating descendants of a control""" toolbar = self.dlg.by(name="Alpha", control_type="ToolBar").find() descendants = toolbar.descendants() self.assertEqual(len(descendants), 7) def test_descendants_generator(self): toolbar = self.dlg.by(name="Alpha", control_type="ToolBar").find() descendants = [desc for desc in toolbar.iter_descendants()] self.assertSequenceEqual(toolbar.descendants(), descendants) def test_is_child(self): """Test is_child method of a control""" button = self.dlg.Alpha.find() self.assertEqual(button.is_child(self.dlg.find()), True) def test_equals(self): """Test controls comparisons""" button = self.dlg.by(class_name="Button", name="OK").find() self.assertNotEqual(button, self.dlg.find()) self.assertEqual(button, button.element_info) self.assertEqual(button, button) @unittest.skip("To be solved with issue #790") def 
test_scroll(self): """Test scroll""" # Check an exception on a non-scrollable control button = self.dlg.by(class_name="Button", name="OK").find() six.assertRaisesRegex(self, AttributeError, "not scrollable", button.scroll, "left", "page") # Check an exception on a control without horizontal scroll bar tab = self.dlg.Tree_and_List_Views.set_focus() listview = tab.children(class_name=u"ListView")[0] six.assertRaisesRegex(self, AttributeError, "not horizontally scrollable", listview.scroll, "right", "line") # Check exceptions on wrong arguments self.assertRaises(ValueError, listview.scroll, "bbbb", "line") self.assertRaises(ValueError, listview.scroll, "up", "aaaa") # Store a cell position cell = listview.cell(3, 0) orig_rect = cell.rectangle() self.assertEqual(orig_rect.left > 0, True) # Trigger a horizontal scroll bar on the control hdr = listview.get_header_control() hdr_itm = hdr.children()[1] trf = hdr_itm.iface_transform trf.resize(1000, 20) listview.scroll("right", "page", 2) self.assertEqual(cell.rectangle().left < 0, True) # Check an exception on a control without vertical scroll bar tab = self.dlg.ListBox_and_Grid.set_focus() datagrid = tab.children(class_name=u"DataGrid")[0] six.assertRaisesRegex(self, AttributeError, "not vertically scrollable", datagrid.scroll, "down", "page") # def testVerifyActionable(self): # self.assertRaises() # def testVerifyEnabled(self): # self.assertRaises() # def testVerifyVisible(self): # self.assertRaises() def test_is_keyboard_focusable(self): """Test is_keyboard focusable method of several controls""" edit = self.dlg.TestLabelEdit.find() label = self.dlg.TestLabel.find() button = self.dlg.by(class_name="Button", name="OK").find() self.assertEqual(button.is_keyboard_focusable(), True) self.assertEqual(edit.is_keyboard_focusable(), True) self.assertEqual(label.is_keyboard_focusable(), False) def test_set_focus(self): """Test setting a keyboard focus on a control""" edit = self.dlg.TestLabelEdit.find() edit.set_focus() 
self.assertEqual(edit.has_keyboard_focus(), True) def test_get_active_desktop_uia(self): focused_element = Desktop(backend="uia").get_active() self.assertTrue(type(focused_element) is UIAWrapper or issubclass(type(focused_element), UIAWrapper)) def test_type_keys(self): """Test sending key types to a control""" edit = self.dlg.TestLabelEdit.find() edit.type_keys("t") self.assertEqual(edit.window_text(), "t") edit.type_keys("e") self.assertEqual(edit.window_text(), "te") edit.type_keys("s") self.assertEqual(edit.window_text(), "tes") edit.type_keys("t") self.assertEqual(edit.window_text(), "test") edit.type_keys("T") self.assertEqual(edit.window_text(), "testT") edit.type_keys("y") self.assertEqual(edit.window_text(), "testTy") def test_no_pattern_interface_error(self): """Test a query interface exception handling""" button = self.dlg.by(class_name="Button", name="OK").find() elem = button.element_info.element self.assertRaises( uia_defs.NoPatternInterfaceError, uia_defs.get_elem_interface, elem, "Selection", ) def test_minimize_maximize(self): """Test window minimize/maximize operations""" wrp = self.dlg.minimize() self.dlg.wait_not('active') self.assertEqual(wrp.is_minimized(), True) wrp.maximize() self.dlg.wait('active') self.assertEqual(wrp.is_maximized(), True) wrp.minimize() self.dlg.wait_not('active') wrp.restore() self.dlg.wait('active') self.assertEqual(wrp.is_normal(), True) def test_get_properties(self): """Test getting writeble properties of a control""" uia_props = set(['class_name', 'friendly_class_name', 'texts', 'control_id', 'rectangle', 'is_visible', 'is_enabled', 'control_count', 'is_keyboard_focusable', 'has_keyboard_focus', 'selection_indices', 'automation_id', ]) edit = self.dlg.TestLabelEdit.find() props = set(edit.get_properties().keys()) self.assertEqual(props, uia_props) # def test_draw_outline(self): # """Test the outline was drawn.""" # # not sure why, but this extra call makes the test stable # self.dlg.draw_outline() # # # outline 
control # button = self.dlg.OK.wrapper_object() # button.draw_outline() # img1 = button.capture_as_image() # self.assertEqual(img1.getpixel((0, 0)), (0, 255, 0)) # green # # # outline window # self.dlg.draw_outline(colour="blue") # img2 = self.dlg.capture_as_image() # self.assertEqual(img2.getpixel((0, 0)), (0, 0, 255)) # blue def test_get_legacy_properties(self): """Test getting legacy properties of a control""" expected_properties = {'Value': '', 'DefaultAction': 'Press', 'Description': '', 'Name': 'OK', 'Help': '', 'ChildId': 0, 'KeyboardShortcut': '', 'State': 1048576, 'Role': 43} button_wrp = self.dlg.by(class_name="Button", name="OK").find() actual_properties = button_wrp.legacy_properties() self.assertEqual(actual_properties, expected_properties) def test_capture_as_image_multi_monitor(self): with mock.patch('win32api.EnumDisplayMonitors') as mon_device: mon_device.return_value = (1, 2) rect = self.dlg.rectangle() expected = (rect.width(), rect.height()) result = self.dlg.capture_as_image().size self.assertEqual(expected, result) def test_set_value(self): """Test for UIAWrapper.set_value""" edit = self.dlg.by(control_type='Edit', auto_id='edit1').find() self.assertEqual(edit.get_value(), '') edit.set_value('test') self.assertEqual(edit.get_value(), 'test') class UIAWrapperRawViewWalkerTests(UIAWrapperTests): """Unit tests for the UIAWrapper class with enabled RawViewWalker""" def setUp(self): self.default_use_raw_view_walker = UIAElementInfo.use_raw_view_walker UIAElementInfo.use_raw_view_walker = True super(UIAWrapperRawViewWalkerTests, self).setUp() def tearDown(self): UIAElementInfo.use_raw_view_walker = self.default_use_raw_view_walker super(UIAWrapperRawViewWalkerTests, self).tearDown() def test_issue_296(self): """Test handling of disappeared descendants""" wrp = self.dlg.wrapper_object() with mock.patch.object(uia_defs.IUIA().raw_tree_walker, 'GetFirstChildElement') as mock_get_first_child: mock_get_first_child.side_effect = ValueError("Mocked value 
error") self.assertEqual([], wrp.descendants()) mock_get_first_child.side_effect = comtypes.COMError(-2147220991, "Mocked COM error", ()) self.assertEqual([], wrp.descendants()) class UIAWrapperMouseTests(unittest.TestCase): """Unit tests for mouse actions of the UIAWrapper class""" def setUp(self): """Set some data and ensure the application is in the state we want""" _set_timings() self.app = Application(backend='uia') self.app = self.app.start(wpf_app_1) dlg = self.app.WPFSampleApplication self.button = dlg.by(class_name="Button", name="OK").find() self.label = dlg.by(class_name="Text", name="TestLabel").find() self.app.wait_cpu_usage_lower(threshold=1.5, timeout=30, usage_interval=1.0) def tearDown(self): """Close the application after tests""" self.app.kill() # def test_click(self): # pass def test_click_input(self): """Test click_input method of a control""" self.button.click_input() self.assertEqual(self.label.window_text(), "LeftClick") # def test_double_click(self): # pass def test_double_click_input(self): """Test double_click_input method of a control""" self.button.double_click_input() self.assertEqual(self.label.window_text(), "DoubleClick") # def test_right_click(self): # pass def test_right_click_input(self): """Test right_click_input method of a control""" self.button.right_click_input() self.assertEqual(self.label.window_text(), "RightClick") # def test_press_move_release(self): # pass class UiaControlsTests(unittest.TestCase): """Unit tests for the UIA control wrappers""" def setUp(self): """Set some data and ensure the application is in the state we want""" _set_timings() # start the application app = Application(backend='uia') self.app = app.start(wpf_app_1) self.dlg = self.app.WPFSampleApplication def tearDown(self): """Close the application after tests""" self.app.kill() def test_pretty_print(self): """Test __str__ and __repr__ methods for UIA based controls""" if six.PY3: assert_regex = self.assertRegex else: assert_regex = 
self.assertRegexpMatches wrp = self.dlg.OK.find() assert_regex(wrp.__str__(), "^uia_controls\.ButtonWrapper - 'OK', Button$") assert_regex(wrp.__repr__(), "^<uia_controls\.ButtonWrapper - 'OK', Button, [0-9-]+>$") wrp = self.dlg.CheckBox.find() assert_regex(wrp.__str__(), "^uia_controls\.ButtonWrapper - 'CheckBox', CheckBox$", ) assert_regex(wrp.__repr__(), "^<uia_controls\.ButtonWrapper - 'CheckBox', CheckBox, [0-9-]+>$", ) wrp = self.dlg.by(class_name="TextBox").find() assert_regex(wrp.__str__(), "^uia_controls\.EditWrapper - '', Edit$") assert_regex(wrp.__repr__(), "^<uia_controls\.EditWrapper - '', Edit, [0-9-]+>$") assert_regex(wrp.element_info.__str__(), "^uia_element_info.UIAElementInfo - '', TextBox$") assert_regex(wrp.element_info.__repr__(), "^<uia_element_info.UIAElementInfo - '', TextBox, None>$") wrp = self.dlg.TabControl.find() assert_regex(wrp.__str__(), "^uia_controls\.TabControlWrapper - '', TabControl$") assert_regex(wrp.__repr__(), "^<uia_controls\.TabControlWrapper - '', TabControl, [0-9-]+>$") wrp = self.dlg.MenuBar.find() assert_regex(wrp.__str__(), "^uia_controls\.MenuWrapper - 'System', Menu$") assert_regex(wrp.__repr__(), "^<uia_controls\.MenuWrapper - 'System', Menu, [0-9-]+>$") wrp = self.dlg.Slider.find() assert_regex(wrp.__str__(), "^uia_controls\.SliderWrapper - '', Slider$") assert_regex(wrp.__repr__(), "^<uia_controls\.SliderWrapper - '', Slider, [0-9-]+>$") wrp = self.dlg.TestLabel.find() assert_regex(wrp.__str__(), "^uia_controls.StaticWrapper - 'TestLabel', Static$") assert_regex(wrp.__repr__(), "^<uia_controls.StaticWrapper - 'TestLabel', Static, [0-9-]+>$") wrp = self.dlg.find() assert_regex(wrp.__str__(), "^uia_controls\.WindowWrapper - 'WPF Sample Application', Dialog$") assert_regex(wrp.__repr__(), "^<uia_controls\.WindowWrapper - 'WPF Sample Application', Dialog, [0-9-]+>$") # ElementInfo.__str__ assert_regex(wrp.element_info.__str__(), "^uia_element_info.UIAElementInfo - 'WPF Sample Application', Window$") 
assert_regex(wrp.element_info.__repr__(), "^<uia_element_info.UIAElementInfo - 'WPF Sample Application', Window, [0-9-]+>$") # mock a failure in window_text() method orig = wrp.window_text wrp.window_text = mock.Mock(return_value="") # empty text assert_regex(wrp.__str__(), "^uia_controls\.WindowWrapper - '', Dialog$") assert_regex(wrp.__repr__(), "^<uia_controls\.WindowWrapper - '', Dialog, [0-9-]+>$") wrp.window_text.return_value = u'\xd1\xc1\\\xa1\xb1\ua000' # unicode string assert_regex(wrp.__str__(), "^uia_controls\.WindowWrapper - '.+', Dialog$") wrp.window_text = orig # restore the original method # mock a failure in element_info.name property (it's based on _get_name()) orig = wrp.element_info._get_name wrp.element_info._get_name = mock.Mock(return_value=None) assert_regex(wrp.element_info.__str__(), "^uia_element_info\.UIAElementInfo - 'None', Window$") assert_regex(wrp.element_info.__repr__(), "^<uia_element_info\.UIAElementInfo - 'None', Window, [0-9-]+>$") wrp.element_info._get_name = orig def test_pretty_print_encode_error(self): """Test __repr__ method for BaseWrapper with specific Unicode text (issue #594)""" wrp = self.dlg.find() wrp.window_text = mock.Mock(return_value=u'\xb7') print(wrp) print(repr(wrp)) def test_friendly_class_names(self): """Test getting friendly class names of common controls""" button = self.dlg.OK.find() self.assertEqual(button.friendly_class_name(), "Button") friendly_name = self.dlg.CheckBox.friendly_class_name() self.assertEqual(friendly_name, "CheckBox") friendly_name = self.dlg.Apply.friendly_class_name() self.assertEqual(friendly_name, "Button") friendly_name = self.dlg.ToggleMe.friendly_class_name() self.assertEqual(friendly_name, "Button") friendly_name = self.dlg.Yes.friendly_class_name() self.assertEqual(friendly_name, "RadioButton") friendly_name = self.dlg.TabControl.friendly_class_name() self.assertEqual(friendly_name, "TabControl") edit = self.dlg.by(class_name="TextBox").find() 
self.assertEqual(edit.friendly_class_name(), "Edit") slider = self.dlg.Slider.find() self.assertEqual(slider.friendly_class_name(), "Slider") self.assertEqual(self.dlg.MenuBar.friendly_class_name(), "Menu") self.assertEqual(self.dlg.Toolbar.friendly_class_name(), "Toolbar") # Switch tab view tab_item_wrp = self.dlg.TreeAndListViews.set_focus() ctrl = tab_item_wrp.children(control_type="DataGrid")[0] self.assertEqual(ctrl.friendly_class_name(), "ListView") i = ctrl.get_item(1) self.assertEqual(i.friendly_class_name(), "DataItem") ctrl = tab_item_wrp.children(control_type="Tree")[0] self.assertEqual(ctrl.friendly_class_name(), "TreeView") ti = self.dlg.Tree_and_List_ViewsTabItem.DateElements self.assertEqual(ti.friendly_class_name(), "TreeItem") def test_check_box(self): """Test 'toggle' and 'toggle_state' for the check box control""" # Get a current state of the check box control check_box = self.dlg.CheckBox.find() cur_state = check_box.get_toggle_state() self.assertEqual(cur_state, uia_defs.toggle_state_inderteminate) # Toggle the next state cur_state = check_box.toggle().get_toggle_state() # Get a new state of the check box control self.assertEqual(cur_state, uia_defs.toggle_state_off) def test_toggle_button(self): """Test 'toggle' and 'toggle_state' for the toggle button control""" # Get a current state of the check box control button = self.dlg.ToggleMe.find() cur_state = button.get_toggle_state() self.assertEqual(cur_state, uia_defs.toggle_state_on) # Toggle the next state cur_state = button.toggle().get_toggle_state() # Get a new state of the check box control self.assertEqual(cur_state, uia_defs.toggle_state_off) # Toggle the next state cur_state = button.toggle().get_toggle_state() self.assertEqual(cur_state, uia_defs.toggle_state_on) def test_button_click(self): """Test the click method for the Button control""" label = self.dlg.by(class_name="Text", name="TestLabel").find() self.dlg.Apply.click() self.assertEqual(label.window_text(), "ApplyClick") def 
test_radio_button(self): """Test 'select' and 'is_selected' for the radio button control""" yes = self.dlg.Yes.find() cur_state = yes.is_selected() self.assertEqual(cur_state, False) cur_state = yes.select().is_selected() self.assertEqual(cur_state, True) no = self.dlg.No.find() cur_state = no.click().is_selected() self.assertEqual(cur_state, True) def test_combobox_texts(self): """Test items texts for the combo box control""" # The ComboBox on the sample app has following items: # 0. Combo Item 1 # 1. Combo Item 2 ref_texts = ['Combo Item 1', 'Combo Item 2'] combo_box = self.dlg.ComboBox.find() self.assertEqual(combo_box.item_count(), len(ref_texts)) for t in combo_box.texts(): self.assertEqual((t in ref_texts), True) # Mock a 0 pointer to COM element combo_box.iface_item_container.FindItemByProperty = mock.Mock(return_value=0) self.assertEqual(combo_box.texts(), ref_texts) # Mock a combobox without "ItemContainer" pattern combo_box.iface_item_container.FindItemByProperty = mock.Mock(side_effect=uia_defs.NoPatternInterfaceError()) self.assertEqual(combo_box.texts(), ref_texts) # Mock a combobox without "ExpandCollapse" pattern # Expect empty texts combo_box.iface_expand_collapse.Expand = mock.Mock(side_effect=uia_defs.NoPatternInterfaceError()) self.assertEqual(combo_box.texts(), []) def test_combobox_select(self): """Test select related methods for the combo box control""" combo_box = self.dlg.ComboBox.find() # Verify combobox properties and an initial state self.assertEqual(combo_box.can_select_multiple(), 0) self.assertEqual(combo_box.is_selection_required(), False) self.assertEqual(len(combo_box.get_selection()), 0) # The ComboBox on the sample app has following items: # 0. Combo Item 1 # 1. 
Combo Item 2 combo_box.select(0) self.assertEqual(combo_box.selected_text(), 'Combo Item 1') self.assertEqual(combo_box.selected_index(), 0) collapsed = combo_box.is_collapsed() self.assertEqual(collapsed, True) combo_box.select(1) self.assertEqual(combo_box.selected_text(), 'Combo Item 2') self.assertEqual(combo_box.selected_index(), 1) combo_box.select('Combo Item 1') self.assertEqual(combo_box.selected_text(), 'Combo Item 1') # Try to use unsupported item type as a parameter for select self.assertRaises(ValueError, combo_box.select, 1.2) # Try to select a non-existing item, # verify the selected item didn't change self.assertRaises(IndexError, combo_box.select, 'Combo Item 23455') self.assertEqual(combo_box.selected_text(), 'Combo Item 1') def test_combobox_expand_collapse(self): """Test 'expand' and 'collapse' for the combo box control""" combo_box = self.dlg.ComboBox.find() collapsed = combo_box.is_collapsed() self.assertEqual(collapsed, True) expanded = combo_box.expand().is_expanded() self.assertEqual(expanded, True) collapsed = combo_box.collapse().is_collapsed() self.assertEqual(collapsed, True) class TabControlWrapperTests(unittest.TestCase): """Unit tests for the TabControlWrapper class""" def setUp(self): """Set some data and ensure the application is in the state we want""" _set_timings() # start the application app = Application(backend='uia') app = app.start(wpf_app_1) dlg = app.WPFSampleApplication self.app = app self.ctrl = dlg.by(class_name="TabControl").find() self.texts = [u"General", u"Tree and List Views", u"ListBox and Grid"] def tearDown(self): """Close the application after tests""" self.app.kill() def test_tab_count(self): """Test the tab count in the Tab control""" self.assertEqual(self.ctrl.tab_count(), len(self.texts)) def test_get_selected_tab(self): """Test selecting a tab by index or by name and getting an index of the selected tab""" # Select a tab by name, use chaining to get the index of the selected tab idx = 
self.ctrl.select(u"Tree and List Views").get_selected_tab() self.assertEqual(idx, 1) # Select a tab by index self.ctrl.select(0) self.assertEqual(self.ctrl.get_selected_tab(), 0) def test_texts(self): """Make sure the tabs captions are read correctly""" self.assertEqual(self.ctrl.texts(), self.texts) class EditWrapperTests(unittest.TestCase): """Unit tests for the EditWrapper class""" def setUp(self): """Set some data and ensure the application is in the state we want""" _set_timings() # start the application app = Application(backend='uia') app = app.start(wpf_app_1) self.app = app self.dlg = app.WPFSampleApplication self.edit = self.dlg.by(class_name="TextBox").find() def tearDown(self): """Close the application after tests""" self.app.kill() def test_set_window_text(self): """Test setting text value of control (the text in textbox itself)""" text_to_set = "This test" self.assertRaises(UserWarning, self.edit.set_window_text, text_to_set) self.assertEqual(self.edit.text_block(), text_to_set) self.assertRaises(UserWarning, self.edit.set_window_text, " is done", True) self.assertEqual(self.edit.text_block(), text_to_set + " is done") def test_set_text(self): """Test setting the text of the edit control""" self.edit.set_edit_text("Some text") self.assertEqual(self.edit.text_block(), "Some text") self.edit.set_edit_text(579) self.assertEqual(self.edit.text_block(), "579") self.edit.set_edit_text(333, pos_start=1, pos_end=2) self.assertEqual(self.edit.text_block(), "53339") def test_line_count(self): """Test getting the line count of the edit control""" self.edit.set_edit_text("Here is some text") self.assertEqual(self.edit.line_count(), 1) def test_cet_line(self): """Test getting each line of the edit control""" test_data = "Here is some text" self.edit.set_edit_text(test_data) self.assertEqual(self.edit.get_line(0), test_data) def test_get_value(self): """Test getting value of the edit control""" test_data = "Some value" self.edit.set_edit_text(test_data) 
self.assertEqual(self.edit.get_value(), test_data) def test_text_block(self): """Test getting the text block of the edit control""" test_data = "Here is some text" self.edit.set_edit_text(test_data) self.assertEqual(self.edit.text_block(), test_data) def test_select(self): """Test selecting text in the edit control in various ways""" self.edit.set_edit_text("Some text") self.edit.select(0, 0) self.assertEqual((0, 0), self.edit.selection_indices()) self.edit.select() self.assertEqual((0, 9), self.edit.selection_indices()) self.edit.select(1, 7) self.assertEqual((1, 7), self.edit.selection_indices()) self.edit.select(5, 2) self.assertEqual((2, 5), self.edit.selection_indices()) self.edit.select("me t") self.assertEqual((2, 6), self.edit.selection_indices()) self.assertRaises(RuntimeError, self.edit.select, "123") class SliderWrapperTests(unittest.TestCase): """Unit tests for the EditWrapper class""" def setUp(self): """Set some data and ensure the application is in the state we want""" _set_timings() # start the application app = Application(backend='uia') app = app.start(wpf_app_1) self.app = app self.dlg = app.WPFSampleApplication self.slider = self.dlg.by(class_name="Slider").find() def tearDown(self): """Close the application after tests""" self.app.kill() def test_min_value(self): """Test getting minimum value of the Slider""" self.assertEqual(self.slider.min_value(), 0.0) def test_max_value(self): """Test getting maximum value of the Slider""" self.assertEqual(self.slider.max_value(), 100.0) def test_small_change(self): """Test Getting small change of slider's thumb""" self.assertEqual(self.slider.small_change(), 0.1) def test_large_change(self): """Test Getting large change of slider's thumb""" self.assertEqual(self.slider.large_change(), 1.0) def test_value(self): """Test getting current position of slider's thumb""" self.assertEqual(self.slider.value(), 70.0) def test_set_value(self): """Test setting position of slider's thumb""" self.slider.set_value(24) 
self.assertEqual(self.slider.value(), 24.0) self.slider.set_value(33.3) self.assertEqual(self.slider.value(), 33.3) self.slider.set_value("75.4") self.assertEqual(self.slider.value(), 75.4) self.assertRaises(ValueError, self.slider.set_value, -1) self.assertRaises(ValueError, self.slider.set_value, 102) self.assertRaises(ValueError, self.slider.set_value, [50, ]) class ListViewWrapperTests(unittest.TestCase): """Unit tests for the ListViewWrapper class""" def setUp(self): """Set some data and ensure the application is in the state we want""" _set_timings() # start the application app = Application(backend='uia') app = app.start(wpf_app_1) dlg = app.WPFSampleApplication self.app = app self.listview_tab = dlg.Tree_and_List_Views self.listbox_datagrid_tab = dlg.ListBox_and_Grid self.listview_texts = [ [u"1", u"Tomatoe", u"Red"], [u"2", u"Cucumber", u"Green", ], [u"3", u"Reddish", u"Purple", ], [u"4", u"Cauliflower", u"White", ], [u"5", u"Cupsicum", u"Yellow", ], [u"6", u"Cupsicum", u"Red", ], [u"7", u"Cupsicum", u"Green", ], ] self.listbox_texts = [ [u"TextItem 1", ], [u"TextItem 2", ], [u"ButtonItem", ], [u"CheckItem", ], [u"TextItem 3", ], [u"TextItem 4", ], [u"TextItem 5", ], [u"TextItem 6", ], [u"TextItem 7", ], [u"TextItem 8", ], ] self.datagrid_texts = [ [u"0", u"A0", u"B0", u"C0", u"D0", u"E0", u"", ], [u"1", u"A1", u"B1", u"C1", u"D1", u"E1", u"", ], [u"2", u"A2", u"B2", u"C2", u"D2", u"E2", u"", ], [u"3", u"A3", u"B3", u"C3", u"D3", u"E3", u"", ], ] def tearDown(self): """Close the application after tests""" self.app.kill() def test_friendly_class_name(self): """Test friendly class name of the ListView controls""" # ListView self.listview_tab.set_focus() listview = self.listview_tab.children(class_name=u"ListView")[0] self.assertEqual(listview.friendly_class_name(), u"ListView") # ListBox self.listbox_datagrid_tab.set_focus() listbox = self.listbox_datagrid_tab.children(class_name=u"ListBox")[0] self.assertEqual(listbox.friendly_class_name(), u"ListBox") # 
DataGrid datagrid = self.listbox_datagrid_tab.children(class_name=u"DataGrid")[0] self.assertEqual(datagrid.friendly_class_name(), u"ListView") def test_item_count(self): """Test the items count in the ListView controls""" # ListView self.listview_tab.set_focus() listview = self.listview_tab.children(class_name=u"ListView")[0] self.assertEqual(listview.item_count(), len(self.listview_texts)) # ListBox self.listbox_datagrid_tab.set_focus() #listbox = self.listbox_datagrid_tab.children(class_name=u"ListBox")[0] # self.assertEqual(listbox.item_count(), len(self.listbox_texts)) # DataGrid datagrid = self.listbox_datagrid_tab.children(class_name=u"DataGrid")[0] self.assertEqual(datagrid.item_count(), len(self.datagrid_texts)) def test_column_count(self): """Test the columns count in the ListView controls""" # ListView self.listview_tab.set_focus() listview = self.listview_tab.children(class_name=u"ListView")[0] self.assertEqual(listview.column_count(), len(self.listview_texts[0])) # ListBox self.listbox_datagrid_tab.set_focus() listbox = self.listbox_datagrid_tab.children(class_name=u"ListBox")[0] self.assertEqual(listbox.column_count(), 0) # DataGrid datagrid = self.listbox_datagrid_tab.children(class_name=u"DataGrid")[0] self.assertEqual(datagrid.column_count(), len(self.datagrid_texts[0]) - 1) def test_get_header_control(self): """Test getting a Header control and Header Item control of ListView controls""" # ListView self.listview_tab.set_focus() listview = self.listview_tab.children(class_name=u"ListView")[0] hdr_ctl = listview.get_header_control() self.assertTrue(isinstance(hdr_ctl, uia_ctls.HeaderWrapper)) # HeaderItem of ListView hdr_itm = hdr_ctl.children()[2] self.assertTrue(isinstance(hdr_itm, uia_ctls.HeaderItemWrapper)) self.assertTrue(hdr_itm.iface_transform.CurrentCanResize, True) # ListBox self.listbox_datagrid_tab.set_focus() listbox = self.listbox_datagrid_tab.children(class_name=u"ListBox")[0] self.assertEqual(listbox.get_header_control(), None) # 
DataGrid datagrid = self.listbox_datagrid_tab.children(class_name=u"DataGrid")[0] self.assertTrue(isinstance(datagrid.get_header_control(), uia_ctls.HeaderWrapper)) def test_get_column(self): """Test get_column() method for the ListView controls""" # ListView self.listview_tab.set_focus() listview = self.listview_tab.children(class_name=u"ListView")[0] listview_col = listview.get_column(1) self.assertEqual(listview_col.texts()[0], u"Name") # ListBox self.listbox_datagrid_tab.set_focus() listbox = self.listbox_datagrid_tab.children(class_name=u"ListBox")[0] self.assertRaises(IndexError, listbox.get_column, 0) # DataGrid datagrid = self.listbox_datagrid_tab.children(class_name=u"DataGrid")[0] datagrid_col = datagrid.get_column(2) self.assertEqual(datagrid_col.texts()[0], u"B") self.assertRaises(IndexError, datagrid.get_column, 10) def test_cell(self): """Test getting a cell of the ListView controls""" # ListView self.listview_tab.set_focus() listview = self.listview_tab.children(class_name=u"ListView")[0] cell = listview.cell(3, 2) self.assertEqual(cell.window_text(), self.listview_texts[3][2]) # ListBox self.listbox_datagrid_tab.set_focus() listbox = self.listbox_datagrid_tab.children(class_name=u"ListBox")[0] cell = listbox.cell(10, 10) self.assertEqual(cell, None) # DataGrid datagrid = self.listbox_datagrid_tab.children(class_name=u"DataGrid")[0] cell = datagrid.cell(2, 0) self.assertEqual(cell.window_text(), self.datagrid_texts[2][0]) self.assertRaises(TypeError, datagrid.cell, 1.5, 1) self.assertRaises(IndexError, datagrid.cell, 10, 10) def test_cells(self): """Test getting a cells of the ListView controls""" def compare_cells(cells, control): for i in range(0, control.item_count()): for j in range(0, control.column_count()): self.assertEqual(cells[i][j], control.cell(i, j)) # ListView self.listview_tab.set_focus() listview = self.listview_tab.children(class_name=u"ListView")[0] compare_cells(listview.cells(), listview) # DataGrid 
self.listbox_datagrid_tab.set_focus() datagrid = self.listbox_datagrid_tab.children(class_name=u"DataGrid")[0] compare_cells(datagrid.cells(), datagrid) # ListBox self.listbox_datagrid_tab.set_focus() listbox = self.listbox_datagrid_tab.children(class_name=u"ListBox")[0] cells = listbox.cells() self.assertEqual(cells[listbox.item_count() - 1][0].window_text(), "TextItem 7") self.assertEqual(cells[3][0].window_text(), "CheckItem") def test_get_item(self): """Test getting an item of ListView controls""" # ListView self.listview_tab.set_focus() listview = self.listview_tab.children(class_name=u"ListView")[0] item = listview.get_item(u"Reddish") self.assertEqual(item.texts(), self.listview_texts[2]) self.assertRaises(ValueError, listview.get_item, u"Apple") # ListBox self.listbox_datagrid_tab.set_focus() listbox = self.listbox_datagrid_tab.children(class_name=u"ListBox")[0] item = listbox.get_item(u"TextItem 2") self.assertEqual(item.texts(), self.listbox_texts[1]) item = listbox.get_item(3) self.assertEqual(item.texts(), self.listbox_texts[3]) item = listbox.get_item(u"TextItem 8") self.assertEqual(item.texts(), self.listbox_texts[9]) # DataGrid datagrid = self.listbox_datagrid_tab.children(class_name=u"DataGrid")[0] item = datagrid.get_item(u"B2") self.assertEqual(item.texts(), self.datagrid_texts[2]) item = datagrid.get_item(3) self.assertEqual(item.texts(), self.datagrid_texts[3]) self.assertRaises(TypeError, datagrid.get_item, 12.3) def test_get_items(self): """Test getting all items of ListView controls""" self.listview_tab.set_focus() listview = self.listview_tab.children(class_name=u"ListView")[0] content = [item.texts() for item in listview.get_items()] self.assertEqual(content, self.listview_texts) # ListBox self.listbox_datagrid_tab.set_focus() listbox = self.listbox_datagrid_tab.children(class_name=u"ListBox")[0] content = [item.texts() for item in listbox.get_items()] # self.assertEqual(content, self.listbox_texts) # DataGrid datagrid = 
self.listbox_datagrid_tab.children(class_name=u"DataGrid")[0] content = [item.texts() for item in datagrid.get_items()] self.assertEqual(content, self.datagrid_texts) def test_texts(self): """Test getting all items of ListView controls""" self.listview_tab.set_focus() listview = self.listview_tab.children(class_name=u"ListView")[0] self.assertEqual(listview.texts(), self.listview_texts) # ListBox self.listbox_datagrid_tab.set_focus() #listbox = self.listbox_datagrid_tab.children(class_name=u"ListBox")[0] # self.assertEqual(listbox.texts(), self.listbox_texts) # DataGrid datagrid = self.listbox_datagrid_tab.children(class_name=u"DataGrid")[0] self.assertEqual(datagrid.texts(), self.datagrid_texts) def test_select_and_get_item(self): """Test selecting an item of the ListView control""" self.listview_tab.set_focus() self.ctrl = self.listview_tab.children(class_name=u"ListView")[0] # Verify get_selected_count self.assertEqual(self.ctrl.get_selected_count(), 0) # Select by an index row = 1 i = self.ctrl.get_item(row) self.assertEqual(i.is_selected(), False) self.assertRaises(uia_defs.NoPatternInterfaceError, i.is_checked) i.select() self.assertEqual(i.is_selected(), True) cnt = self.ctrl.get_selected_count() self.assertEqual(cnt, 1) rect = self.ctrl.get_item_rect(row) self.assertEqual(rect, i.rectangle()) # Select by text row = '3' i = self.ctrl.get_item(row) i.select() self.assertEqual(i.is_selected(), True) row = 'White' i = self.ctrl.get_item(row) i.select() i = self.ctrl.get_item(3) # re-get the item by a row index self.assertEqual(i.is_selected(), True) row = None self.assertRaises(TypeError, self.ctrl.get_item, row) class ListViewWrapperTestsWinForms(unittest.TestCase): """Unit tests for the ListViewWrapper class""" def setUp(self): """Set some data and ensure the application is in the state we want""" _set_timings() self.app = Application(backend='uia').start(winfoms_app_grid) self.dlg = self.app.Dialog self.add_col_button = self.dlg.AddCol self.add_row_button = 
self.dlg.AddRow self.row_header_button = self.dlg.RowHeader self.col_header_button = self.dlg.ColHeader self.list_box = self.dlg.ListBox def tearDown(self): """Close the application after tests""" self.app.kill() def test_list_box_item_selection(self): """Test get_item method""" self.list_box.set_focus() list_box_item = self.list_box.get_item('item (2)') self.assertFalse(list_box_item.is_selected()) list_box_item.select() self.assertTrue(list_box_item.is_selected()) def test_list_box_getitem_overload(self): """Test __getitem__ method""" self.list_box.set_focus() list_box_item = self.list_box['item (2)'] self.assertFalse(list_box_item.is_selected()) list_box_item.select() self.assertTrue(list_box_item.is_selected()) def test_empty_grid(self): """Test some error cases handling""" self.dlg.set_focus() table = self.dlg.Table self.assertEqual(len(table.cells()), 0) self.assertRaises(IndexError, table.cell, 0, 0) self.assertRaises(IndexError, table.get_item, 0) def test_skip_headers(self): """Test some error cases handling""" self.dlg.set_focus() self.add_col_button.click() table = self.dlg.Table cells = table.cells() self.assertEqual(len(cells), 1) self.assertEqual(len(cells[0]), 1) self.assertFalse(isinstance(cells[0][0], uia_ctls.HeaderWrapper)) def test_cell_and_cells_equals(self): """Test equivalence of cell and cells methods""" def compare_cells(): table = self.dlg.Table cells = table.cells() self.assertEqual(len(cells), 3) self.assertEqual(len(cells[0]), 2) for row_ind in range(0, 3): for col_ind in range(0, 2): self.assertEqual(cells[row_ind][col_ind], table.cell(row_ind, col_ind)) self.add_col_button.click() self.add_col_button.click() self.add_row_button.click() self.add_row_button.click() compare_cells() self.row_header_button.click() compare_cells() self.row_header_button.click() self.col_header_button.click() compare_cells() def test_unsupported_columns(self): """Test raise NotImplemented errors for columns methods""" self.dlg.set_focus() table = 
self.dlg.Table self.assertRaises(NotImplementedError, table.column_count) self.assertRaises(NotImplementedError, table.get_column, 0) def test_get_header_controls(self): """Test get header controls method""" self.add_col_button.click() table = self.dlg.Table headers = table.get_header_controls() self.assertEqual(len(headers), 3) self.col_header_button.click() headers = table.get_header_controls() self.assertEqual(len(headers), 1) self.row_header_button.click() headers = table.get_header_controls() self.assertEqual(len(headers), 0) class MenuBarTestsWinForms(unittest.TestCase): """Unit tests for the MenuBar class""" def setUp(self): """Set some data and ensure the application is in the state we want""" _set_timings() self.app = Application(backend='uia').start(winfoms_app_grid) self.dlg = self.app.Dialog def tearDown(self): """Close the application after tests""" self.app.kill() def test_can_select_multiple_items(self): """Test menu_select multimple items with action""" table = self.dlg.Table cells = table.cells() self.assertEqual(len(table.cells()), 0) self.dlg.menu_select('#0 -> #1 -> #1 -> #0 -> #0 -> #4 ->#0') cells = table.cells() self.assertEqual(len(cells), 1) self.assertEqual(len(cells[0]), 1) def test_can_select_top_menu(self): """Test menu_select with single item""" first_menu_item = self.dlg['menuStrip1'].children()[0] point = first_menu_item.rectangle().mid_point() child_from_point = self.dlg.from_point(point.x, point.y + 20) self.assertEqual(child_from_point.element_info.name, 'Form1') self.dlg.menu_select('tem1') time.sleep(0.1) child_from_point = self.dlg.from_point(point.x, point.y + 20) self.assertEqual(child_from_point.element_info.name, 'tem1DropDown') class EditTestsWinForms(unittest.TestCase): """Unit tests for the WinFormEdit class""" def setUp(self): """Set some data and ensure the application is in the state we want""" _set_timings() self.app = Application(backend='uia').start(winfoms_app_grid) self.dlg = self.app.Dialog def tearDown(self): 
"""Close the application after tests""" self.app.kill() def test_readonly_and_editable_edits(self): """Test editable method for editable edit""" self.assertEqual(self.dlg.Edit2.get_value(), "Editable") self.assertTrue(self.dlg.Edit2.is_editable()) self.assertEqual(self.dlg.Edit1.get_value(), "ReadOnly") self.assertFalse(self.dlg.Edit1.is_editable()) class ComboBoxTestsWinForms(unittest.TestCase): """Unit tests for the ComboBoxWrapper class with WinForms app""" def setUp(self): """Set some data and ensure the application is in the state we want""" _set_timings() # start the application app = Application(backend='uia') self.app = app.start(winfoms_app_grid) self.dlg = dlg = app.Form1 self.combo_editable = dlg.by(auto_id="comboRowType", control_type="ComboBox").find() self.combo_fixed = dlg.by(auto_id="comboBoxReadOnly", control_type="ComboBox").find() self.combo_simple = dlg.by(auto_id="comboBoxSimple", control_type="ComboBox").find() def tearDown(self): """Close the application after tests""" self.app.kill() def test_expand_collapse(self): """Test methods .expand() and .collapse() for WinForms combo box""" self.dlg.set_focus() test_data = [(self.combo_editable, 'editable'), (self.combo_fixed, 'fixed'), (self.combo_simple, 'simple')] for combo, combo_name in test_data: if combo != self.combo_simple: self.assertFalse(combo.is_expanded(), msg='{} combo box must be collapsed initially'.format(combo_name)) # test that method allows chaining self.assertEqual(combo.expand(), combo, msg='Method .expand() for {} combo box must return self'.format(combo_name)) self.assertTrue(combo.is_expanded(), msg='{} combo box has not been expanded!'.format(combo_name)) # .expand() keeps already expanded state (and still allows chaining) self.assertEqual(combo.expand(), combo, msg='Method .expand() for {} combo box must return self, always!'.format(combo_name)) self.assertTrue(combo.is_expanded(), msg='{} combo box does NOT keep expanded state!'.format(combo_name)) # collapse 
self.assertEqual(combo.collapse(), combo, msg='Method .collapse() for {} combo box must return self'.format(combo_name)) if combo != self.combo_simple: self.assertFalse(combo.is_expanded(), msg='{} combo box has not been collapsed!'.format(combo_name)) # collapse already collapsed should keep collapsed state self.assertEqual(combo.collapse(), combo, msg='Method .collapse() for {} combo box must return self, always!'.format(combo_name)) if combo != self.combo_simple: self.assertFalse(combo.is_expanded(), msg='{} combo box does NOT keep collapsed state!'.format(combo_name)) def test_texts(self): """Test method .texts() for WinForms combo box""" self.dlg.set_focus() editable_texts = [u'Numbers', u'Letters', u'Special symbols'] fixed_texts = [u'Item 1', u'Item 2', u'Last Item'] simple_texts = [u'Simple 1', u'Simple Two', u'The Simplest'] self.assertEqual(self.combo_editable.texts(), editable_texts) self.assertEqual(self.combo_editable.expand().texts(), editable_texts) self.assertTrue(self.combo_editable.is_expanded()) self.combo_editable.collapse() self.assertEqual(self.combo_fixed.texts(), fixed_texts) self.assertEqual(self.combo_fixed.expand().texts(), fixed_texts) self.assertTrue(self.combo_fixed.is_expanded()) self.combo_fixed.collapse() self.assertEqual(self.combo_simple.texts(), simple_texts) self.assertEqual(self.combo_simple.expand().texts(), simple_texts) self.assertTrue(self.combo_simple.is_expanded()) self.combo_simple.collapse() def test_select(self): """Test method .select() for WinForms combo box""" self.dlg.set_focus() self.combo_editable.select(u'Letters') self.assertEqual(self.combo_editable.selected_text(), u'Letters') self.assertEqual(self.combo_editable.selected_index(), 1) self.combo_editable.select(2) self.assertEqual(self.combo_editable.selected_text(), u'Special symbols') self.assertEqual(self.combo_editable.selected_index(), 2) self.combo_fixed.select(u'Last Item') self.assertEqual(self.combo_fixed.selected_text(), u'Last Item') 
self.assertEqual(self.combo_fixed.selected_index(), 2) self.combo_fixed.select(1) self.assertEqual(self.combo_fixed.selected_text(), u'Item 2') self.assertEqual(self.combo_fixed.selected_index(), 1) self.combo_simple.select(u'The Simplest') self.assertEqual(self.combo_simple.selected_text(), u'The Simplest') self.assertEqual(self.combo_simple.selected_index(), 2) self.combo_simple.select(0) self.assertEqual(self.combo_simple.selected_text(), u'Simple 1') self.assertEqual(self.combo_simple.selected_index(), 0) def test_select_errors(self): """Test errors in method .select() for WinForms combo box""" self.dlg.set_focus() for combo in [self.combo_editable, self.combo_fixed, self.combo_simple]: self.assertRaises(ValueError, combo.select, u'FFFF') self.assertRaises(IndexError, combo.select, 50) def test_item_count(self): """Test method .item_count() for WinForms combo box""" self.dlg.set_focus() self.assertEqual(self.combo_editable.item_count(), 3) self.assertEqual(self.combo_fixed.item_count(), 3) self.assertEqual(self.combo_simple.item_count(), 3) def test_from_point(self): """Test method .from_point() for WinForms combo box""" self.dlg.set_focus() x, y = self.combo_fixed.rectangle().mid_point() combo_from_point = self.dlg.from_point(x, y) self.assertEqual(combo_from_point, self.combo_fixed) combo2_from_point = Desktop(backend="uia").from_point(x, y) self.assertEqual(combo2_from_point, self.combo_fixed) def test_top_from_point(self): """Test method .top_from_point() for WinForms combo box""" dlg_wrapper = self.dlg.set_focus() x, y = self.combo_fixed.rectangle().mid_point() dlg_from_point = self.dlg.top_from_point(x, y) self.assertEqual(dlg_from_point, dlg_wrapper) dlg2_from_point = Desktop(backend="uia").top_from_point(x, y) self.assertEqual(dlg2_from_point, dlg_wrapper) if sys.version_info[:2] >= (3, 6): class ComboBoxTestsQt(unittest.TestCase): """Unit tests for the ComboBoxWrapper class with PyQt5 app""" def setUp(self): """Set some data and ensure the application 
is in the state we want""" _set_timings() # start the application app = Application(backend='uia').start(qt_py_combobox_app, wait_for_idle=False) self.app = Application(backend='uia').connect(pid=app.process) self.dlg = dlg = self.app.window(name='QTRV') self.combo1 = dlg.by(name="Q1", control_type="ComboBox").find() self.combo2 = dlg.by(name="Q2", control_type="ComboBox").find() def tearDown(self): """Close the application after tests""" self.app.kill() def test_select(self): """Test method .select() for Qt combo box""" self.dlg.set_focus() self.combo1.select(u'Image on right') self.assertEqual(self.combo1.selected_text(), u'Image on right') self.assertEqual(self.combo1.selected_index(), 1) self.combo1.select(2) self.assertEqual(self.combo1.selected_text(), u'Image on top') self.assertEqual(self.combo1.selected_index(), 2) self.combo2.select(u'Image and Text') self.assertEqual(self.combo2.selected_text(), u'Image and Text') self.assertEqual(self.combo2.selected_index(), 2) self.combo2.select(0) self.assertEqual(self.combo2.selected_text(), u'Image') self.assertEqual(self.combo2.selected_index(), 0) class ListItemWrapperTests(unittest.TestCase): """Unit tests for the ListItemWrapper class""" def setUp(self): """Set some data and ensure the application is in the state we want""" _set_timings() # start the application app = Application(backend='uia') app = app.start(wpf_app_1) dlg = app.WPFSampleApplication self.app = app self.listview_tab = dlg.Tree_and_List_Views self.listbox_datagrid_tab = dlg.ListBox_and_Grid def tearDown(self): """Close the application after tests""" self.app.kill() def test_friendly_class_name(self): """Test getting friendly class name""" # DataItem self.listview_tab.set_focus() listview_item = self.listview_tab.children(class_name=u"ListView")[0].get_item(2) self.assertEqual(listview_item.friendly_class_name(), u"DataItem") # ListBoxItem self.listbox_datagrid_tab.set_focus() listbox_item = 
self.listbox_datagrid_tab.children(class_name=u"ListBox")[0].get_item(3) self.assertEqual(listbox_item.friendly_class_name(), u"ListItem") # DataGridRow datagrid_row = self.listbox_datagrid_tab.children(class_name=u"DataGrid")[0].get_item(1) self.assertEqual(datagrid_row.friendly_class_name(), u"DataItem") def test_selection(self): """Test selection of ListItem""" self.listview_tab.set_focus() listview_item = self.listview_tab.children(class_name=u"ListView")[0].get_item(2) self.assertFalse(listview_item.is_selected()) listview_item.select() self.assertTrue(listview_item.is_selected()) def test_is_checked(self): """Test is_checked() method of ListItemWrapper""" self.listbox_datagrid_tab.set_focus() listbox_item = self.listbox_datagrid_tab.children(class_name=u"ListBox")[0].get_item(u"CheckItem") self.assertRaises(uia_defs.NoPatternInterfaceError, listbox_item.is_checked) def test_texts(self): """Test getting texts of ListItem""" self.listview_tab.set_focus() listview_item = self.listview_tab.children(class_name=u"ListView")[0].get_item(1) texts = [u"2", u"Cucumber", u"Green"] self.assertEqual(listview_item.texts(), texts) class MenuWrapperWpfTests(unittest.TestCase): """Unit tests for the MenuWrapper class on WPF demo""" def setUp(self): """Set some data and ensure the application is in the state we want""" _set_timings() # start the application self.app = Application(backend='uia') self.app = self.app.start(wpf_app_1) self.dlg = self.app.WPFSampleApplication def tearDown(self): """Close the application after tests""" self.app.kill() def test_menu_by_index(self): """Test selecting a WPF menu item by index""" path = "#0->#1->#1" # "File->Close->Later" self.dlg.menu_select(path) label = self.dlg.MenuLaterClickStatic.find() self.assertEqual(label.window_text(), u"MenuLaterClick") # Non-existing paths path = "#5->#1" self.assertRaises(IndexError, self.dlg.menu_select, path) path = "#0->#1->#1->#2->#3" self.assertRaises(IndexError, self.dlg.menu_select, path) def 
test_menu_by_exact_text(self): """Test selecting a WPF menu item by exact text match""" path = "File->Close->Later" self.dlg.menu_select(path, True) label = self.dlg.MenuLaterClickStatic.find() self.assertEqual(label.window_text(), u"MenuLaterClick") # A non-exact menu name path = "File->About" self.assertRaises(IndexError, self.dlg.menu_select, path, True) def test_menu_by_best_match_text(self): """Test selecting a WPF menu item by best match text""" path = "file-> close -> later" self.dlg.menu_select(path, False) label = self.dlg.MenuLaterClickStatic.find() self.assertEqual(label.window_text(), u"MenuLaterClick") def test_menu_by_mixed_match(self): """Test selecting a WPF menu item by a path with mixed specifiers""" path = "file-> #1 -> later" self.dlg.menu_select(path, False) label = self.dlg.MenuLaterClickStatic.find() self.assertEqual(label.window_text(), u"MenuLaterClick") # Bad specifiers path = "file-> 1 -> later" self.assertRaises(IndexError, self.dlg.menu_select, path) path = "#0->#1->1" self.assertRaises(IndexError, self.dlg.menu_select, path) path = "0->#1->1" self.assertRaises(IndexError, self.dlg.menu_select, path) class MenuWrapperNotepadTests(unittest.TestCase): """Unit tests for the MenuWrapper class on Notepad""" def setUp(self): """Set some data and ensure the application is in the state we want""" Timings.defaults() # start the application self.app = Application(backend='uia') self.app = self.app.start("notepad.exe") self.dlg = self.app.UntitledNotepad ActionLogger().log("MenuWrapperNotepadTests::setUp, wait till Notepad dialog is ready") self.dlg.wait("ready") def tearDown(self): """Close the application after tests""" self.app.kill() def test_friendly_class_name(self): """Test getting the friendly class name of the menu""" menu = self.dlg.descendants(control_type="MenuBar")[0] self.assertEqual(menu.friendly_class_name(), "Menu") def test_menu_by_index(self): """Test selecting a menu item by index""" path = "#4->#1" # "Help->About Notepad" 
self.dlg.menu_select(path) # 'About Notepad' dialog showed upon execution of menu_select self.assertEqual(self.dlg.AboutNotepad.is_active(), True) # menu_select rises the AttributeError when a dialog doesn't have menus self.assertRaises(AttributeError, self.dlg.AboutNotepad.menu_select, "#10->#2") self.dlg.AboutNotepad.close() # A non-existing path path = "#5->#1" self.assertRaises(IndexError, self.dlg.menu_select, path) # Get a menu item by index menu = self.dlg.children(control_type="MenuBar")[0] item = menu.item_by_index(4) self.assertEqual(isinstance(item, uia_ctls.MenuItemWrapper), True) self.assertEqual(item.window_text(), 'Help') item.select() item.close() def test_is_dialog(self): """Test that method is_dialog() works as expected""" self.assertEqual(self.dlg.is_dialog(), True) self.assertEqual(self.dlg.Edit.is_dialog(), False) def test_issue_532(self): """Test selecting a combobox item when it's wrapped in ListView""" path = "Format -> Font" self.dlg.menu_select(path) combo_box = self.app.top_window().Font.ScriptComboBox.find() combo_box.select('Greek') self.assertEqual(combo_box.selected_text(), 'Greek') self.assertRaises(ValueError, combo_box.select, 'NonExistingScript') def test_menu_by_exact_text(self): """Test selecting a menu item by exact text match""" path = "Help->About Notepad" self.dlg.menu_select(path, True) self.assertEqual(self.dlg.AboutNotepad.is_dialog(), True) self.dlg.AboutNotepad.close() # A non-exact menu name path = "help ->About Notepad" self.assertRaises(IndexError, self.dlg.menu_select, path, True) def test_menu_by_best_match_text(self): """Test selecting a Win32 menu item by best match text""" path = "help->aboutnotepad" self.dlg.menu_select(path, False) self.dlg.AboutNotepad.close() path = "Help ->about notepad " self.dlg.menu_select(path, False) self.dlg.AboutNotepad.close() # Bad match path = "HELP -> About Notepad" self.assertRaises(IndexError, self.dlg.menu_select, path) path = "help -> ABOUT NOTEPAD" 
self.assertRaises(IndexError, self.dlg.menu_select, path) path = "help -> # 2" self.assertRaises(IndexError, self.dlg.menu_select, path) def test_menu_by_mixed_match(self): """Test selecting a menu item by a path with mixed specifiers""" path = "#4->aboutnotepad" self.dlg.menu_select(path, False) self.dlg.AboutNotepad.close() # An index and the exact text match path = "Help->#1" self.dlg.menu_select(path, True) self.dlg.AboutNotepad.close() # An index and non-exact text match path = "#4 -> about notepad " self.dlg.menu_select(path, False) self.dlg.AboutNotepad.close() # Bad specifiers path = "#0->#1->1" self.assertRaises(IndexError, self.dlg.menu_select, path) path = "0->#1->1" self.assertRaises(IndexError, self.dlg.menu_select, path) path = " -> #1 -> #2" self.assertRaises(IndexError, self.dlg.menu_select, path) class ToolbarWpfTests(unittest.TestCase): """Unit tests for ToolbarWrapper class on WPF demo""" def setUp(self): """Set some data and ensure the application is in the state we want""" _set_timings() # start the application self.app = Application(backend='uia') self.app = self.app.start(wpf_app_1) self.dlg = self.app.WPFSampleApplication def tearDown(self): """Close the application after tests""" self.app.kill() def test_button_access_wpf(self): """Test getting access to buttons on Toolbar of WPF demo""" # Read a second toolbar with buttons: "button1, button2" tb = self.dlg.Toolbar2.find() self.assertEqual(tb.button_count(), 5) self.assertEqual(len(tb.texts()), 5) # Test if it's in writable properties props = set(tb.get_properties().keys()) self.assertEqual('button_count' in props, True) expect_txt = "button 1" self.assertEqual(tb.button(3).window_text(), expect_txt) found_txt = tb.button(expect_txt, exact=True).window_text() self.assertEqual(found_txt, expect_txt) found_txt = tb.button("b 1", exact=False).window_text() self.assertEqual(found_txt, expect_txt) expect_txt = "button 2" found_txt = tb.button(expect_txt, exact=True).window_text() 
self.assertEqual(found_txt, expect_txt) expect_txt = "" btn = tb.button(expect_txt, exact=True) found_txt = btn.window_text() self.assertEqual(found_txt, expect_txt) # Notice that findbestmatch.MatchError is subclassed from IndexError self.assertRaises(IndexError, tb.button, "BaD n_$E ", exact=False) class ToolbarNativeTests(unittest.TestCase): """Unit tests for ToolbarWrapper class on a native application""" def setUp(self): """Set some data and ensure the application is in the state we want""" Timings.defaults() self.app = Application(backend='uia') self.app.start(os.path.join(mfc_samples_folder, u"RowList.exe")) self.dlg = self.app.RowListSampleApplication self.ctrl = self.dlg.ToolBar.find() def tearDown(self): """Close the application after tests""" self.app.kill() def test_tooltips(self): """Test working with tooltips""" self.ctrl.set_focus() self.ctrl.move_mouse_input(coords=(10, 10), absolute=False) # Find a tooltip by class name tt = self.app.window(top_level_only=False, class_name="tooltips_class32").wait('visible') self.assertEqual(isinstance(tt, uia_ctls.TooltipWrapper), True) self.assertEqual(tt.window_text(), "Large Icons") # Find a tooltip window by control type tt = self.app.top_window().children(control_type='ToolTip')[0] self.assertEqual(isinstance(tt, uia_ctls.TooltipWrapper), True) self.assertEqual(tt.window_text(), "Large Icons") def test_button_click(self): """Test button click""" # Check the "Full Row Details" button self.ctrl.check_button("Full Row Details", True) lst_ctl = self.dlg.ListBox itm = lst_ctl.children()[1] self.assertEqual(itm.texts()[0], u'Yellow') # Check the second time it shouldn't change self.ctrl.check_button("Full Row Details", True) self.assertEqual(itm.texts()[0], u'Yellow') # Switch to another view self.ctrl.check_button("Small Icons", True) itm = lst_ctl.children()[1] self.assertEqual(itm.texts()[0], u'Red') class ToolbarMfcTests(unittest.TestCase): """Unit tests for ToolbarWrapper class on MFC demo""" def setUp(self): 
"""Set some data and ensure the application is in the state we want""" _set_timings() # start the application self.app = Application(backend='uia').start(mfc_app_rebar_test) self.dlg = self.app.RebarTest self.menu_bar = self.dlg.MenuBar.find() self.toolbar = self.dlg.StandardToolbar.find() self.window_edge_point = (self.dlg.rectangle().width() + 50, self.dlg.rectangle().height() + 50) def tearDown(self): """Close the application after tests""" self.menu_bar.move_mouse_input(coords=self.window_edge_point, absolute=False) self.app.kill() def test_button_access_mfc(self): """Test getting access to buttons on Toolbar for MFC demo""" # Read a first toolbar with buttons: "File, View, Help" self.assertEqual(self.menu_bar.button_count(), 4) self.assertEqual(self.toolbar.button_count(), 11) # Test if it's in writable properties props = set(self.menu_bar.get_properties().keys()) self.assertEqual('button_count' in props, True) self.assertEqual("File", self.menu_bar.button(0).window_text()) self.assertEqual("View", self.menu_bar.button(1).window_text()) self.assertEqual("Help", self.menu_bar.button(2).window_text()) found_txt = self.menu_bar.button("File", exact=True).window_text() self.assertEqual("File", found_txt) found_txt = self.menu_bar.button("File", exact=False).window_text() self.assertEqual("File", found_txt) def test_texts(self): """Test method .texts() for MFC Toolbar""" self.assertEqual(self.menu_bar.texts(), [u'File', u'View', u'Help', u'Help']) self.assertEqual(self.toolbar.texts(), [u'New', u'Open', u'Save', u'Save', u'Cut', u'Copy', u'Paste', u'Paste', u'Print', u'About', u'About']) class TreeViewWpfTests(unittest.TestCase): """Unit tests for TreeViewWrapper class on WPF demo""" def setUp(self): """Set some data and ensure the application is in the state we want""" _set_timings() # start the application self.app = Application(backend='uia') self.app = self.app.start(wpf_app_1) self.dlg = self.app.WPFSampleApplication tab_itm = 
self.dlg.TreeAndListViews.set_focus() self.ctrl = tab_itm.children(control_type="Tree")[0] def tearDown(self): """Close the application after tests""" self.app.kill() def test_tv_item_count_and_roots(self): """Test getting roots and a total number of items in TreeView""" # By default the tree view on WPF demo is partially expanded # with only 12 visible nodes self.assertEqual(self.ctrl.item_count(), 12) # Test if it's in writable properties props = set(self.ctrl.get_properties().keys()) self.assertEqual('item_count' in props, True) roots = self.ctrl.roots() self.assertEqual(len(roots), 1) self.assertEqual(roots[0].texts()[0], u'Date Elements') sub_items = roots[0].sub_elements() self.assertEqual(len(sub_items), 11) self.assertEqual(sub_items[0].window_text(), u'Empty Date') self.assertEqual(sub_items[-1].window_text(), u'Years') expected_str = "Date Elements\n Empty Date\n Week\n Monday\n Tuesday\n Wednsday\n" expected_str += " Thursday\n Friday\n Saturday\n Sunday\n Months\n Years\n" self.assertEqual(self.ctrl.print_items(), expected_str) def test_tv_item_select(self): """Test selecting an item from TreeView""" # Find by a path with indexes itm = self.ctrl.get_item((0, 2, 3)) self.assertEqual(itm.is_selected(), False) # Select itm.select() self.assertEqual(itm.is_selected(), True) # A second call to Select doesn't remove selection itm.select() self.assertEqual(itm.is_selected(), True) itm = self.ctrl.get_item((0, 3, 2)) itm.ensure_visible() self.assertEqual(itm.is_selected(), False) coords = itm.children(control_type='Text')[0].rectangle().mid_point() itm.click_input(coords=coords, absolute=True) self.assertEqual(itm.is_selected(), True) def test_tv_get_item(self): """Test getting an item from TreeView""" # Find by a path with indexes itm = self.ctrl.get_item((0, 2, 3)) self.assertEqual(isinstance(itm, uia_ctls.TreeItemWrapper), True) self.assertEqual(itm.window_text(), u'April') # Find by a path with strings itm = self.ctrl.get_item('\\Date 
Elements\\Months\\April', exact=True) self.assertEqual(isinstance(itm, uia_ctls.TreeItemWrapper), True) self.assertEqual(itm.window_text(), u'April') itm = self.ctrl.get_item('\\ Date Elements \\ months \\ april', exact=False) self.assertEqual(isinstance(itm, uia_ctls.TreeItemWrapper), True) self.assertEqual(itm.window_text(), u'April') itm = self.ctrl.get_item('\\Date Elements', exact=False) self.assertEqual(isinstance(itm, uia_ctls.TreeItemWrapper), True) self.assertEqual(itm.window_text(), u'Date Elements') # Try to find the last item in the tree hierarchy itm = self.ctrl.get_item('\\Date Elements\\Years\\2018', exact=False) self.assertEqual(isinstance(itm, uia_ctls.TreeItemWrapper), True) self.assertEqual(itm.window_text(), u'2018') itm = self.ctrl.get_item((0, 3, 3)) self.assertEqual(isinstance(itm, uia_ctls.TreeItemWrapper), True) self.assertEqual(itm.window_text(), u'2018') # Verify errors handling self.assertRaises(uia_defs.NoPatternInterfaceError, itm.is_checked) self.assertRaises(RuntimeError, self.ctrl.get_item, 'Date Elements\\months', exact=False) self.assertRaises(IndexError, self.ctrl.get_item, '\\_X_- \\months', exact=False) self.assertRaises(IndexError, self.ctrl.get_item, '\\_X_- \\ months', exact=True) self.assertRaises(IndexError, self.ctrl.get_item, '\\Date Elements\\ months \\ aprel', exact=False) self.assertRaises(IndexError, self.ctrl.get_item, '\\Date Elements\\ months \\ april\\', exact=False) self.assertRaises(IndexError, self.ctrl.get_item, '\\Date Elements\\ months \\ aprel', exact=True) self.assertRaises(IndexError, self.ctrl.get_item, (0, 200, 1)) self.assertRaises(IndexError, self.ctrl.get_item, (130, 2, 1)) def test_tv_drag_n_drop(self): """Test moving an item with mouse over TreeView""" # Make sure the both nodes are visible self.ctrl.get_item('\\Date Elements\\weeks').collapse() itm_from = self.ctrl.get_item('\\Date Elements\\Years') itm_to = self.ctrl.get_item('\\Date Elements\\Empty Date') itm_from.drag_mouse_input(itm_to) # 
Verify that the item and its sub-items are attached to the new node itm = self.ctrl.get_item('\\Date Elements\\Empty Date\\Years') self.assertEqual(itm.window_text(), 'Years') itm = self.ctrl.get_item((0, 0, 0, 0)) self.assertEqual(itm.window_text(), '2015') itm = self.ctrl.get_item('\\Date Elements\\Empty Date\\Years') itm.collapse() itm_from = self.ctrl.get_item('\\Date Elements\\Empty Date\\Years') itm_to = self.ctrl.get_item(r'\Date Elements\Months') self.ctrl.drag_mouse_input(itm_to, itm_from) itm = self.ctrl.get_item(r'\Date Elements\Months\Years') self.assertEqual(itm.window_text(), 'Years') # Error handling: drop on itself self.assertRaises(AttributeError, self.ctrl.drag_mouse_input, itm_from, itm_from) # Drag-n-drop by manually calculated absolute coordinates itm_from = self.ctrl.get_item(r'\Date Elements\Months') itm_from.collapse() r = itm_from.rectangle() coords_from = (int(r.left + (r.width() / 4.0)), int(r.top + (r.height() / 2.0))) r = self.ctrl.get_item(r'\Date Elements\Weeks').rectangle() coords_to = (int(r.left + (r.width() / 4.0)), int(r.top + (r.height() / 2.0))) self.ctrl.drag_mouse_input(coords_to, coords_from) itm = self.ctrl.get_item(r'\Date Elements\Weeks\Months') self.assertEqual(itm.window_text(), 'Months') class WindowWrapperTests(unittest.TestCase): """Unit tests for the UIAWrapper class for Window elements""" def setUp(self): """Set some data and ensure the application is in the state we want""" _set_timings() test_folder = os.path.join(os.path.dirname(os.path.dirname( os.path.dirname(os.path.abspath(__file__)))), r"apps/MouseTester") self.qt5_app = os.path.join(test_folder, "mousebuttons.exe") # start the application self.app = Application(backend='uia') self.app = self.app.start(self.qt5_app) self.dlg = self.app.MouseButtonTester.find() self.another_app = None def tearDown(self): """Close the application after tests""" self.app.kill() if self.another_app: self.another_app.kill() self.another_app = None def test_issue_443(self): 
"""Test .set_focus() for window that is not keyboard focusable""" self.dlg.minimize() wait_until(1, 0.2, self.dlg.is_minimized) self.dlg.set_focus() wait_until(1, 0.2, self.dlg.is_minimized, value=False) self.assertEqual(self.dlg.is_normal(), True) # run another app instance (in focus now) self.another_app = Application(backend="win32").start(self.qt5_app) # eliminate clickable point at original app by maximizing second window self.another_app.MouseButtonTester.maximize() self.another_app.MouseButtonTester.set_focus() self.assertEqual(self.another_app.MouseButtonTester.has_focus(), True) self.dlg.set_focus() # another app instance has lost focus self.assertEqual(self.another_app.MouseButtonTester.has_focus(), False) # our window has been brought to the focus (clickable point exists) self.assertEqual(self.dlg.element_info.element.GetClickablePoint()[-1], 1) if __name__ == "__main__": if UIA_support: unittest.main()
codeparrot/github-code-clean
from __future__ import division, absolute_import, print_function import copy import pickle import sys import platform import gc import copy import warnings import tempfile from os import path from io import BytesIO import numpy as np from numpy.testing import ( run_module_suite, TestCase, assert_, assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal, assert_raises, assert_warns, dec ) from numpy.testing.utils import _assert_valid_refcount from numpy.compat import asbytes, asunicode, asbytes_nested, long, sixu rlevel = 1 class TestRegression(TestCase): def test_invalid_round(self,level=rlevel): """Ticket #3""" v = 4.7599999999999998 assert_array_equal(np.array([v]), np.array(v)) def test_mem_empty(self,level=rlevel): """Ticket #7""" np.empty((1,), dtype=[('x', np.int64)]) def test_pickle_transposed(self,level=rlevel): """Ticket #16""" a = np.transpose(np.array([[2, 9], [7, 0], [3, 8]])) f = BytesIO() pickle.dump(a, f) f.seek(0) b = pickle.load(f) f.close() assert_array_equal(a, b) def test_typeNA(self,level=rlevel): """Ticket #31""" assert_equal(np.typeNA[np.int64], 'Int64') assert_equal(np.typeNA[np.uint64], 'UInt64') def test_dtype_names(self,level=rlevel): """Ticket #35""" dt = np.dtype([(('name', 'label'), np.int32, 3)]) def test_reduce(self,level=rlevel): """Ticket #40""" assert_almost_equal(np.add.reduce([1., .5], dtype=None), 1.5) def test_zeros_order(self,level=rlevel): """Ticket #43""" np.zeros([3], int, 'C') np.zeros([3], order='C') np.zeros([3], int, order='C') def test_asarray_with_order(self,level=rlevel): """Check that nothing is done when order='F' and array C/F-contiguous""" a = np.ones(2) assert_(a is np.asarray(a, order='F')) def test_ravel_with_order(self,level=rlevel): """Check that ravel works when order='F' and array C/F-contiguous""" a = np.ones(2) assert_(not a.ravel('F').flags.owndata) def test_sort_bigendian(self,level=rlevel): """Ticket #47""" a = np.linspace(0, 10, 11) c = a.astype(np.dtype('<f8')) c.sort() 
assert_array_almost_equal(c, a) def test_negative_nd_indexing(self,level=rlevel): """Ticket #49""" c = np.arange(125).reshape((5, 5, 5)) origidx = np.array([-1, 0, 1]) idx = np.array(origidx) c[idx] assert_array_equal(idx, origidx) def test_char_dump(self,level=rlevel): """Ticket #50""" f = BytesIO() ca = np.char.array(np.arange(1000, 1010), itemsize=4) ca.dump(f) f.seek(0) ca = np.load(f) f.close() def test_noncontiguous_fill(self,level=rlevel): """Ticket #58.""" a = np.zeros((5, 3)) b = a[:, :2,] def rs(): b.shape = (10,) self.assertRaises(AttributeError, rs) def test_bool(self,level=rlevel): """Ticket #60""" x = np.bool_(1) def test_indexing1(self,level=rlevel): """Ticket #64""" descr = [('x', [('y', [('z', 'c16', (2,)),]),]),] buffer = ((([6j, 4j],),),) h = np.array(buffer, dtype=descr) h['x']['y']['z'] def test_indexing2(self,level=rlevel): """Ticket #65""" descr = [('x', 'i4', (2,))] buffer = ([3, 2],) h = np.array(buffer, dtype=descr) h['x'] def test_round(self,level=rlevel): """Ticket #67""" x = np.array([1+2j]) assert_almost_equal(x**(-1), [1/(1+2j)]) def test_scalar_compare(self,level=rlevel): """Ticket #72""" a = np.array(['test', 'auto']) assert_array_equal(a == 'auto', np.array([False, True])) self.assertTrue(a[1] == 'auto') self.assertTrue(a[0] != 'auto') b = np.linspace(0, 10, 11) self.assertTrue(b != 'auto') self.assertTrue(b[0] != 'auto') def test_unicode_swapping(self,level=rlevel): """Ticket #79""" ulen = 1 ucs_value = sixu('\U0010FFFF') ua = np.array([[[ucs_value*ulen]*2]*3]*4, dtype='U%s' % ulen) ua2 = ua.newbyteorder() def test_object_array_fill(self,level=rlevel): """Ticket #86""" x = np.zeros(1, 'O') x.fill([]) def test_mem_dtype_align(self,level=rlevel): """Ticket #93""" self.assertRaises(TypeError, np.dtype, {'names':['a'],'formats':['foo']}, align=1) @dec.knownfailureif((sys.version_info[0] >= 3) or (sys.platform == "win32" and platform.architecture()[0] == "64bit"), "numpy.intp('0xff', 16) not supported on Py3, " "as it does not inherit 
from Python int") def test_intp(self,level=rlevel): """Ticket #99""" i_width = np.int_(0).nbytes*2 - 1 np.intp('0x' + 'f'*i_width, 16) self.assertRaises(OverflowError, np.intp, '0x' + 'f'*(i_width+1), 16) self.assertRaises(ValueError, np.intp, '0x1', 32) assert_equal(255, np.intp('0xFF', 16)) assert_equal(1024, np.intp(1024)) def test_endian_bool_indexing(self,level=rlevel): """Ticket #105""" a = np.arange(10., dtype='>f8') b = np.arange(10., dtype='<f8') xa = np.where((a>2) & (a<6)) xb = np.where((b>2) & (b<6)) ya = ((a>2) & (a<6)) yb = ((b>2) & (b<6)) assert_array_almost_equal(xa, ya.nonzero()) assert_array_almost_equal(xb, yb.nonzero()) assert_(np.all(a[ya] > 0.5)) assert_(np.all(b[yb] > 0.5)) def test_endian_where(self,level=rlevel): """GitHub issue #369""" net = np.zeros(3, dtype='>f4') net[1] = 0.00458849 net[2] = 0.605202 max_net = net.max() test = np.where(net <= 0., max_net, net) correct = np.array([ 0.60520202, 0.00458849, 0.60520202]) assert_array_almost_equal(test, correct) def test_endian_recarray(self,level=rlevel): """Ticket #2185""" dt = np.dtype([ ('head', '>u4'), ('data', '>u4', 2), ]) buf = np.recarray(1, dtype=dt) buf[0]['head'] = 1 buf[0]['data'][:] = [1, 1] h = buf[0]['head'] d = buf[0]['data'][0] buf[0]['head'] = h buf[0]['data'][0] = d assert_(buf[0]['head'] == 1) def test_mem_dot(self,level=rlevel): """Ticket #106""" x = np.random.randn(0, 1) y = np.random.randn(10, 1) # Dummy array to detect bad memory access: _z = np.ones(10) _dummy = np.empty((0, 10)) z = np.lib.stride_tricks.as_strided(_z, _dummy.shape, _dummy.strides) np.dot(x, np.transpose(y), out=z) assert_equal(_z, np.ones(10)) # Do the same for the built-in dot: np.core.multiarray.dot(x, np.transpose(y), out=z) assert_equal(_z, np.ones(10)) def test_arange_endian(self,level=rlevel): """Ticket #111""" ref = np.arange(10) x = np.arange(10, dtype='<f8') assert_array_equal(ref, x) x = np.arange(10, dtype='>f8') assert_array_equal(ref, x) # Longfloat support is not consistent enough 
across # platforms for this test to be meaningful. # def test_longfloat_repr(self,level=rlevel): # """Ticket #112""" # if np.longfloat(0).itemsize > 8: # a = np.exp(np.array([1000],dtype=np.longfloat)) # assert_(str(a)[1:9] == str(a[0])[:8]) def test_argmax(self,level=rlevel): """Ticket #119""" a = np.random.normal(0, 1, (4, 5, 6, 7, 8)) for i in range(a.ndim): aargmax = a.argmax(i) def test_mem_divmod(self,level=rlevel): """Ticket #126""" for i in range(10): divmod(np.array([i])[0], 10) def test_hstack_invalid_dims(self,level=rlevel): """Ticket #128""" x = np.arange(9).reshape((3, 3)) y = np.array([0, 0, 0]) self.assertRaises(ValueError, np.hstack, (x, y)) def test_squeeze_type(self,level=rlevel): """Ticket #133""" a = np.array([3]) b = np.array(3) assert_(type(a.squeeze()) is np.ndarray) assert_(type(b.squeeze()) is np.ndarray) def test_add_identity(self,level=rlevel): """Ticket #143""" assert_equal(0, np.add.identity) def test_numpy_float_python_long_addition(self): # Check that numpy float and python longs can be added correctly. a = np.float_(23.) + 2**135 assert_equal(a, 23. 
+ 2**135) def test_binary_repr_0(self,level=rlevel): """Ticket #151""" assert_equal('0', np.binary_repr(0)) def test_rec_iterate(self,level=rlevel): """Ticket #160""" descr = np.dtype([('i', int), ('f', float), ('s', '|S3')]) x = np.rec.array([(1, 1.1, '1.0'), (2, 2.2, '2.0')], dtype=descr) x[0].tolist() [i for i in x[0]] def test_unicode_string_comparison(self,level=rlevel): """Ticket #190""" a = np.array('hello', np.unicode_) b = np.array('world') a == b def test_tobytes_FORTRANORDER_discontiguous(self,level=rlevel): """Fix in r2836""" # Create non-contiguous Fortran ordered array x = np.array(np.random.rand(3, 3), order='F')[:, :2] assert_array_almost_equal(x.ravel(), np.fromstring(x.tobytes())) def test_flat_assignment(self,level=rlevel): """Correct behaviour of ticket #194""" x = np.empty((3, 1)) x.flat = np.arange(3) assert_array_almost_equal(x, [[0], [1], [2]]) x.flat = np.arange(3, dtype=float) assert_array_almost_equal(x, [[0], [1], [2]]) def test_broadcast_flat_assignment(self,level=rlevel): """Ticket #194""" x = np.empty((3, 1)) def bfa(): x[:] = np.arange(3) def bfb(): x[:] = np.arange(3, dtype=float) self.assertRaises(ValueError, bfa) self.assertRaises(ValueError, bfb) def test_nonarray_assignment(self): # See also Issue gh-2870, test for non-array assignment # and equivalent unsafe casted array assignment a = np.arange(10) b = np.ones(10, dtype=bool) r = np.arange(10) def assign(a, b, c): a[b] = c assert_raises(ValueError, assign, a, b, np.nan) a[b] = np.array(np.nan) # but not this. 
assert_raises(ValueError, assign, a, r, np.nan) a[r] = np.array(np.nan) def test_unpickle_dtype_with_object(self,level=rlevel): """Implemented in r2840""" dt = np.dtype([('x', int), ('y', np.object_), ('z', 'O')]) f = BytesIO() pickle.dump(dt, f) f.seek(0) dt_ = pickle.load(f) f.close() assert_equal(dt, dt_) def test_mem_array_creation_invalid_specification(self,level=rlevel): """Ticket #196""" dt = np.dtype([('x', int), ('y', np.object_)]) # Wrong way self.assertRaises(ValueError, np.array, [1, 'object'], dt) # Correct way np.array([(1, 'object')], dt) def test_recarray_single_element(self,level=rlevel): """Ticket #202""" a = np.array([1, 2, 3], dtype=np.int32) b = a.copy() r = np.rec.array(a, shape=1, formats=['3i4'], names=['d']) assert_array_equal(a, b) assert_equal(a, r[0][0]) def test_zero_sized_array_indexing(self,level=rlevel): """Ticket #205""" tmp = np.array([]) def index_tmp(): tmp[np.array(10)] self.assertRaises(IndexError, index_tmp) def test_chararray_rstrip(self,level=rlevel): """Ticket #222""" x = np.chararray((1,), 5) x[0] = asbytes('a ') x = x.rstrip() assert_equal(x[0], asbytes('a')) def test_object_array_shape(self,level=rlevel): """Ticket #239""" assert_equal(np.array([[1, 2], 3, 4], dtype=object).shape, (3,)) assert_equal(np.array([[1, 2], [3, 4]], dtype=object).shape, (2, 2)) assert_equal(np.array([(1, 2), (3, 4)], dtype=object).shape, (2, 2)) assert_equal(np.array([], dtype=object).shape, (0,)) assert_equal(np.array([[], [], []], dtype=object).shape, (3, 0)) assert_equal(np.array([[3, 4], [5, 6], None], dtype=object).shape, (3,)) def test_mem_around(self,level=rlevel): """Ticket #243""" x = np.zeros((1,)) y = [0] decimal = 6 np.around(abs(x-y), decimal) <= 10.0**(-decimal) def test_character_array_strip(self,level=rlevel): """Ticket #246""" x = np.char.array(("x", "x ", "x ")) for c in x: assert_equal(c, "x") def test_lexsort(self,level=rlevel): """Lexsort memory error""" v = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) 
assert_equal(np.lexsort(v), 0) def test_lexsort_invalid_sequence(self): # Issue gh-4123 class BuggySequence(object): def __len__(self): return 4 def __getitem__(self, key): raise KeyError assert_raises(KeyError, np.lexsort, BuggySequence()) def test_pickle_py2_bytes_encoding(self): # Check that arrays and scalars pickled on Py2 are # unpickleable on Py3 using encoding='bytes' test_data = [ # (original, py2_pickle) (np.unicode_('\u6f2c'), asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n" "(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\n" "I0\ntp6\nbS',o\\x00\\x00'\np7\ntp8\nRp9\n.")), (np.array([9e123], dtype=np.float64), asbytes("cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\n" "p1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\n" "p7\n(S'f8'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'<'\np11\nNNNI-1\nI-1\n" "I0\ntp12\nbI00\nS'O\\x81\\xb7Z\\xaa:\\xabY'\np13\ntp14\nb.")), (np.array([(9e123,)], dtype=[('name', float)]), asbytes("cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n" "(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n" "(S'V8'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'name'\np12\ntp13\n" "(dp14\ng12\n(g7\n(S'f8'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'<'\np18\nNNNI-1\n" "I-1\nI0\ntp19\nbI0\ntp20\nsI8\nI1\nI0\ntp21\n" "bI00\nS'O\\x81\\xb7Z\\xaa:\\xabY'\np22\ntp23\nb.")), ] if sys.version_info[:2] >= (3, 4): # encoding='bytes' was added in Py3.4 for original, data in test_data: result = pickle.loads(data, encoding='bytes') assert_equal(result, original) if isinstance(result, np.ndarray) and result.dtype.names: for name in result.dtype.names: assert_(isinstance(name, str)) def test_pickle_dtype(self,level=rlevel): """Ticket #251""" pickle.dumps(np.float) def test_swap_real(self, level=rlevel): """Ticket #265""" assert_equal(np.arange(4, dtype='>c8').imag.max(), 0.0) assert_equal(np.arange(4, dtype='<c8').imag.max(), 0.0) assert_equal(np.arange(4, dtype='>c8').real.max(), 3.0) 
assert_equal(np.arange(4, dtype='<c8').real.max(), 3.0) def test_object_array_from_list(self, level=rlevel): """Ticket #270""" a = np.array([1, 'A', None]) def test_multiple_assign(self, level=rlevel): """Ticket #273""" a = np.zeros((3, 1), int) a[[1, 2]] = 1 def test_empty_array_type(self, level=rlevel): assert_equal(np.array([]).dtype, np.zeros(0).dtype) def test_void_copyswap(self, level=rlevel): dt = np.dtype([('one', '<i4'), ('two', '<i4')]) x = np.array((1, 2), dtype=dt) x = x.byteswap() assert_(x['one'] > 1 and x['two'] > 2) def test_method_args(self, level=rlevel): # Make sure methods and functions have same default axis # keyword and arguments funcs1= ['argmax', 'argmin', 'sum', ('product', 'prod'), ('sometrue', 'any'), ('alltrue', 'all'), 'cumsum', ('cumproduct', 'cumprod'), 'ptp', 'cumprod', 'prod', 'std', 'var', 'mean', 'round', 'min', 'max', 'argsort', 'sort'] funcs2 = ['compress', 'take', 'repeat'] for func in funcs1: arr = np.random.rand(8, 7) arr2 = arr.copy() if isinstance(func, tuple): func_meth = func[1] func = func[0] else: func_meth = func res1 = getattr(arr, func_meth)() res2 = getattr(np, func)(arr2) if res1 is None: res1 = arr if res1.dtype.kind in 'uib': assert_((res1 == res2).all(), func) else: assert_(abs(res1-res2).max() < 1e-8, func) for func in funcs2: arr1 = np.random.rand(8, 7) arr2 = np.random.rand(8, 7) res1 = None if func == 'compress': arr1 = arr1.ravel() res1 = getattr(arr2, func)(arr1) else: arr2 = (15*arr2).astype(int).ravel() if res1 is None: res1 = getattr(arr1, func)(arr2) res2 = getattr(np, func)(arr1, arr2) assert_(abs(res1-res2).max() < 1e-8, func) def test_mem_lexsort_strings(self, level=rlevel): """Ticket #298""" lst = ['abc', 'cde', 'fgh'] np.lexsort((lst,)) def test_fancy_index(self, level=rlevel): """Ticket #302""" x = np.array([1, 2])[np.array([0])] assert_equal(x.shape, (1,)) def test_recarray_copy(self, level=rlevel): """Ticket #312""" dt = [('x', np.int16), ('y', np.float64)] ra = np.array([(1, 2.3)], dtype=dt) 
rb = np.rec.array(ra, dtype=dt) rb['x'] = 2. assert_(ra['x'] != rb['x']) def test_rec_fromarray(self, level=rlevel): """Ticket #322""" x1 = np.array([[1, 2], [3, 4], [5, 6]]) x2 = np.array(['a', 'dd', 'xyz']) x3 = np.array([1.1, 2, 3]) np.rec.fromarrays([x1, x2, x3], formats="(2,)i4,a3,f8") def test_object_array_assign(self, level=rlevel): x = np.empty((2, 2), object) x.flat[2] = (1, 2, 3) assert_equal(x.flat[2], (1, 2, 3)) def test_ndmin_float64(self, level=rlevel): """Ticket #324""" x = np.array([1, 2, 3], dtype=np.float64) assert_equal(np.array(x, dtype=np.float32, ndmin=2).ndim, 2) assert_equal(np.array(x, dtype=np.float64, ndmin=2).ndim, 2) def test_ndmin_order(self, level=rlevel): """Issue #465 and related checks""" assert_(np.array([1, 2], order='C', ndmin=3).flags.c_contiguous) assert_(np.array([1, 2], order='F', ndmin=3).flags.f_contiguous) assert_(np.array(np.ones((2, 2), order='F'), ndmin=3).flags.f_contiguous) assert_(np.array(np.ones((2, 2), order='C'), ndmin=3).flags.c_contiguous) def test_mem_axis_minimization(self, level=rlevel): """Ticket #327""" data = np.arange(5) data = np.add.outer(data, data) def test_mem_float_imag(self, level=rlevel): """Ticket #330""" np.float64(1.0).imag def test_dtype_tuple(self, level=rlevel): """Ticket #334""" assert_(np.dtype('i4') == np.dtype(('i4', ()))) def test_dtype_posttuple(self, level=rlevel): """Ticket #335""" np.dtype([('col1', '()i4')]) def test_numeric_carray_compare(self, level=rlevel): """Ticket #341""" assert_equal(np.array(['X'], 'c'), asbytes('X')) def test_string_array_size(self, level=rlevel): """Ticket #342""" self.assertRaises(ValueError, np.array, [['X'], ['X', 'X', 'X']], '|S1') def test_dtype_repr(self, level=rlevel): """Ticket #344""" dt1=np.dtype(('uint32', 2)) dt2=np.dtype(('uint32', (2,))) assert_equal(dt1.__repr__(), dt2.__repr__()) def test_reshape_order(self, level=rlevel): """Make sure reshape order works.""" a = np.arange(6).reshape(2, 3, order='F') assert_equal(a, [[0, 2, 4], [1, 3, 
5]]) a = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) b = a[:, 1] assert_equal(b.reshape(2, 2, order='F'), [[2, 6], [4, 8]]) def test_reshape_zero_strides(self, level=rlevel): """Issue #380, test reshaping of zero strided arrays""" a = np.ones(1) a = np.lib.stride_tricks.as_strided(a, shape=(5,), strides=(0,)) assert_(a.reshape(5, 1).strides[0] == 0) def test_reshape_zero_size(self, level=rlevel): """GitHub Issue #2700, setting shape failed for 0-sized arrays""" a = np.ones((0, 2)) a.shape = (-1, 2) # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides. # With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous. @dec.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max) def test_reshape_trailing_ones_strides(self): # GitHub issue gh-2949, bad strides for trailing ones of new shape a = np.zeros(12, dtype=np.int32)[::2] # not contiguous strides_c = (16, 8, 8, 8) strides_f = (8, 24, 48, 48) assert_equal(a.reshape(3, 2, 1, 1).strides, strides_c) assert_equal(a.reshape(3, 2, 1, 1, order='F').strides, strides_f) assert_equal(np.array(0, dtype=np.int32).reshape(1, 1).strides, (4, 4)) def test_repeat_discont(self, level=rlevel): """Ticket #352""" a = np.arange(12).reshape(4, 3)[:, 2] assert_equal(a.repeat(3), [2, 2, 2, 5, 5, 5, 8, 8, 8, 11, 11, 11]) def test_array_index(self, level=rlevel): """Make sure optimization is not called in this case.""" a = np.array([1, 2, 3]) a2 = np.array([[1, 2, 3]]) assert_equal(a[np.where(a==3)], a2[np.where(a2==3)]) def test_object_argmax(self, level=rlevel): a = np.array([1, 2, 3], dtype=object) assert_(a.argmax() == 2) def test_recarray_fields(self, level=rlevel): """Ticket #372""" dt0 = np.dtype([('f0', 'i4'), ('f1', 'i4')]) dt1 = np.dtype([('f0', 'i8'), ('f1', 'i8')]) for a in [np.array([(1, 2), (3, 4)], "i4,i4"), np.rec.array([(1, 2), (3, 4)], "i4,i4"), np.rec.array([(1, 2), (3, 4)]), np.rec.fromarrays([(1, 2), (3, 4)], "i4,i4"), np.rec.fromarrays([(1, 2), (3, 4)])]: assert_(a.dtype in [dt0, dt1]) def 
test_random_shuffle(self, level=rlevel): """Ticket #374""" a = np.arange(5).reshape((5, 1)) b = a.copy() np.random.shuffle(b) assert_equal(np.sort(b, axis=0), a) @dec.skipif(not hasattr(sys, 'getrefcount')) def test_refcount_vdot(self, level=rlevel): """Changeset #3443""" _assert_valid_refcount(np.vdot) def test_startswith(self, level=rlevel): ca = np.char.array(['Hi', 'There']) assert_equal(ca.startswith('H'), [True, False]) def test_noncommutative_reduce_accumulate(self, level=rlevel): """Ticket #413""" tosubtract = np.arange(5) todivide = np.array([2.0, 0.5, 0.25]) assert_equal(np.subtract.reduce(tosubtract), -10) assert_equal(np.divide.reduce(todivide), 16.0) assert_array_equal(np.subtract.accumulate(tosubtract), np.array([0, -1, -3, -6, -10])) assert_array_equal(np.divide.accumulate(todivide), np.array([2., 4., 16.])) def test_convolve_empty(self, level=rlevel): """Convolve should raise an error for empty input array.""" self.assertRaises(ValueError, np.convolve, [], [1]) self.assertRaises(ValueError, np.convolve, [1], []) def test_multidim_byteswap(self, level=rlevel): """Ticket #449""" r=np.array([(1, (0, 1, 2))], dtype="i2,3i2") assert_array_equal(r.byteswap(), np.array([(256, (0, 256, 512))], r.dtype)) def test_string_NULL(self, level=rlevel): """Changeset 3557""" assert_equal(np.array("a\x00\x0b\x0c\x00").item(), 'a\x00\x0b\x0c') def test_junk_in_string_fields_of_recarray(self, level=rlevel): """Ticket #483""" r = np.array([[asbytes('abc')]], dtype=[('var1', '|S20')]) assert_(asbytes(r['var1'][0][0]) == asbytes('abc')) def test_take_output(self, level=rlevel): """Ensure that 'take' honours output parameter.""" x = np.arange(12).reshape((3, 4)) a = np.take(x, [0, 2], axis=1) b = np.zeros_like(a) np.take(x, [0, 2], axis=1, out=b) assert_array_equal(a, b) def test_take_object_fail(self): # Issue gh-3001 d = 123. 
a = np.array([d, 1], dtype=object) ref_d = sys.getrefcount(d) try: a.take([0, 100]) except IndexError: pass assert_(ref_d == sys.getrefcount(d)) def test_array_str_64bit(self, level=rlevel): """Ticket #501""" s = np.array([1, np.nan], dtype=np.float64) with np.errstate(all='raise'): sstr = np.array_str(s) def test_frompyfunc_endian(self, level=rlevel): """Ticket #503""" from math import radians uradians = np.frompyfunc(radians, 1, 1) big_endian = np.array([83.4, 83.5], dtype='>f8') little_endian = np.array([83.4, 83.5], dtype='<f8') assert_almost_equal(uradians(big_endian).astype(float), uradians(little_endian).astype(float)) def test_mem_string_arr(self, level=rlevel): """Ticket #514""" s = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" t = [] np.hstack((t, s )) def test_arr_transpose(self, level=rlevel): """Ticket #516""" x = np.random.rand(*(2,)*16) y = x.transpose(list(range(16))) @dec.skipif('__pypy__' in sys.builtin_module_names) def test_string_mergesort(self, level=rlevel): """Ticket #540""" x = np.array(['a']*32) assert_array_equal(x.argsort(kind='m'), np.arange(32)) def test_argmax_byteorder(self, level=rlevel): """Ticket #546""" a = np.arange(3, dtype='>f') assert_(a[a.argmax()] == a.max()) def test_rand_seed(self, level=rlevel): """Ticket #555""" for l in np.arange(4): np.random.seed(l) def test_mem_deallocation_leak(self, level=rlevel): """Ticket #562""" a = np.zeros(5, dtype=float) b = np.array(a, dtype=float) del a, b def test_mem_on_invalid_dtype(self): "Ticket #583" self.assertRaises(ValueError, np.fromiter, [['12', ''], ['13', '']], str) def test_dot_negative_stride(self, level=rlevel): """Ticket #588""" x = np.array([[1, 5, 25, 125., 625]]) y = np.array([[20.], [160.], [640.], [1280.], [1024.]]) z = y[::-1].copy() y2 = y[::-1] assert_equal(np.dot(x, z), np.dot(x, y2)) def test_object_casting(self, level=rlevel): # This used to trigger the object-type version of # the bitwise_or operation, because float64 -> object # casting succeeds def rs(): x = 
np.ones([484, 286]) y = np.zeros([484, 286]) x |= y self.assertRaises(TypeError, rs) def test_unicode_scalar(self, level=rlevel): """Ticket #600""" x = np.array(["DROND", "DROND1"], dtype="U6") el = x[1] new = pickle.loads(pickle.dumps(el)) assert_equal(new, el) def test_arange_non_native_dtype(self, level=rlevel): """Ticket #616""" for T in ('>f4', '<f4'): dt = np.dtype(T) assert_equal(np.arange(0, dtype=dt).dtype, dt) assert_equal(np.arange(0.5, dtype=dt).dtype, dt) assert_equal(np.arange(5, dtype=dt).dtype, dt) def test_bool_indexing_invalid_nr_elements(self, level=rlevel): s = np.ones(10, dtype=float) x = np.array((15,), dtype=float) def ia(x, s, v): x[(s>0)]=v # After removing deprecation, the following are ValueErrors. # This might seem odd as compared to the value error below. This # is due to the fact that the new code always uses "nonzero" logic # and the boolean special case is not taken. self.assertRaises(IndexError, ia, x, s, np.zeros(9, dtype=float)) self.assertRaises(IndexError, ia, x, s, np.zeros(11, dtype=float)) # Old special case (different code path): self.assertRaises(ValueError, ia, x.flat, s, np.zeros(9, dtype=float)) def test_mem_scalar_indexing(self, level=rlevel): """Ticket #603""" x = np.array([0], dtype=float) index = np.array(0, dtype=np.int32) x[index] def test_binary_repr_0_width(self, level=rlevel): assert_equal(np.binary_repr(0, width=3), '000') def test_fromstring(self, level=rlevel): assert_equal(np.fromstring("12:09:09", dtype=int, sep=":"), [12, 9, 9]) def test_searchsorted_variable_length(self, level=rlevel): x = np.array(['a', 'aa', 'b']) y = np.array(['d', 'e']) assert_equal(x.searchsorted(y), [3, 3]) def test_string_argsort_with_zeros(self, level=rlevel): """Check argsort for strings containing zeros.""" x = np.fromstring("\x00\x02\x00\x01", dtype="|S2") assert_array_equal(x.argsort(kind='m'), np.array([1, 0])) assert_array_equal(x.argsort(kind='q'), np.array([1, 0])) def test_string_sort_with_zeros(self, level=rlevel): 
"""Check sort for strings containing zeros.""" x = np.fromstring("\x00\x02\x00\x01", dtype="|S2") y = np.fromstring("\x00\x01\x00\x02", dtype="|S2") assert_array_equal(np.sort(x, kind="q"), y) def test_copy_detection_zero_dim(self, level=rlevel): """Ticket #658""" np.indices((0, 3, 4)).T.reshape(-1, 3) def test_flat_byteorder(self, level=rlevel): """Ticket #657""" x = np.arange(10) assert_array_equal(x.astype('>i4'), x.astype('<i4').flat[:]) assert_array_equal(x.astype('>i4').flat[:], x.astype('<i4')) def test_uint64_from_negative(self, level=rlevel) : assert_equal(np.uint64(-2), np.uint64(18446744073709551614)) def test_sign_bit(self, level=rlevel): x = np.array([0, -0.0, 0]) assert_equal(str(np.abs(x)), '[ 0. 0. 0.]') def test_flat_index_byteswap(self, level=rlevel): for dt in (np.dtype('<i4'), np.dtype('>i4')): x = np.array([-1, 0, 1], dtype=dt) assert_equal(x.flat[0].dtype, x[0].dtype) def test_copy_detection_corner_case(self, level=rlevel): """Ticket #658""" np.indices((0, 3, 4)).T.reshape(-1, 3) # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides. # With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous, # 0-sized reshape itself is tested elsewhere. @dec.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max) def test_copy_detection_corner_case2(self, level=rlevel): """Ticket #771: strides are not set correctly when reshaping 0-sized arrays""" b = np.indices((0, 3, 4)).T.reshape(-1, 3) assert_equal(b.strides, (3 * b.itemsize, b.itemsize)) def test_object_array_refcounting(self, level=rlevel): """Ticket #633""" if not hasattr(sys, 'getrefcount'): return # NB. 
this is probably CPython-specific cnt = sys.getrefcount a = object() b = object() c = object() cnt0_a = cnt(a) cnt0_b = cnt(b) cnt0_c = cnt(c) # -- 0d -> 1-d broadcast slice assignment arr = np.zeros(5, dtype=np.object_) arr[:] = a assert_equal(cnt(a), cnt0_a + 5) arr[:] = b assert_equal(cnt(a), cnt0_a) assert_equal(cnt(b), cnt0_b + 5) arr[:2] = c assert_equal(cnt(b), cnt0_b + 3) assert_equal(cnt(c), cnt0_c + 2) del arr # -- 1-d -> 2-d broadcast slice assignment arr = np.zeros((5, 2), dtype=np.object_) arr0 = np.zeros(2, dtype=np.object_) arr0[0] = a assert_(cnt(a) == cnt0_a + 1) arr0[1] = b assert_(cnt(b) == cnt0_b + 1) arr[:,:] = arr0 assert_(cnt(a) == cnt0_a + 6) assert_(cnt(b) == cnt0_b + 6) arr[:, 0] = None assert_(cnt(a) == cnt0_a + 1) del arr, arr0 # -- 2-d copying + flattening arr = np.zeros((5, 2), dtype=np.object_) arr[:, 0] = a arr[:, 1] = b assert_(cnt(a) == cnt0_a + 5) assert_(cnt(b) == cnt0_b + 5) arr2 = arr.copy() assert_(cnt(a) == cnt0_a + 10) assert_(cnt(b) == cnt0_b + 10) arr2 = arr[:, 0].copy() assert_(cnt(a) == cnt0_a + 10) assert_(cnt(b) == cnt0_b + 5) arr2 = arr.flatten() assert_(cnt(a) == cnt0_a + 10) assert_(cnt(b) == cnt0_b + 10) del arr, arr2 # -- concatenate, repeat, take, choose arr1 = np.zeros((5, 1), dtype=np.object_) arr2 = np.zeros((5, 1), dtype=np.object_) arr1[...] = a arr2[...] 
= b assert_(cnt(a) == cnt0_a + 5) assert_(cnt(b) == cnt0_b + 5) arr3 = np.concatenate((arr1, arr2)) assert_(cnt(a) == cnt0_a + 5 + 5) assert_(cnt(b) == cnt0_b + 5 + 5) arr3 = arr1.repeat(3, axis=0) assert_(cnt(a) == cnt0_a + 5 + 3*5) arr3 = arr1.take([1, 2, 3], axis=0) assert_(cnt(a) == cnt0_a + 5 + 3) x = np.array([[0], [1], [0], [1], [1]], int) arr3 = x.choose(arr1, arr2) assert_(cnt(a) == cnt0_a + 5 + 2) assert_(cnt(b) == cnt0_b + 5 + 3) def test_mem_custom_float_to_array(self, level=rlevel): """Ticket 702""" class MyFloat(object): def __float__(self): return 1.0 tmp = np.atleast_1d([MyFloat()]) tmp2 = tmp.astype(float) def test_object_array_refcount_self_assign(self, level=rlevel): """Ticket #711""" class VictimObject(object): deleted = False def __del__(self): self.deleted = True d = VictimObject() arr = np.zeros(5, dtype=np.object_) arr[:] = d del d arr[:] = arr # refcount of 'd' might hit zero here assert_(not arr[0].deleted) arr[:] = arr # trying to induce a segfault by doing it again... assert_(not arr[0].deleted) def test_mem_fromiter_invalid_dtype_string(self, level=rlevel): x = [1, 2, 3] self.assertRaises(ValueError, np.fromiter, [xi for xi in x], dtype='S') def test_reduce_big_object_array(self, level=rlevel): """Ticket #713""" oldsize = np.setbufsize(10*16) a = np.array([None]*161, object) assert_(not np.any(a)) np.setbufsize(oldsize) def test_mem_0d_array_index(self, level=rlevel): """Ticket #714""" np.zeros(10)[np.array(0)] def test_floats_from_string(self, level=rlevel): """Ticket #640, floats from string""" fsingle = np.single('1.234') fdouble = np.double('1.234') flongdouble = np.longdouble('1.234') assert_almost_equal(fsingle, 1.234) assert_almost_equal(fdouble, 1.234) assert_almost_equal(flongdouble, 1.234) def test_nonnative_endian_fill(self, level=rlevel): """ Non-native endian arrays were incorrectly filled with scalars before r5034. 
""" if sys.byteorder == 'little': dtype = np.dtype('>i4') else: dtype = np.dtype('<i4') x = np.empty([1], dtype=dtype) x.fill(1) assert_equal(x, np.array([1], dtype=dtype)) def test_dot_alignment_sse2(self, level=rlevel): """Test for ticket #551, changeset r5140""" x = np.zeros((30, 40)) y = pickle.loads(pickle.dumps(x)) # y is now typically not aligned on a 8-byte boundary z = np.ones((1, y.shape[0])) # This shouldn't cause a segmentation fault: np.dot(z, y) def test_astype_copy(self, level=rlevel): """Ticket #788, changeset r5155""" # The test data file was generated by scipy.io.savemat. # The dtype is float64, but the isbuiltin attribute is 0. data_dir = path.join(path.dirname(__file__), 'data') filename = path.join(data_dir, "astype_copy.pkl") if sys.version_info[0] >= 3: f = open(filename, 'rb') xp = pickle.load(f, encoding='latin1') f.close() else: f = open(filename) xp = pickle.load(f) f.close() xpd = xp.astype(np.float64) assert_((xp.__array_interface__['data'][0] != xpd.__array_interface__['data'][0])) def test_compress_small_type(self, level=rlevel): """Ticket #789, changeset 5217. 
""" # compress with out argument segfaulted if cannot cast safely import numpy as np a = np.array([[1, 2], [3, 4]]) b = np.zeros((2, 1), dtype = np.single) try: a.compress([True, False], axis = 1, out = b) raise AssertionError("compress with an out which cannot be " "safely casted should not return " "successfully") except TypeError: pass def test_attributes(self, level=rlevel): """Ticket #791 """ class TestArray(np.ndarray): def __new__(cls, data, info): result = np.array(data) result = result.view(cls) result.info = info return result def __array_finalize__(self, obj): self.info = getattr(obj, 'info', '') dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba') assert_(dat.info == 'jubba') dat.resize((4, 2)) assert_(dat.info == 'jubba') dat.sort() assert_(dat.info == 'jubba') dat.fill(2) assert_(dat.info == 'jubba') dat.put([2, 3, 4], [6, 3, 4]) assert_(dat.info == 'jubba') dat.setfield(4, np.int32, 0) assert_(dat.info == 'jubba') dat.setflags() assert_(dat.info == 'jubba') assert_(dat.all(1).info == 'jubba') assert_(dat.any(1).info == 'jubba') assert_(dat.argmax(1).info == 'jubba') assert_(dat.argmin(1).info == 'jubba') assert_(dat.argsort(1).info == 'jubba') assert_(dat.astype(TestArray).info == 'jubba') assert_(dat.byteswap().info == 'jubba') assert_(dat.clip(2, 7).info == 'jubba') assert_(dat.compress([0, 1, 1]).info == 'jubba') assert_(dat.conj().info == 'jubba') assert_(dat.conjugate().info == 'jubba') assert_(dat.copy().info == 'jubba') dat2 = TestArray([2, 3, 1, 0], 'jubba') choices = [[0, 1, 2, 3], [10, 11, 12, 13], [20, 21, 22, 23], [30, 31, 32, 33]] assert_(dat2.choose(choices).info == 'jubba') assert_(dat.cumprod(1).info == 'jubba') assert_(dat.cumsum(1).info == 'jubba') assert_(dat.diagonal().info == 'jubba') assert_(dat.flatten().info == 'jubba') assert_(dat.getfield(np.int32, 0).info == 'jubba') assert_(dat.imag.info == 'jubba') assert_(dat.max(1).info == 'jubba') assert_(dat.mean(1).info == 'jubba') assert_(dat.min(1).info == 'jubba') 
assert_(dat.newbyteorder().info == 'jubba') assert_(dat.nonzero()[0].info == 'jubba') assert_(dat.nonzero()[1].info == 'jubba') assert_(dat.prod(1).info == 'jubba') assert_(dat.ptp(1).info == 'jubba') assert_(dat.ravel().info == 'jubba') assert_(dat.real.info == 'jubba') assert_(dat.repeat(2).info == 'jubba') assert_(dat.reshape((2, 4)).info == 'jubba') assert_(dat.round().info == 'jubba') assert_(dat.squeeze().info == 'jubba') assert_(dat.std(1).info == 'jubba') assert_(dat.sum(1).info == 'jubba') assert_(dat.swapaxes(0, 1).info == 'jubba') assert_(dat.take([2, 3, 5]).info == 'jubba') assert_(dat.transpose().info == 'jubba') assert_(dat.T.info == 'jubba') assert_(dat.var(1).info == 'jubba') assert_(dat.view(TestArray).info == 'jubba') def test_recarray_tolist(self, level=rlevel): """Ticket #793, changeset r5215 """ # Comparisons fail for NaN, so we can't use random memory # for the test. buf = np.zeros(40, dtype=np.int8) a = np.recarray(2, formats="i4,f8,f8", names="id,x,y", buf=buf) b = a.tolist() assert_( a[0].tolist() == b[0]) assert_( a[1].tolist() == b[1]) def test_nonscalar_item_method(self): # Make sure that .item() fails graciously when it should a = np.arange(5) assert_raises(ValueError, a.item) def test_char_array_creation(self, level=rlevel): a = np.array('123', dtype='c') b = np.array(asbytes_nested(['1', '2', '3'])) assert_equal(a, b) def test_unaligned_unicode_access(self, level=rlevel) : """Ticket #825""" for i in range(1, 9) : msg = 'unicode offset: %d chars'%i t = np.dtype([('a', 'S%d'%i), ('b', 'U2')]) x = np.array([(asbytes('a'), sixu('b'))], dtype=t) if sys.version_info[0] >= 3: assert_equal(str(x), "[(b'a', 'b')]", err_msg=msg) else: assert_equal(str(x), "[('a', u'b')]", err_msg=msg) def test_sign_for_complex_nan(self, level=rlevel): """Ticket 794.""" with np.errstate(invalid='ignore'): C = np.array([-np.inf, -2+1j, 0, 2-1j, np.inf, np.nan]) have = np.sign(C) want = np.array([-1+0j, -1+0j, 0+0j, 1+0j, 1+0j, np.nan]) assert_equal(have, want) 
def test_for_equal_names(self, level=rlevel): """Ticket #674""" dt = np.dtype([('foo', float), ('bar', float)]) a = np.zeros(10, dt) b = list(a.dtype.names) b[0] = "notfoo" a.dtype.names = b assert_(a.dtype.names[0] == "notfoo") assert_(a.dtype.names[1] == "bar") def test_for_object_scalar_creation(self, level=rlevel): """Ticket #816""" a = np.object_() b = np.object_(3) b2 = np.object_(3.0) c = np.object_([4, 5]) d = np.object_([None, {}, []]) assert_(a is None) assert_(type(b) is int) assert_(type(b2) is float) assert_(type(c) is np.ndarray) assert_(c.dtype == object) assert_(d.dtype == object) def test_array_resize_method_system_error(self): """Ticket #840 - order should be an invalid keyword.""" x = np.array([[0, 1], [2, 3]]) self.assertRaises(TypeError, x.resize, (2, 2), order='C') def test_for_zero_length_in_choose(self, level=rlevel): "Ticket #882" a = np.array(1) self.assertRaises(ValueError, lambda x: x.choose([]), a) def test_array_ndmin_overflow(self): "Ticket #947." self.assertRaises(ValueError, lambda: np.array([1], ndmin=33)) def test_errobj_reference_leak(self, level=rlevel): """Ticket #955""" with np.errstate(all="ignore"): z = int(0) p = np.int32(-1) gc.collect() n_before = len(gc.get_objects()) z**p # this shouldn't leak a reference to errobj gc.collect() n_after = len(gc.get_objects()) assert_(n_before >= n_after, (n_before, n_after)) def test_void_scalar_with_titles(self, level=rlevel): """No ticket""" data = [('john', 4), ('mary', 5)] dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)] arr = np.array(data, dtype=dtype1) assert_(arr[0][0] == 'john') assert_(arr[0][1] == 4) @dec.skipif('__pypy__' in sys.builtin_module_names) def test_void_scalar_constructor(self): #Issue #1550 #Create test string data, construct void scalar from data and assert #that void scalar contains original data. 
test_string = np.array("test") test_string_void_scalar = np.core.multiarray.scalar( np.dtype(("V", test_string.dtype.itemsize)), test_string.tobytes()) assert_(test_string_void_scalar.view(test_string.dtype) == test_string) #Create record scalar, construct from data and assert that #reconstructed scalar is correct. test_record = np.ones((), "i,i") test_record_void_scalar = np.core.multiarray.scalar( test_record.dtype, test_record.tobytes()) assert_(test_record_void_scalar == test_record) #Test pickle and unpickle of void and record scalars assert_(pickle.loads(pickle.dumps(test_string)) == test_string) assert_(pickle.loads(pickle.dumps(test_record)) == test_record) def test_blasdot_uninitialized_memory(self): """Ticket #950""" for m in [0, 1, 2]: for n in [0, 1, 2]: for k in range(3): # Try to ensure that x->data contains non-zero floats x = np.array([123456789e199], dtype=np.float64) x.resize((m, 0)) y = np.array([123456789e199], dtype=np.float64) y.resize((0, n)) # `dot` should just return zero (m,n) matrix z = np.dot(x, y) assert_(np.all(z == 0)) assert_(z.shape == (m, n)) def test_zeros(self): """Regression test for #1061.""" # Set a size which cannot fit into a 64 bits signed integer sz = 2 ** 64 good = 'Maximum allowed dimension exceeded' try: np.empty(sz) except ValueError as e: if not str(e) == good: self.fail("Got msg '%s', expected '%s'" % (e, good)) except Exception as e: self.fail("Got exception of type %s instead of ValueError" % type(e)) def test_huge_arange(self): """Regression test for #1062.""" # Set a size which cannot fit into a 64 bits signed integer sz = 2 ** 64 good = 'Maximum allowed size exceeded' try: a = np.arange(sz) self.assertTrue(np.size == sz) except ValueError as e: if not str(e) == good: self.fail("Got msg '%s', expected '%s'" % (e, good)) except Exception as e: self.fail("Got exception of type %s instead of ValueError" % type(e)) def test_fromiter_bytes(self): """Ticket #1058""" a = np.fromiter(list(range(10)), dtype='b') b = 
np.fromiter(list(range(10)), dtype='B') assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))) assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))) def test_array_from_sequence_scalar_array(self): """Ticket #1078: segfaults when creating an array with a sequence of 0d arrays.""" a = np.array((np.ones(2), np.array(2))) assert_equal(a.shape, (2,)) assert_equal(a.dtype, np.dtype(object)) assert_equal(a[0], np.ones(2)) assert_equal(a[1], np.array(2)) a = np.array(((1,), np.array(1))) assert_equal(a.shape, (2,)) assert_equal(a.dtype, np.dtype(object)) assert_equal(a[0], (1,)) assert_equal(a[1], np.array(1)) def test_array_from_sequence_scalar_array2(self): """Ticket #1081: weird array with strange input...""" t = np.array([np.array([]), np.array(0, object)]) assert_equal(t.shape, (2,)) assert_equal(t.dtype, np.dtype(object)) def test_array_too_big(self): """Ticket #1080.""" assert_raises(ValueError, np.zeros, [975]*7, np.int8) assert_raises(ValueError, np.zeros, [26244]*5, np.int8) def test_dtype_keyerrors_(self): """Ticket #1106.""" dt = np.dtype([('f1', np.uint)]) assert_raises(KeyError, dt.__getitem__, "f2") assert_raises(IndexError, dt.__getitem__, 1) assert_raises(ValueError, dt.__getitem__, 0.0) def test_lexsort_buffer_length(self): """Ticket #1217, don't segfault.""" a = np.ones(100, dtype=np.int8) b = np.ones(100, dtype=np.int32) i = np.lexsort((a[::-1], b)) assert_equal(i, np.arange(100, dtype=np.int)) def test_object_array_to_fixed_string(self): """Ticket #1235.""" a = np.array(['abcdefgh', 'ijklmnop'], dtype=np.object_) b = np.array(a, dtype=(np.str_, 8)) assert_equal(a, b) c = np.array(a, dtype=(np.str_, 5)) assert_equal(c, np.array(['abcde', 'ijklm'])) d = np.array(a, dtype=(np.str_, 12)) assert_equal(a, d) e = np.empty((2, ), dtype=(np.str_, 8)) e[:] = a[:] assert_equal(a, e) def test_unicode_to_string_cast(self): """Ticket #1240.""" a = np.array( [ [sixu('abc'), sixu('\u03a3')], [sixu('asdf'), sixu('erw')] ], dtype='U') def 
fail(): b = np.array(a, 'S4') self.assertRaises(UnicodeEncodeError, fail) def test_mixed_string_unicode_array_creation(self): a = np.array(['1234', sixu('123')]) assert_(a.itemsize == 16) a = np.array([sixu('123'), '1234']) assert_(a.itemsize == 16) a = np.array(['1234', sixu('123'), '12345']) assert_(a.itemsize == 20) a = np.array([sixu('123'), '1234', sixu('12345')]) assert_(a.itemsize == 20) a = np.array([sixu('123'), '1234', sixu('1234')]) assert_(a.itemsize == 16) def test_misaligned_objects_segfault(self): """Ticket #1198 and #1267""" a1 = np.zeros((10,), dtype='O,c') a2 = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], 'S10') a1['f0'] = a2 r = repr(a1) np.argmax(a1['f0']) a1['f0'][1] = "FOO" a1['f0'] = "FOO" a3 = np.array(a1['f0'], dtype='S') np.nonzero(a1['f0']) a1.sort() a4 = copy.deepcopy(a1) def test_misaligned_scalars_segfault(self): """Ticket #1267""" s1 = np.array(('a', 'Foo'), dtype='c,O') s2 = np.array(('b', 'Bar'), dtype='c,O') s1['f1'] = s2['f1'] s1['f1'] = 'Baz' def test_misaligned_dot_product_objects(self): """Ticket #1267""" # This didn't require a fix, but it's worth testing anyway, because # it may fail if .dot stops enforcing the arrays to be BEHAVED a = np.array([[(1, 'a'), (0, 'a')], [(0, 'a'), (1, 'a')]], dtype='O,c') b = np.array([[(4, 'a'), (1, 'a')], [(2, 'a'), (2, 'a')]], dtype='O,c') np.dot(a['f0'], b['f0']) def test_byteswap_complex_scalar(self): """Ticket #1259 and gh-441""" for dtype in [np.dtype('<'+t) for t in np.typecodes['Complex']]: z = np.array([2.2-1.1j], dtype) x = z[0] # always native-endian y = x.byteswap() if x.dtype.byteorder == z.dtype.byteorder: # little-endian machine assert_equal(x, np.fromstring(y.tobytes(), dtype=dtype.newbyteorder())) else: # big-endian machine assert_equal(x, np.fromstring(y.tobytes(), dtype=dtype)) # double check real and imaginary parts: assert_equal(x.real, y.real.byteswap()) assert_equal(x.imag, y.imag.byteswap()) def test_structured_arrays_with_objects1(self): """Ticket 
#1299""" stra = 'aaaa' strb = 'bbbb' x = np.array([[(0, stra), (1, strb)]], 'i8,O') x[x.nonzero()] = x.ravel()[:1] assert_(x[0, 1] == x[0, 0]) def test_structured_arrays_with_objects2(self): """Ticket #1299 second test""" stra = 'aaaa' strb = 'bbbb' if hasattr(sys, 'getrefcount'): numb = sys.getrefcount(strb) numa = sys.getrefcount(stra) x = np.array([[(0, stra), (1, strb)]], 'i8,O') x[x.nonzero()] = x.ravel()[:1] if hasattr(sys, 'getrefcount'): assert_(sys.getrefcount(strb) == numb) assert_(sys.getrefcount(stra) == numa + 2) def test_duplicate_title_and_name(self): """Ticket #1254""" def func(): x = np.dtype([(('a', 'a'), 'i'), ('b', 'i')]) self.assertRaises(ValueError, func) def test_signed_integer_division_overflow(self): """Ticket #1317.""" def test_type(t): min = np.array([np.iinfo(t).min]) min //= -1 with np.errstate(divide="ignore"): for t in (np.int8, np.int16, np.int32, np.int64, np.int, np.long): test_type(t) def test_buffer_hashlib(self): try: from hashlib import md5 except ImportError: from md5 import new as md5 x = np.array([1, 2, 3], dtype=np.dtype('<i4')) assert_equal(md5(x).hexdigest(), '2a1dd1e1e59d0a384c26951e316cd7e6') def test_0d_string_scalar(self): # Bug #1436; the following should succeed np.asarray('x', '>c') def test_log1p_compiler_shenanigans(self): # Check if log1p is behaving on 32 bit intel systems. 
assert_(np.isfinite(np.log1p(np.exp2(-53)))) def test_fromiter_comparison(self, level=rlevel): a = np.fromiter(list(range(10)), dtype='b') b = np.fromiter(list(range(10)), dtype='B') assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))) assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))) def test_fromstring_crash(self): # Ticket #1345: the following should not cause a crash np.fromstring(asbytes('aa, aa, 1.0'), sep=',') def test_ticket_1539(self): dtypes = [x for x in np.typeDict.values() if (issubclass(x, np.number) and not issubclass(x, np.timedelta64))] a = np.array([], dtypes[0]) failures = [] # ignore complex warnings with warnings.catch_warnings(): warnings.simplefilter('ignore', np.ComplexWarning) for x in dtypes: b = a.astype(x) for y in dtypes: c = a.astype(y) try: np.dot(b, c) except TypeError as e: failures.append((x, y)) if failures: raise AssertionError("Failures: %r" % failures) def test_ticket_1538(self): x = np.finfo(np.float32) for name in 'eps epsneg max min resolution tiny'.split(): assert_equal(type(getattr(x, name)), np.float32, err_msg=name) def test_ticket_1434(self): # Check that the out= argument in var and std has an effect data = np.array(((1, 2, 3), (4, 5, 6), (7, 8, 9))) out = np.zeros((3,)) ret = data.var(axis=1, out=out) assert_(ret is out) assert_array_equal(ret, data.var(axis=1)) ret = data.std(axis=1, out=out) assert_(ret is out) assert_array_equal(ret, data.std(axis=1)) def test_complex_nan_maximum(self): cnan = complex(0, np.nan) assert_equal(np.maximum(1, cnan), cnan) def test_subclass_int_tuple_assignment(self): # ticket #1563 class Subclass(np.ndarray): def __new__(cls, i): return np.ones((i,)).view(cls) x = Subclass(5) x[(0,)] = 2 # shouldn't raise an exception assert_equal(x[0], 2) def test_ufunc_no_unnecessary_views(self): # ticket #1548 class Subclass(np.ndarray): pass x = np.array([1, 2, 3]).view(Subclass) y = np.add(x, x, x) assert_equal(id(x), id(y)) def test_take_refcount(self): # ticket 
#939 a = np.arange(16, dtype=np.float) a.shape = (4, 4) lut = np.ones((5 + 3, 4), np.float) rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype) if hasattr(sys, 'getrefcount'): c1 = sys.getrefcount(rgba) try: lut.take(a, axis=0, mode='clip', out=rgba) except TypeError: pass if hasattr(sys, 'getrefcount'): c2 = sys.getrefcount(rgba) assert_equal(c1, c2) def test_fromfile_tofile_seeks(self): # On Python 3, tofile/fromfile used to get (#1610) the Python # file handle out of sync f0 = tempfile.NamedTemporaryFile() f = f0.file f.write(np.arange(255, dtype='u1').tobytes()) f.seek(20) ret = np.fromfile(f, count=4, dtype='u1') assert_equal(ret, np.array([20, 21, 22, 23], dtype='u1')) assert_equal(f.tell(), 24) f.seek(40) np.array([1, 2, 3], dtype='u1').tofile(f) assert_equal(f.tell(), 43) f.seek(40) data = f.read(3) assert_equal(data, asbytes("\x01\x02\x03")) f.seek(80) f.read(4) data = np.fromfile(f, dtype='u1', count=4) assert_equal(data, np.array([84, 85, 86, 87], dtype='u1')) f.close() def test_complex_scalar_warning(self): for tp in [np.csingle, np.cdouble, np.clongdouble]: x = tp(1+2j) assert_warns(np.ComplexWarning, float, x) with warnings.catch_warnings(): warnings.simplefilter('ignore') assert_equal(float(x), float(x.real)) def test_complex_scalar_complex_cast(self): for tp in [np.csingle, np.cdouble, np.clongdouble]: x = tp(1+2j) assert_equal(complex(x), 1+2j) def test_complex_boolean_cast(self): """Ticket #2218""" for tp in [np.csingle, np.cdouble, np.clongdouble]: x = np.array([0, 0+0.5j, 0.5+0j], dtype=tp) assert_equal(x.astype(bool), np.array([0, 1, 1], dtype=bool)) assert_(np.any(x)) assert_(np.all(x[1:])) def test_uint_int_conversion(self): x = 2**64 - 1 assert_equal(int(np.uint64(x)), x) def test_duplicate_field_names_assign(self): ra = np.fromiter(((i*3, i*2) for i in range(10)), dtype='i8,f8') ra.dtype.names = ('f1', 'f2') rep = repr(ra) # should not cause a segmentation fault assert_raises(ValueError, setattr, ra.dtype, 'names', ('f1', 'f1')) def 
test_eq_string_and_object_array(self): # From e-mail thread "__eq__ with str and object" (Keith Goodman) a1 = np.array(['a', 'b'], dtype=object) a2 = np.array(['a', 'c']) assert_array_equal(a1 == a2, [True, False]) assert_array_equal(a2 == a1, [True, False]) def test_nonzero_byteswap(self): a = np.array([0x80000000, 0x00000080, 0], dtype=np.uint32) a.dtype = np.float32 assert_equal(a.nonzero()[0], [1]) a = a.byteswap().newbyteorder() assert_equal(a.nonzero()[0], [1]) # [0] if nonzero() ignores swap def test_find_common_type_boolean(self): # Ticket #1695 assert_(np.find_common_type([], ['?', '?']) == '?') def test_empty_mul(self): a = np.array([1.]) a[1:1] *= 2 assert_equal(a, [1.]) def test_array_side_effect(self): assert_equal(np.dtype('S10').itemsize, 10) A = np.array([['abc', 2], ['long ', '0123456789']], dtype=np.string_) # This was throwing an exception because in ctors.c, # discover_itemsize was calling PyObject_Length without checking # the return code. This failed to get the length of the number 2, # and the exception hung around until something checked # PyErr_Occurred() and returned an error. 
assert_equal(np.dtype('S10').itemsize, 10) def test_any_float(self): # all and any for floats a = np.array([0.1, 0.9]) assert_(np.any(a)) assert_(np.all(a)) def test_large_float_sum(self): a = np.arange(10000, dtype='f') assert_equal(a.sum(dtype='d'), a.astype('d').sum()) def test_ufunc_casting_out(self): a = np.array(1.0, dtype=np.float32) b = np.array(1.0, dtype=np.float64) c = np.array(1.0, dtype=np.float32) np.add(a, b, out=c) assert_equal(c, 2.0) def test_array_scalar_contiguous(self): # Array scalars are both C and Fortran contiguous assert_(np.array(1.0).flags.c_contiguous) assert_(np.array(1.0).flags.f_contiguous) assert_(np.array(np.float32(1.0)).flags.c_contiguous) assert_(np.array(np.float32(1.0)).flags.f_contiguous) def test_squeeze_contiguous(self): """Similar to GitHub issue #387""" a = np.zeros((1, 2)).squeeze() b = np.zeros((2, 2, 2), order='F')[:,:, ::2].squeeze() assert_(a.flags.c_contiguous) assert_(a.flags.f_contiguous) assert_(b.flags.f_contiguous) def test_reduce_contiguous(self): """GitHub issue #387""" a = np.add.reduce(np.zeros((2, 1, 2)), (0, 1)) b = np.add.reduce(np.zeros((2, 1, 2)), 1) assert_(a.flags.c_contiguous) assert_(a.flags.f_contiguous) assert_(b.flags.c_contiguous) def test_object_array_self_reference(self): # Object arrays with references to themselves can cause problems a = np.array(0, dtype=object) a[()] = a assert_raises(TypeError, int, a) assert_raises(TypeError, long, a) assert_raises(TypeError, float, a) assert_raises(TypeError, oct, a) assert_raises(TypeError, hex, a) # Test the same for a circular reference. b = np.array(a, dtype=object) a[()] = b assert_raises(TypeError, int, a) # Numpy has no tp_traverse currently, so circular references # cannot be detected. So resolve it: a[()] = 0 # This was causing a to become like the above a = np.array(0, dtype=object) a[...] 
+= 1 assert_equal(a, 1) def test_object_array_self_copy(self): # An object array being copied into itself DECREF'ed before INCREF'ing # causing segmentation faults (gh-3787) a = np.array(object(), dtype=object) np.copyto(a, a) assert_equal(sys.getrefcount(a[()]), 2) a[()].__class__ # will segfault if object was deleted def test_zerosize_accumulate(self): "Ticket #1733" x = np.array([[42, 0]], dtype=np.uint32) assert_equal(np.add.accumulate(x[:-1, 0]), []) def test_objectarray_setfield(self): # Setfield directly manipulates the raw array data, # so is invalid for object arrays. x = np.array([1, 2, 3], dtype=object) assert_raises(RuntimeError, x.setfield, 4, np.int32, 0) def test_setting_rank0_string(self): "Ticket #1736" s1 = asbytes("hello1") s2 = asbytes("hello2") a = np.zeros((), dtype="S10") a[()] = s1 assert_equal(a, np.array(s1)) a[()] = np.array(s2) assert_equal(a, np.array(s2)) a = np.zeros((), dtype='f4') a[()] = 3 assert_equal(a, np.array(3)) a[()] = np.array(4) assert_equal(a, np.array(4)) def test_string_astype(self): "Ticket #1748" s1 = asbytes('black') s2 = asbytes('white') s3 = asbytes('other') a = np.array([[s1], [s2], [s3]]) assert_equal(a.dtype, np.dtype('S5')) b = a.astype(np.dtype('S0')) assert_equal(b.dtype, np.dtype('S5')) def test_ticket_1756(self): """Ticket #1756 """ s = asbytes('0123456789abcdef') a = np.array([s]*5) for i in range(1, 17): a1 = np.array(a, "|S%d"%i) a2 = np.array([s[:i]]*5) assert_equal(a1, a2) def test_fields_strides(self): "Ticket #1760" r=np.fromstring('abcdefghijklmnop'*4*3, dtype='i4,(2,3)u2') assert_equal(r[0:3:2]['f1'], r['f1'][0:3:2]) assert_equal(r[0:3:2]['f1'][0], r[0:3:2][0]['f1']) assert_equal(r[0:3:2]['f1'][0][()], r[0:3:2][0]['f1'][()]) assert_equal(r[0:3:2]['f1'][0].strides, r[0:3:2][0]['f1'].strides) def test_alignment_update(self): """Check that alignment flag is updated on stride setting""" a = np.arange(10) assert_(a.flags.aligned) a.strides = 3 assert_(not a.flags.aligned) def test_ticket_1770(self): 
"Should not segfault on python 3k" import numpy as np try: a = np.zeros((1,), dtype=[('f1', 'f')]) a['f1'] = 1 a['f2'] = 1 except ValueError: pass except: raise AssertionError def test_ticket_1608(self): "x.flat shouldn't modify data" x = np.array([[1, 2], [3, 4]]).T y = np.array(x.flat) assert_equal(x, [[1, 3], [2, 4]]) def test_pickle_string_overwrite(self): import re data = np.array([1], dtype='b') blob = pickle.dumps(data, protocol=1) data = pickle.loads(blob) # Check that loads does not clobber interned strings s = re.sub("a(.)", "\x01\\1", "a_") assert_equal(s[0], "\x01") data[0] = 0xbb s = re.sub("a(.)", "\x01\\1", "a_") assert_equal(s[0], "\x01") def test_pickle_bytes_overwrite(self): if sys.version_info[0] >= 3: data = np.array([1], dtype='b') data = pickle.loads(pickle.dumps(data)) data[0] = 0xdd bytestring = "\x01 ".encode('ascii') assert_equal(bytestring[0:1], '\x01'.encode('ascii')) def test_pickle_py2_array_latin1_hack(self): # Check that unpickling hacks in Py3 that support # encoding='latin1' work correctly. # Python2 output for pickle.dumps(numpy.array([129], dtype='b')) data = asbytes("cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\n" "tp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'i1'\np8\n" "I0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nNNNI-1\nI-1\nI0\ntp12\nbI00\nS'\\x81'\n" "p13\ntp14\nb.") if sys.version_info[0] >= 3: # This should work: result = pickle.loads(data, encoding='latin1') assert_array_equal(result, np.array([129], dtype='b')) # Should not segfault: assert_raises(Exception, pickle.loads, data, encoding='koi8-r') def test_pickle_py2_scalar_latin1_hack(self): # Check that scalar unpickling hack in Py3 that supports # encoding='latin1' work correctly. # Python2 output for pickle.dumps(...) 
datas = [ # (original, python2_pickle, koi8r_validity) (np.unicode_('\u6bd2'), asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n" "(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\nI0\n" "tp6\nbS'\\xd2k\\x00\\x00'\np7\ntp8\nRp9\n."), 'invalid'), (np.float64(9e123), asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'f8'\n" "p2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI-1\nI-1\nI0\ntp6\n" "bS'O\\x81\\xb7Z\\xaa:\\xabY'\np7\ntp8\nRp9\n."), 'invalid'), (np.bytes_(asbytes('\x9c')), # different 8-bit code point in KOI8-R vs latin1 asbytes("cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'S1'\np2\n" "I0\nI1\ntp3\nRp4\n(I3\nS'|'\np5\nNNNI1\nI1\nI0\ntp6\nbS'\\x9c'\np7\n" "tp8\nRp9\n."), 'different'), ] if sys.version_info[0] >= 3: for original, data, koi8r_validity in datas: result = pickle.loads(data, encoding='latin1') assert_equal(result, original) # Decoding under non-latin1 encoding (e.g.) KOI8-R can # produce bad results, but should not segfault. if koi8r_validity == 'different': # Unicode code points happen to lie within latin1, # but are different in koi8-r, resulting to silent # bogus results result = pickle.loads(data, encoding='koi8-r') assert_(result != original) elif koi8r_validity == 'invalid': # Unicode code points outside latin1, so results # to an encoding exception assert_raises(ValueError, pickle.loads, data, encoding='koi8-r') else: raise ValueError(koi8r_validity) def test_structured_type_to_object(self): a_rec = np.array([(0, 1), (3, 2)], dtype='i4,i8') a_obj = np.empty((2,), dtype=object) a_obj[0] = (0, 1) a_obj[1] = (3, 2) # astype records -> object assert_equal(a_rec.astype(object), a_obj) # '=' records -> object b = np.empty_like(a_obj) b[...] = a_rec assert_equal(b, a_obj) # '=' object -> records b = np.empty_like(a_rec) b[...] 
= a_obj assert_equal(b, a_rec) def test_assign_obj_listoflists(self): # Ticket # 1870 # The inner list should get assigned to the object elements a = np.zeros(4, dtype=object) b = a.copy() a[0] = [1] a[1] = [2] a[2] = [3] a[3] = [4] b[...] = [[1], [2], [3], [4]] assert_equal(a, b) # The first dimension should get broadcast a = np.zeros((2, 2), dtype=object) a[...] = [[1, 2]] assert_equal(a, [[1, 2], [1, 2]]) @dec.skipif('__pypy__' in sys.builtin_module_names) def test_memoryleak(self): # Ticket #1917 - ensure that array data doesn't leak for i in range(1000): # 100MB times 1000 would give 100GB of memory usage if it leaks a = np.empty((100000000,), dtype='i1') del a @dec.skipif(not hasattr(sys, 'getrefcount')) def test_ufunc_reduce_memoryleak(self): a = np.arange(6) acnt = sys.getrefcount(a) res = np.add.reduce(a) assert_equal(sys.getrefcount(a), acnt) def test_search_sorted_invalid_arguments(self): # Ticket #2021, should not segfault. x = np.arange(0, 4, dtype='datetime64[D]') assert_raises(TypeError, x.searchsorted, 1) def test_string_truncation(self): # Ticket #1990 - Data can be truncated in creation of an array from a # mixed sequence of numeric values and strings for val in [True, 1234, 123.4, complex(1, 234)]: for tostr in [asunicode, asbytes]: b = np.array([val, tostr('xx')]) assert_equal(tostr(b[0]), tostr(val)) b = np.array([tostr('xx'), val]) assert_equal(tostr(b[1]), tostr(val)) # test also with longer strings b = np.array([val, tostr('xxxxxxxxxx')]) assert_equal(tostr(b[0]), tostr(val)) b = np.array([tostr('xxxxxxxxxx'), val]) assert_equal(tostr(b[1]), tostr(val)) def test_string_truncation_ucs2(self): # Ticket #2081. Python compiled with two byte unicode # can lead to truncation if itemsize is not properly # adjusted for Numpy's four byte unicode. 
if sys.version_info[0] >= 3: a = np.array(['abcd']) else: a = np.array([sixu('abcd')]) assert_equal(a.dtype.itemsize, 16) def test_unique_stable(self): # Ticket #2063 must always choose stable sort for argsort to # get consistent results v = np.array(([0]*5 + [1]*6 + [2]*6)*4) res = np.unique(v, return_index=True) tgt = (np.array([0, 1, 2]), np.array([ 0, 5, 11])) assert_equal(res, tgt) def test_unicode_alloc_dealloc_match(self): # Ticket #1578, the mismatch only showed up when running # python-debug for python versions >= 2.7, and then as # a core dump and error message. a = np.array(['abc'], dtype=np.unicode)[0] del a def test_refcount_error_in_clip(self): # Ticket #1588 a = np.zeros((2,), dtype='>i2').clip(min=0) x = a + a # This used to segfault: y = str(x) # Check the final string: assert_(y == "[0 0]") def test_searchsorted_wrong_dtype(self): # Ticket #2189, it used to segfault, so we check that it raises the # proper exception. a = np.array([('a', 1)], dtype='S1, int') assert_raises(TypeError, np.searchsorted, a, 1.2) # Ticket #2066, similar problem: dtype = np.format_parser(['i4', 'i4'], [], []) a = np.recarray((2, ), dtype) assert_raises(TypeError, np.searchsorted, a, 1) def test_complex64_alignment(self): # Issue gh-2668 (trac 2076), segfault on sparc due to misalignment dtt = np.complex64 arr = np.arange(10, dtype=dtt) # 2D array arr2 = np.reshape(arr, (2, 5)) # Fortran write followed by (C or F) read caused bus error data_str = arr2.tobytes('F') data_back = np.ndarray(arr2.shape, arr2.dtype, buffer=data_str, order='F') assert_array_equal(arr2, data_back) def test_structured_count_nonzero(self): arr = np.array([0, 1]).astype('i4, (2)i4')[:1] count = np.count_nonzero(arr) assert_equal(count, 0) def test_copymodule_preserves_f_contiguity(self): a = np.empty((2, 2), order='F') b = copy.copy(a) c = copy.deepcopy(a) assert_(b.flags.fortran) assert_(b.flags.f_contiguous) assert_(c.flags.fortran) assert_(c.flags.f_contiguous) def 
test_fortran_order_buffer(self): import numpy as np a = np.array([['Hello', 'Foob']], dtype='U5', order='F') arr = np.ndarray(shape=[1, 2, 5], dtype='U1', buffer=a) arr2 = np.array([[[sixu('H'), sixu('e'), sixu('l'), sixu('l'), sixu('o')], [sixu('F'), sixu('o'), sixu('o'), sixu('b'), sixu('')]]]) assert_array_equal(arr, arr2) def test_assign_from_sequence_error(self): # Ticket #4024. arr = np.array([1, 2, 3]) assert_raises(ValueError, arr.__setitem__, slice(None), [9, 9]) arr.__setitem__(slice(None), [9]) assert_equal(arr, [9, 9, 9]) def test_format_on_flex_array_element(self): # Ticket #4369. dt = np.dtype([('date', '<M8[D]'), ('val', '<f8')]) arr = np.array([('2000-01-01', 1)], dt) formatted = '{0}'.format(arr[0]) assert_equal(formatted, str(arr[0])) def test_deepcopy_on_0d_array(self): # Ticket #3311. arr = np.array(3) arr_cp = copy.deepcopy(arr) assert_equal(arr, arr_cp) assert_equal(arr.shape, arr_cp.shape) assert_equal(int(arr), int(arr_cp)) self.assertTrue(arr is not arr_cp) self.assertTrue(isinstance(arr_cp, type(arr))) def test_bool_subscript_crash(self): # gh-4494 c = np.rec.array([(1, 2, 3), (4, 5, 6)]) masked = c[np.array([True, False])] base = masked.base del masked, c base.dtype def test_richcompare_crash(self): # gh-4613 import operator as op # dummy class where __array__ throws exception class Foo(object): __array_priority__ = 1002 def __array__(self,*args,**kwargs): raise Exception() rhs = Foo() lhs = np.array(1) for f in [op.lt, op.le, op.gt, op.ge]: if sys.version_info[0] >= 3: assert_raises(TypeError, f, lhs, rhs) else: f(lhs, rhs) assert_(not op.eq(lhs, rhs)) assert_(op.ne(lhs, rhs)) def test_richcompare_scalar_and_subclass(self): # gh-4709 class Foo(np.ndarray): def __eq__(self, other): return "OK" x = np.array([1,2,3]).view(Foo) assert_equal(10 == x, "OK") assert_equal(np.int32(10) == x, "OK") assert_equal(np.array([10]) == x, "OK") if __name__ == "__main__": run_module_suite()
codeparrot/github-code-clean
"""Metrics to assess performance on classification task given classe prediction Functions named as ``*_score`` return a scalar value to maximize: the higher the better Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize: the lower the better """ # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Mathieu Blondel <mathieu@mblondel.org> # Olivier Grisel <olivier.grisel@ensta.org> # Arnaud Joly <a.joly@ulg.ac.be> # Jochen Wersdorfer <jochen@wersdoerfer.de> # Lars Buitinck <L.J.Buitinck@uva.nl> # Joel Nothman <joel.nothman@gmail.com> # Noel Dawe <noel@dawe.me> # Jatin Shah <jatindshah@gmail.com> # Saurabh Jha <saurabh.jhaa@gmail.com> # License: BSD 3 clause from __future__ import division import warnings import numpy as np from scipy.sparse import coo_matrix from scipy.sparse import csr_matrix from scipy.spatial.distance import hamming as sp_hamming from ..preprocessing import LabelBinarizer, label_binarize from ..preprocessing import LabelEncoder from ..utils import check_array from ..utils import check_consistent_length from ..utils import column_or_1d from ..utils.multiclass import unique_labels from ..utils.multiclass import type_of_target from ..utils.validation import _num_samples from ..utils.sparsefuncs import count_nonzero from ..utils.fixes import bincount from ..exceptions import UndefinedMetricWarning def _check_targets(y_true, y_pred): """Check that y_true and y_pred belong to the same classification task This converts multiclass or binary types to a common shape, and raises a ValueError for a mix of multilabel and multiclass targets, a mix of multilabel formats, for the presence of continuous-valued or multioutput targets, or for targets of different lengths. Column vectors are squeezed to 1d, while multilabel formats are returned as CSR sparse label indicators. 
Parameters ---------- y_true : array-like y_pred : array-like Returns ------- type_true : one of {'multilabel-indicator', 'multiclass', 'binary'} The type of the true target data, as output by ``utils.multiclass.type_of_target`` y_true : array or indicator matrix y_pred : array or indicator matrix """ check_consistent_length(y_true, y_pred) type_true = type_of_target(y_true) type_pred = type_of_target(y_pred) y_type = set([type_true, type_pred]) if y_type == set(["binary", "multiclass"]): y_type = set(["multiclass"]) if len(y_type) > 1: raise ValueError("Can't handle mix of {0} and {1}" "".format(type_true, type_pred)) # We can't have more than one value on y_type => The set is no more needed y_type = y_type.pop() # No metrics support "multiclass-multioutput" format if (y_type not in ["binary", "multiclass", "multilabel-indicator"]): raise ValueError("{0} is not supported".format(y_type)) if y_type in ["binary", "multiclass"]: y_true = column_or_1d(y_true) y_pred = column_or_1d(y_pred) if y_type.startswith('multilabel'): y_true = csr_matrix(y_true) y_pred = csr_matrix(y_pred) y_type = 'multilabel-indicator' return y_type, y_true, y_pred def _weighted_sum(sample_score, sample_weight, normalize=False): if normalize: return np.average(sample_score, weights=sample_weight) elif sample_weight is not None: return np.dot(sample_score, sample_weight) else: return sample_score.sum() def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None): """Accuracy classification score. In multilabel classification, this function computes subset accuracy: the set of labels predicted for a sample must *exactly* match the corresponding set of labels in y_true. Read more in the :ref:`User Guide <accuracy_score>`. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) labels. y_pred : 1d array-like, or label indicator array / sparse matrix Predicted labels, as returned by a classifier. 
normalize : bool, optional (default=True) If ``False``, return the number of correctly classified samples. Otherwise, return the fraction of correctly classified samples. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- score : float If ``normalize == True``, return the correctly classified samples (float), else it returns the number of correctly classified samples (int). The best performance is 1 with ``normalize == True`` and the number of samples with ``normalize == False``. See also -------- jaccard_similarity_score, hamming_loss, zero_one_loss Notes ----- In binary and multiclass classification, this function is equal to the ``jaccard_similarity_score`` function. Examples -------- >>> import numpy as np >>> from sklearn.metrics import accuracy_score >>> y_pred = [0, 2, 1, 3] >>> y_true = [0, 1, 2, 3] >>> accuracy_score(y_true, y_pred) 0.5 >>> accuracy_score(y_true, y_pred, normalize=False) 2 In the multilabel case with binary label indicators: >>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2))) 0.5 """ # Compute accuracy for each possible representation y_type, y_true, y_pred = _check_targets(y_true, y_pred) if y_type.startswith('multilabel'): differing_labels = count_nonzero(y_true - y_pred, axis=1) score = differing_labels == 0 else: score = y_true == y_pred return _weighted_sum(score, sample_weight, normalize) def confusion_matrix(y_true, y_pred, labels=None): """Compute confusion matrix to evaluate the accuracy of a classification By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}` is equal to the number of observations known to be in group :math:`i` but predicted to be in group :math:`j`. Read more in the :ref:`User Guide <confusion_matrix>`. Parameters ---------- y_true : array, shape = [n_samples] Ground truth (correct) target values. y_pred : array, shape = [n_samples] Estimated targets as returned by a classifier. 
labels : array, shape = [n_classes], optional List of labels to index the matrix. This may be used to reorder or select a subset of labels. If none is given, those that appear at least once in ``y_true`` or ``y_pred`` are used in sorted order. Returns ------- C : array, shape = [n_classes, n_classes] Confusion matrix References ---------- .. [1] `Wikipedia entry for the Confusion matrix <http://en.wikipedia.org/wiki/Confusion_matrix>`_ Examples -------- >>> from sklearn.metrics import confusion_matrix >>> y_true = [2, 0, 2, 2, 0, 1] >>> y_pred = [0, 0, 2, 2, 0, 2] >>> confusion_matrix(y_true, y_pred) array([[2, 0, 0], [0, 0, 1], [1, 0, 2]]) >>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"] >>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"] >>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"]) array([[2, 0, 0], [0, 0, 1], [1, 0, 2]]) """ y_type, y_true, y_pred = _check_targets(y_true, y_pred) if y_type not in ("binary", "multiclass"): raise ValueError("%s is not supported" % y_type) if labels is None: labels = unique_labels(y_true, y_pred) else: labels = np.asarray(labels) n_labels = labels.size label_to_ind = dict((y, x) for x, y in enumerate(labels)) # convert yt, yp into index y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred]) y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true]) # intersect y_pred, y_true with labels, eliminate items not in labels ind = np.logical_and(y_pred < n_labels, y_true < n_labels) y_pred = y_pred[ind] y_true = y_true[ind] CM = coo_matrix((np.ones(y_true.shape[0], dtype=np.int), (y_true, y_pred)), shape=(n_labels, n_labels) ).toarray() return CM def cohen_kappa_score(y1, y2, labels=None): """Cohen's kappa: a statistic that measures inter-annotator agreement. This function computes Cohen's kappa [1], a score that expresses the level of agreement between two annotators on a classification problem. It is defined as .. 
math:: \kappa = (p_o - p_e) / (1 - p_e) where :math:`p_o` is the empirical probability of agreement on the label assigned to any sample (the observed agreement ratio), and :math:`p_e` is the expected agreement when both annotators assign labels randomly. :math:`p_e` is estimated using a per-annotator empirical prior over the class labels [2]. Parameters ---------- y1 : array, shape = [n_samples] Labels assigned by the first annotator. y2 : array, shape = [n_samples] Labels assigned by the second annotator. The kappa statistic is symmetric, so swapping ``y1`` and ``y2`` doesn't change the value. labels : array, shape = [n_classes], optional List of labels to index the matrix. This may be used to select a subset of labels. If None, all labels that appear at least once in ``y1`` or ``y2`` are used. Returns ------- kappa : float The kappa statistic, which is a number between -1 and 1. The maximum value means complete agreement; zero or lower means chance agreement. References ---------- .. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales". Educational and Psychological Measurement 20(1):37-46. doi:10.1177/001316446002000104. .. [2] R. Artstein and M. Poesio (2008). "Inter-coder agreement for computational linguistics". Computational Linguistic 34(4):555-596. """ confusion = confusion_matrix(y1, y2, labels=labels) P = confusion / float(confusion.sum()) p_observed = np.trace(P) p_expected = np.dot(P.sum(axis=0), P.sum(axis=1)) return (p_observed - p_expected) / (1 - p_expected) def jaccard_similarity_score(y_true, y_pred, normalize=True, sample_weight=None): """Jaccard similarity coefficient score The Jaccard index [1], or Jaccard similarity coefficient, defined as the size of the intersection divided by the size of the union of two label sets, is used to compare set of predicted labels for a sample to the corresponding set of labels in ``y_true``. Read more in the :ref:`User Guide <jaccard_similarity_score>`. 
Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) labels. y_pred : 1d array-like, or label indicator array / sparse matrix Predicted labels, as returned by a classifier. normalize : bool, optional (default=True) If ``False``, return the sum of the Jaccard similarity coefficient over the sample set. Otherwise, return the average of Jaccard similarity coefficient. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- score : float If ``normalize == True``, return the average Jaccard similarity coefficient, else it returns the sum of the Jaccard similarity coefficient over the sample set. The best performance is 1 with ``normalize == True`` and the number of samples with ``normalize == False``. See also -------- accuracy_score, hamming_loss, zero_one_loss Notes ----- In binary and multiclass classification, this function is equivalent to the ``accuracy_score``. It differs in the multilabel classification problem. References ---------- .. 
[1] `Wikipedia entry for the Jaccard index <http://en.wikipedia.org/wiki/Jaccard_index>`_ Examples -------- >>> import numpy as np >>> from sklearn.metrics import jaccard_similarity_score >>> y_pred = [0, 2, 1, 3] >>> y_true = [0, 1, 2, 3] >>> jaccard_similarity_score(y_true, y_pred) 0.5 >>> jaccard_similarity_score(y_true, y_pred, normalize=False) 2 In the multilabel case with binary label indicators: >>> jaccard_similarity_score(np.array([[0, 1], [1, 1]]),\ np.ones((2, 2))) 0.75 """ # Compute accuracy for each possible representation y_type, y_true, y_pred = _check_targets(y_true, y_pred) if y_type.startswith('multilabel'): with np.errstate(divide='ignore', invalid='ignore'): # oddly, we may get an "invalid" rather than a "divide" error here pred_or_true = count_nonzero(y_true + y_pred, axis=1) pred_and_true = count_nonzero(y_true.multiply(y_pred), axis=1) score = pred_and_true / pred_or_true # If there is no label, it results in a Nan instead, we set # the jaccard to 1: lim_{x->0} x/x = 1 # Note with py2.6 and np 1.3: we can't check safely for nan. score[pred_or_true == 0.0] = 1.0 else: score = y_true == y_pred return _weighted_sum(score, sample_weight, normalize) def matthews_corrcoef(y_true, y_pred): """Compute the Matthews correlation coefficient (MCC) for binary classes The Matthews correlation coefficient is used in machine learning as a measure of the quality of binary (two-class) classifications. It takes into account true and false positives and negatives and is generally regarded as a balanced measure which can be used even if the classes are of very different sizes. The MCC is in essence a correlation coefficient value between -1 and +1. A coefficient of +1 represents a perfect prediction, 0 an average random prediction and -1 an inverse prediction. The statistic is also known as the phi coefficient. [source: Wikipedia] Only in the binary case does this relate to information about true and false positives and negatives. See references below. 
Read more in the :ref:`User Guide <matthews_corrcoef>`. Parameters ---------- y_true : array, shape = [n_samples] Ground truth (correct) target values. y_pred : array, shape = [n_samples] Estimated targets as returned by a classifier. Returns ------- mcc : float The Matthews correlation coefficient (+1 represents a perfect prediction, 0 an average random prediction and -1 and inverse prediction). References ---------- .. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the accuracy of prediction algorithms for classification: an overview <http://dx.doi.org/10.1093/bioinformatics/16.5.412>`_ .. [2] `Wikipedia entry for the Matthews Correlation Coefficient <http://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_ Examples -------- >>> from sklearn.metrics import matthews_corrcoef >>> y_true = [+1, +1, +1, -1] >>> y_pred = [+1, -1, +1, +1] >>> matthews_corrcoef(y_true, y_pred) # doctest: +ELLIPSIS -0.33... """ y_type, y_true, y_pred = _check_targets(y_true, y_pred) if y_type != "binary": raise ValueError("%s is not supported" % y_type) lb = LabelEncoder() lb.fit(np.hstack([y_true, y_pred])) y_true = lb.transform(y_true) y_pred = lb.transform(y_pred) with np.errstate(invalid='ignore'): mcc = np.corrcoef(y_true, y_pred)[0, 1] if np.isnan(mcc): return 0. else: return mcc def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None): """Zero-one classification loss. If normalize is ``True``, return the fraction of misclassifications (float), else it returns the number of misclassifications (int). The best performance is 0. Read more in the :ref:`User Guide <zero_one_loss>`. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) labels. y_pred : 1d array-like, or label indicator array / sparse matrix Predicted labels, as returned by a classifier. normalize : bool, optional (default=True) If ``False``, return the number of misclassifications. 
Otherwise, return the fraction of misclassifications. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- loss : float or int, If ``normalize == True``, return the fraction of misclassifications (float), else it returns the number of misclassifications (int). Notes ----- In multilabel classification, the zero_one_loss function corresponds to the subset zero-one loss: for each sample, the entire set of labels must be correctly predicted, otherwise the loss for that sample is equal to one. See also -------- accuracy_score, hamming_loss, jaccard_similarity_score Examples -------- >>> from sklearn.metrics import zero_one_loss >>> y_pred = [1, 2, 3, 4] >>> y_true = [2, 2, 3, 4] >>> zero_one_loss(y_true, y_pred) 0.25 >>> zero_one_loss(y_true, y_pred, normalize=False) 1 In the multilabel case with binary label indicators: >>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2))) 0.5 """ score = accuracy_score(y_true, y_pred, normalize=normalize, sample_weight=sample_weight) if normalize: return 1 - score else: if sample_weight is not None: n_samples = np.sum(sample_weight) else: n_samples = _num_samples(y_true) return n_samples - score def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary', sample_weight=None): """Compute the F1 score, also known as balanced F-score or F-measure The F1 score can be interpreted as a weighted average of the precision and recall, where an F1 score reaches its best value at 1 and worst score at 0. The relative contribution of precision and recall to the F1 score are equal. The formula for the F1 score is:: F1 = 2 * (precision * recall) / (precision + recall) In the multi-class and multi-label case, this is the weighted average of the F1 score of each class. Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) target values. 
y_pred : 1d array-like, or label indicator array / sparse matrix Estimated targets as returned by a classifier. labels : list, optional The set of labels to include when ``average != 'binary'``, and their order if ``average is None``. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in ``y_true`` and ``y_pred`` are used in sorted order. .. versionchanged:: 0.17 parameter *labels* improved for multiclass problem. pos_label : str or int, 1 by default The class to report if ``average='binary'``. Until version 0.18 it is necessary to set ``pos_label=None`` if seeking to use another averaging method over binary targets. average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \ 'weighted'] This parameter is required for multiclass/multilabel targets. If ``None``, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data: ``'binary'``: Only report results for the class specified by ``pos_label``. This is applicable only if targets (``y_{true,pred}``) are binary. ``'micro'``: Calculate metrics globally by counting the total true positives, false negatives and false positives. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ``'weighted'``: Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). This alters 'macro' to account for label imbalance; it can result in an F-score that is not between precision and recall. ``'samples'``: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification where this differs from :func:`accuracy_score`). 
Note that if ``pos_label`` is given in binary classification with `average != 'binary'`, only that positive class is reported. This behavior is deprecated and will change in version 0.18. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- f1_score : float or array of float, shape = [n_unique_labels] F1 score of the positive class in binary classification or weighted average of the F1 scores of each class for the multiclass task. References ---------- .. [1] `Wikipedia entry for the F1-score <http://en.wikipedia.org/wiki/F1_score>`_ Examples -------- >>> from sklearn.metrics import f1_score >>> y_true = [0, 1, 2, 0, 1, 2] >>> y_pred = [0, 2, 1, 0, 0, 1] >>> f1_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS 0.26... >>> f1_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS 0.33... >>> f1_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS 0.26... >>> f1_score(y_true, y_pred, average=None) array([ 0.8, 0. , 0. ]) """ return fbeta_score(y_true, y_pred, 1, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight) def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1, average='binary', sample_weight=None): """Compute the F-beta score The F-beta score is the weighted harmonic mean of precision and recall, reaching its optimal value at 1 and its worst value at 0. The `beta` parameter determines the weight of precision in the combined score. ``beta < 1`` lends more weight to precision, while ``beta > 1`` favors recall (``beta -> 0`` considers only precision, ``beta -> inf`` only recall). Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) target values. y_pred : 1d array-like, or label indicator array / sparse matrix Estimated targets as returned by a classifier. beta: float Weight of precision in harmonic mean. 
labels : list, optional The set of labels to include when ``average != 'binary'``, and their order if ``average is None``. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in ``y_true`` and ``y_pred`` are used in sorted order. .. versionchanged:: 0.17 parameter *labels* improved for multiclass problem. pos_label : str or int, 1 by default The class to report if ``average='binary'``. Until version 0.18 it is necessary to set ``pos_label=None`` if seeking to use another averaging method over binary targets. average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \ 'weighted'] This parameter is required for multiclass/multilabel targets. If ``None``, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data: ``'binary'``: Only report results for the class specified by ``pos_label``. This is applicable only if targets (``y_{true,pred}``) are binary. ``'micro'``: Calculate metrics globally by counting the total true positives, false negatives and false positives. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ``'weighted'``: Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). This alters 'macro' to account for label imbalance; it can result in an F-score that is not between precision and recall. ``'samples'``: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification where this differs from :func:`accuracy_score`). Note that if ``pos_label`` is given in binary classification with `average != 'binary'`, only that positive class is reported. 
This behavior is deprecated and will change in version 0.18. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- fbeta_score : float (if average is not None) or array of float, shape =\ [n_unique_labels] F-beta score of the positive class in binary classification or weighted average of the F-beta score of each class for the multiclass task. References ---------- .. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern Information Retrieval. Addison Wesley, pp. 327-328. .. [2] `Wikipedia entry for the F1-score <http://en.wikipedia.org/wiki/F1_score>`_ Examples -------- >>> from sklearn.metrics import fbeta_score >>> y_true = [0, 1, 2, 0, 1, 2] >>> y_pred = [0, 2, 1, 0, 0, 1] >>> fbeta_score(y_true, y_pred, average='macro', beta=0.5) ... # doctest: +ELLIPSIS 0.23... >>> fbeta_score(y_true, y_pred, average='micro', beta=0.5) ... # doctest: +ELLIPSIS 0.33... >>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5) ... # doctest: +ELLIPSIS 0.23... >>> fbeta_score(y_true, y_pred, average=None, beta=0.5) ... # doctest: +ELLIPSIS array([ 0.71..., 0. , 0. ]) """ _, _, f, _ = precision_recall_fscore_support(y_true, y_pred, beta=beta, labels=labels, pos_label=pos_label, average=average, warn_for=('f-score',), sample_weight=sample_weight) return f def _prf_divide(numerator, denominator, metric, modifier, average, warn_for): """Performs division and handles divide-by-zero. On zero-division, sets the corresponding result elements to zero and raises a warning. The metric, modifier and average arguments are used only for determining an appropriate warning. """ result = numerator / denominator mask = denominator == 0.0 if not np.any(mask): return result # remove infs result[mask] = 0.0 # build appropriate warning # E.g. 
"Precision and F-score are ill-defined and being set to 0.0 in # labels with no predicted samples" axis0 = 'sample' axis1 = 'label' if average == 'samples': axis0, axis1 = axis1, axis0 if metric in warn_for and 'f-score' in warn_for: msg_start = '{0} and F-score are'.format(metric.title()) elif metric in warn_for: msg_start = '{0} is'.format(metric.title()) elif 'f-score' in warn_for: msg_start = 'F-score is' else: return result msg = ('{0} ill-defined and being set to 0.0 {{0}} ' 'no {1} {2}s.'.format(msg_start, modifier, axis0)) if len(mask) == 1: msg = msg.format('due to') else: msg = msg.format('in {0}s with'.format(axis1)) warnings.warn(msg, UndefinedMetricWarning, stacklevel=2) return result def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None, pos_label=1, average=None, warn_for=('precision', 'recall', 'f-score'), sample_weight=None): """Compute precision, recall, F-measure and support for each class The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of true positives and ``fp`` the number of false positives. The precision is intuitively the ability of the classifier not to label as positive a sample that is negative. The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of true positives and ``fn`` the number of false negatives. The recall is intuitively the ability of the classifier to find all the positive samples. The F-beta score can be interpreted as a weighted harmonic mean of the precision and recall, where an F-beta score reaches its best value at 1 and worst score at 0. The F-beta score weights recall more than precision by a factor of ``beta``. ``beta == 1.0`` means recall and precision are equally important. The support is the number of occurrences of each class in ``y_true``. If ``pos_label is None`` and in binary classification, this function returns the average precision, recall and F-measure if ``average`` is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``. 
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) target values. y_pred : 1d array-like, or label indicator array / sparse matrix Estimated targets as returned by a classifier. beta : float, 1.0 by default The strength of recall versus precision in the F-score. labels : list, optional The set of labels to include when ``average != 'binary'``, and their order if ``average is None``. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in ``y_true`` and ``y_pred`` are used in sorted order. pos_label : str or int, 1 by default The class to report if ``average='binary'``. Until version 0.18 it is necessary to set ``pos_label=None`` if seeking to use another averaging method over binary targets. average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \ 'weighted'] If ``None``, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data: ``'binary'``: Only report results for the class specified by ``pos_label``. This is applicable only if targets (``y_{true,pred}``) are binary. ``'micro'``: Calculate metrics globally by counting the total true positives, false negatives and false positives. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ``'weighted'``: Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). This alters 'macro' to account for label imbalance; it can result in an F-score that is not between precision and recall. 
``'samples'``: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification where this differs from :func:`accuracy_score`). Note that if ``pos_label`` is given in binary classification with `average != 'binary'`, only that positive class is reported. This behavior is deprecated and will change in version 0.18. warn_for : tuple or set, for internal use This determines which warnings will be made in the case that this function is being used to return only one of its metrics. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- precision: float (if average is not None) or array of float, shape =\ [n_unique_labels] recall: float (if average is not None) or array of float, , shape =\ [n_unique_labels] fbeta_score: float (if average is not None) or array of float, shape =\ [n_unique_labels] support: int (if average is not None) or array of int, shape =\ [n_unique_labels] The number of occurrences of each label in ``y_true``. References ---------- .. [1] `Wikipedia entry for the Precision and recall <http://en.wikipedia.org/wiki/Precision_and_recall>`_ .. [2] `Wikipedia entry for the F1-score <http://en.wikipedia.org/wiki/F1_score>`_ .. [3] `Discriminative Methods for Multi-labeled Classification Advances in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu Godbole, Sunita Sarawagi <http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>` Examples -------- >>> from sklearn.metrics import precision_recall_fscore_support >>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig']) >>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog']) >>> precision_recall_fscore_support(y_true, y_pred, average='macro') ... # doctest: +ELLIPSIS (0.22..., 0.33..., 0.26..., None) >>> precision_recall_fscore_support(y_true, y_pred, average='micro') ... 
# doctest: +ELLIPSIS (0.33..., 0.33..., 0.33..., None) >>> precision_recall_fscore_support(y_true, y_pred, average='weighted') ... # doctest: +ELLIPSIS (0.22..., 0.33..., 0.26..., None) It is possible to compute per-label precisions, recalls, F1-scores and supports instead of averaging: >>> precision_recall_fscore_support(y_true, y_pred, average=None, ... labels=['pig', 'dog', 'cat']) ... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE (array([ 0. , 0. , 0.66...]), array([ 0., 0., 1.]), array([ 0. , 0. , 0.8]), array([2, 2, 2])) """ average_options = (None, 'micro', 'macro', 'weighted', 'samples') if average not in average_options and average != 'binary': raise ValueError('average has to be one of ' + str(average_options)) if beta <= 0: raise ValueError("beta should be >0 in the F-beta score") y_type, y_true, y_pred = _check_targets(y_true, y_pred) present_labels = unique_labels(y_true, y_pred) if average == 'binary' and (y_type != 'binary' or pos_label is None): warnings.warn('The default `weighted` averaging is deprecated, ' 'and from version 0.18, use of precision, recall or ' 'F-score with multiclass or multilabel data or ' 'pos_label=None will result in an exception. ' 'Please set an explicit value for `average`, one of ' '%s. In cross validation use, for instance, ' 'scoring="f1_weighted" instead of scoring="f1".' % str(average_options), DeprecationWarning, stacklevel=2) average = 'weighted' if y_type == 'binary' and pos_label is not None and average is not None: if average != 'binary': warnings.warn('From version 0.18, binary input will not be ' 'handled specially when using averaged ' 'precision/recall/F-score. 
' 'Please use average=\'binary\' to report only the ' 'positive class performance.', DeprecationWarning) if labels is None or len(labels) <= 2: if pos_label not in present_labels: if len(present_labels) < 2: # Only negative labels return (0., 0., 0., 0) else: raise ValueError("pos_label=%r is not a valid label: %r" % (pos_label, present_labels)) labels = [pos_label] if labels is None: labels = present_labels n_labels = None else: n_labels = len(labels) labels = np.hstack([labels, np.setdiff1d(present_labels, labels, assume_unique=True)]) # Calculate tp_sum, pred_sum, true_sum ### if y_type.startswith('multilabel'): sum_axis = 1 if average == 'samples' else 0 # All labels are index integers for multilabel. # Select labels: if not np.all(labels == present_labels): if np.max(labels) > np.max(present_labels): raise ValueError('All labels must be in [0, n labels). ' 'Got %d > %d' % (np.max(labels), np.max(present_labels))) if np.min(labels) < 0: raise ValueError('All labels must be in [0, n labels). ' 'Got %d < 0' % np.min(labels)) y_true = y_true[:, labels[:n_labels]] y_pred = y_pred[:, labels[:n_labels]] # calculate weighted counts true_and_pred = y_true.multiply(y_pred) tp_sum = count_nonzero(true_and_pred, axis=sum_axis, sample_weight=sample_weight) pred_sum = count_nonzero(y_pred, axis=sum_axis, sample_weight=sample_weight) true_sum = count_nonzero(y_true, axis=sum_axis, sample_weight=sample_weight) elif average == 'samples': raise ValueError("Sample-based precision, recall, fscore is " "not meaningful outside multilabel " "classification. 
See the accuracy_score instead.") else: le = LabelEncoder() le.fit(labels) y_true = le.transform(y_true) y_pred = le.transform(y_pred) sorted_labels = le.classes_ # labels are now from 0 to len(labels) - 1 -> use bincount tp = y_true == y_pred tp_bins = y_true[tp] if sample_weight is not None: tp_bins_weights = np.asarray(sample_weight)[tp] else: tp_bins_weights = None if len(tp_bins): tp_sum = bincount(tp_bins, weights=tp_bins_weights, minlength=len(labels)) else: # Pathological case true_sum = pred_sum = tp_sum = np.zeros(len(labels)) if len(y_pred): pred_sum = bincount(y_pred, weights=sample_weight, minlength=len(labels)) if len(y_true): true_sum = bincount(y_true, weights=sample_weight, minlength=len(labels)) # Retain only selected labels indices = np.searchsorted(sorted_labels, labels[:n_labels]) tp_sum = tp_sum[indices] true_sum = true_sum[indices] pred_sum = pred_sum[indices] if average == 'micro': tp_sum = np.array([tp_sum.sum()]) pred_sum = np.array([pred_sum.sum()]) true_sum = np.array([true_sum.sum()]) # Finally, we have all our sufficient statistics. Divide! # beta2 = beta ** 2 with np.errstate(divide='ignore', invalid='ignore'): # Divide, and on zero-division, set scores to 0 and warn: # Oddly, we may get an "invalid" rather than a "divide" error # here. 
    precision = _prf_divide(tp_sum, pred_sum,
                            'precision', 'predicted', average, warn_for)
    recall = _prf_divide(tp_sum, true_sum,
                         'recall', 'true', average, warn_for)
    # Don't need to warn for F: either P or R warned, or tp == 0 where pos
    # and true are nonzero, in which case, F is well-defined and zero
    f_score = ((1 + beta2) * precision * recall /
               (beta2 * precision + recall))
    f_score[tp_sum == 0] = 0.0

    # Average the results
    if average == 'weighted':
        weights = true_sum
        if weights.sum() == 0:
            # No true samples for any label: all metrics degenerate to zero,
            # support is meaningless.
            return 0, 0, 0, None
    elif average == 'samples':
        weights = sample_weight
    else:
        weights = None

    if average is not None:
        # 'binary' averaging must have been reduced to a single class by now.
        assert average != 'binary' or len(precision) == 1
        precision = np.average(precision, weights=weights)
        recall = np.average(recall, weights=weights)
        f_score = np.average(f_score, weights=weights)
        true_sum = None  # return no support

    return precision, recall, f_score, true_sum


def precision_score(y_true, y_pred, labels=None, pos_label=1,
                    average='binary', sample_weight=None):
    """Compute the precision.

    The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number
    of true positives and ``fp`` the number of false positives: intuitively,
    the ability of the classifier not to label as positive a sample that is
    negative.  The best value is 1 and the worst value is 0.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.

    labels : list, optional
        The set of labels to include when ``average != 'binary'``, and their
        order if ``average is None``.  For multilabel targets, labels are
        column indices.  By default, all labels in ``y_true`` and ``y_pred``
        are used in sorted order.

    pos_label : str or int, 1 by default
        The class to report if ``average='binary'``.

    average : string, [None, 'binary' (default), 'micro', 'macro', \
'samples', 'weighted']
        Averaging strategy for multiclass/multilabel targets.  ``None``
        returns the per-class scores; ``'binary'`` reports only the class
        given by ``pos_label``; ``'micro'`` counts total tp/fp/fn globally;
        ``'macro'`` is the unweighted mean over labels; ``'weighted'``
        weights each label by its support; ``'samples'`` averages per
        instance (multilabel only).

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    precision : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
    """
    # Delegate to the shared P/R/F implementation; only warn about
    # ill-defined precision, not recall or F.
    p, _, _, _ = precision_recall_fscore_support(y_true, y_pred,
                                                 labels=labels,
                                                 pos_label=pos_label,
                                                 average=average,
                                                 warn_for=('precision',),
                                                 sample_weight=sample_weight)
    return p


def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
                 sample_weight=None):
    """Compute the recall.

    The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
    true positives and ``fn`` the number of false negatives: intuitively, the
    ability of the classifier to find all the positive samples.  The best
    value is 1 and the worst value is 0.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.

    labels : list, optional
        The set of labels to include when ``average != 'binary'``, and their
        order if ``average is None``.  For multilabel targets, labels are
        column indices.  By default, all labels in ``y_true`` and ``y_pred``
        are used in sorted order.

    pos_label : str or int, 1 by default
        The class to report if ``average='binary'``.

    average : string, [None, 'binary' (default), 'micro', 'macro', \
'samples', 'weighted']
        Averaging strategy for multiclass/multilabel targets; see
        :func:`precision_score` for the meaning of each value.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    recall : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
    """
    # Delegate to the shared P/R/F implementation; only warn about
    # ill-defined recall, not precision or F.
    _, r, _, _ = precision_recall_fscore_support(y_true, y_pred,
                                                 labels=labels,
                                                 pos_label=pos_label,
                                                 average=average,
                                                 warn_for=('recall',),
                                                 sample_weight=sample_weight)
    return r


def classification_report(y_true, y_pred, labels=None, target_names=None,
                          sample_weight=None, digits=2):
    """Build a text report showing the main classification metrics.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.

    labels : array, shape = [n_labels]
        Optional list of label indices to include in the report.

    target_names : list of strings
        Optional display names matching the labels (same order).
        NOTE(review): assumed to be at least as long as ``labels`` —
        a shorter list raises IndexError below; confirm with callers.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    digits : int
        Number of digits for formatting output floating point values.

    Returns
    -------
    report : string
        Text summary of the precision, recall, F1 score for each class,
        plus a support-weighted 'avg / total' row.
    """
    if labels is None:
        labels = unique_labels(y_true, y_pred)
    else:
        labels = np.asarray(labels)

    last_line_heading = 'avg / total'

    if target_names is None:
        width = len(last_line_heading)
        target_names = ['%s' % l for l in labels]
    else:
        width = max(len(cn) for cn in target_names)
        width = max(width, len(last_line_heading), digits)

    headers = ["precision", "recall", "f1-score", "support"]
    fmt = '%% %ds' % width  # first column: class name
    fmt += '  '
    fmt += ' '.join(['% 9s' for _ in headers])
    fmt += '\n'

    headers = [""] + headers
    report = fmt % tuple(headers)
    report += '\n'

    # Per-class metrics (average=None keeps one entry per label).
    p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
                                                  labels=labels,
                                                  average=None,
                                                  sample_weight=sample_weight)

    for i, label in enumerate(labels):
        values = [target_names[i]]
        for v in (p[i], r[i], f1[i]):
            values += ["{0:0.{1}f}".format(v, digits)]
        values += ["{0}".format(s[i])]
        report += fmt % tuple(values)

    report += '\n'

    # compute averages (weighted by support)
    values = [last_line_heading]
    for v in (np.average(p, weights=s),
              np.average(r, weights=s),
              np.average(f1, weights=s)):
        values += ["{0:0.{1}f}".format(v, digits)]
    values += ['{0}'.format(np.sum(s))]
    report += fmt % tuple(values)
    return report


def hamming_loss(y_true, y_pred, classes=None, sample_weight=None):
    """Compute the average Hamming loss.

    The Hamming loss is the fraction of labels that are incorrectly
    predicted.  For binary/multiclass input it equals the subset
    ``zero_one_loss``; for multilabel input it penalizes individual labels
    and is upper-bounded by the subset zero-one loss.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Predicted labels, as returned by a classifier.

    classes : array, shape = [n_labels], optional
        Integer array of labels.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    loss : float or int
        Average Hamming loss between ``y_true`` and ``y_pred``.

    See Also
    --------
    accuracy_score, jaccard_similarity_score, zero_one_loss
    """
    y_type, y_true, y_pred = _check_targets(y_true, y_pred)

    if classes is None:
        classes = unique_labels(y_true, y_pred)
    else:
        classes = np.asarray(classes)

    if sample_weight is None:
        weight_average = 1.
    else:
        weight_average = np.mean(sample_weight)

    if y_type.startswith('multilabel'):
        # Count mismatching label cells, then normalize by the total number
        # of cells (samples x labels), de-biased by the mean sample weight.
        n_differences = count_nonzero(y_true - y_pred,
                                      sample_weight=sample_weight)
        return (n_differences /
                (y_true.shape[0] * len(classes) * weight_average))

    elif y_type in ["binary", "multiclass"]:
        return _weighted_sum(y_true != y_pred, sample_weight, normalize=True)
    else:
        raise ValueError("{0} is not supported".format(y_type))


def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None):
    """Log loss, aka logistic loss or cross-entropy loss.

    The negative log-likelihood of the true labels given a probabilistic
    classifier's predictions.  For a single sample with true label yt in
    {0,1} and estimated probability yp that yt = 1, the log loss is
    ``-(yt log(yp) + (1 - yt) log(1 - yp))``.

    Parameters
    ----------
    y_true : array-like or label indicator matrix
        Ground truth (correct) labels for n_samples samples.

    y_pred : array-like of float, shape = (n_samples, n_classes)
        Predicted probabilities, as returned by a classifier's
        predict_proba method.

    eps : float
        Log loss is undefined for p=0 or p=1, so probabilities are
        clipped to ``max(eps, min(1 - eps, p))``.

    normalize : bool, optional (default=True)
        If true, return the mean loss per sample.  Otherwise, return the
        sum of the per-sample losses.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    loss : float

    Notes
    -----
    The logarithm used is the natural logarithm (base-e).
    """
    lb = LabelBinarizer()
    T = lb.fit_transform(y_true)

    # Binary targets binarize to a single column; expand to two columns
    # (P(class 0), P(class 1)) so the general formula applies.
    if T.shape[1] == 1:
        T = np.append(1 - T, T, axis=1)

    y_pred = check_array(y_pred, ensure_2d=False)

    # Clipping
    Y = np.clip(y_pred, eps, 1 - eps)

    # This happens in cases when elements in y_pred have type "str".
    if not isinstance(Y, np.ndarray):
        raise ValueError("y_pred should be an array of floats.")

    # If y_pred is of single dimension, assume y_true to be binary
    # and then check.
    if Y.ndim == 1:
        Y = Y[:, np.newaxis]
    if Y.shape[1] == 1:
        Y = np.append(1 - Y, Y, axis=1)

    # Check if dimensions are consistent.
    check_consistent_length(T, Y)
    T = check_array(T)
    Y = check_array(Y)
    if T.shape[1] != Y.shape[1]:
        raise ValueError("y_true and y_pred have different number of classes "
                         "%d, %d" % (T.shape[1], Y.shape[1]))

    # Renormalize
    Y /= Y.sum(axis=1)[:, np.newaxis]
    loss = -(T * np.log(Y)).sum(axis=1)

    return _weighted_sum(loss, sample_weight, normalize)


def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None):
    """Average hinge loss (non-regularized).

    In the binary case, assuming labels in y_true are encoded with +1 and
    -1, ``margin = y_true * pred_decision`` is negative on a mistake, so the
    cumulated hinge loss upper-bounds the number of mistakes.  In the
    multiclass case the Crammer-Singer margin is used, and either all labels
    must appear in ``y_true`` or ``labels`` must list them.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True target, consisting of integers of two values.  The positive
        label must be greater than the negative label.

    pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
        Predicted decisions, as output by decision_function (floats).

    labels : array, optional, default None
        Contains all the labels for the problem.  Used in multiclass
        hinge loss.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    loss : float

    References
    ----------
    .. [1] Koby Crammer, Yoram Singer.  On the Algorithmic Implementation of
           Multiclass Kernel-based Vector Machines.  JMLR 2, (2001), 265-292.
    """
    check_consistent_length(y_true, pred_decision, sample_weight)
    pred_decision = check_array(pred_decision, ensure_2d=False)
    y_true = column_or_1d(y_true)
    y_true_unique = np.unique(y_true)
    if y_true_unique.size > 2:
        if (labels is None and pred_decision.ndim > 1 and
                (np.size(y_true_unique) != pred_decision.shape[1])):
            raise ValueError("Please include all labels in y_true "
                             "or pass labels as third argument")
        if labels is None:
            labels = y_true_unique
        le = LabelEncoder()
        le.fit(labels)
        y_true = le.transform(y_true)
        # Crammer-Singer margin: true-class score minus the best
        # competing class score.
        mask = np.ones_like(pred_decision, dtype=bool)
        mask[np.arange(y_true.shape[0]), y_true] = False
        margin = pred_decision[~mask]
        margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1),
                         axis=1)

    else:
        # Handles binary class case
        # this code assumes that positive and negative labels
        # are encoded as +1 and -1 respectively
        pred_decision = column_or_1d(pred_decision)
        pred_decision = np.ravel(pred_decision)

        lbin = LabelBinarizer(neg_label=-1)
        y_true = lbin.fit_transform(y_true)[:, 0]

        try:
            margin = y_true * pred_decision
        except TypeError:
            raise TypeError("pred_decision should be an array of floats.")

    losses = 1 - margin
    # The hinge_loss doesn't penalize good enough predictions.
    losses[losses <= 0] = 0
    return np.average(losses, weights=sample_weight)


def _check_binary_probabilistic_predictions(y_true, y_prob):
    """Check that y_true is binary and y_prob contains valid probabilities"""
    check_consistent_length(y_true, y_prob)

    labels = np.unique(y_true)

    if len(labels) != 2:
        raise ValueError("Only binary classification is supported. "
                         "Provided labels %s." % labels)

    if y_prob.max() > 1:
        raise ValueError("y_prob contains values greater than 1.")

    if y_prob.min() < 0:
        raise ValueError("y_prob contains values less than 0.")

    return label_binarize(y_true, labels)[:, 0]


def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None):
    """Compute the Brier score.

    The Brier score is the mean squared difference between the predicted
    probability of the positive outcome and the actual outcome; it lies in
    [0, 1] and smaller is better.  It is appropriate for binary outcomes
    only.  Which label counts as positive is controlled by ``pos_label``.

    Parameters
    ----------
    y_true : array, shape (n_samples,)
        True targets.

    y_prob : array, shape (n_samples,)
        Probabilities of the positive class.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    pos_label : int (default: None)
        Label of the positive class.  If None, the maximum label is used
        as positive class.

    Returns
    -------
    score : float
        Brier score

    References
    ----------
    http://en.wikipedia.org/wiki/Brier_score
    """
    y_true = column_or_1d(y_true)
    y_prob = column_or_1d(y_prob)

    if pos_label is None:
        pos_label = y_true.max()
    y_true = np.array(y_true == pos_label, int)
    y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
    return np.average((y_true - y_prob) ** 2, weights=sample_weight)
codeparrot/github-code-clean
# -*- encoding: utf-8 -*- import re import string from unicodedata import normalize from lxml import etree from lxml.etree import ElementTree, Element, SubElement from datetime import datetime import time import netsvc from osv import fields, osv import decimal_precision as dp from tools.translate import _ class account_invoice(osv.osv): _inherit = 'account.invoice' def _amount_all(self, cr, uid, ids, name, args, context=None): obj_precision = self.pool.get('decimal.precision') prec = obj_precision.precision_get(cr, uid, 'Account') res = {} for invoice in self.browse(cr, uid, ids, context=context): res[invoice.id] = { 'amount_untaxed': 0.0, 'amount_tax': 0.0, 'amount_tax_discount': 0.0, 'amount_total': 0.0, 'icms_base': 0.0, 'icms_value': 0.0, 'icms_st_base': 0.0, 'icms_st_value': 0.0, 'ipi_base': 0.0, 'ipi_value': 0.0, 'pis_base': 0.0, 'pis_value': 0.0, 'cofins_base': 0.0, 'cofins_value': 0.0, } for line in invoice.invoice_line: res[invoice.id]['amount_untaxed'] += line.price_total res[invoice.id]['icms_base'] += line.icms_base res[invoice.id]['icms_value'] += line.icms_value res[invoice.id]['icms_st_base'] += line.icms_st_base res[invoice.id]['icms_st_value'] += line.icms_st_value res[invoice.id]['ipi_base'] += line.ipi_base res[invoice.id]['ipi_value'] += line.ipi_value res[invoice.id]['pis_base'] += line.pis_base res[invoice.id]['pis_value'] += line.pis_value res[invoice.id]['cofins_base'] += line.cofins_base res[invoice.id]['cofins_value'] += line.cofins_value for invoice_tax in invoice.tax_line: if not invoice_tax.tax_code_id.tax_include: res[invoice.id]['amount_tax'] += invoice_tax.amount res[invoice.id]['amount_total'] = res[invoice.id]['amount_tax'] + res[invoice.id]['amount_untaxed'] return res def _get_fiscal_type(self, cr, uid, context=None): if context is None: context = {} return context.get('fiscal_type', 'product') def fields_view_get(self, cr, uid, view_id=None, view_type=False, context=None, toolbar=False, submenu=False): result = 
super(account_invoice, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu) if context is None: context = {} field_names = ['service_type_id'] result['fields'].update(self.fields_get(cr, uid, field_names, context)) if not view_type: view_id = self.pool.get('ir.ui.view').search(cr, uid, [('name', '=', 'account.invoice.tree')]) view_type = 'tree' if view_type == 'form': eview = etree.fromstring(result['arch']) if 'type' in context.keys(): operation_type = {'out_invoice': 'output', 'in_invoice': 'input', 'out_refund': 'input', 'in_refund': 'output'} types = eview.xpath("//field[@name='invoice_line']") for type in types: type.set('context', "{'type': '%s', 'fiscal_type': '%s'}" % (context['type'], context.get('fiscal_type', 'product'), )) cfops = eview.xpath("//field[@name='cfop_id']") for cfop_id in cfops: cfop_id.set('domain', "[('type', '=', '%s')]" % (operation_type[context['type']], )) cfop_id.set('required', '1') fiscal_operation_categories = eview.xpath("//field[@name='fiscal_operation_category_id']") for fiscal_operation_category_id in fiscal_operation_categories: fiscal_operation_category_id.set('domain', "[('fiscal_type', '=', 'product'), ('type', '=', '%s'), ('use_invoice', '=', True)]" % (operation_type[context['type']], )) fiscal_operation_category_id.set('required', '1') fiscal_operations = eview.xpath("//field[@name='fiscal_operation_id']") for fiscal_operation_id in fiscal_operations: fiscal_operation_id.set('domain', "[('fiscal_type', '=', 'product'), ('type', '=', '%s'), ('fiscal_operation_category_id', '=', fiscal_operation_category_id), ('use_invoice', '=', True)]" % (operation_type[context['type']], )) fiscal_operation_id.set('required', '1') if context.get('fiscal_type', False) == 'service': delivery_infos = eview.xpath("//group[@name='delivery_info']") for delivery_info in delivery_infos: delivery_info.set('invisible', '1') cfops = eview.xpath("//field[@name='cfop_id']") for 
cfop_id in cfops: cfop_id.set('name', 'service_type_id') cfop_id.set('domain', '[]') document_series = eview.xpath("//field[@name='document_serie_id']") for document_serie_id in document_series: document_serie_id.set('domain', "[('fiscal_type', '=', 'service')]") if context['type'] in ('in_invoice', 'out_refund'): fiscal_operation_categories = eview.xpath("//field[@name='fiscal_operation_category_id']") for fiscal_operation_category_id in fiscal_operation_categories: fiscal_operation_category_id.set('domain', "[('fiscal_type', '=', 'service'), ('type', '=', 'input'), ('use_invoice', '=', True)]") fiscal_operation_category_id.set('required', '1') fiscal_operations = eview.xpath("//field[@name='fiscal_operation_id']") for fiscal_operation_id in fiscal_operations: fiscal_operation_id.set('domain', "[('fiscal_type', '=', 'service'), ('type', '=', 'input'), ('fiscal_operation_category_id', '=', fiscal_operation_category_id), ('use_invoice', '=', True)]") fiscal_operation_id.set('required', '1') if context['type'] in ('out_invoice', 'in_refund'): fiscal_operation_categories = eview.xpath("//field[@name='fiscal_operation_category_id']") for fiscal_operation_category_id in fiscal_operation_categories: fiscal_operation_category_id.set('domain', "[('fiscal_type', '=', 'service'), ('type', '=', 'output'), ('use_invoice', '=', True)]") fiscal_operation_category_id.set('required', '1') fiscal_operations = eview.xpath("//field[@name='fiscal_operation_id']") for fiscal_operation_id in fiscal_operations: fiscal_operation_id.set('domain', "[('fiscal_type', '=', 'service'), ('type', '=', 'output'), ('fiscal_operation_category_id', '=', fiscal_operation_category_id), ('use_invoice', '=', True)]") fiscal_operation_id.set('required', '1') result['arch'] = etree.tostring(eview) if view_type == 'tree': doc = etree.XML(result['arch']) nodes = doc.xpath("//field[@name='partner_id']") partner_string = _('Customer') if context.get('type', 'out_invoice') in ('in_invoice', 'in_refund'): 
partner_string = _('Supplier') for node in nodes: node.set('string', partner_string) result['arch'] = etree.tostring(doc) return result def _get_invoice_line(self, cr, uid, ids, context=None): result = {} for line in self.pool.get('account.invoice.line').browse(cr, uid, ids, context=context): result[line.invoice_id.id] = True return result.keys() def _get_invoice_tax(self, cr, uid, ids, context=None): result = {} for tax in self.pool.get('account.invoice.tax').browse(cr, uid, ids, context=context): result[tax.invoice_id.id] = True return result.keys() def _get_receivable_lines(self, cr, uid, ids, name, arg, context=None): res = {} for invoice in self.browse(cr, uid, ids, context=context): id = invoice.id res[id] = [] if not invoice.move_id: continue data_lines = [x for x in invoice.move_id.line_id if x.account_id.id == invoice.account_id.id and x.account_id.type in ('receivable', 'payable') and invoice.journal_id.revenue_expense] New_ids = [] for line in data_lines: New_ids.append(line.id) New_ids.sort() res[id] = New_ids return res _columns = { 'state': fields.selection([ ('draft', 'Draft'), ('proforma', 'Pro-forma'), ('proforma2', 'Pro-forma'), ('open', 'Open'), ('sefaz_export', 'Enviar para Receita'), ('sefaz_exception', 'Erro de autorização da Receita'), ('paid', 'Paid'), ('cancel', 'Cancelled') ], 'State', select=True, readonly=True, help=' * The \'Draft\' state is used when a user is encoding a new and unconfirmed Invoice. \ \n* The \'Pro-forma\' when invoice is in Pro-forma state, invoice does not have an invoice number. \ \n* The \'Open\' state is used when user create invoice, a invoice number is generated.Its in open state till user does not pay invoice. 
\ \n* The \'Paid\' state is set automatically when invoice is paid.\ \n* The \'sefaz_out\' Gerado aquivo de exportação para sistema daReceita.\ \n* The \'sefaz_aut\' Recebido arquivo de autolização da Receita.\ \n* The \'Cancelled\' state is used when user cancel invoice.'), 'partner_shipping_id': fields.many2one('res.partner.address', 'Endereço de Entrega', readonly=True, states={'draft': [('readonly', False)]}, help="Shipping address for current sales order."), 'own_invoice': fields.boolean('Nota Fiscal Própria', readonly=True, states={'draft': [('readonly', False)]}), 'internal_number': fields.char('Invoice Number', size=32, readonly=True, states={'draft': [('readonly', False)]}, help="Unique number of the invoice, computed automatically when the invoice is created."), 'vendor_serie': fields.char('Série NF Entrada', size=12, readonly=True, states={'draft': [('readonly', False)]}, help="Série do número da Nota Fiscal do Fornecedor"), 'nfe_access_key': fields.char('Chave de Acesso NFE', size=44, readonly=True, states={'draft': [('readonly', False)]}), 'nfe_status': fields.char('Status na Sefaz', size=44, readonly=True), 'nfe_date': fields.datetime('Data do Status NFE', readonly=True, states={'draft': [('readonly', False)]}), 'nfe_export_date': fields.datetime('Exportação NFE', readonly=True), 'fiscal_document_id': fields.many2one('l10n_br_account.fiscal.document', 'Documento', readonly=True, states={'draft': [('readonly', False)]}), 'fiscal_document_nfe': fields.related('fiscal_document_id', 'nfe', type='boolean', readonly=True, size=64, relation='l10n_br_account.fiscal.document', store=True, string='NFE'), 'fiscal_type': fields.selection([('product', 'Produto'), ('service', 'Serviço')], 'Tipo Fiscal', requeried=True), 'move_line_receivable_id': fields.function(_get_receivable_lines, method=True, type='many2many', relation='account.move.line', string='Entry Lines'), 'document_serie_id': fields.many2one('l10n_br_account.document.serie', 'Serie', 
domain="[('fiscal_document_id', '=', fiscal_document_id), ('company_id', '=', company_id)]", readonly=True, states={'draft': [('readonly', False)]}), 'fiscal_operation_category_id': fields.many2one('l10n_br_account.fiscal.operation.category', 'Categoria', readonly=True, states={'draft': [('readonly', False)]}), 'fiscal_operation_id': fields.many2one('l10n_br_account.fiscal.operation', 'Operação Fiscal', domain="[('fiscal_operation_category_id', '=', fiscal_operation_category_id)]", readonly=True, states={'draft': [('readonly', False)]}), 'cfop_id': fields.many2one('l10n_br_account.cfop', 'CFOP', readonly=True, states={'draft': [('readonly', False)]}), 'service_type_id': fields.many2one('l10n_br_account.service.type', 'Tipo de Serviço', readonly=True, states={'draft': [('readonly', False)]}), 'amount_untaxed': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Untaxed', store={ 'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20), 'account.invoice.tax': (_get_invoice_tax, None, 20), 'account.invoice.line': (_get_invoice_line, ['price_unit', 'invoice_line_tax_id', 'quantity', 'discount'], 20), }, multi='all'), 'amount_tax': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Tax', store={ 'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20), 'account.invoice.tax': (_get_invoice_tax, None, 20), 'account.invoice.line': (_get_invoice_line, ['price_unit', 'invoice_line_tax_id', 'quantity', 'discount'], 20), }, multi='all'), 'amount_total': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Total', store={ 'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20), 'account.invoice.tax': (_get_invoice_tax, None, 20), 'account.invoice.line': (_get_invoice_line, ['price_unit', 'invoice_line_tax_id', 'quantity', 'discount'], 20), }, multi='all'), 'icms_base': 
fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Base ICMS', store={ 'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20), 'account.invoice.tax': (_get_invoice_tax, None, 20), 'account.invoice.line': (_get_invoice_line, ['price_unit', 'invoice_line_tax_id', 'quantity', 'discount'], 20), }, multi='all'), 'icms_value': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Valor ICMS', store={ 'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20), 'account.invoice.tax': (_get_invoice_tax, None, 20), 'account.invoice.line': (_get_invoice_line, ['price_unit', 'invoice_line_tax_id', 'quantity', 'discount'], 20), }, multi='all'), 'icms_st_base': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Base ICMS ST', store={ 'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20), 'account.invoice.tax': (_get_invoice_tax, None, 20), 'account.invoice.line': (_get_invoice_line, ['price_unit', 'invoice_line_tax_id', 'quantity', 'discount'], 20), }, multi='all'), 'icms_st_value': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Valor ICMS ST', store={ 'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20), 'account.invoice.tax': (_get_invoice_tax, None, 20), 'account.invoice.line': (_get_invoice_line, ['price_unit', 'invoice_line_tax_id', 'quantity', 'discount'], 20), }, multi='all'), 'ipi_base': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Base IPI', store={ 'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20), 'account.invoice.tax': (_get_invoice_tax, None, 20), 'account.invoice.line': (_get_invoice_line, ['price_unit', 'invoice_line_tax_id', 'quantity', 'discount'], 20), }, multi='all'), 'ipi_value': fields.function(_amount_all, 
method=True, digits_compute=dp.get_precision('Account'), string='Valor IPI', store={ 'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20), 'account.invoice.tax': (_get_invoice_tax, None, 20), 'account.invoice.line': (_get_invoice_line, ['price_unit', 'invoice_line_tax_id', 'quantity', 'discount'], 20), }, multi='all'), 'pis_base': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Base PIS', store={ 'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20), 'account.invoice.tax': (_get_invoice_tax, None, 20), 'account.invoice.line': (_get_invoice_line, ['price_unit', 'invoice_line_tax_id', 'quantity', 'discount'], 20), }, multi='all'), 'pis_value': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Valor PIS', store={ 'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20), 'account.invoice.tax': (_get_invoice_tax, None, 20), 'account.invoice.line': (_get_invoice_line, ['price_unit', 'invoice_line_tax_id', 'quantity', 'discount'], 20), }, multi='all'), 'cofins_base': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Base COFINS', store={ 'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20), 'account.invoice.tax': (_get_invoice_tax, None, 20), 'account.invoice.line': (_get_invoice_line, ['price_unit', 'invoice_line_tax_id', 'quantity', 'discount'], 20), }, multi='all'), 'cofins_value': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Valor COFINS', store={ 'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20), 'account.invoice.tax': (_get_invoice_tax, None, 20), 'account.invoice.line': (_get_invoice_line, ['price_unit', 'invoice_line_tax_id', 'quantity', 'discount'], 20), }, multi='all'), } _defaults = { 'own_invoice': True, 'fiscal_type': _get_fiscal_type, } # go from 
# go from the canceled state back to the draft state
def action_cancel_draft(self, cr, uid, ids, *args):
    """Reset canceled invoices to draft.

    Clears the internal number and every NF-e related field, then deletes
    and recreates the workflow instance so the invoice can go through
    validation again.
    """
    self.write(cr, uid, ids, {
        'state': 'draft',
        'internal_number': False,
        'nfe_access_key': False,
        'nfe_status': False,
        'nfe_date': False,
        'nfe_export_date': False,
    })
    wf_service = netsvc.LocalService("workflow")
    for inv_id in ids:
        # drop the old (cancelled) workflow instance and start a fresh one
        wf_service.trg_delete(uid, 'account.invoice', inv_id, cr)
        wf_service.trg_create(uid, 'account.invoice', inv_id, cr)
    return True

def copy(self, cr, uid, id, default=None, context=None):
    """Duplicate an invoice without carrying over numbering / NF-e state.

    Bug fix: the original signature used a mutable default argument
    (``default={}``) and mutated it in place, leaking the forced values
    across calls and into the caller's dictionary.  We now work on a copy.
    """
    default = dict(default or {})
    default.update({
        'internal_number': False,
        'nfe_access_key': False,
        'nfe_status': False,
        'nfe_date': False,
        'nfe_export_date': False,
    })
    return super(account_invoice, self).copy(cr, uid, id, default, context)

def action_internal_number(self, cr, uid, ids, context=None):
    """Assign the next internal (fiscal) number from the journal's sequence.

    Only invoices issued by the company itself (``own_invoice``) get a
    number; third-party documents keep whatever number they arrived with.
    """
    if context is None:
        context = {}
    for obj_inv in self.browse(cr, uid, ids):
        if obj_inv.own_invoice:
            obj_sequence = self.pool.get('ir.sequence')
            seq_no = obj_sequence.get_id(cr, uid, obj_inv.journal_id.internal_sequence.id, context=context)
            self.write(cr, uid, obj_inv.id, {'internal_number': seq_no})
    return True

def action_number(self, cr, uid, ids, context=None):
    """Propagate the invoice reference to its accounting entries and log it.

    Writes the reference on the related account.move, its move lines and
    any analytic lines that still have an empty ``ref``.  The internal
    (fiscal) number takes precedence over the free-form reference.
    """
    if context is None:
        context = {}
    for obj_inv in self.browse(cr, uid, ids):
        invoice_id = obj_inv.id
        move_id = obj_inv.move_id and obj_inv.move_id.id or False
        ref = obj_inv.internal_number or obj_inv.reference or ''
        cr.execute('UPDATE account_move SET ref=%s '
                   'WHERE id=%s AND (ref is null OR ref = \'\')',
                   (ref, move_id))
        cr.execute('UPDATE account_move_line SET ref=%s '
                   'WHERE move_id=%s AND (ref is null OR ref = \'\')',
                   (ref, move_id))
        cr.execute('UPDATE account_analytic_line SET ref=%s '
                   'FROM account_move_line '
                   'WHERE account_move_line.move_id = %s '
                   'AND account_analytic_line.move_id = account_move_line.id',
                   (ref, move_id))
        for inv_id, name in self.name_get(cr, uid, [invoice_id]):
            ctx = context.copy()
            if obj_inv.type in ('out_invoice', 'out_refund'):
                ctx = self.get_log_context(cr, uid, context=ctx)
            message = _('Invoice ') + " '" + name + "' " + _("is validated.")
            self.log(cr, uid, inv_id, message, context=ctx)
    return True

def action_move_create(self, cr, uid, ids, *args):
    """Create the accounting move, then stamp it with the internal number.

    The move and all of its lines get ``ref`` = internal number; the
    receivable/payable lines are renamed ``<number>/<n>`` so installments
    can be told apart.  The counter runs from len(lines) down to 1, which
    matches the original numbering order.
    """
    result = super(account_invoice, self).action_move_create(cr, uid, ids, *args)
    for inv in self.browse(cr, uid, ids):
        if inv.move_id:
            self.pool.get('account.move').write(cr, uid, [inv.move_id.id], {'ref': inv.internal_number})
            for move_line in inv.move_id.line_id:
                self.pool.get('account.move.line').write(cr, uid, [move_line.id], {'ref': inv.internal_number})
            move_lines = [x for x in inv.move_id.line_id
                          if x.account_id.id == inv.account_id.id
                          and x.account_id.type in ('receivable', 'payable')]
            i = len(move_lines)
            for move_line in move_lines:
                move_line_name = '%s/%s' % (inv.internal_number, i)
                self.pool.get('account.move.line').write(cr, uid, [move_line.id], {'name': move_line_name})
                i -= 1
    return result
def finalize_invoice_move_lines(self, cr, uid, invoice_browse, move_lines): """finalize_invoice_move_lines(cr, uid, invoice, move_lines) -> move_lines Hook method to be overridden in additional modules to verify and possibly alter the move lines to be created by an invoice, for special cases. * remove move lines without credit and debit, we have to see where l10n_br modules are creating these error entries; * correct move entreis when we have tax_retain :param invoice_browse: browsable record of the invoice that is generating the move lines :param move_lines: list of dictionaries with the account.move.lines (as for create()) :return: the (possibly updated) final move_lines to create for this invoice """ total_taxes_credit = 0 move_lines_tmp = [] remove_itens = [] tax_retained_itens = [] mv_tmp_tuple = [] final_credit_ind = -1 initial_debit_ind = -1 for ind, move_line in enumerate(move_lines): move_line_item = move_line[2] if not move_line_item['credit'] and not move_line_item['debit']: remove_itens.append(ind) elif move_line_item['account_id'] == invoice_browse.account_id.id and not move_line_item['credit']: move_line_item['debit'] = invoice_browse.amount_total initial_debit_ind = ind elif move_line_item['tax_amount'] < 0: move_line_item['tax_amount'] = - move_line_item['tax_amount'] tax_retained_itens.append(ind) elif move_line_item['credit'] > 0 and move_line_item['credit'] != invoice_browse.amount_untaxed: total_taxes_credit += move_line_item['credit'] elif move_line_item['credit'] == invoice_browse.amount_untaxed: final_credit_ind = ind if final_credit_ind > -1: move_lines[final_credit_ind][2]['credit'] = invoice_browse.amount_total - total_taxes_credit for mv_ind in tax_retained_itens: mv_tmp = move_lines[mv_ind][2].copy() mv_tmp['credit'] = mv_tmp['debit'] mv_tmp['debit'] = False mv_tmp_tuple = 0, 0, mv_tmp move_lines_tmp.append(mv_tmp_tuple) while remove_itens: move_lines.pop(remove_itens.pop()) while move_lines_tmp: move_lines.append(move_lines_tmp.pop()) 
return move_lines def nfe_dv(self, key): #Testing return '2' def nfe_check(self, cr, uid, ids, context=None): strErro = '' strErro = u'' if context is None: context = {} for inv in self.browse(cr, uid, ids): #Nota fiscal if not inv.own_invoice or inv.fiscal_type == 'service': continue company_addr = self.pool.get('res.partner').address_get(cr, uid, [inv.company_id.partner_id.id], ['default']) company_addr_default = self.pool.get('res.partner.address').browse(cr, uid, [company_addr['default']], context={'lang': 'pt_BR'})[0] if not inv.document_serie_id: strErro = u'Nota Fiscal - Série da nota fiscal\n' if not inv.fiscal_document_id: strErro = u'Nota Fiscal - Tipo de documento fiscal\n' #if not inv.date_invoice: # strErro = 'Nota Fiscal - Data da nota fiscal\n' if not inv.journal_id.internal_sequence: strErro = u'Nota Fiscal - Número da nota fiscal, o diário deve ter uma sequencia interna\n' if not inv.cfop_id: strErro = u'Nota Fiscal - CFOP\n' else: if not inv.cfop_id.small_name: strErro = u'Nota Fiscal - Descrição reduzida do CFOP\n' #Emitente if not inv.company_id.partner_id.legal_name: strErro = u'Emitente - Razão Social\n' if not inv.company_id.partner_id.name: strErro = u'Emitente - Fantasia\n' if not inv.company_id.partner_id.cnpj_cpf: strErro = u'Emitente - CNPJ/CPF\n' if not company_addr_default.street: strErro = u'Emitente / Endereço - Logradouro\n' if not company_addr_default.number: strErro = u'Emitente / Endereço - Número\n' if not company_addr_default.zip: strErro = u'Emitente / Endereço - CEP\n' if not inv.company_id.cnae_main_id: strErro = u'Emitente / CNAE Principal\n' if not inv.company_id.partner_id.inscr_est: strErro = u'Emitente / Inscrição Estadual\n' if not company_addr_default.state_id: strErro = u'Emitente / Endereço - Estado\n' else: if not company_addr_default.state_id.ibge_code: strErro = u'Emitente / Endereço - Código do IBGE do estado\n' if not company_addr_default.state_id.name: strErro = u'Emitente / Endereço - Nome do estado\n' if not 
company_addr_default.l10n_br_city_id: strErro = u'Emitente / Endereço - município\n' else: if not company_addr_default.l10n_br_city_id.name: strErro = u'Emitente / Endereço - Nome do município\n' if not company_addr_default.l10n_br_city_id.ibge_code: strErro = u'Emitente / Endereço - Código do IBGE do município\n' if not company_addr_default.country_id: strErro = u'Emitente / Endereço - país\n' else: if not company_addr_default.country_id.name: strErro = u'Emitente / Endereço - Nome do país\n' if not company_addr_default.country_id.bc_code: strErro = u'Emitente / Endereço - Código do BC do país\n' if not company_addr_default.country_id: strErro = u'Emitente / Regime Tributário\n' #Destinatário if not inv.partner_id.legal_name: strErro = u'Destinatário - Razão Social\n' if not inv.partner_id.cnpj_cpf: strErro = u'Destinatário - CNPJ/CPF\n' if not inv.address_invoice_id.street: strErro = u'Destinatário / Endereço - Logradouro\n' if not inv.address_invoice_id.number: strErro = u'Destinatário / Endereço - Número\n' if not inv.address_invoice_id.zip: strErro = u'Destinatário / Endereço - CEP\n' if not inv.address_invoice_id.state_id: strErro = u'Destinatário / Endereço - Estado\n' else: if not inv.address_invoice_id.state_id.ibge_code: strErro = u'Destinatário / Endereço - Código do IBGE do estado\n' if not inv.address_invoice_id.state_id.name: strErro = u'Destinatário / Endereço - Nome do estado\n' if not inv.address_invoice_id.l10n_br_city_id: strErro = u'Destinatário / Endereço - Município\n' else: if not inv.address_invoice_id.l10n_br_city_id.name: strErro = u'Destinatário / Endereço - Nome do município\n' if not inv.address_invoice_id.l10n_br_city_id.ibge_code: strErro = u'Destinatário / Endereço - Código do IBGE do município\n' if not inv.address_invoice_id.country_id: strErro = u'Destinatário / Endereço - País\n' else: if not inv.address_invoice_id.country_id.name: strErro = u'Destinatário / Endereço - Nome do país\n' if not 
inv.address_invoice_id.country_id.bc_code: strErro = u'Destinatário / Endereço - Código do BC do país\n' #endereco de entrega if inv.partner_shipping_id: if inv.address_invoice_id != inv.partner_shipping_id: if not inv.partner_shipping_id.street: strErro = u'Destinatário / Endereço de Entrega - Logradouro\n' if not inv.partner_shipping_id.number: strErro = u'Destinatário / Endereço de Entrega - Número\n' if not inv.address_invoice_id.zip: strErro = u'Destinatário / Endereço de Entrega - CEP\n' if not inv.partner_shipping_id.state_id: strErro = u'Destinatário / Endereço de Entrega - Estado\n' else: if not inv.partner_shipping_id.state_id.ibge_code: strErro = u'Destinatário / Endereço de Entrega - Código do IBGE do estado\n' if not inv.partner_shipping_id.state_id.name: strErro = u'Destinatário / Endereço de Entrega - Nome do estado\n' if not inv.partner_shipping_id.l10n_br_city_id: strErro = u'Destinatário / Endereço - Município\n' else: if not inv.partner_shipping_id.l10n_br_city_id.name: strErro = u'Destinatário / Endereço de Entrega - Nome do município\n' if not inv.partner_shipping_id.l10n_br_city_id.ibge_code: strErro = u'Destinatário / Endereço de Entrega - Código do IBGE do município\n' if not inv.partner_shipping_id.country_id: strErro = u'Destinatário / Endereço de Entrega - País\n' else: if not inv.partner_shipping_id.country_id.name: strErro = u'Destinatário / Endereço de Entrega - Nome do país\n' if not inv.partner_shipping_id.country_id.bc_code: strErro = u'Destinatário / Endereço de Entrega - Código do BC do país\n' #produtos for inv_line in inv.invoice_line: if inv_line.product_id: if not inv_line.product_id.code: strErro = u'Produtos e Serviços: %s, Qtde: %s - Código do produto\n' % (inv_line.product_id.name, inv_line.quantity) if not inv_line.product_id.name: strErro = u'Produtos e Serviços: %s, Qtde: %s - Nome do produto\n' % (inv_line.product_id.name, inv_line.quantity) if not inv_line.cfop_id: strErro = u'Produtos e Serviços: %s, Qtde: %s - 
CFOP\n' % (inv_line.product_id.name, inv_line.quantity) else: if not inv_line.cfop_id.code: strErro = u'Produtos e Serviços: %s, Qtde: %s - Código do CFOP\n' % (inv_line.product_id.name, inv_line.quantity) if not inv_line.uos_id: strErro = u'Produtos e Serviços: %s, Qtde: %s - Unidade de medida\n' % (inv_line.product_id.name, inv_line.quantity) if not inv_line.quantity: strErro = u'Produtos e Serviços: %s, Qtde: %s - Quantidade\n' % (inv_line.product_id.name, inv_line.quantity) if not inv_line.price_unit: strErro = u'Produtos e Serviços: %s, Qtde: %s - Preço unitário\n' % (inv_line.product_id.name, inv_line.quantity) if not inv_line.icms_cst: strErro = u'Produtos e Serviços: %s, Qtde: %s - CST do ICMS\n' % (inv_line.product_id.name, inv_line.quantity) if not inv_line.ipi_cst: strErro = u'Produtos e Serviços: %s, Qtde: %s - CST do IPI\n' % (inv_line.product_id.name, inv_line.quantity) if not inv_line.pis_cst: strErro = u'Produtos e Serviços: %s, Qtde: %s - CST do PIS\n' % (inv_line.product_id.name, inv_line.quantity) if not inv_line.cofins_cst: strErro = u'Produtos e Serviços: %s, Qtde: %s - CST do COFINS\n' % (inv_line.product_id.name, inv_line.quantity) if strErro: raise osv.except_osv(_('Error !'), _("Error Validating NFE:\n '%s'") % (strErro, )) return True def nfe_export_txt(self, cr, uid, ids, context=False): StrFile = '' StrNF = 'NOTA FISCAL|%s|\n' % len(ids) StrFile = StrNF for inv in self.browse(cr, uid, ids, context={'lang': 'pt_BR'}): #Endereço do company company_addr = self.pool.get('res.partner').address_get(cr, uid, [inv.company_id.partner_id.id], ['default']) company_addr_default = self.pool.get('res.partner.address').browse(cr, uid, [company_addr['default']], context={'lang': 'pt_BR'})[0] #nfe_key = unicode(company_addr_default.state_id.ibge_code).strip().rjust(2, u'0') #nfe_key += unicode(datetime.strptime(inv.date_invoice, '%Y-%m-%d').strftime(u'%y%m')).strip().rjust(4, u'0') #nfe_key += re.sub('[%s]' % re.escape(string.punctuation), '', 
inv.company_id.partner_id.cnpj_cpf or '') #nfe_key += inv.fiscal_document_id.code #nfe_key += unicode(inv.document_serie_id.code).strip().rjust(3, u'0') #nfe_key += unicode(inv.internal_number).strip().rjust(9, u'0') #fe_key += unicode('1').strip().rjust(1, u'0') # Homologação #nfe_key += unicode(inv.internal_number).strip().rjust(8, u'0') #nfe_key += unicode(self.nfe_dv(nfe_key)).strip().rjust(1, u'0') StrA = 'A|%s|%s|\n' % ('2.00', '') StrFile += StrA StrRegB = { 'cUF': company_addr_default.state_id.ibge_code, 'cNF': '', 'NatOp': normalize('NFKD', unicode(inv.cfop_id.small_name or '')).encode('ASCII', 'ignore'), 'intPag': '2', 'mod': inv.fiscal_document_id.code, 'serie': inv.document_serie_id.code, 'nNF': inv.internal_number or '', 'dEmi': inv.date_invoice or '', 'dSaiEnt': inv.date_invoice or '', 'hSaiEnt': '', 'tpNF': '', 'cMunFG': ('%s%s') % (company_addr_default.state_id.ibge_code, company_addr_default.l10n_br_city_id.ibge_code), 'TpImp': '1', 'TpEmis': '1', 'cDV': '', 'tpAmb': '2', 'finNFe': '1', 'procEmi': '0', 'VerProc': '2.1.4', 'dhCont': '', 'xJust': '', } if inv.cfop_id.type in ("input"): StrRegB['tpNF'] = '0' else: StrRegB['tpNF'] = '1' StrB = 'B|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegB['cUF'], StrRegB['cNF'], StrRegB['NatOp'], StrRegB['intPag'], StrRegB['mod'], StrRegB['serie'], StrRegB['nNF'], StrRegB['dEmi'], StrRegB['dSaiEnt'], StrRegB['hSaiEnt'], StrRegB['tpNF'], StrRegB['cMunFG'], StrRegB['TpImp'], StrRegB['TpEmis'], StrRegB['cDV'], StrRegB['tpAmb'], StrRegB['finNFe'], StrRegB['procEmi'], StrRegB['VerProc'], StrRegB['dhCont'], StrRegB['xJust']) StrFile += StrB StrRegC = { 'XNome': normalize('NFKD', unicode(inv.company_id.partner_id.legal_name or '')).encode('ASCII', 'ignore'), 'XFant': normalize('NFKD', unicode(inv.company_id.partner_id.name or '')).encode('ASCII', 'ignore'), 'IE': re.sub('[%s]' % re.escape(string.punctuation), '', inv.company_id.partner_id.inscr_est or ''), 'IEST': '', 'IM': re.sub('[%s]' % 
re.escape(string.punctuation), '', inv.company_id.partner_id.inscr_mun or ''), 'CNAE': re.sub('[%s]' % re.escape(string.punctuation), '', inv.company_id.cnae_main_id.code or ''), 'CRT': inv.company_id.fiscal_type or '', } #TODO - Verificar, pois quando e informado do CNAE ele exige que a inscricao municipal, parece um bug do emissor da NFE if not inv.company_id.partner_id.inscr_mun: StrRegC['CNAE'] = '' StrC = 'C|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegC['XNome'], StrRegC['XFant'], StrRegC['IE'], StrRegC['IEST'], StrRegC['IM'], StrRegC['CNAE'], StrRegC['CRT']) StrFile += StrC if inv.company_id.partner_id.tipo_pessoa == 'J': StrC02 = 'C02|%s|\n' % (re.sub('[%s]' % re.escape(string.punctuation), '', inv.company_id.partner_id.cnpj_cpf or '')) else: StrC02 = 'C02a|%s|\n' % (re.sub('[%s]' % re.escape(string.punctuation), '', inv.company_id.partner_id.cnpj_cpf or '')) StrFile += StrC02 StrRegC05 = { 'XLgr': normalize('NFKD', unicode(company_addr_default.street or '')).encode('ASCII', 'ignore'), 'Nro': company_addr_default.number or '', 'Cpl': normalize('NFKD', unicode(company_addr_default.street2 or '')).encode('ASCII', 'ignore'), 'Bairro': normalize('NFKD', unicode(company_addr_default.district or 'Sem Bairro')).encode('ASCII', 'ignore'), 'CMun': '%s%s' % (company_addr_default.state_id.ibge_code, company_addr_default.l10n_br_city_id.ibge_code), 'XMun': normalize('NFKD', unicode(company_addr_default.l10n_br_city_id.name or '')).encode('ASCII', 'ignore'), 'UF': company_addr_default.state_id.code or '', 'CEP': re.sub('[%s]' % re.escape(string.punctuation), '', str(company_addr_default.zip or '').replace(' ', '')), 'cPais': company_addr_default.country_id.bc_code or '', 'xPais': normalize('NFKD', unicode(company_addr_default.country_id.name or '')).encode('ASCII', 'ignore'), 'fone': re.sub('[%s]' % re.escape(string.punctuation), '', str(company_addr_default.phone or '').replace(' ', '')), } StrC05 = 'C05|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegC05['XLgr'], StrRegC05['Nro'], 
StrRegC05['Cpl'], StrRegC05['Bairro'], StrRegC05['CMun'], StrRegC05['XMun'], StrRegC05['UF'], StrRegC05['CEP'], StrRegC05['cPais'], StrRegC05['xPais'], StrRegC05['fone']) StrFile += StrC05 if inv.partner_id.tipo_pessoa == 'J': _xNome = normalize('NFKD', unicode(inv.partner_id.legal_name)).encode('ASCII', 'ignore') else: _xNome = normalize('NFKD', unicode(inv.partner_id.name)).encode('ASCII', 'ignore') StrRegE = { 'xNome': _xNome, 'IE': re.sub('[%s]' % re.escape(string.punctuation), '', inv.partner_id.inscr_est or ''), 'ISUF': '', 'email': inv.partner_id.email or '', } StrE = 'E|%s|%s|%s|%s|\n' % (StrRegE['xNome'], StrRegE['IE'], StrRegE['ISUF'], StrRegE['email']) StrFile += StrE if inv.partner_id.tipo_pessoa == 'J': StrE0 = 'E02|%s|\n' % (re.sub('[%s]' % re.escape(string.punctuation), '', inv.partner_id.cnpj_cpf or '')) else: StrE0 = 'E03|%s|\n' % (re.sub('[%s]' % re.escape(string.punctuation), '', inv.partner_id.cnpj_cpf or '')) StrFile += StrE0 StrRegE05 = { 'xLgr': normalize('NFKD', unicode(inv.address_invoice_id.street or '')).encode('ASCII', 'ignore'), 'nro': normalize('NFKD', unicode(inv.address_invoice_id.number or '')).encode('ASCII', 'ignore'), 'xCpl': re.sub('[%s]' % re.escape(string.punctuation), '', normalize('NFKD', unicode(inv.address_invoice_id.street2 or '')).encode('ASCII', 'ignore')), 'xBairro': normalize('NFKD', unicode(inv.address_invoice_id.district or 'Sem Bairro')).encode('ASCII', 'ignore'), 'cMun': ('%s%s') % (inv.address_invoice_id.state_id.ibge_code, inv.address_invoice_id.l10n_br_city_id.ibge_code), 'xMun': normalize('NFKD', unicode(inv.address_invoice_id.l10n_br_city_id.name or '')).encode('ASCII', 'ignore'), 'UF': inv.address_invoice_id.state_id.code, 'CEP': re.sub('[%s]' % re.escape(string.punctuation), '', str(inv.address_invoice_id.zip or '').replace(' ', '')), 'cPais': inv.address_invoice_id.country_id.bc_code, 'xPais': normalize('NFKD', unicode(inv.address_invoice_id.country_id.name or '')).encode('ASCII', 'ignore'), 'fone': 
re.sub('[%s]' % re.escape(string.punctuation), '', str(inv.address_invoice_id.phone or '').replace(' ', '')), } StrE05 = 'E05|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegE05['xLgr'], StrRegE05['nro'], StrRegE05['xCpl'], StrRegE05['xBairro'], StrRegE05['cMun'], StrRegE05['xMun'], StrRegE05['UF'], StrRegE05['CEP'], StrRegE05['cPais'], StrRegE05['xPais'], StrRegE05['fone'], ) StrFile += StrE05 if inv.partner_shipping_id: if inv.address_invoice_id != inv.partner_shipping_id: StrRegG = { 'XLgr': normalize('NFKD', unicode(inv.partner_shipping_id.street or '', )).encode('ASCII', 'ignore'), 'Nro': normalize('NFKD', unicode(inv.partner_shipping_id.number or '')).encode('ASCII', 'ignore'), 'XCpl': re.sub('[%s]' % re.escape(string.punctuation), '', normalize('NFKD', unicode(inv.partner_shipping_id.street2 or '')).encode('ASCII', 'ignore')), 'XBairro': re.sub('[%s]' % re.escape(string.punctuation), '', normalize('NFKD', unicode(inv.partner_shipping_id.district or 'Sem Bairro')).encode('ASCII', 'ignore')), 'CMun': ('%s%s') % (inv.partner_shipping_id.state_id.ibge_code, inv.partner_shipping_id.l10n_br_city_id.ibge_code), 'XMun': normalize('NFKD', unicode(inv.partner_shipping_id.l10n_br_city_id.name or '')).encode('ASCII', 'ignore'), 'UF': inv.address_invoice_id.state_id.code, } StrG = 'G|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegG['XLgr'], StrRegG['Nro'], StrRegG['XCpl'], StrRegG['XBairro'], StrRegG['CMun'], StrRegG['XMun'], StrRegG['UF']) StrFile += StrG if inv.partner_id.tipo_pessoa == 'J': StrG0 = 'G02|%s|\n' % (re.sub('[%s]' % re.escape(string.punctuation), '', inv.partner_id.cnpj_cpf or '')) else: StrG0 = 'G02a|%s|\n' % (re.sub('[%s]' % re.escape(string.punctuation), '', inv.partner_id.cnpj_cpf or '')) StrFile += StrG0 i = 0 for inv_line in inv.invoice_line: i += 1 StrH = 'H|%s||\n' % (i) StrFile += StrH StrRegI = { 'CProd': normalize('NFKD', unicode(inv_line.product_id.code or '', )).encode('ASCII', 'ignore'), 'CEAN': inv_line.product_id.ean13 or '', 'XProd': normalize('NFKD', 
unicode(inv_line.product_id.name or '')).encode('ASCII', 'ignore'), 'NCM': re.sub('[%s]' % re.escape(string.punctuation), '', inv_line.product_id.property_fiscal_classification.name or ''), 'EXTIPI': '', 'CFOP': inv_line.cfop_id.code, 'UCom': normalize('NFKD', unicode(inv_line.uos_id.name or '', )).encode('ASCII', 'ignore'), 'QCom': str("%.4f" % inv_line.quantity), 'VUnCom': str("%.2f" % (inv_line.price_unit * (1 - (inv_line.discount or 0.0) / 100.0))), 'VProd': str("%.2f" % inv_line.price_total), 'CEANTrib': '', 'UTrib': inv_line.uos_id.name, 'QTrib': str("%.4f" % inv_line.quantity), 'VUnTrib': str("%.2f" % inv_line.price_unit), 'VFrete': '', 'VSeg': '', 'VDesc': '', 'vOutro': '', 'indTot': '1', 'xPed': '', 'nItemPed': '', } if inv_line.product_id.code: StrRegI['CProd'] = inv_line.product_id.code else: StrRegI['CProd'] = unicode(i).strip().rjust(4, u'0') #No OpenERP já traz o valor unitário como desconto #if inv_line.discount > 0: # StrRegI['VDesc'] = str("%.2f" % (inv_line.quantity * (inv_line.price_unit * (1-(inv_line.discount or 0.0)/100.0)))) StrI = 'I|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegI['CProd'], StrRegI['CEAN'], StrRegI['XProd'], StrRegI['NCM'], StrRegI['EXTIPI'], StrRegI['CFOP'], StrRegI['UCom'], StrRegI['QCom'], StrRegI['VUnCom'], StrRegI['VProd'], StrRegI['CEANTrib'], StrRegI['UTrib'], StrRegI['QTrib'], StrRegI['VUnTrib'], StrRegI['VFrete'], StrRegI['VSeg'], StrRegI['VDesc'], StrRegI['vOutro'], StrRegI['indTot'], StrRegI['xPed'], StrRegI['nItemPed']) StrFile += StrI StrM = 'M|\n' StrFile += StrM StrN = 'N|\n' StrFile += StrN #TODO - Fazer alteração para cada tipo de cst if inv_line.icms_cst in ('00'): StrRegN02 = { 'Orig': inv_line.product_id.origin or '0', 'CST': inv_line.icms_cst, 'ModBC': '0', 'VBC': str("%.2f" % inv_line.icms_base), 'PICMS': str("%.2f" % inv_line.icms_percent), 'VICMS': str("%.2f" % inv_line.icms_value), } StrN02 = 'N02|%s|%s|%s|%s|%s|%s|\n' % (StrRegN02['Orig'], StrRegN02['CST'], 
StrRegN02['ModBC'], StrRegN02['VBC'], StrRegN02['PICMS'], StrRegN02['VICMS']) StrFile += StrN02 if inv_line.icms_cst in ('20'): StrRegN04 = { 'Orig': inv_line.product_id.origin or '0', 'CST': inv_line.icms_cst, 'ModBC': '0', 'PRedBC': str("%.2f" % inv_line.icms_percent_reduction), 'VBC': str("%.2f" % inv_line.icms_base), 'PICMS': str("%.2f" % inv_line.icms_percent), 'VICMS': str("%.2f" % inv_line.icms_value), } StrN04 = 'N04|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegN04['Orig'], StrRegN04['CST'], StrRegN04['ModBC'], StrRegN04['PRedBC'], StrRegN04['VBC'], StrRegN04['PICMS'], StrRegN04['VICMS']) StrFile += StrN04 if inv_line.icms_cst in ('10'): StrRegN03 = { 'Orig': inv_line.product_id.origin or '0', 'CST': inv_line.icms_cst, 'ModBC': '0', 'VBC': str("%.2f" % inv_line.icms_base), 'PICMS': str("%.2f" % inv_line.icms_percent), 'VICMS': str("%.2f" % inv_line.icms_value), 'ModBCST': '4', # TODO 'PMVAST': str("%.2f" % inv_line.icms_st_mva) or '', 'PRedBCST': '', 'VBCST': str("%.2f" % inv_line.icms_st_base), 'PICMSST': str("%.2f" % inv_line.icms_st_percent), 'VICMSST': str("%.2f" % inv_line.icms_st_value), } StrN03 = 'N03|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegN03['Orig'], StrRegN03['CST'], StrRegN03['ModBC'], StrRegN03['VBC'], StrRegN03['PICMS'], StrRegN03['VICMS'], StrRegN03['ModBCST'], StrRegN03['PMVAST'], StrRegN03['PRedBCST'], StrRegN03['VBCST'], StrRegN03['PICMSST'], StrRegN03['VICMSST']) StrFile += StrN03 if inv_line.icms_cst in ('40', '41', '50', '51'): StrRegN06 = { 'Orig': inv_line.product_id.origin or '0', 'CST': inv_line.icms_cst, 'vICMS': str("%.2f" % inv_line.icms_value), 'motDesICMS': '9', # FIXME } StrN06 = 'N06|%s|%s|%s|%s|\n' % (StrRegN06['Orig'], StrRegN06['CST'], StrRegN06['vICMS'], StrRegN06['motDesICMS']) StrFile += StrN06 if inv_line.icms_cst in ('60'): StrRegN08 = { 'Orig': inv_line.product_id.origin or '0', 'CST': inv_line.icms_cst, 'VBCST': str("%.2f" % 0.00), 'VICMSST': str("%.2f" % 0.00), } StrN08 = 'N08|%s|%s|%s|%s|\n' % (StrRegN08['Orig'], 
StrRegN08['CST'], StrRegN08['VBCST'], StrRegN08['VICMSST']) StrFile += StrN08 if inv_line.icms_cst in ('70'): StrRegN09 = { 'Orig': inv_line.product_id.origin or '0', 'CST': inv_line.icms_cst, 'ModBC': '0', 'PRedBC': str("%.2f" % inv_line.icms_percent_reduction), 'VBC': str("%.2f" % inv_line.icms_base), 'PICMS': str("%.2f" % inv_line.icms_percent), 'VICMS': str("%.2f" % inv_line.icms_value), 'ModBCST': '4', # TODO 'PMVAST': str("%.2f" % inv_line.icms_st_mva) or '', 'PRedBCST': '', 'VBCST': str("%.2f" % inv_line.icms_st_base), 'PICMSST': str("%.2f" % inv_line.icms_st_percent), 'VICMSST': str("%.2f" % inv_line.icms_st_value), } StrN09 = 'N09|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegN09['Orig'], StrRegN09['CST'], StrRegN09['ModBC'], StrRegN09['PRedBC'], StrRegN09['VBC'], StrRegN09['PICMS'], StrRegN09['VICMS'], StrRegN09['ModBCST'], StrRegN09['PMVAST'], StrRegN09['PRedBCST'], StrRegN09['VBCST'], StrRegN09['PICMSST'], StrRegN09['VICMSST']) StrFile += StrN09 if inv_line.icms_cst == '102': StrRegN10d = { 'Orig': inv_line.product_id.origin or '0', 'CST': inv_line.icms_cst, } StrFile += 'N10d|%(Orig)s|%(CST)s|\n' % StrRegN10d StrRegO = { 'ClEnq': '', 'CNPJProd': '', 'CSelo': '', 'QSelo': '', 'CEnq': '999', } StrO = 'O|%s|%s|%s|%s|%s|\n' % (StrRegO['ClEnq'], StrRegO['CNPJProd'], StrRegO['CSelo'], StrRegO['QSelo'], StrRegO['CEnq']) StrFile += StrO if inv_line.ipi_percent > 0: StrRegO07 = { 'CST': inv_line.ipi_cst, 'VIPI': str("%.2f" % inv_line.ipi_value), } StrO07 = 'O07|%s|%s|\n' % (StrRegO07['CST'], StrRegO07['VIPI']) StrFile += StrO07 if inv_line.ipi_type == 'percent': StrRegO10 = { 'VBC': str("%.2f" % inv_line.ipi_base), 'PIPI': str("%.2f" % inv_line.ipi_percent), } StrO1 = 'O10|%s|%s|\n' % (StrRegO10['VBC'], StrRegO10['PIPI']) if inv_line.ipi_type == 'quantity': pesol = 0 if inv_line.product_id: pesol = inv_line.product_id.weight_net StrRegO11 = { 'QUnid': str("%.4f" % (inv_line.quantity * pesol)), 'VUnid': str("%.4f" % inv_line.ipi_percent), } StrO1 = 
'O11|%s|%s|\n' % (StrRegO11['QUnid'], StrRegO11['VUnid']) StrFile += StrO1 else: StrO1 = 'O08|%s|\n' % inv_line.ipi_cst StrFile += StrO1 StrFile += 'Q|\n' if inv_line.pis_cst == '99': StrFile += 'Q05|99|0.00|\nQ10|0.0000|0.0000|\n' else: if inv_line.pis_percent > 0: StrRegQ02 = { 'CST': inv_line.pis_cst, 'VBC': str("%.2f" % inv_line.pis_base), 'PPIS': str("%.2f" % inv_line.pis_percent), 'VPIS': str("%.2f" % inv_line.pis_value), } StrQ02 = ('Q02|%s|%s|%s|%s|\n') % (StrRegQ02['CST'], StrRegQ02['VBC'], StrRegQ02['PPIS'], StrRegQ02['VPIS']) else: StrQ02 = 'Q04|%s|\n' % inv_line.pis_cst StrFile += StrQ02 StrFile += 'S|\n' if inv_line.cofins_cst == '99': StrFile += 'S05|99|0.00|\nS09|0.0000|0.0000|\n' else: if inv_line.cofins_percent > 0: StrRegS02 = { 'CST': inv_line.cofins_cst, 'VBC': str("%.2f" % inv_line.cofins_base), 'PCOFINS': str("%.2f" % inv_line.cofins_percent), 'VCOFINS': str("%.2f" % inv_line.cofins_value), } StrS02 = ('S02|%s|%s|%s|%s|\n') % (StrRegS02['CST'], StrRegS02['VBC'], StrRegS02['PCOFINS'], StrRegS02['VCOFINS']) else: StrS02 = 'S04|%s|\n' % inv_line.cofins_cst StrFile += StrS02 StrW = 'W|\n' StrFile += StrW StrRegW02 = { 'vBC': str("%.2f" % inv.icms_base), 'vICMS': str("%.2f" % inv.icms_value), 'vBCST': str("%.2f" % inv.icms_st_base), 'vST': str("%.2f" % inv.icms_st_value), 'vProd': str("%.2f" % inv.amount_untaxed), 'vFrete': str("%.2f" % inv.amount_freight), 'vSeg': str("%.2f" % inv.amount_insurance), 'vDesc': '0.00', 'vII': '0.00', 'vIPI': str("%.2f" % inv.ipi_value), 'vPIS': str("%.2f" % inv.pis_value), 'vCOFINS': str("%.2f" % inv.cofins_value), 'vOutro': str("%.2f" % inv.amount_costs), 'vNF': str("%.2f" % inv.amount_total), } StrW02 = 'W02|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegW02['vBC'], StrRegW02['vICMS'], StrRegW02['vBCST'], StrRegW02['vST'], StrRegW02['vProd'], StrRegW02['vFrete'], StrRegW02['vSeg'], StrRegW02['vDesc'], StrRegW02['vII'], StrRegW02['vIPI'], StrRegW02['vPIS'], StrRegW02['vCOFINS'], StrRegW02['vOutro'], 
StrRegW02['vNF']) StrFile += StrW02 # Modo do Frete: 0- Por conta do emitente; 1- Por conta do destinatário/remetente; 2- Por conta de terceiros; 9- Sem frete (v2.0) StrRegX0 = '0' if inv.incoterm.code == 'FOB': StrRegX0 = '0' if inv.incoterm.code == 'CIF': StrRegX0 = '1' StrX = 'X|%s|\n' % (StrRegX0) StrFile += StrX StrRegX03 = { 'XNome': '', 'IE': '', 'XEnder': '', 'UF': '', 'XMun': '', } StrX0 = '' if inv.carrier_id: #Endereço da transportadora carrier_addr = self.pool.get('res.partner').address_get(cr, uid, [inv.carrier_id.partner_id.id], ['default']) carrier_addr_default = self.pool.get('res.partner.address').browse(cr, uid, [carrier_addr['default']])[0] if inv.carrier_id.partner_id.legal_name: StrRegX03['XNome'] = normalize('NFKD', unicode(inv.carrier_id.partner_id.legal_name or '')).encode('ASCII', 'ignore') else: StrRegX03['XNome'] = normalize('NFKD', unicode(inv.carrier_id.partner_id.name or '')).encode('ASCII', 'ignore') StrRegX03['IE'] = inv.carrier_id.partner_id.inscr_est or '' StrRegX03['xEnder'] = normalize('NFKD', unicode(carrier_addr_default.street or '')).encode('ASCII', 'ignore') StrRegX03['UF'] = carrier_addr_default.state_id.code or '' if carrier_addr_default.l10n_br_city_id: StrRegX03['xMun'] = normalize('NFKD', unicode(carrier_addr_default.l10n_br_city_id.name or '')).encode('ASCII', 'ignore') if inv.carrier_id.partner_id.tipo_pessoa == 'J': StrX0 = 'X04|%s|\n' % (re.sub('[%s]' % re.escape(string.punctuation), '', inv.carrier_id.partner_id.cnpj_cpf or '')) else: StrX0 = 'X05|%s|\n' % (re.sub('[%s]' % re.escape(string.punctuation), '', inv.carrier_id.partner_id.cnpj_cpf or '')) StrX03 = 'X03|%s|%s|%s|%s|%s|\n' % (StrRegX03['XNome'], StrRegX03['IE'], StrRegX03['XEnder'], StrRegX03['UF'], StrRegX03['XMun']) StrFile += StrX03 StrFile += StrX0 StrRegX18 = { 'Placa': '', 'UF': '', 'RNTC': '', } if inv.vehicle_id: StrRegX18['Placa'] = inv.vehicle_id.plate or '' StrRegX18['UF'] = inv.vehicle_id.plate.state_id.code or '' StrRegX18['RNTC'] = 
inv.vehicle_id.rntc_code or '' StrX18 = 'X18|%s|%s|%s|\n' % (StrRegX18['Placa'], StrRegX18['UF'], StrRegX18['RNTC']) StrFile += StrX18 StrRegX26 = { 'QVol': '', 'Esp': '', 'Marca': '', 'NVol': '', 'PesoL': '', 'PesoB': '', } if inv.number_of_packages: StrRegX26['QVol'] = inv.number_of_packages StrRegX26['Esp'] = 'Volume' # TODO StrRegX26['Marca'] StrRegX26['NVol'] StrRegX26['PesoL'] = str("%.3f" % inv.weight_net) StrRegX26['PesoB'] = str("%.3f" % inv.weight) StrX26 = 'X26|%s|%s|%s|%s|%s|%s|\n' % (StrRegX26['QVol'], StrRegX26['Esp'], StrRegX26['Marca'], StrRegX26['NVol'], StrRegX26['PesoL'], StrRegX26['PesoB']) StrFile += StrX26 if inv.journal_id.revenue_expense: StrY = 'Y|\n' StrFile += StrY for line in inv.move_line_receivable_id: StrRegY07 = { 'NDup': line.name, 'DVenc': line.date_maturity or inv.date_due or inv.date_invoice, 'VDup': str("%.2f" % line.debit), } StrY07 = 'Y07|%s|%s|%s|\n' % (StrRegY07['NDup'], StrRegY07['DVenc'], StrRegY07['VDup']) StrFile += StrY07 StrRegZ = { 'InfAdFisco': '', 'InfCpl': normalize('NFKD', unicode(inv.comment or '')).encode('ASCII', 'ignore'), } StrZ = 'Z|%s|%s|\n' % (StrRegZ['InfAdFisco'], StrRegZ['InfCpl']) StrFile += StrZ self.write(cr, uid, [inv.id], {'nfe_export_date': datetime.now()}) return unicode(StrFile.encode('utf-8'), errors='replace') def nfe_export_xml(self, cr, uid, ids, context=False): nfeProc = Element('nfeProc', {'versao': '2.00', 'xmlns': 'http://www.portalfiscal.inf.br/nfe'}) for inv in self.browse(cr, uid, ids, context={'lang': 'pt_BR'}): #Endereço do company company_addr = self.pool.get('res.partner').address_get(cr, uid, [inv.company_id.partner_id.id], ['default']) company_addr_default = self.pool.get('res.partner.address').browse(cr, uid, [company_addr['default']], context={'lang': 'pt_BR'})[0] #MontaChave da Nota Fiscal Eletronica nfe_key = unicode(company_addr_default.state_id.ibge_code).strip().rjust(2, u'0') nfe_key += unicode(datetime.strptime(inv.date_invoice, 
'%Y-%m-%d').strftime(u'%y%m')).strip().rjust(4, u'0') nfe_key += '08478495000170' # unicode(inv.company_id.partner_id.cnpj_cpf).strip().rjust(14, u'0') nfe_key += inv.fiscal_document_id.code nfe_key += unicode(inv.document_serie_id.code).strip().rjust(3, u'0') nfe_key += unicode(inv.internal_number).strip().rjust(9, u'0') nfe_key += unicode('1').strip().rjust(1, u'0') # Homologação nfe_key += unicode(inv.internal_number).strip().rjust(8, u'0') nfe_key += unicode(self.nfe_dv(nfe_key)).strip().rjust(1, u'0') NFe = SubElement(nfeProc, 'NFe', {'xmlns': 'http://www.portalfiscal.inf.br/nfe'}) infNFe = SubElement(NFe, 'infNFe', {'versao': '2.00', 'Id': nfe_key}) #Dados da identificação da nota fiscal ide = SubElement(infNFe, 'ide') ide_cUF = SubElement(ide, 'cUF') ide_cUF.text = company_addr_default.state_id.ibge_code ide_cNF = SubElement(ide, 'cNF') ide_cNF.text = unicode(inv.internal_number).strip().rjust(8, u'0') ide_natOp = SubElement(ide, 'natOp') ide_natOp.text = inv.cfop_id.name ide_indPag = SubElement(ide, 'indPag') ide_indPag.text = "2" ide_mod = SubElement(ide, 'mod') ide_mod.text = inv.fiscal_document_id.code ide_serie = SubElement(ide, 'serie') ide_serie.text = inv.document_serie_id.code ide_nNF = SubElement(ide, 'nNF') ide_nNF.text = inv.internal_number ide_dEmi = SubElement(ide, 'dEmi') ide_dEmi.text = inv.date_invoice ide_dSaiEnt = SubElement(ide, 'dSaiEnt') ide_dSaiEnt.text = inv.date_invoice ide_tpNF = SubElement(ide, 'tpNF') if inv.type in ("out_invoice", "in_refuld"): ide_tpNF.text = '0' else: ide_tpNF.text = '1' ide_cMunFG = SubElement(ide, 'cMunFG') ide_cMunFG.text = ('%s%s') % (company_addr_default.state_id.ibge_code, company_addr_default.l10n_br_city_id.ibge_code) ide_tpImp = SubElement(ide, 'tpImp') ide_tpImp.text = "1" ide_tpEmis = SubElement(ide, 'tpEmis') ide_tpEmis.text = "1" ide_cDV = SubElement(ide, 'cDV') ide_cDV.text = self.nfe_dv(nfe_key) #Tipo de ambiente: 1 - Produção; 2 - Homologação ide_tpAmb = SubElement(ide, 'tpAmb') ide_tpAmb.text = 
"2" #Finalidade da emissão da NF-e: 1 - NFe normal 2 - NFe complementar 3 - NFe de ajuste ide_finNFe = SubElement(ide, 'finNFe') ide_finNFe.text = "1" ide_procEmi = SubElement(ide, 'procEmi') ide_procEmi.text = "0" ide_verProc = SubElement(ide, 'verProc') ide_verProc.text = "2.0.4" emit = SubElement(infNFe, 'emit') emit_CNPJ = SubElement(emit, 'CNPJ') emit_CNPJ.text = inv.company_id.partner_id.cnpj_cpf emit_xNome = SubElement(emit, 'xNome') emit_xNome.text = inv.company_id.partner_id.legal_name emit_xFant = SubElement(emit, 'xFant') emit_xFant.text = inv.company_id.partner_id.name enderEmit = SubElement(emit, 'enderEmit') enderEmit_xLgr = SubElement(enderEmit, 'xLgr') enderEmit_xLgr.text = company_addr_default.street enderEmit_nro = SubElement(enderEmit, 'nro') enderEmit_nro.text = company_addr_default.number enderEmit_xBairro = SubElement(enderEmit, 'xBairro') enderEmit_xBairro.text = company_addr_default.district enderEmit_cMun = SubElement(enderEmit, 'cMun') enderEmit_cMun.text = ('%s%s') % (company_addr_default.state_id.ibge_code, company_addr_default.l10n_br_city_id.ibge_code) enderEmit_xMun = SubElement(enderEmit, 'xMun') enderEmit_xMun.text = company_addr_default.l10n_br_city_id.name enderEmit_UF = SubElement(enderEmit, 'UF') enderEmit_UF.text = company_addr_default.state_id.code enderEmit_CEP = SubElement(enderEmit, 'CEP') enderEmit_CEP.text = company_addr_default.zip enderEmit_cPais = SubElement(enderEmit, 'cPais') enderEmit_cPais.text = company_addr_default.country_id.bc_code enderEmit_xPais = SubElement(enderEmit, 'xPais') enderEmit_xPais.text = company_addr_default.country_id.name enderEmit_fone = SubElement(enderEmit, 'fone') enderEmit_fone.text = company_addr_default.phone emit_IE = SubElement(emit, 'IE') emit_IE.text = inv.company_id.partner_id.inscr_est emit_IEST = SubElement(emit, 'IEST') emit_IEST.text = '0000000000' # FIXME emit_IM = SubElement(emit, 'IM') emit_IM.text = '0000000000' # FIXME emit_CNAE = SubElement(emit, 'CNAE') emit_CNAE.text = 
'0111301' # FIXME emit_CRT = SubElement(emit, 'CRT') emit_CRT.text = '3' # FIXME dest = SubElement(infNFe, 'dest') dest_CNPJ = SubElement(dest, 'CNPJ') dest_CNPJ.text = inv.partner_id.cnpj_cpf dest_xNome = SubElement(dest, 'xNome') dest_xNome.text = inv.partner_id.legal_name enderDest = SubElement(dest, 'enderDest') enderDest_xLgr = SubElement(enderDest, 'xLgr') enderDest_xLgr.text = inv.address_invoice_id.street enderDest_nro = SubElement(enderDest, 'nro') enderDest_nro.text = inv.address_invoice_id.number enderDest_xBairro = SubElement(enderDest, 'xBairro') enderDest_xBairro.text = inv.address_invoice_id.district enderDest_cMun = SubElement(enderDest, 'cMun') enderDest_cMun.text = ('%s%s') % (inv.address_invoice_id.state_id.ibge_code, inv.address_invoice_id.l10n_br_city_id.ibge_code) enderDest_xMun = SubElement(enderDest, 'xMun') enderDest_xMun.text = inv.address_invoice_id.l10n_br_city_id.name enderDest_UF = SubElement(enderDest, 'UF') enderDest_UF.text = inv.address_invoice_id.state_id.code enderDest_CEP = SubElement(enderDest, 'CEP') enderDest_CEP.text = inv.address_invoice_id.zip enderDest_cPais = SubElement(enderDest, 'cPais') enderDest_cPais.text = inv.address_invoice_id.country_id.bc_code enderDest_xPais = SubElement(enderDest, 'xPais') enderDest_xPais.text = inv.address_invoice_id.country_id.name enderDest_fone = SubElement(enderDest, 'fone') enderDest_fone.text = inv.address_invoice_id.phone dest_IE = SubElement(dest, 'IE') dest_IE.text = inv.partner_id.inscr_est i = 0 for inv_line in inv.invoice_line: i += 1 det = SubElement(infNFe, 'det', {'nItem': str(i)}) det_prod = SubElement(det, 'prod') prod_cProd = SubElement(det_prod, 'cProd') if inv_line.product_id.code: prod_cProd.text = inv_line.product_id.code else: prod_cProd.text = unicode(i).strip().rjust(4, u'0') prod_cEAN = SubElement(det_prod, 'cEAN') prod_cEAN.text = inv_line.product_id.ean13 prod_xProd = SubElement(det_prod, 'xProd') prod_xProd.text = inv_line.product_id.name prod_NCM = 
SubElement(det_prod, 'NCM') prod_NCM.text = inv_line.product_id.property_fiscal_classification.name prod_CFOP = SubElement(det_prod, 'CFOP') prod_CFOP.text = inv_line.cfop_id.code prod_uCom = SubElement(det_prod, 'uCom') prod_uCom.text = inv_line.uos_id.name prod_qCom = SubElement(det_prod, 'qCom') prod_qCom.text = str("%.4f" % inv_line.quantity) prod_vUnCom = SubElement(det_prod, 'vUnCom') prod_vUnCom.text = str("%.4f" % inv_line.price_unit) prod_vProd = SubElement(det_prod, 'vProd') prod_vProd.text = str("%.2f" % inv_line.price_subtotal) prod_cEANTrib = SubElement(det_prod, 'cEANTrib') #prod_vProd.text(inv_line.total) prod_uTrib = SubElement(det_prod, 'uTrib') prod_uTrib.text = inv_line.uos_id.name prod_qTrib = SubElement(det_prod, 'qTrib') prod_qTrib.text = '0.0000' # TODO prod_vUnTrib = SubElement(det_prod, 'vUnTrib') prod_vUnTrib.text = '0.00' # TODO prod_vFrete = SubElement(det_prod, 'vFrete') prod_vFrete.text = '0.00' # TODO - Valor do Frete prod_vSeg = SubElement(det_prod, 'vSeg') prod_vSeg.text = '0.00' # TODO - Valor do seguro prod_vDesc = SubElement(det_prod, 'vDesc') prod_vDesc.text = str("%.2f" % inv_line.discount) # TODO prod_vOutro = SubElement(det_prod, 'vOutro') prod_vOutro.text = '0.0000' # TODO prod_indTot = SubElement(det_prod, 'indTot') prod_indTot.text = '1' # TODO prod_imposto = SubElement(det, 'imposto') imposto_icms = SubElement(prod_imposto, 'ICMS') # + inv_line.icms_cst) imposto_icms_cst = SubElement(imposto_icms, 'ICMS%s' % (inv_line.icms_cst)) icms_orig = SubElement(imposto_icms_cst, 'orig') icms_orig.text = inv_line.product_id.origin icms_CST = SubElement(imposto_icms_cst, 'CST') icms_CST.text = inv_line.icms_cst icms_modBC = SubElement(imposto_icms_cst, 'modBC') icms_modBC.text = '0' # TODO icms_vBC = SubElement(imposto_icms_cst, 'vBC') icms_vBC.text = str("%.2f" % inv_line.icms_base) icms_pICMS = SubElement(imposto_icms_cst, 'pICMS') icms_pICMS.text = str("%.2f" % inv_line.icms_percent) icms_vICMS = SubElement(imposto_icms_cst, 
'vICMS') icms_vICMS.text = str("%.2f" % inv_line.icms_value) imposto_ipi = SubElement(prod_imposto, 'IPI') icms_cEnq = SubElement(imposto_ipi, 'cEnq') icms_cEnq.text = '999' #Imposto Não Tributado ipi_IPINT = SubElement(imposto_ipi, 'IPINT') ipi_CST = SubElement(ipi_IPINT, 'CST') ipi_CST.text = inv_line.ipi_cst imposto_pis = SubElement(prod_imposto, 'PIS') pis_PISAliq = SubElement(imposto_pis, 'PISAliq') pis_CST = SubElement(pis_PISAliq, 'CST') pis_CST.text = inv_line.pis_cst pis_vBC = SubElement(pis_PISAliq, 'vBC') pis_vBC.text = str("%.2f" % inv_line.pis_base) pis_pPIS = SubElement(pis_PISAliq, 'pPIS') pis_pPIS.text = str("%.2f" % inv_line.pis_percent) pis_vPIS = SubElement(pis_PISAliq, 'vPIS') pis_vPIS.text = str("%.2f" % inv_line.pis_value) imposto_cofins = SubElement(prod_imposto, 'COFINS') cofins_COFINSAliq = SubElement(imposto_cofins, 'COFINSAliq') cofins_CST = SubElement(cofins_COFINSAliq, 'CST') cofins_CST.text = inv_line.pis_cst cofins_vBC = SubElement(cofins_COFINSAliq, 'vBC') cofins_vBC.text = str("%.2f" % inv_line.cofins_base) cofins_pCOFINS = SubElement(cofins_COFINSAliq, 'pCOFINS') cofins_pCOFINS.text = str("%.2f" % inv_line.cofins_percent) cofins_vCOFINS = SubElement(cofins_COFINSAliq, 'vCOFINS') cofins_vCOFINS.text = str("%.2f" % inv_line.cofins_value) total = SubElement(infNFe, 'total') total_ICMSTot = SubElement(total, 'ICMSTot') ICMSTot_vBC = SubElement(total_ICMSTot, 'vBC') ICMSTot_vBC.text = str("%.2f" % inv.icms_base) ICMSTot_vICMS = SubElement(total_ICMSTot, 'vICMS') ICMSTot_vICMS.text = str("%.2f" % inv.icms_value) ICMSTot_vBCST = SubElement(total_ICMSTot, 'vBCST') ICMSTot_vBCST.text = '0.00' # TODO ICMSTot_vST = SubElement(total_ICMSTot, 'vST') ICMSTot_vST.text = '0.00' # TODO ICMSTot_vProd = SubElement(total_ICMSTot, 'vProd') ICMSTot_vProd.text = str("%.2f" % inv.amount_untaxed) ICMSTot_vFrete = SubElement(total_ICMSTot, 'vFrete') ICMSTot_vFrete.text = '0.00' # TODO ICMSTot_vSeg = SubElement(total_ICMSTot, 'vSeg') ICMSTot_vSeg.text = 
str("%.2f" % inv.amount_insurance) ICMSTot_vDesc = SubElement(total_ICMSTot, 'vDesc') ICMSTot_vDesc.text = '0.00' # TODO ICMSTot_II = SubElement(total_ICMSTot, 'vII') ICMSTot_II.text = '0.00' # TODO ICMSTot_vIPI = SubElement(total_ICMSTot, 'vIPI') ICMSTot_vIPI.text = str("%.2f" % inv.ipi_value) ICMSTot_vPIS = SubElement(total_ICMSTot, 'vPIS') ICMSTot_vPIS.text = str("%.2f" % inv.pis_value) ICMSTot_vCOFINS = SubElement(total_ICMSTot, 'vCOFINS') ICMSTot_vCOFINS.text = str("%.2f" % inv.cofins_value) ICMSTot_vOutro = SubElement(total_ICMSTot, 'vOutro') ICMSTot_vOutro.text = str("%.2f" % inv.amount_costs) ICMSTot_vNF = SubElement(total_ICMSTot, 'vNF') ICMSTot_vNF.text = str("%.2f" % inv.amount_total) transp = SubElement(infNFe, 'transp') # Modo do Frete: 0- Por conta do emitente; 1- Por conta do destinatário/remetente; 2- Por conta de terceiros; 9- Sem frete (v2.0) transp_modFrete = SubElement(transp, 'modFrete') transp_modFrete.text = '0' # TODO if inv.carrier_id: #Endereço do company carrier_addr = self.pool.get('res.partner').address_get(cr, uid, [inv.carrier_id.partner_id.id], ['default']) carrier_addr_default = self.pool.get('res.partner.address').browse(cr, uid, [carrier_addr['default']])[0] transp_transporta = SubElement(transp, 'transporta') if inv.carrier_id.partner_id.tipo_pessoa == 'J': transporta_CNPJ = SubElement(transp_transporta, 'CNPJ') transporta_CNPJ.text = inv.carrier_id.partner_id.cnpj_cpf else: transporta_CPF = SubElement(transp_transporta, 'CPF') transporta_CPF.text = inv.carrier_id.partner_id.cnpj_cpf transporta_xNome = SubElement(transp_transporta, 'xNome') if inv.carrier_id.partner_id.legal_name: transporta_xNome.text = inv.carrier_id.partner_id.legal_name else: transporta_xNome.text = inv.carrier_id.partner_id.name transporta_IE = SubElement(transp_transporta, 'IE') transporta_IE.text = inv.carrier_id.partner_id.inscr_est transporta_xEnder = SubElement(transp_transporta, 'xEnder') transporta_xEnder.text = carrier_addr_default.street 
            # ---- tail of the NF-e XML export routine (its `def` is above this chunk) ----
            # NOTE(review): formatting/indentation of this fragment is reconstructed from a
            # whitespace-mangled source — confirm against the original module layout.
            transporta_xMun = SubElement(transp_transporta, 'xMun')
            # Municipality code: IBGE state code concatenated with the IBGE city code.
            transporta_xMun.text = ('%s%s') % (carrier_addr_default.state_id.ibge_code, carrier_addr_default.l10n_br_city_id.ibge_code)
            transporta_UF = SubElement(transp_transporta, 'UF')
            transporta_UF.text = carrier_addr_default.state_id.code
        if inv.number_of_packages:
            # <vol> element: shipped volumes (quantity, kind, net and gross weight).
            transp_vol = SubElement(transp, 'vol')
            vol_qVol = SubElement(transp_vol, 'qVol')
            vol_qVol.text = inv.number_of_packages
            vol_esp = SubElement(transp_vol, 'esp')
            vol_esp.text = 'volume'  # TODO
            vol_pesoL = SubElement(transp_vol, 'pesoL')
            vol_pesoL.text = inv.weight_net
            vol_pesoB = SubElement(transp_vol, 'pesoB')
            vol_pesoB.text = inv.weight
        xml_string = ElementTree.tostring(nfeProc, 'utf-8')
        return xml_string

    def onchange_partner_id(self, cr, uid, ids, type, partner_id, \
            date_invoice=False, payment_term=False, partner_bank_id=False,
            company_id=False, fiscal_operation_category_id=False):
        # Extends the standard partner onchange: after the base behaviour, derive
        # the Brazilian fiscal position / fiscal operation / CFOP / fiscal document
        # from an `account.fiscal.position.rule` matching the company, the route
        # (from/to country+state) and, preferably, the partner fiscal type.
        if self.browse(cr, uid, ids)[0].partner_id.id != partner_id:
            result = super(account_invoice, self).onchange_partner_id(cr, uid, ids, type, partner_id, date_invoice, payment_term, partner_bank_id, company_id)
            # Reset the fiscal fields; they are recomputed below when a rule matches.
            result['value']['fiscal_operation_id'] = False
            result['value']['cfop_id'] = False
            result['value']['fiscal_document_id'] = False
            if not partner_id or not company_id or not result['value']['address_invoice_id']:
                return result
            # Origin side of the route: the company's default address.
            obj_company = self.pool.get('res.company').browse(cr, uid, [company_id])[0]
            company_addr = self.pool.get('res.partner').address_get(cr, uid, [obj_company.partner_id.id], ['default'])
            company_addr_default = self.pool.get('res.partner.address').browse(cr, uid, [company_addr['default']])[0]
            from_country = company_addr_default.country_id.id
            from_state = company_addr_default.state_id.id
            obj_partner = self.pool.get('res.partner').browse(cr, uid, [partner_id])[0]
            partner_fiscal_type = obj_partner.partner_fiscal_type_id.id
            # Destination side: the invoice address chosen by the base onchange.
            partner_addr_default = self.pool.get('res.partner.address').browse(cr, uid, [result['value']['address_invoice_id']])[0]
            to_country = partner_addr_default.country_id.id
            to_state = partner_addr_default.state_id.id
            # Most specific rule first (matching the partner fiscal type)...
            fsc_pos_id = self.pool.get('account.fiscal.position.rule').search(cr, uid, [('company_id', '=', company_id), ('from_country', '=', from_country), ('from_state', '=', from_state), ('to_country', '=', to_country), ('to_state', '=', to_state), ('use_invoice', '=', True), ('partner_fiscal_type_id', '=', partner_fiscal_type), ('fiscal_operation_category_id', '=', fiscal_operation_category_id)])
            if not fsc_pos_id:
                # ...then fall back to a rule without the partner fiscal type.
                fsc_pos_id = self.pool.get('account.fiscal.position.rule').search(cr, uid, [('company_id', '=', company_id), ('from_country', '=', from_country), ('from_state', '=', from_state), ('to_country', '=', to_country), ('to_state', '=', to_state), ('use_invoice', '=', True), ('fiscal_operation_category_id', '=', fiscal_operation_category_id)])
            if fsc_pos_id:
                obj_fpo_rule = self.pool.get('account.fiscal.position.rule').browse(cr, uid, fsc_pos_id)[0]
                obj_fpo = self.pool.get('account.fiscal.position').browse(cr, uid, [obj_fpo_rule.fiscal_position_id.id])[0]
                obj_foperation = self.pool.get('l10n_br_account.fiscal.operation').browse(cr, uid, [obj_fpo.fiscal_operation_id.id])[0]
                result['value']['fiscal_position'] = obj_fpo.id
                result['value']['fiscal_operation_id'] = obj_foperation.id
                result['value']['cfop_id'] = obj_foperation.cfop_id.id
                result['value']['fiscal_document_id'] = obj_foperation.fiscal_document_id.id
                #for inv in self.browse(cr, uid, ids):
                #    for line in inv.invoice_line:
                #        line.cfop_id = obj_foperation.cfop_id.id
                        #line.write(cr, uid, line.id, {'cfop_id': obj_foperation.cfop_id.id})
            return result
        else:
            # FIXME(review): inconsistent return type — the branch above returns an
            # onchange dict, this one returns True; callers expecting a dict may break.
            return True

    def onchange_company_id(self, cr, uid, ids, company_id, partner_id, type,
            invoice_line, currency_id, address_invoice_id,
            fiscal_operation_category_id=False):
        # Same fiscal-rule lookup as onchange_partner_id, re-run when the company
        # changes.  The whole body is wrapped in try/except (see below).
        try:
            if self.browse(cr, uid, ids)[0].company_id.id != company_id:
                result = super(account_invoice, self).onchange_company_id(cr, uid, ids, company_id, partner_id, type, invoice_line, currency_id, address_invoice_id)
                # Reset the fiscal fields before recomputation.
                result['value']['fiscal_operation_id'] = False
                result['value']['cfop_id'] = False
                result['value']['fiscal_document_id'] = False
                if not partner_id or not company_id or not address_invoice_id:
                    return result
                # Origin side of the route: the company's default address.
                obj_company = self.pool.get('res.company').browse(cr, uid, [company_id])[0]
                company_addr = self.pool.get('res.partner').address_get(cr, uid, [obj_company.partner_id.id], ['default'])
                company_addr_default = self.pool.get('res.partner.address').browse(cr, uid, [company_addr['default']])[0]
                from_country = company_addr_default.country_id.id
                from_state = company_addr_default.state_id.id
                obj_partner = self.pool.get('res.partner').browse(cr, uid, [partner_id])[0]
                partner_fiscal_type = obj_partner.partner_fiscal_type_id.id
                if obj_partner.property_account_position.id:
                    # FIXME(review): `obj_fpo_rule` is referenced here but never assigned
                    # in this method before this point, so taking this branch raises
                    # NameError at runtime (silently swallowed by the bare `except`
                    # below).  It presumably should browse the partner's
                    # property_account_position instead — confirm before fixing.
                    obj_fpo = self.pool.get('account.fiscal.position').browse(cr, uid, [obj_fpo_rule.fiscal_position_id.id])[0]
                    obj_foperation = self.pool.get('l10n_br_account.fiscal.operation').browse(cr, uid, [obj_fpo.fiscal_operation_id.id])[0]
                    result['value']['fiscal_position'] = obj_fpo.id
                    result['value']['fiscal_operation_id'] = obj_foperation.id
                    result['value']['cfop_id'] = obj_foperation.cfop_id.id
                    result['value']['fiscal_document_id'] = obj_foperation.fiscal_document_id.id
                    return result
                # Destination side: the invoice address.
                partner_addr_invoice = self.pool.get('res.partner.address').browse(cr, uid, [address_invoice_id])[0]
                to_country = partner_addr_invoice.country_id.id
                to_state = partner_addr_invoice.state_id.id
                # Most specific rule first (matching the partner fiscal type)...
                fsc_pos_id = self.pool.get('account.fiscal.position.rule').search(cr, uid, [('company_id', '=', company_id), ('from_country', '=', from_country), ('from_state', '=', from_state), ('to_country', '=', to_country), ('to_state', '=', to_state), ('use_invoice', '=', True), ('partner_fiscal_type_id', '=', partner_fiscal_type), ('fiscal_operation_category_id', '=', fiscal_operation_category_id)])
                if not fsc_pos_id:
                    # ...then fall back to a rule without the partner fiscal type.
                    fsc_pos_id = self.pool.get('account.fiscal.position.rule').search(cr, uid, [('company_id', '=', company_id), ('from_country', '=', from_country), ('from_state', '=', from_state), ('to_country', '=', to_country), ('to_state', '=', to_state), ('use_invoice', '=', True), ('fiscal_operation_category_id', '=', fiscal_operation_category_id)])
                if fsc_pos_id:
                    obj_fpo_rule = self.pool.get('account.fiscal.position.rule').browse(cr, uid, fsc_pos_id)[0]
                    obj_fpo = self.pool.get('account.fiscal.position').browse(cr, uid, [obj_fpo_rule.fiscal_position_id.id])[0]
                    obj_foperation = self.pool.get('l10n_br_account.fiscal.operation').browse(cr, uid, [obj_fpo.fiscal_operation_id.id])[0]
                    result['value']['fiscal_position'] = obj_fpo.id
                    result['value']['fiscal_operation_id'] = obj_foperation.id
                    result['value']['cfop_id'] = obj_foperation.cfop_id.id
                    result['value']['fiscal_document_id'] = obj_foperation.fiscal_document_id.id
                    # Propagate the resolved CFOP to every existing invoice line.
                    for inv in self.browse(cr, uid, ids):
                        for line in inv.invoice_line:
                            line.cfop_id = obj_foperation.cfop_id.id
                return result
        except:
            # FIXME(review): bare except silently hides every error (including the
            # NameError above) and falls through to the plain super() call.
            pass
        result = super(account_invoice, self).onchange_company_id(cr, uid, ids, company_id, partner_id, type, invoice_line, currency_id, address_invoice_id)
        result['value']['fiscal_operation_id'] = False
        result['value']['cfop_id'] = False
        result['value']['fiscal_document_id'] = False
        return result

    def onchange_address_invoice_id(self, cr, uid, ids, cpy_id, ptn_id, ptn_invoice_id, fiscal_operation_category_id=False):
        # Recompute fiscal position / operation / CFOP / fiscal document when the
        # invoice address changes, using the same account.fiscal.position.rule
        # lookup as the partner/company onchanges.
        result = super(account_invoice, self).onchange_address_invoice_id(cr, uid, ids, cpy_id, ptn_id, ptn_invoice_id)
        result['value']['fiscal_operation_id'] = False
        result['value']['cfop_id'] = False
        result['value']['fiscal_document_id'] = False
        if not ptn_id or not cpy_id or not ptn_invoice_id:
            return result
        obj_company = self.pool.get('res.company').browse(cr, uid, [cpy_id])[0]
        company_addr = self.pool.get('res.partner').address_get(cr, uid, [obj_company.partner_id.id], ['default'])
        company_addr_default = self.pool.get('res.partner.address').browse(cr, uid, [company_addr['default']])[0]
        from_country = company_addr_default.country_id.id
        from_state = company_addr_default.state_id.id
        obj_partner = self.pool.get('res.partner').browse(cr, uid, [ptn_id])[0]
        partner_fiscal_type = obj_partner.partner_fiscal_type_id.id
        if obj_partner.property_account_position.id:
            # FIXME(review): same latent NameError as in onchange_company_id —
            # `obj_fpo_rule` is undefined at this point, and here there is NO
            # enclosing try/except, so this branch crashes when taken.
            obj_fpo = self.pool.get('account.fiscal.position').browse(cr, uid, [obj_fpo_rule.fiscal_position_id.id])[0]
            obj_foperation = self.pool.get('l10n_br_account.fiscal.operation').browse(cr, uid, [obj_fpo.fiscal_operation_id.id])[0]
            result['value']['fiscal_position'] = obj_fpo.id
            result['value']['fiscal_operation_id'] = obj_foperation.id
            result['value']['cfop_id'] = obj_foperation.cfop_id.id
            result['value']['fiscal_document_id'] = obj_foperation.fiscal_document_id.id
            return result
        partner_addr_invoice = self.pool.get('res.partner.address').browse(cr, uid, [ptn_invoice_id])[0]
        to_country = partner_addr_invoice.country_id.id
        to_state = partner_addr_invoice.state_id.id
        # Most specific rule first, then without the partner fiscal type.
        fsc_pos_id = self.pool.get('account.fiscal.position.rule').search(cr, uid, [('company_id', '=', cpy_id), ('from_country', '=', from_country), ('from_state', '=', from_state), ('to_country', '=', to_country), ('to_state', '=', to_state), ('use_invoice', '=', True), ('partner_fiscal_type_id', '=', partner_fiscal_type), ('fiscal_operation_category_id', '=', fiscal_operation_category_id)])
        if not fsc_pos_id:
            fsc_pos_id = self.pool.get('account.fiscal.position.rule').search(cr, uid, [('company_id', '=', cpy_id), ('from_country', '=', from_country), ('from_state', '=', from_state), ('to_country', '=', to_country), ('to_state', '=', to_state), ('use_invoice', '=', True), ('fiscal_operation_category_id', '=', fiscal_operation_category_id)])
        if fsc_pos_id:
            obj_fpo_rule = self.pool.get('account.fiscal.position.rule').browse(cr, uid, fsc_pos_id)[0]
            obj_fpo = self.pool.get('account.fiscal.position').browse(cr, uid, [obj_fpo_rule.fiscal_position_id.id])[0]
            obj_foperation = self.pool.get('l10n_br_account.fiscal.operation').browse(cr, uid, [obj_fpo.fiscal_operation_id.id])[0]
            result['value']['fiscal_position'] = \
                obj_fpo.id
            result['value']['fiscal_operation_id'] = obj_foperation.id
            result['value']['cfop_id'] = obj_foperation.cfop_id.id
            result['value']['fiscal_document_id'] = obj_foperation.fiscal_document_id.id
            # Propagate the resolved CFOP to every existing invoice line.
            for inv in self.browse(cr, uid, ids):
                for line in inv.invoice_line:
                    line.cfop_id = obj_foperation.cfop_id.id
        return result

    def onchange_cfop_id(self, cr, uid, ids, cfop_id):
        # Push a CFOP change on the invoice down to all of its lines.
        if not cfop_id:
            return False
        for inv in self.browse(cr, uid, ids):
            for inv_line in inv.invoice_line:
                # FIXME(review): the lines are written with the CFOP of the invoice's
                # fiscal operation, not with the `cfop_id` just selected (which is
                # only echoed back in the returned value) — confirm intent.
                self.pool.get('account.invoice.line').write(cr, uid, inv_line.id, {'cfop_id': inv.fiscal_operation_id.cfop_id.id})
        return {'value': {'cfop_id': cfop_id}}

account_invoice()


class account_invoice_line(osv.osv):
    # Brazilian localization of invoice lines: per-line CFOP / fiscal operation
    # fields plus computed ICMS/IPI/PIS/COFINS tax amounts.
    _inherit = 'account.invoice.line'

    def fields_view_get(self, cr, uid, view_id=None, view_type=False, context=None, toolbar=False, submenu=False):
        # Rewrites the form/tree view architecture on the fly:
        #  * form: restricts CFOP / fiscal category / fiscal operation domains to
        #    the operation direction derived from context['type'], and adapts the
        #    view for service-type documents;
        #  * tree: relabels partner_id as Customer/Supplier.
        # NOTE(review): nesting of the branches below is reconstructed from a
        # whitespace-mangled source — verify against the original module.
        result = super(account_invoice_line, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)
        if context is None:
            context = {}
        if view_type == 'form':
            eview = etree.fromstring(result['arch'])
            if 'type' in context.keys():
                # Map the invoice type onto the fiscal operation direction.
                operation_type = {'out_invoice': 'output', 'in_invoice': 'input', 'out_refund': 'input', 'in_refund': 'output'}
                cfops = eview.xpath("//field[@name='cfop_id']")
                for cfop_id in cfops:
                    cfop_id.set('domain', "[('type', '=', '%s')]" % (operation_type[context['type']], ))
                    cfop_id.set('required', '1')
                fiscal_operation_categories = eview.xpath("//field[@name='fiscal_operation_category_id']")
                for fiscal_operation_category_id in fiscal_operation_categories:
                    fiscal_operation_category_id.set('domain', "[('fiscal_type', '=', 'product'), ('type', '=', '%s'), ('use_invoice', '=', True)]" % (operation_type[context['type']], ))
                    fiscal_operation_category_id.set('required', '1')
                fiscal_operations = eview.xpath("//field[@name='fiscal_operation_id']")
                for fiscal_operation_id in fiscal_operations:
                    fiscal_operation_id.set('domain', "[('fiscal_type', '=', 'product'), ('type', '=', '%s'), ('fiscal_operation_category_id', '=', fiscal_operation_category_id), ('use_invoice', '=', True)]" % (operation_type[context['type']], ))
                    fiscal_operation_id.set('required', '1')
            if context.get('fiscal_type', False) == 'service':
                # Service documents: restrict products, hide the CFOP and switch
                # the fiscal category/operation domains to fiscal_type 'service'.
                products = eview.xpath("//field[@name='product_id']")
                for product_id in products:
                    product_id.set('domain', "[('fiscal_type', '=', '%s')]" % (context['fiscal_type']))
                cfops = eview.xpath("//field[@name='cfop_id']")
                for cfop_id in cfops:
                    cfop_id.set('invisible', '1')
                    cfop_id.set('required', '0')
                if context['type'] in ('in_invoice', 'out_refund'):
                    fiscal_operation_categories = eview.xpath("//field[@name='fiscal_operation_category_id']")
                    for fiscal_operation_category_id in fiscal_operation_categories:
                        fiscal_operation_category_id.set('domain', "[('fiscal_type', '=', 'service'), ('type', '=', 'input'), ('use_invoice', '=', True)]")
                        fiscal_operation_category_id.set('required', '1')
                    fiscal_operations = eview.xpath("//field[@name='fiscal_operation_id']")
                    for fiscal_operation_id in fiscal_operations:
                        fiscal_operation_id.set('domain', "[('fiscal_type', '=', 'service'), ('type', '=', 'input'), ('fiscal_operation_category_id', '=', fiscal_operation_category_id), ('use_invoice', '=', True)]")
                        fiscal_operation_id.set('required', '1')
                if context['type'] in ('out_invoice', 'in_refund'):
                    fiscal_operation_categories = eview.xpath("//field[@name='fiscal_operation_category_id']")
                    for fiscal_operation_category_id in fiscal_operation_categories:
                        fiscal_operation_category_id.set('domain', "[('fiscal_type', '=', 'service'), ('type', '=', 'output'), ('use_invoice', '=', True)]")
                        fiscal_operation_category_id.set('required', '1')
                    fiscal_operations = eview.xpath("//field[@name='fiscal_operation_id']")
                    for fiscal_operation_id in fiscal_operations:
                        fiscal_operation_id.set('domain', "[('fiscal_type', '=', 'service'), ('type', '=', 'output'), ('fiscal_operation_category_id', '=', fiscal_operation_category_id), ('use_invoice', '=', True)]")
                        fiscal_operation_id.set('required', '1')
            result['arch'] = etree.tostring(eview)
        if view_type == 'tree':
            doc = etree.XML(result['arch'])
            nodes = doc.xpath("//field[@name='partner_id']")
            partner_string = _('Customer')
            if context.get('type', 'out_invoice') in ('in_invoice', 'in_refund'):
                partner_string = _('Supplier')
            for node in nodes:
                node.set('string', partner_string)
            result['arch'] = etree.tostring(doc)
        return result

    def _amount_line(self, cr, uid, ids, prop, unknow_none, unknow_dict):
        # Function-field computation: per line, the subtotal/total plus base,
        # amount, percentage and CST code for ICMS, ICMS-ST, IPI, PIS and COFINS,
        # derived from the line's taxes and its fiscal operation configuration.
        res = {}
        # super(account_invoice_line, self)._amount_line(cr, uid, ids, prop, unknow_none, unknow_dict)
        tax_obj = self.pool.get('account.tax')
        # NOTE(review): fsc_op_line_obj is assigned but never used in this method
        # (the searches below go through self.pool directly).
        fsc_op_line_obj = self.pool.get('l10n_br_account.fiscal.operation.line')
        cur_obj = self.pool.get('res.currency')
        for line in self.browse(cr, uid, ids):
            # Default result: everything zero, CSTs set to the "exempt" codes.
            res[line.id] = {
                'price_subtotal': 0.0,
                'price_total': 0.0,
                'icms_base': 0.0,
                'icms_base_other': 0.0,
                'icms_value': 0.0,
                'icms_percent': 0.0,
                'icms_percent_reduction': 0.0,
                'icms_st_value': 0.0,
                'icms_st_base': 0.0,
                'icms_st_percent': 0.0,
                'icms_st_mva': 0.0,
                'icms_st_base_other': 0.0,
                'icms_cst': '40',  # defaults to exempt when there is no ICMS tax
                'ipi_type': 'percent',
                'ipi_base': 0.0,
                'ipi_base_other': 0.0,
                'ipi_value': 0.0,
                'ipi_percent': 0.0,
                'ipi_cst': '53',  # defaults to exempt when there is no IPI tax
                'pis_base': 0.0,
                'pis_base_other': 0.0,
                'pis_value': 0.0,
                'pis_percent': 0.0,
                'pis_cst': '99',  # defaults to exempt when there is no PIS tax
                'cofins_base': 0.0,
                'cofins_base_other': 0.0,
                'cofins_value': 0.0,
                'cofins_percent': 0.0,
                'cofins_cst': '99',  # defaults to exempt when there is no COFINS tax
            }
            # Unit price net of the line discount, then run the tax engine.
            price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)
            taxes = tax_obj.compute_all(cr, uid, line.invoice_line_tax_id, price, line.quantity, product=line.product_id, address_id=line.invoice_id.address_invoice_id, partner=line.invoice_id.partner_id)
            # Accumulators for the per-domain tax figures.
            icms_base = 0.0
            icms_base_other = 0.0
            icms_value = 0.0
            icms_percent = 0.0
            icms_percent_reduction = 0.0
            icms_st_value = 0.0
            icms_st_base = 0.0
            icms_st_percent = \
                0.0
            icms_st_mva = 0.0
            icms_st_base_other = 0.0
            icms_cst = '40'
            ipi_type = 'percent'
            ipi_base = 0.0
            ipi_base_other = 0.0
            ipi_value = 0.0
            ipi_percent = 0.0
            ipi_cst = '53'
            pis_base = 0.0
            pis_base_other = 0.0
            pis_value = 0.0
            pis_percent = 0.0
            pis_cst = '99'
            cofins_base = 0.0
            cofins_base_other = 0.0
            cofins_value = 0.0
            cofins_percent = 0.0
            cofins_cst = '99'
            if line.fiscal_operation_id:
                # CST codes from the fiscal operation's generic lines (no fiscal
                # classification), one per tax domain.
                fiscal_operation_ids = self.pool.get('l10n_br_account.fiscal.operation.line').search(cr, uid, [('company_id', '=', line.company_id.id), ('fiscal_operation_id', '=', line.fiscal_operation_id.id), ('fiscal_classification_id', '=', False)])
                for fo_line in self.pool.get('l10n_br_account.fiscal.operation.line').browse(cr, uid, fiscal_operation_ids):
                    if fo_line.tax_code_id.domain == 'icms':
                        icms_cst = fo_line.cst_id.code
                    if fo_line.tax_code_id.domain == 'ipi':
                        ipi_cst = fo_line.cst_id.code
                    if fo_line.tax_code_id.domain == 'pis':
                        pis_cst = fo_line.cst_id.code
                    if fo_line.tax_code_id.domain == 'cofins':
                        cofins_cst = fo_line.cst_id.code
                if line.product_id:
                    # NCM-specific lines (matching the product's fiscal
                    # classification) override the generic CSTs above.
                    fo_ids_ncm = self.pool.get('l10n_br_account.fiscal.operation.line').search(cr, uid, [('company_id', '=', line.company_id.id), ('fiscal_operation_id', '=', line.fiscal_operation_id.id), ('fiscal_classification_id', '=', line.product_id.property_fiscal_classification.id)])
                    for fo_line_ncm in self.pool.get('l10n_br_account.fiscal.operation.line').browse(cr, uid, fo_ids_ncm):
                        if fo_line_ncm.tax_code_id.domain == 'icms':
                            icms_cst = fo_line_ncm.cst_id.code
                        if fo_line_ncm.tax_code_id.domain == 'ipi':
                            ipi_cst = fo_line_ncm.cst_id.code
                        if fo_line_ncm.tax_code_id.domain == 'pis':
                            pis_cst = fo_line_ncm.cst_id.code
                        if fo_line_ncm.tax_code_id.domain == 'cofins':
                            cofins_cst = fo_line_ncm.cst_id.code
            # Fold each computed tax into the accumulator of its domain.
            for tax in taxes['taxes']:
                # NOTE(review): fsc_op_line_ids / fsc_fp_tax_ids are set but never
                # used in this loop.
                fsc_op_line_ids = 0
                fsc_fp_tax_ids = 0
                tax_brw = tax_obj.browse(cr, uid, tax['id'])
                if tax_brw.domain == 'icms':
                    icms_base += tax['total_base']
                    icms_base_other += taxes['total'] - tax['total_base']
                    icms_value += tax['amount']
                    icms_percent += tax_brw.amount * 100
                    icms_percent_reduction += tax_brw.base_reduction * 100
                if tax_brw.domain == 'ipi':
                    ipi_type = tax_brw.type
                    ipi_base += tax['total_base']
                    ipi_value += tax['amount']
                    ipi_percent += tax_brw.amount * 100
                if tax_brw.domain == 'pis':
                    pis_base += tax['total_base']
                    pis_base_other += taxes['total'] - tax['total_base']
                    pis_value += tax['amount']
                    pis_percent += tax_brw.amount * 100
                if tax_brw.domain == 'cofins':
                    cofins_base += tax['total_base']
                    cofins_base_other += taxes['total'] - tax['total_base']
                    cofins_value += tax['amount']
                    cofins_percent += tax_brw.amount * 100
                if tax_brw.domain == 'icmsst':
                    icms_st_value += tax['amount']
                    icms_st_base += tax['total_base']
                    # 'pauta'-type CST (fixed price list)
                    #icms_st_percent += icms_value
                    icms_st_mva += tax_brw.amount_mva * 100
                    icms_st_base_other += 0
            res[line.id] = {
                'price_subtotal': taxes['total'],
                'price_total': taxes['total'],
                'icms_base': icms_base,
                'icms_base_other': icms_base_other,
                'icms_value': icms_value,
                'icms_percent': icms_percent,
                'icms_percent_reduction': icms_percent_reduction,
                'icms_st_value': icms_st_value,
                'icms_st_base': icms_st_base,
                'icms_st_percent': icms_st_percent,
                'icms_st_mva': icms_st_mva,
                'icms_st_base_other': icms_st_base_other,
                'icms_cst': icms_cst,
                'ipi_type': ipi_type,
                'ipi_base': ipi_base,
                'ipi_base_other': ipi_base_other,
                'ipi_value': ipi_value,
                'ipi_percent': ipi_percent,
                'ipi_cst': ipi_cst,
                'pis_base': pis_base,
                'pis_base_other': pis_base_other,
                'pis_value': pis_value,
                'pis_percent': pis_percent,
                'pis_cst': pis_cst,
                'cofins_base': cofins_base,
                'cofins_base_other': cofins_base_other,
                'cofins_value': cofins_value,
                'cofins_percent': cofins_percent,
                'cofins_cst': cofins_cst,
            }
            if line.invoice_id:
                # Round the monetary amounts to the invoice currency precision
                # (percentages and CST codes are left untouched).
                cur = line.invoice_id.currency_id
                res[line.id] = {
                    'price_subtotal': cur_obj.round(cr, uid, cur, res[line.id]['price_subtotal']),
                    'price_total': cur_obj.round(cr, uid, cur, res[line.id]['price_total']),
                    'icms_base': cur_obj.round(cr, uid, cur, icms_base),
                    'icms_base_other': cur_obj.round(cr, uid, cur,
                        icms_base_other),
                    'icms_value': cur_obj.round(cr, uid, cur, icms_value),
                    'icms_percent': icms_percent,
                    'icms_percent_reduction': icms_percent_reduction,
                    'icms_st_value': cur_obj.round(cr, uid, cur, icms_st_value),
                    'icms_st_base': cur_obj.round(cr, uid, cur, icms_st_base),
                    'icms_st_percent': icms_st_percent,
                    'icms_st_mva': icms_st_mva,
                    'icms_st_base_other': cur_obj.round(cr, uid, cur, icms_st_base_other),
                    'icms_cst': icms_cst,
                    'ipi_type': ipi_type,
                    'ipi_base': cur_obj.round(cr, uid, cur, ipi_base),
                    'ipi_base_other': cur_obj.round(cr, uid, cur, ipi_base_other),
                    'ipi_value': cur_obj.round(cr, uid, cur, ipi_value),
                    'ipi_percent': ipi_percent,
                    'ipi_cst': ipi_cst,
                    'pis_base': cur_obj.round(cr, uid, cur, pis_base),
                    'pis_base_other': cur_obj.round(cr, uid, cur, pis_base_other),
                    'pis_value': cur_obj.round(cr, uid, cur, pis_value),
                    'pis_percent': pis_percent,
                    'pis_cst': pis_cst,
                    'cofins_base': cur_obj.round(cr, uid, cur, cofins_base),
                    'cofins_base_other': cur_obj.round(cr, uid, cur, cofins_base_other),
                    'cofins_value': cur_obj.round(cr, uid, cur, cofins_value),
                    'cofins_percent': cofins_percent,
                    'cofins_cst': cofins_cst,
                }
        return res

    # All tax figures are stored function fields computed by _amount_line
    # (multi='all' computes them in a single pass per line).
    _columns = {
        'fiscal_operation_category_id': fields.many2one('l10n_br_account.fiscal.operation.category', 'Categoria', readonly=True, states={'draft': [('readonly', False)]}),
        'fiscal_operation_id': fields.many2one('l10n_br_account.fiscal.operation', 'Operação Fiscal', domain="[('fiscal_operation_category_id', '=', fiscal_operation_category_id)]", readonly=True, states={'draft': [('readonly', False)]}),
        'cfop_id': fields.many2one('l10n_br_account.cfop', 'CFOP'),
        'price_subtotal': fields.function(_amount_line, method=True, string='Subtotal', type="float", digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'price_total': fields.function(_amount_line, method=True, string='Total', type="float", digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'icms_base': fields.function(_amount_line, method=True, string='Base ICMS', type="float", digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'icms_base_other': fields.function(_amount_line, method=True, string='Base ICMS Outras', type="float", digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'icms_value': fields.function(_amount_line, method=True, string='Valor ICMS', type="float", digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'icms_percent': fields.function(_amount_line, method=True, string='Perc ICMS', type="float", digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'icms_percent_reduction': fields.function(_amount_line, method=True, string='Perc Redução de Base ICMS', type="float", digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'icms_st_value': fields.function(_amount_line, method=True, string='Valor ICMS ST', type="float", digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'icms_st_base': fields.function(_amount_line, method=True, string='Base ICMS ST', type="float", digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'icms_st_percent': fields.function(_amount_line, method=True, string='Percentual ICMS ST', type="float", digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'icms_st_mva': fields.function(_amount_line, method=True, string='MVA ICMS ST', type="float", digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'icms_st_base_other': fields.function(_amount_line, method=True, string='Base ICMS ST Outras', type="float", digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'icms_cst': fields.function(_amount_line, method=True, string='CST ICMS', type="char", size=3, store=True, multi='all'),
        'ipi_type': fields.function(_amount_line, method=True, string='Tipo do IPI', type="char", size=64, store=True, multi='all'),
        'ipi_base': fields.function(_amount_line, method=True, string='Base IPI', type="float", digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'ipi_base_other': fields.function(_amount_line, method=True, string='Base IPI Outras', type="float", digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'ipi_value': fields.function(_amount_line, method=True, string='Valor IPI', type="float", digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'ipi_percent': fields.function(_amount_line, method=True, string='Perc IPI', type="float", digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'ipi_cst': fields.function(_amount_line, method=True, string='CST IPI', type="char", size=2, store=True, multi='all'),
        'pis_base': fields.function(_amount_line, method=True, string='Base PIS', type="float", digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'pis_base_other': fields.function(_amount_line, method=True, string='Base PIS Outras', type="float", digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'pis_value': fields.function(_amount_line, method=True, string='Valor PIS', type="float", digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'pis_percent': fields.function(_amount_line, method=True, string='Perc PIS', type="float", digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'pis_cst': fields.function(_amount_line, method=True, string='CST PIS', type="char", size=2, store=True, multi='all'),
        'cofins_base': fields.function(_amount_line, method=True, string='Base COFINS', type="float", digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'cofins_base_other': fields.function(_amount_line, method=True, string='Base COFINS Outras', type="float", digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'cofins_value': fields.function(_amount_line, method=True, string='Valor COFINS', type="float", digits_compute=dp.get_precision('Account'), store=True, multi='all'),
        'cofins_percent':
fields.function(_amount_line, method=True, string='Perc COFINS', type="float", digits_compute=dp.get_precision('Account'), store=True, multi='all'), 'cofins_cst': fields.function(_amount_line, method=True, string='Valor COFINS', type="char", size=2, store=True, multi='all'), } def product_id_change(self, cr, uid, ids, product, uom, qty=0, name='', type='out_invoice', partner_id=False, fposition_id=False, price_unit=False, address_invoice_id=False, currency_id=False, context=None, cfop_id=False): result = super(account_invoice_line, self).product_id_change(cr, uid, ids, product, uom, qty, name, type, partner_id, fposition_id, price_unit, address_invoice_id, currency_id, context) if not cfop_id: return result result['value']['cfop_id'] = cfop_id result['value']['fiscal_operation_category_id'] = cfop_id result['value']['fiscal_operation_id'] = cfop_id return result account_invoice_line() class account_invoice_tax(osv.osv): _inherit = "account.invoice.tax" _description = "Invoice Tax" def compute(self, cr, uid, invoice_id, context={}): tax_grouped = {} tax_obj = self.pool.get('account.tax') cur_obj = self.pool.get('res.currency') inv = self.pool.get('account.invoice').browse(cr, uid, invoice_id, context) cur = inv.currency_id company_currency = inv.company_id.currency_id.id for line in inv.invoice_line: taxes = tax_obj.compute_all(cr, uid, line.invoice_line_tax_id, (line.price_unit * (1 - (line.discount or 0.0) / 100.0)), line.quantity, inv.address_invoice_id.id, line.product_id, inv.partner_id) for tax in taxes['taxes']: val = {} val['invoice_id'] = inv.id val['name'] = tax['name'] val['amount'] = tax['amount'] val['manual'] = False val['sequence'] = tax['sequence'] val['base'] = tax['total_base'] if inv.type in ('out_invoice', 'in_invoice'): val['base_code_id'] = tax['base_code_id'] val['tax_code_id'] = tax['tax_code_id'] val['base_amount'] = cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, val['base'] * tax['base_sign'], context={'date': 
inv.date_invoice or time.strftime('%Y-%m-%d')}, round=False) val['tax_amount'] = cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, val['amount'] * tax['tax_sign'], context={'date': inv.date_invoice or time.strftime('%Y-%m-%d')}, round=False) val['account_id'] = tax['account_collected_id'] or line.account_id.id else: val['base_code_id'] = tax['ref_base_code_id'] val['tax_code_id'] = tax['ref_tax_code_id'] val['base_amount'] = cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, val['base'] * tax['ref_base_sign'], context={'date': inv.date_invoice or time.strftime('%Y-%m-%d')}, round=False) val['tax_amount'] = cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, val['amount'] * tax['ref_tax_sign'], context={'date': inv.date_invoice or time.strftime('%Y-%m-%d')}, round=False) val['account_id'] = tax['account_paid_id'] or line.account_id.id key = (val['tax_code_id'], val['base_code_id'], val['account_id']) if not key in tax_grouped: tax_grouped[key] = val else: tax_grouped[key]['amount'] += val['amount'] tax_grouped[key]['base'] += val['base'] tax_grouped[key]['base_amount'] += val['base_amount'] tax_grouped[key]['tax_amount'] += val['tax_amount'] for t in tax_grouped.values(): t['base'] = cur_obj.round(cr, uid, cur, t['base']) t['amount'] = cur_obj.round(cr, uid, cur, t['amount']) t['base_amount'] = cur_obj.round(cr, uid, cur, t['base_amount']) t['tax_amount'] = cur_obj.round(cr, uid, cur, t['tax_amount']) return tax_grouped account_invoice_tax()
codeparrot/github-code-clean
#!/bin/env python """ This file defines a set of system_info classes for getting information about various resources (libraries, library directories, include directories, etc.) in the system. Currently, the following classes are available: atlas_info atlas_threads_info atlas_blas_info atlas_blas_threads_info lapack_atlas_info lapack_atlas_threads_info atlas_3_10_info atlas_3_10_threads_info atlas_3_10_blas_info, atlas_3_10_blas_threads_info, lapack_atlas_3_10_info lapack_atlas_3_10_threads_info blas_info lapack_info openblas_info blas_opt_info # usage recommended lapack_opt_info # usage recommended fftw_info,dfftw_info,sfftw_info fftw_threads_info,dfftw_threads_info,sfftw_threads_info djbfft_info x11_info lapack_src_info blas_src_info numpy_info numarray_info numpy_info boost_python_info agg2_info wx_info gdk_pixbuf_xlib_2_info gdk_pixbuf_2_info gdk_x11_2_info gtkp_x11_2_info gtkp_2_info xft_info freetype2_info umfpack_info Usage: info_dict = get_info(<name>) where <name> is a string 'atlas','x11','fftw','lapack','blas', 'lapack_src', 'blas_src', etc. For a complete list of allowed names, see the definition of get_info() function below. Returned info_dict is a dictionary which is compatible with distutils.setup keyword arguments. If info_dict == {}, then the asked resource is not available (system_info could not find it). Several *_info classes specify an environment variable to specify the locations of software. When setting the corresponding environment variable to 'None' then the software will be ignored, even when it is available in system. Global parameters: system_info.search_static_first - search static libraries (.a) in precedence to shared ones (.so, .sl) if enabled. system_info.verbosity - output the results to stdout if enabled. The file 'site.cfg' is looked for in 1) Directory of main setup.py file being run. 2) Home directory of user running the setup.py file as ~/.numpy-site.cfg 3) System wide directory (location of this file...) 
The first one found is used to get system configuration options The format is that used by ConfigParser (i.e., Windows .INI style). The section ALL has options that are the default for each section. The available sections are fftw, atlas, and x11. Appropiate defaults are used if nothing is specified. The order of finding the locations of resources is the following: 1. environment variable 2. section in site.cfg 3. ALL section in site.cfg Only the first complete match is returned. Example: ---------- [ALL] library_dirs = /usr/lib:/usr/local/lib:/opt/lib include_dirs = /usr/include:/usr/local/include:/opt/include src_dirs = /usr/local/src:/opt/src # search static libraries (.a) in preference to shared ones (.so) search_static_first = 0 [fftw] fftw_libs = rfftw, fftw fftw_opt_libs = rfftw_threaded, fftw_threaded # if the above aren't found, look for {s,d}fftw_libs and {s,d}fftw_opt_libs [atlas] library_dirs = /usr/lib/3dnow:/usr/lib/3dnow/atlas # for overriding the names of the atlas libraries atlas_libs = lapack, f77blas, cblas, atlas [x11] library_dirs = /usr/X11R6/lib include_dirs = /usr/X11R6/include ---------- Authors: Pearu Peterson <pearu@cens.ioc.ee>, February 2002 David M. Cooke <cookedm@physics.mcmaster.ca>, April 2002 Copyright 2002 Pearu Peterson all rights reserved, Pearu Peterson <pearu@cens.ioc.ee> Permission to use, modify, and distribute this software is given under the terms of the NumPy (BSD style) license. See LICENSE.txt that came with this distribution for specifics. NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. 
""" from __future__ import division, absolute_import, print_function import sys import os import re import copy import warnings from glob import glob from functools import reduce if sys.version_info[0] < 3: from ConfigParser import NoOptionError, ConfigParser else: from configparser import NoOptionError, ConfigParser from distutils.errors import DistutilsError from distutils.dist import Distribution import distutils.sysconfig from distutils import log from distutils.util import get_platform from numpy.distutils.exec_command import \ find_executable, exec_command, get_pythonexe from numpy.distutils.misc_util import is_sequence, is_string, \ get_shared_lib_extension from numpy.distutils.command.config import config as cmd_config from numpy.distutils.compat import get_exception import distutils.ccompiler import tempfile import shutil # Determine number of bits import platform _bits = {'32bit': 32, '64bit': 64} platform_bits = _bits[platform.architecture()[0]] def libpaths(paths, bits): """Return a list of library paths valid on 32 or 64 bit systems. Inputs: paths : sequence A sequence of strings (typically paths) bits : int An integer, the only valid values are 32 or 64. A ValueError exception is raised otherwise. 
Examples: Consider a list of directories >>> paths = ['/usr/X11R6/lib','/usr/X11/lib','/usr/lib'] For a 32-bit platform, this is already valid: >>> np.distutils.system_info.libpaths(paths,32) ['/usr/X11R6/lib', '/usr/X11/lib', '/usr/lib'] On 64 bits, we prepend the '64' postfix >>> np.distutils.system_info.libpaths(paths,64) ['/usr/X11R6/lib64', '/usr/X11R6/lib', '/usr/X11/lib64', '/usr/X11/lib', '/usr/lib64', '/usr/lib'] """ if bits not in (32, 64): raise ValueError("Invalid bit size in libpaths: 32 or 64 only") # Handle 32bit case if bits == 32: return paths # Handle 64bit case out = [] for p in paths: out.extend([p + '64', p]) return out if sys.platform == 'win32': default_lib_dirs = ['C:\\', os.path.join(distutils.sysconfig.EXEC_PREFIX, 'libs')] default_runtime_dirs = [] default_include_dirs = [] default_src_dirs = ['.'] default_x11_lib_dirs = [] default_x11_include_dirs = [] else: default_lib_dirs = libpaths(['/usr/local/lib', '/opt/lib', '/usr/lib', '/opt/local/lib', '/sw/lib'], platform_bits) default_runtime_dirs = [] default_include_dirs = ['/usr/local/include', '/opt/include', '/usr/include', # path of umfpack under macports '/opt/local/include/ufsparse', '/opt/local/include', '/sw/include', '/usr/include/suitesparse'] default_src_dirs = ['.', '/usr/local/src', '/opt/src', '/sw/src'] default_x11_lib_dirs = libpaths(['/usr/X11R6/lib', '/usr/X11/lib', '/usr/lib'], platform_bits) default_x11_include_dirs = ['/usr/X11R6/include', '/usr/X11/include', '/usr/include'] if os.path.exists('/usr/lib/X11'): globbed_x11_dir = glob('/usr/lib/*/libX11.so') if globbed_x11_dir: x11_so_dir = os.path.split(globbed_x11_dir[0])[0] default_x11_lib_dirs.extend([x11_so_dir, '/usr/lib/X11']) default_x11_include_dirs.extend(['/usr/lib/X11/include', '/usr/include/X11']) import subprocess as sp tmp = None try: # Explicitly open/close file to avoid ResourceWarning when # tests are run in debug mode Python 3. 
tmp = open(os.devnull, 'w') p = sp.Popen(["gcc", "-print-multiarch"], stdout=sp.PIPE, stderr=tmp) except (OSError, DistutilsError): # OSError if gcc is not installed, or SandboxViolation (DistutilsError # subclass) if an old setuptools bug is triggered (see gh-3160). pass else: triplet = str(p.communicate()[0].decode().strip()) if p.returncode == 0: # gcc supports the "-print-multiarch" option default_x11_lib_dirs += [os.path.join("/usr/lib/", triplet)] default_lib_dirs += [os.path.join("/usr/lib/", triplet)] finally: if tmp is not None: tmp.close() if os.path.join(sys.prefix, 'lib') not in default_lib_dirs: default_lib_dirs.insert(0, os.path.join(sys.prefix, 'lib')) default_include_dirs.append(os.path.join(sys.prefix, 'include')) default_src_dirs.append(os.path.join(sys.prefix, 'src')) default_lib_dirs = [_m for _m in default_lib_dirs if os.path.isdir(_m)] default_runtime_dirs = [_m for _m in default_runtime_dirs if os.path.isdir(_m)] default_include_dirs = [_m for _m in default_include_dirs if os.path.isdir(_m)] default_src_dirs = [_m for _m in default_src_dirs if os.path.isdir(_m)] so_ext = get_shared_lib_extension() def get_standard_file(fname): """Returns a list of files named 'fname' from 1) System-wide directory (directory-location of this module) 2) Users HOME directory (os.environ['HOME']) 3) Local directory """ # System-wide file filenames = [] try: f = __file__ except NameError: f = sys.argv[0] else: sysfile = os.path.join(os.path.split(os.path.abspath(f))[0], fname) if os.path.isfile(sysfile): filenames.append(sysfile) # Home directory # And look for the user config file try: f = os.path.expanduser('~') except KeyError: pass else: user_file = os.path.join(f, fname) if os.path.isfile(user_file): filenames.append(user_file) # Local file if os.path.isfile(fname): filenames.append(os.path.abspath(fname)) return filenames def get_info(name, notfound_action=0): """ notfound_action: 0 - do nothing 1 - display warning message 2 - raise error """ cl = {'atlas': 
atlas_info, # use lapack_opt or blas_opt instead 'atlas_threads': atlas_threads_info, # ditto 'atlas_blas': atlas_blas_info, 'atlas_blas_threads': atlas_blas_threads_info, 'lapack_atlas': lapack_atlas_info, # use lapack_opt instead 'lapack_atlas_threads': lapack_atlas_threads_info, # ditto 'atlas_3_10': atlas_3_10_info, # use lapack_opt or blas_opt instead 'atlas_3_10_threads': atlas_3_10_threads_info, # ditto 'atlas_3_10_blas': atlas_3_10_blas_info, 'atlas_3_10_blas_threads': atlas_3_10_blas_threads_info, 'lapack_atlas_3_10': lapack_atlas_3_10_info, # use lapack_opt instead 'lapack_atlas_3_10_threads': lapack_atlas_3_10_threads_info, # ditto 'mkl': mkl_info, # openblas which may or may not have embedded lapack 'openblas': openblas_info, # use blas_opt instead # openblas with embedded lapack 'openblas_lapack': openblas_lapack_info, # use blas_opt instead 'lapack_mkl': lapack_mkl_info, # use lapack_opt instead 'blas_mkl': blas_mkl_info, # use blas_opt instead 'x11': x11_info, 'fft_opt': fft_opt_info, 'fftw': fftw_info, 'fftw2': fftw2_info, 'fftw3': fftw3_info, 'dfftw': dfftw_info, 'sfftw': sfftw_info, 'fftw_threads': fftw_threads_info, 'dfftw_threads': dfftw_threads_info, 'sfftw_threads': sfftw_threads_info, 'djbfft': djbfft_info, 'blas': blas_info, # use blas_opt instead 'lapack': lapack_info, # use lapack_opt instead 'lapack_src': lapack_src_info, 'blas_src': blas_src_info, 'numpy': numpy_info, 'f2py': f2py_info, 'Numeric': Numeric_info, 'numeric': Numeric_info, 'numarray': numarray_info, 'numerix': numerix_info, 'lapack_opt': lapack_opt_info, 'blas_opt': blas_opt_info, 'boost_python': boost_python_info, 'agg2': agg2_info, 'wx': wx_info, 'gdk_pixbuf_xlib_2': gdk_pixbuf_xlib_2_info, 'gdk-pixbuf-xlib-2.0': gdk_pixbuf_xlib_2_info, 'gdk_pixbuf_2': gdk_pixbuf_2_info, 'gdk-pixbuf-2.0': gdk_pixbuf_2_info, 'gdk': gdk_info, 'gdk_2': gdk_2_info, 'gdk-2.0': gdk_2_info, 'gdk_x11_2': gdk_x11_2_info, 'gdk-x11-2.0': gdk_x11_2_info, 'gtkp_x11_2': gtkp_x11_2_info, 'gtk+-x11-2.0': 
gtkp_x11_2_info, 'gtkp_2': gtkp_2_info, 'gtk+-2.0': gtkp_2_info, 'xft': xft_info, 'freetype2': freetype2_info, 'umfpack': umfpack_info, 'amd': amd_info, }.get(name.lower(), system_info) return cl().get_info(notfound_action) class NotFoundError(DistutilsError): """Some third-party program or library is not found.""" class AtlasNotFoundError(NotFoundError): """ Atlas (http://math-atlas.sourceforge.net/) libraries not found. Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [atlas]) or by setting the ATLAS environment variable.""" class LapackNotFoundError(NotFoundError): """ Lapack (http://www.netlib.org/lapack/) libraries not found. Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [lapack]) or by setting the LAPACK environment variable.""" class LapackSrcNotFoundError(LapackNotFoundError): """ Lapack (http://www.netlib.org/lapack/) sources not found. Directories to search for the sources can be specified in the numpy/distutils/site.cfg file (section [lapack_src]) or by setting the LAPACK_SRC environment variable.""" class BlasNotFoundError(NotFoundError): """ Blas (http://www.netlib.org/blas/) libraries not found. Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [blas]) or by setting the BLAS environment variable.""" class BlasSrcNotFoundError(BlasNotFoundError): """ Blas (http://www.netlib.org/blas/) sources not found. Directories to search for the sources can be specified in the numpy/distutils/site.cfg file (section [blas_src]) or by setting the BLAS_SRC environment variable.""" class FFTWNotFoundError(NotFoundError): """ FFTW (http://www.fftw.org/) libraries not found. 
Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [fftw]) or by setting the FFTW environment variable.""" class DJBFFTNotFoundError(NotFoundError): """ DJBFFT (http://cr.yp.to/djbfft.html) libraries not found. Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [djbfft]) or by setting the DJBFFT environment variable.""" class NumericNotFoundError(NotFoundError): """ Numeric (http://www.numpy.org/) module not found. Get it from above location, install it, and retry setup.py.""" class X11NotFoundError(NotFoundError): """X11 libraries not found.""" class UmfpackNotFoundError(NotFoundError): """ UMFPACK sparse solver (http://www.cise.ufl.edu/research/sparse/umfpack/) not found. Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [umfpack]) or by setting the UMFPACK environment variable.""" class system_info(object): """ get_info() is the only public method. Don't use others. """ section = 'ALL' dir_env_var = None search_static_first = 0 # XXX: disabled by default, may disappear in # future unless it is proved to be useful. 
verbosity = 1 saved_results = {} notfounderror = NotFoundError def __init__(self, default_lib_dirs=default_lib_dirs, default_include_dirs=default_include_dirs, verbosity=1, ): self.__class__.info = {} self.local_prefixes = [] defaults = {'library_dirs': os.pathsep.join(default_lib_dirs), 'include_dirs': os.pathsep.join(default_include_dirs), 'runtime_library_dirs': os.pathsep.join(default_runtime_dirs), 'rpath': '', 'src_dirs': os.pathsep.join(default_src_dirs), 'search_static_first': str(self.search_static_first), 'extra_compile_args': '', 'extra_link_args': ''} self.cp = ConfigParser(defaults) self.files = [] self.files.extend(get_standard_file('.numpy-site.cfg')) self.files.extend(get_standard_file('site.cfg')) self.parse_config_files() if self.section is not None: self.search_static_first = self.cp.getboolean( self.section, 'search_static_first') assert isinstance(self.search_static_first, int) def parse_config_files(self): self.cp.read(self.files) if not self.cp.has_section(self.section): if self.section is not None: self.cp.add_section(self.section) def calc_libraries_info(self): libs = self.get_libraries() dirs = self.get_lib_dirs() # The extensions use runtime_library_dirs r_dirs = self.get_runtime_lib_dirs() # Intrinsic distutils use rpath, we simply append both entries # as though they were one entry r_dirs.extend(self.get_runtime_lib_dirs(key='rpath')) info = {} for lib in libs: i = self.check_libs(dirs, [lib]) if i is not None: dict_append(info, **i) else: log.info('Library %s was not found. Ignoring' % (lib)) i = self.check_libs(r_dirs, [lib]) if i is not None: # Swap library keywords found to runtime_library_dirs # the libraries are insisting on the user having defined # them using the library_dirs, and not necessarily by # runtime_library_dirs del i['libraries'] i['runtime_library_dirs'] = i.pop('library_dirs') dict_append(info, **i) else: log.info('Runtime library %s was not found. 
Ignoring' % (lib)) return info def set_info(self, **info): if info: lib_info = self.calc_libraries_info() dict_append(info, **lib_info) # Update extra information extra_info = self.calc_extra_info() dict_append(info, **extra_info) self.saved_results[self.__class__.__name__] = info def has_info(self): return self.__class__.__name__ in self.saved_results def calc_extra_info(self): """ Updates the information in the current information with respect to these flags: extra_compile_args extra_link_args """ info = {} for key in ['extra_compile_args', 'extra_link_args']: # Get values opt = self.cp.get(self.section, key) if opt: tmp = {key : [opt]} dict_append(info, **tmp) return info def get_info(self, notfound_action=0): """ Return a dictonary with items that are compatible with numpy.distutils.setup keyword arguments. """ flag = 0 if not self.has_info(): flag = 1 log.info(self.__class__.__name__ + ':') if hasattr(self, 'calc_info'): self.calc_info() if notfound_action: if not self.has_info(): if notfound_action == 1: warnings.warn(self.notfounderror.__doc__) elif notfound_action == 2: raise self.notfounderror(self.notfounderror.__doc__) else: raise ValueError(repr(notfound_action)) if not self.has_info(): log.info(' NOT AVAILABLE') self.set_info() else: log.info(' FOUND:') res = self.saved_results.get(self.__class__.__name__) if self.verbosity > 0 and flag: for k, v in res.items(): v = str(v) if k in ['sources', 'libraries'] and len(v) > 270: v = v[:120] + '...\n...\n...' 
+ v[-120:] log.info(' %s = %s', k, v) log.info('') return copy.deepcopy(res) def get_paths(self, section, key): dirs = self.cp.get(section, key).split(os.pathsep) env_var = self.dir_env_var if env_var: if is_sequence(env_var): e0 = env_var[-1] for e in env_var: if e in os.environ: e0 = e break if not env_var[0] == e0: log.info('Setting %s=%s' % (env_var[0], e0)) env_var = e0 if env_var and env_var in os.environ: d = os.environ[env_var] if d == 'None': log.info('Disabled %s: %s', self.__class__.__name__, '(%s is None)' % (env_var,)) return [] if os.path.isfile(d): dirs = [os.path.dirname(d)] + dirs l = getattr(self, '_lib_names', []) if len(l) == 1: b = os.path.basename(d) b = os.path.splitext(b)[0] if b[:3] == 'lib': log.info('Replacing _lib_names[0]==%r with %r' \ % (self._lib_names[0], b[3:])) self._lib_names[0] = b[3:] else: ds = d.split(os.pathsep) ds2 = [] for d in ds: if os.path.isdir(d): ds2.append(d) for dd in ['include', 'lib']: d1 = os.path.join(d, dd) if os.path.isdir(d1): ds2.append(d1) dirs = ds2 + dirs default_dirs = self.cp.get(self.section, key).split(os.pathsep) dirs.extend(default_dirs) ret = [] for d in dirs: if not os.path.isdir(d): warnings.warn('Specified path %s is invalid.' 
% d) continue if d not in ret: ret.append(d) log.debug('( %s = %s )', key, ':'.join(ret)) return ret def get_lib_dirs(self, key='library_dirs'): return self.get_paths(self.section, key) def get_runtime_lib_dirs(self, key='runtime_library_dirs'): return self.get_paths(self.section, key) def get_include_dirs(self, key='include_dirs'): return self.get_paths(self.section, key) def get_src_dirs(self, key='src_dirs'): return self.get_paths(self.section, key) def get_libs(self, key, default): try: libs = self.cp.get(self.section, key) except NoOptionError: if not default: return [] if is_string(default): return [default] return default return [b for b in [a.strip() for a in libs.split(',')] if b] def get_libraries(self, key='libraries'): return self.get_libs(key, '') def library_extensions(self): static_exts = ['.a'] if sys.platform == 'win32': static_exts.append('.lib') # .lib is used by MSVC if self.search_static_first: exts = static_exts + [so_ext] else: exts = [so_ext] + static_exts if sys.platform == 'cygwin': exts.append('.dll.a') if sys.platform == 'darwin': exts.append('.dylib') return exts def check_libs(self, lib_dirs, libs, opt_libs=[]): """If static or shared libraries are available then return their info dictionary. Checks for all libraries as shared libraries first, then static (or vice versa if self.search_static_first is True). """ exts = self.library_extensions() info = None for ext in exts: info = self._check_libs(lib_dirs, libs, opt_libs, [ext]) if info is not None: break if not info: log.info(' libraries %s not found in %s', ','.join(libs), lib_dirs) return info def check_libs2(self, lib_dirs, libs, opt_libs=[]): """If static or shared libraries are available then return their info dictionary. Checks each library for shared or static. 
""" exts = self.library_extensions() info = self._check_libs(lib_dirs, libs, opt_libs, exts) if not info: log.info(' libraries %s not found in %s', ','.join(libs), lib_dirs) return info def _lib_list(self, lib_dir, libs, exts): assert is_string(lib_dir) liblist = [] # under windows first try without 'lib' prefix if sys.platform == 'win32': lib_prefixes = ['', 'lib'] else: lib_prefixes = ['lib'] # for each library name, see if we can find a file for it. for l in libs: for ext in exts: for prefix in lib_prefixes: p = self.combine_paths(lib_dir, prefix + l + ext) if p: break if p: assert len(p) == 1 # ??? splitext on p[0] would do this for cygwin # doesn't seem correct if ext == '.dll.a': l += '.dll' liblist.append(l) break return liblist def _check_libs(self, lib_dirs, libs, opt_libs, exts): """Find mandatory and optional libs in expected paths. Missing optional libraries are silently forgotten. """ # First, try to find the mandatory libraries if is_sequence(lib_dirs): found_libs, found_dirs = [], [] for dir_ in lib_dirs: found_libs1 = self._lib_list(dir_, libs, exts) # It's possible that we'll find the same library in multiple # directories. It's also possible that we'll find some # libraries on in directory, and some in another. So the # obvious thing would be to use a set instead of a list, but I # don't know if preserving order matters (does it?). 
for found_lib in found_libs1: if found_lib not in found_libs: found_libs.append(found_lib) if dir_ not in found_dirs: found_dirs.append(dir_) else: found_libs = self._lib_list(lib_dirs, libs, exts) found_dirs = [lib_dirs] if len(found_libs) > 0 and len(found_libs) == len(libs): info = {'libraries': found_libs, 'library_dirs': found_dirs} # Now, check for optional libraries if is_sequence(lib_dirs): for dir_ in lib_dirs: opt_found_libs = self._lib_list(dir_, opt_libs, exts) if opt_found_libs: if dir_ not in found_dirs: found_dirs.extend(dir_) found_libs.extend(opt_found_libs) else: opt_found_libs = self._lib_list(lib_dirs, opt_libs, exts) if opt_found_libs: found_libs.extend(opt_found_libs) return info else: return None def combine_paths(self, *args): """Return a list of existing paths composed by all combinations of items from the arguments. """ return combine_paths(*args, **{'verbosity': self.verbosity}) class fft_opt_info(system_info): def calc_info(self): info = {} fftw_info = get_info('fftw3') or get_info('fftw2') or get_info('dfftw') djbfft_info = get_info('djbfft') if fftw_info: dict_append(info, **fftw_info) if djbfft_info: dict_append(info, **djbfft_info) self.set_info(**info) return class fftw_info(system_info): #variables to override section = 'fftw' dir_env_var = 'FFTW' notfounderror = FFTWNotFoundError ver_info = [{'name':'fftw3', 'libs':['fftw3'], 'includes':['fftw3.h'], 'macros':[('SCIPY_FFTW3_H', None)]}, {'name':'fftw2', 'libs':['rfftw', 'fftw'], 'includes':['fftw.h', 'rfftw.h'], 'macros':[('SCIPY_FFTW_H', None)]}] def calc_ver_info(self, ver_param): """Returns True on successful version detection, else False""" lib_dirs = self.get_lib_dirs() incl_dirs = self.get_include_dirs() incl_dir = None libs = self.get_libs(self.section + '_libs', ver_param['libs']) info = self.check_libs(lib_dirs, libs) if info is not None: flag = 0 for d in incl_dirs: if len(self.combine_paths(d, ver_param['includes'])) \ == len(ver_param['includes']): dict_append(info, 
# NOTE(review): the physical lines in this chunk were flattened during
# extraction; each long line below concatenates many original source lines.
# Comments are inserted only BETWEEN those lines; the code text is untouched.
#
# Tail of fftw_info.calc_ver_info() (the method starts before this chunk):
# records the include dir that matched, applies the version's macros via
# dict_append, and returns True/False depending on whether both the libraries
# and the headers were found.  fftw_info.calc_info() then tries each candidate
# dict in self.ver_info until one succeeds.  The fftw2/fftw3/dfftw/sfftw and
# *_threads subclasses that follow only override class attributes (section,
# dir_env_var, notfounderror, ver_info) for each FFTW library variant.
include_dirs=[d]) flag = 1 incl_dirs = [d] break if flag: dict_append(info, define_macros=ver_param['macros']) else: info = None if info is not None: self.set_info(**info) return True else: log.info(' %s not found' % (ver_param['name'])) return False def calc_info(self): for i in self.ver_info: if self.calc_ver_info(i): break class fftw2_info(fftw_info): #variables to override section = 'fftw' dir_env_var = 'FFTW' notfounderror = FFTWNotFoundError ver_info = [{'name':'fftw2', 'libs':['rfftw', 'fftw'], 'includes':['fftw.h', 'rfftw.h'], 'macros':[('SCIPY_FFTW_H', None)]} ] class fftw3_info(fftw_info): #variables to override section = 'fftw3' dir_env_var = 'FFTW3' notfounderror = FFTWNotFoundError ver_info = [{'name':'fftw3', 'libs':['fftw3'], 'includes':['fftw3.h'], 'macros':[('SCIPY_FFTW3_H', None)]}, ] class dfftw_info(fftw_info): section = 'fftw' dir_env_var = 'FFTW' ver_info = [{'name':'dfftw', 'libs':['drfftw', 'dfftw'], 'includes':['dfftw.h', 'drfftw.h'], 'macros':[('SCIPY_DFFTW_H', None)]}] class sfftw_info(fftw_info): section = 'fftw' dir_env_var = 'FFTW' ver_info = [{'name':'sfftw', 'libs':['srfftw', 'sfftw'], 'includes':['sfftw.h', 'srfftw.h'], 'macros':[('SCIPY_SFFTW_H', None)]}] class fftw_threads_info(fftw_info): section = 'fftw' dir_env_var = 'FFTW' ver_info = [{'name':'fftw threads', 'libs':['rfftw_threads', 'fftw_threads'], 'includes':['fftw_threads.h', 'rfftw_threads.h'], 'macros':[('SCIPY_FFTW_THREADS_H', None)]}] class dfftw_threads_info(fftw_info): section = 'fftw' dir_env_var = 'FFTW' ver_info = [{'name':'dfftw threads', 'libs':['drfftw_threads', 'dfftw_threads'], 'includes':['dfftw_threads.h', 'drfftw_threads.h'], 'macros':[('SCIPY_DFFTW_THREADS_H', None)]}] class sfftw_threads_info(fftw_info): section = 'fftw' dir_env_var = 'FFTW' ver_info = [{'name':'sfftw threads', 'libs':['srfftw_threads', 'sfftw_threads'], 'includes':['sfftw_threads.h', 'srfftw_threads.h'], 'macros':[('SCIPY_SFFTW_THREADS_H', None)]}] class djbfft_info(system_info): section
# djbfft_info (continued): get_paths() widens each configured dir with a
# 'djbfft' subdir; calc_info() prefers handing the raw djbfft.a archive to the
# linker as an extra object, falls back to -ldjbfft, and defines
# SCIPY_DJBFFT_H only when both fftc8.h and fftfreq.h sit in one include dir.
# mkl_info.get_mkl_rootdir(): honours $MKLROOT, then scans $LD_LIBRARY_PATH
# and /etc/ld.so.conf entries for an mkl* directory that contains a 'lib'
# subdir.  mkl_info.__init__() picks the platform lib subdir
# ('64'/'intel64'/'32') from cpuinfo and seeds the default lib/include dirs
# under the detected MKL root.
= 'djbfft' dir_env_var = 'DJBFFT' notfounderror = DJBFFTNotFoundError def get_paths(self, section, key): pre_dirs = system_info.get_paths(self, section, key) dirs = [] for d in pre_dirs: dirs.extend(self.combine_paths(d, ['djbfft']) + [d]) return [d for d in dirs if os.path.isdir(d)] def calc_info(self): lib_dirs = self.get_lib_dirs() incl_dirs = self.get_include_dirs() info = None for d in lib_dirs: p = self.combine_paths(d, ['djbfft.a']) if p: info = {'extra_objects': p} break p = self.combine_paths(d, ['libdjbfft.a', 'libdjbfft' + so_ext]) if p: info = {'libraries': ['djbfft'], 'library_dirs': [d]} break if info is None: return for d in incl_dirs: if len(self.combine_paths(d, ['fftc8.h', 'fftfreq.h'])) == 2: dict_append(info, include_dirs=[d], define_macros=[('SCIPY_DJBFFT_H', None)]) self.set_info(**info) return return class mkl_info(system_info): section = 'mkl' dir_env_var = 'MKL' _lib_mkl = ['mkl', 'vml', 'guide'] def get_mkl_rootdir(self): mklroot = os.environ.get('MKLROOT', None) if mklroot is not None: return mklroot paths = os.environ.get('LD_LIBRARY_PATH', '').split(os.pathsep) ld_so_conf = '/etc/ld.so.conf' if os.path.isfile(ld_so_conf): for d in open(ld_so_conf, 'r'): d = d.strip() if d: paths.append(d) intel_mkl_dirs = [] for path in paths: path_atoms = path.split(os.sep) for m in path_atoms: if m.startswith('mkl'): d = os.sep.join(path_atoms[:path_atoms.index(m) + 2]) intel_mkl_dirs.append(d) break for d in paths: dirs = glob(os.path.join(d, 'mkl', '*')) dirs += glob(os.path.join(d, 'mkl*')) for d in dirs: if os.path.isdir(os.path.join(d, 'lib')): return d return None def __init__(self): mklroot = self.get_mkl_rootdir() if mklroot is None: system_info.__init__(self) else: from .cpuinfo import cpu l = 'mkl' # use shared library if cpu.is_Itanium(): plt = '64' elif cpu.is_Xeon(): plt = 'intel64' else: plt = '32' if l not in self._lib_mkl: self._lib_mkl.insert(0, l) system_info.__init__( self, default_lib_dirs=[os.path.join(mklroot, 'lib', plt)],
# mkl_info.calc_info(): locates mkl_libs, defines SCIPY_MKL_H / HAVE_CBLAS and
# links pthread everywhere except win32.  lapack_mkl_info layers the MKL
# LAPACK libraries on top of the base 'mkl' info (the lib names differ between
# win32 and other platforms); blas_mkl_info is a pure alias of mkl_info.
# atlas_info begins: get_paths() also probes atlas*/ATLAS*/sse/3dnow/sse2
# subdirs, and calc_info() (continuing past this physical line) searches each
# lib dir for the ATLAS BLAS libs plus a usable LAPACK, remembering the first
# ATLAS-only hit in atlas_1 as a fallback.
default_include_dirs=[os.path.join(mklroot, 'include')]) def calc_info(self): lib_dirs = self.get_lib_dirs() incl_dirs = self.get_include_dirs() mkl_libs = self.get_libs('mkl_libs', self._lib_mkl) info = self.check_libs2(lib_dirs, mkl_libs) if info is None: return dict_append(info, define_macros=[('SCIPY_MKL_H', None), ('HAVE_CBLAS', None)], include_dirs=incl_dirs) if sys.platform == 'win32': pass # win32 has no pthread library else: dict_append(info, libraries=['pthread']) self.set_info(**info) class lapack_mkl_info(mkl_info): def calc_info(self): mkl = get_info('mkl') if not mkl: return if sys.platform == 'win32': lapack_libs = self.get_libs('lapack_libs', ['mkl_lapack']) else: lapack_libs = self.get_libs('lapack_libs', ['mkl_lapack32', 'mkl_lapack64']) info = {'libraries': lapack_libs} dict_append(info, **mkl) self.set_info(**info) class blas_mkl_info(mkl_info): pass class atlas_info(system_info): section = 'atlas' dir_env_var = 'ATLAS' _lib_names = ['f77blas', 'cblas'] if sys.platform[:7] == 'freebsd': _lib_atlas = ['atlas_r'] _lib_lapack = ['alapack_r'] else: _lib_atlas = ['atlas'] _lib_lapack = ['lapack'] notfounderror = AtlasNotFoundError def get_paths(self, section, key): pre_dirs = system_info.get_paths(self, section, key) dirs = [] for d in pre_dirs: dirs.extend(self.combine_paths(d, ['atlas*', 'ATLAS*', 'sse', '3dnow', 'sse2']) + [d]) return [d for d in dirs if os.path.isdir(d)] def calc_info(self): lib_dirs = self.get_lib_dirs() info = {} atlas_libs = self.get_libs('atlas_libs', self._lib_names + self._lib_atlas) lapack_libs = self.get_libs('lapack_libs', self._lib_lapack) atlas = None lapack = None atlas_1 = None for d in lib_dirs: atlas = self.check_libs2(d, atlas_libs, []) lapack_atlas = self.check_libs2(d, ['lapack_atlas'], []) if atlas is not None: lib_dirs2 = [d] + self.combine_paths(d, ['atlas*', 'ATLAS*']) lapack = self.check_libs2(lib_dirs2, lapack_libs, []) if lapack is not None: break if atlas: atlas_1 = atlas log.info(self.__class__) if
# NOTE(review): flattened extraction -- each long line below concatenates many
# original source lines; comments are added only between them.
#
# atlas_info.calc_info() (continued): fall back to the ATLAS-only hit
# (atlas_1) when no dir had both ATLAS and LAPACK; pick up the dir holding
# cblas.h for include_dirs.  Three outcomes follow: full ATLAS+LAPACK,
# lapack_atlas built into ATLAS (ATLAS_WITH_LAPACK_ATLAS), or ATLAS without
# any LAPACK (warn loudly and set ATLAS_WITHOUT_LAPACK).
atlas is None: atlas = atlas_1 if atlas is None: return include_dirs = self.get_include_dirs() h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) h = h[0] if h: h = os.path.dirname(h) dict_append(info, include_dirs=[h]) info['language'] = 'c' if lapack is not None: dict_append(info, **lapack) dict_append(info, **atlas) elif 'lapack_atlas' in atlas['libraries']: dict_append(info, **atlas) dict_append(info, define_macros=[('ATLAS_WITH_LAPACK_ATLAS', None)]) self.set_info(**info) return else: dict_append(info, **atlas) dict_append(info, define_macros=[('ATLAS_WITHOUT_LAPACK', None)]) message = """ ********************************************************************* Could not find lapack library within the ATLAS installation. ********************************************************************* """ warnings.warn(message) self.set_info(**info) return # Check if lapack library is complete, only warn if it is not. lapack_dir = lapack['library_dirs'][0] lapack_name = lapack['libraries'][0] lapack_lib = None lib_prefixes = ['lib'] if sys.platform == 'win32': lib_prefixes.append('') for e in self.library_extensions(): for prefix in lib_prefixes: fn = os.path.join(lapack_dir, prefix + lapack_name + e) if os.path.exists(fn): lapack_lib = fn break if lapack_lib: break if lapack_lib is not None: sz = os.stat(lapack_lib)[6] if sz <= 4000 * 1024: message = """ ********************************************************************* Lapack library (from ATLAS) is probably incomplete: size of %s is %sk (expected >4000k) Follow the instructions in the KNOWN PROBLEMS section of the file numpy/INSTALL.txt.
# Size heuristic: a liblapack under ~4000k is probably ATLAS's incomplete
# LAPACK stub, so only a warning is issued.  Then get_atlas_version() macros
# are merged in and the info is published.
# atlas_blas_info.calc_info(): BLAS-only ATLAS lookup (no LAPACK search),
# defines HAVE_CBLAS.  The *_threads_info variants only swap in the
# ptf77blas/ptcblas lib names and also honour $PTATLAS; the lapack_atlas*
# classes prepend 'lapack_atlas' to the lib list.  ATLAS >= 3.10 merged all
# libraries into a single satlas (serial) / tatlas (threaded) library, hence
# the atlas_3_10* attribute-only subclasses.
********************************************************************* """ % (lapack_lib, sz / 1024) warnings.warn(message) else: info['language'] = 'f77' atlas_version, atlas_extra_info = get_atlas_version(**atlas) dict_append(info, **atlas_extra_info) self.set_info(**info) class atlas_blas_info(atlas_info): _lib_names = ['f77blas', 'cblas'] def calc_info(self): lib_dirs = self.get_lib_dirs() info = {} atlas_libs = self.get_libs('atlas_libs', self._lib_names + self._lib_atlas) atlas = self.check_libs2(lib_dirs, atlas_libs, []) if atlas is None: return include_dirs = self.get_include_dirs() h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) h = h[0] if h: h = os.path.dirname(h) dict_append(info, include_dirs=[h]) info['language'] = 'c' info['define_macros'] = [('HAVE_CBLAS', None)] atlas_version, atlas_extra_info = get_atlas_version(**atlas) dict_append(atlas, **atlas_extra_info) dict_append(info, **atlas) self.set_info(**info) return class atlas_threads_info(atlas_info): dir_env_var = ['PTATLAS', 'ATLAS'] _lib_names = ['ptf77blas', 'ptcblas'] class atlas_blas_threads_info(atlas_blas_info): dir_env_var = ['PTATLAS', 'ATLAS'] _lib_names = ['ptf77blas', 'ptcblas'] class lapack_atlas_info(atlas_info): _lib_names = ['lapack_atlas'] + atlas_info._lib_names class lapack_atlas_threads_info(atlas_threads_info): _lib_names = ['lapack_atlas'] + atlas_threads_info._lib_names class atlas_3_10_info(atlas_info): _lib_names = ['satlas'] _lib_atlas = _lib_names _lib_lapack = _lib_names class atlas_3_10_blas_info(atlas_3_10_info): _lib_names = ['satlas'] def calc_info(self): lib_dirs = self.get_lib_dirs() info = {} atlas_libs = self.get_libs('atlas_libs', self._lib_names) atlas = self.check_libs2(lib_dirs, atlas_libs, []) if atlas is None: return include_dirs = self.get_include_dirs() h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) h = h[0] if h: h = os.path.dirname(h) dict_append(info, include_dirs=[h]) info['language'] = 'c'
# atlas_3_10_blas_info.calc_info() (end), followed by the threaded 3.10
# variants (tatlas) and their lapack aliases -- attribute-only subclasses.
# lapack_info: plain -llapack lookup (language 'f77').
# lapack_src_info: locates a LAPACK *source* tree by probing for dgesv.f;
# its calc_info() (continuing into the next block) assembles the explicit
# per-routine source-file list from the LAPACK Makefile tables.
info['define_macros'] = [('HAVE_CBLAS', None)] atlas_version, atlas_extra_info = get_atlas_version(**atlas) dict_append(atlas, **atlas_extra_info) dict_append(info, **atlas) self.set_info(**info) return class atlas_3_10_threads_info(atlas_3_10_info): dir_env_var = ['PTATLAS', 'ATLAS'] _lib_names = ['tatlas'] _lib_atlas = _lib_names _lib_lapack = _lib_names class atlas_3_10_blas_threads_info(atlas_3_10_blas_info): dir_env_var = ['PTATLAS', 'ATLAS'] _lib_names = ['tatlas'] class lapack_atlas_3_10_info(atlas_3_10_info): pass class lapack_atlas_3_10_threads_info(atlas_3_10_threads_info): pass class lapack_info(system_info): section = 'lapack' dir_env_var = 'LAPACK' _lib_names = ['lapack'] notfounderror = LapackNotFoundError def calc_info(self): lib_dirs = self.get_lib_dirs() lapack_libs = self.get_libs('lapack_libs', self._lib_names) info = self.check_libs(lib_dirs, lapack_libs, []) if info is None: return info['language'] = 'f77' self.set_info(**info) class lapack_src_info(system_info): section = 'lapack_src' dir_env_var = 'LAPACK_SRC' notfounderror = LapackSrcNotFoundError def get_paths(self, section, key): pre_dirs = system_info.get_paths(self, section, key) dirs = [] for d in pre_dirs: dirs.extend([d] + self.combine_paths(d, ['LAPACK*/SRC', 'SRC'])) return [d for d in dirs if os.path.isdir(d)] def calc_info(self): src_dirs = self.get_src_dirs() src_dir = '' for d in src_dirs: if os.path.isfile(os.path.join(d, 'dgesv.f')): src_dir = d break if not src_dir: #XXX: Get sources from netlib. May be ask first. return # The following is extracted from LAPACK-3.0/SRC/Makefile. # Added missing names from lapack-lite-3.1.1/SRC/Makefile # while keeping removed names for Lapack-3.0 compatibility.
# NOTE(review): flattened extraction -- each long line below concatenates many
# original source lines; comments are added only between them.
#
# lapack_src_info.calc_info() (continued): routine-name tables copied from the
# LAPACK 3.0/3.1 SRC/Makefile, grouped by which precision prefixes each name
# takes -- none (allaux), s/d (laux, sd_lasrc), s/c/d/z (lasrc), c/z
# (cz_lasrc).
allaux = ''' ilaenv ieeeck lsame lsamen xerbla iparmq ''' # *.f laux = ''' bdsdc bdsqr disna labad lacpy ladiv lae2 laebz laed0 laed1 laed2 laed3 laed4 laed5 laed6 laed7 laed8 laed9 laeda laev2 lagtf lagts lamch lamrg lanst lapy2 lapy3 larnv larrb larre larrf lartg laruv las2 lascl lasd0 lasd1 lasd2 lasd3 lasd4 lasd5 lasd6 lasd7 lasd8 lasd9 lasda lasdq lasdt laset lasq1 lasq2 lasq3 lasq4 lasq5 lasq6 lasr lasrt lassq lasv2 pttrf stebz stedc steqr sterf larra larrc larrd larr larrk larrj larrr laneg laisnan isnan lazq3 lazq4 ''' # [s|d]*.f lasrc = ''' gbbrd gbcon gbequ gbrfs gbsv gbsvx gbtf2 gbtrf gbtrs gebak gebal gebd2 gebrd gecon geequ gees geesx geev geevx gegs gegv gehd2 gehrd gelq2 gelqf gels gelsd gelss gelsx gelsy geql2 geqlf geqp3 geqpf geqr2 geqrf gerfs gerq2 gerqf gesc2 gesdd gesv gesvd gesvx getc2 getf2 getrf getri getrs ggbak ggbal gges ggesx ggev ggevx ggglm gghrd gglse ggqrf ggrqf ggsvd ggsvp gtcon gtrfs gtsv gtsvx gttrf gttrs gtts2 hgeqz hsein hseqr labrd lacon laein lags2 lagtm lahqr lahrd laic1 lals0 lalsa lalsd langb lange langt lanhs lansb lansp lansy lantb lantp lantr lapll lapmt laqgb laqge laqp2 laqps laqsb laqsp laqsy lar1v lar2v larf larfb larfg larft larfx largv larrv lartv larz larzb larzt laswp lasyf latbs latdf latps latrd latrs latrz latzm lauu2 lauum pbcon pbequ pbrfs pbstf pbsv pbsvx pbtf2 pbtrf pbtrs pocon poequ porfs posv posvx potf2 potrf potri potrs ppcon ppequ pprfs ppsv ppsvx pptrf pptri pptrs ptcon pteqr ptrfs ptsv ptsvx pttrs ptts2 spcon sprfs spsv spsvx sptrf sptri sptrs stegr stein sycon syrfs sysv sysvx sytf2 sytrf sytri sytrs tbcon tbrfs tbtrs tgevc tgex2 tgexc tgsen tgsja tgsna tgsy2 tgsyl tpcon tprfs tptri tptrs trcon trevc trexc trrfs trsen trsna trsyl trti2 trtri trtrs tzrqf tzrzf lacn2 lahr2 stemr laqr0 laqr1 laqr2 laqr3 laqr4 laqr5 ''' # [s|c|d|z]*.f sd_lasrc = ''' laexc lag2 lagv2 laln2 lanv2 laqtr lasy2 opgtr opmtr org2l org2r orgbr orghr orgl2 orglq orgql orgqr orgr2 orgrq orgtr orm2l orm2r ormbr ormhr orml2 ormlq
# Combine the tables into per-precision source lists (sclaux/dzlaux add the
# second-timer routine; oclasrc/ozlasrc are the unprefixed complex helpers),
# expand them into actual 's%s.f'/'d%s.f'/'c%s.f'/'z%s.f' file names, then add
# the INSTALL/<p>lamch.f files (LAPACK 3.1) and the larfp / ila*lr / ila*lc
# files (LAPACK 3.2.1).
ormql ormqr ormr2 ormr3 ormrq ormrz ormtr rscl sbev sbevd sbevx sbgst sbgv sbgvd sbgvx sbtrd spev spevd spevx spgst spgv spgvd spgvx sptrd stev stevd stevr stevx syev syevd syevr syevx sygs2 sygst sygv sygvd sygvx sytd2 sytrd ''' # [s|d]*.f cz_lasrc = ''' bdsqr hbev hbevd hbevx hbgst hbgv hbgvd hbgvx hbtrd hecon heev heevd heevr heevx hegs2 hegst hegv hegvd hegvx herfs hesv hesvx hetd2 hetf2 hetrd hetrf hetri hetrs hpcon hpev hpevd hpevx hpgst hpgv hpgvd hpgvx hprfs hpsv hpsvx hptrd hptrf hptri hptrs lacgv lacp2 lacpy lacrm lacrt ladiv laed0 laed7 laed8 laesy laev2 lahef lanhb lanhe lanhp lanht laqhb laqhe laqhp larcm larnv lartg lascl laset lasr lassq pttrf rot spmv spr stedc steqr symv syr ung2l ung2r ungbr unghr ungl2 unglq ungql ungqr ungr2 ungrq ungtr unm2l unm2r unmbr unmhr unml2 unmlq unmql unmqr unmr2 unmr3 unmrq unmrz unmtr upgtr upmtr ''' # [c|z]*.f ####### sclaux = laux + ' econd ' # s*.f dzlaux = laux + ' secnd ' # d*.f slasrc = lasrc + sd_lasrc # s*.f dlasrc = lasrc + sd_lasrc # d*.f clasrc = lasrc + cz_lasrc + ' srot srscl ' # c*.f zlasrc = lasrc + cz_lasrc + ' drot drscl ' # z*.f oclasrc = ' icmax1 scsum1 ' # *.f ozlasrc = ' izmax1 dzsum1 ' # *.f sources = ['s%s.f' % f for f in (sclaux + slasrc).split()] \ + ['d%s.f' % f for f in (dzlaux + dlasrc).split()] \ + ['c%s.f' % f for f in (clasrc).split()] \ + ['z%s.f' % f for f in (zlasrc).split()] \ + ['%s.f' % f for f in (allaux + oclasrc + ozlasrc).split()] sources = [os.path.join(src_dir, f) for f in sources] # Lapack 3.1: src_dir2 = os.path.join(src_dir, '..', 'INSTALL') sources += [os.path.join(src_dir2, p + 'lamch.f') for p in 'sdcz'] # Lapack 3.2.1: sources += [os.path.join(src_dir, p + 'larfp.f') for p in 'sdcz'] sources += [os.path.join(src_dir, 'ila' + p + 'lr.f') for p in 'sdcz'] sources += [os.path.join(src_dir, 'ila' + p + 'lc.f') for p in 'sdcz'] # Should we check here actual existence of source files? # Yes, the file listing is different between 3.0 and 3.1 # versions.
# lapack_src_info.calc_info() (end): keep only files that actually exist,
# since the listing differs across LAPACK versions.
# atlas_version_c_text: a tiny C program linked against ATLAS so that running
# it prints the ATLAS build info.  get_atlas_version() memoises its result per
# (libraries, library_dirs) key and first tries to build and run that probe,
# retrying with -lgfortran appended when the link fails with an undefined
# '_gfortran' reference (and warning that ATLAS needs gfortran).
sources = [f for f in sources if os.path.isfile(f)] info = {'sources': sources, 'language': 'f77'} self.set_info(**info) atlas_version_c_text = r''' /* This file is generated from numpy/distutils/system_info.py */ void ATL_buildinfo(void); int main(void) { ATL_buildinfo(); return 0; } ''' _cached_atlas_version = {} def get_atlas_version(**config): libraries = config.get('libraries', []) library_dirs = config.get('library_dirs', []) key = (tuple(libraries), tuple(library_dirs)) if key in _cached_atlas_version: return _cached_atlas_version[key] c = cmd_config(Distribution()) atlas_version = None info = {} try: s, o = c.get_output(atlas_version_c_text, libraries=libraries, library_dirs=library_dirs, use_tee=(system_info.verbosity > 0)) if s and re.search(r'undefined reference to `_gfortran', o, re.M): s, o = c.get_output(atlas_version_c_text, libraries=libraries + ['gfortran'], library_dirs=library_dirs, use_tee=(system_info.verbosity > 0)) if not s: warnings.warn(""" ***************************************************** Linkage with ATLAS requires gfortran. Use python setup.py config_fc --fcompiler=gnu95 ... when building extension libraries that use ATLAS. Make sure that -lgfortran is used for C++ extensions.
# NOTE(review): flattened extraction -- each long line below concatenates many
# original source lines; comments are added only between them.
#
# get_atlas_version() (continued): on any exception while building/running the
# probe (e.g. on Windows), fall back to parsing an 'ATLAS_x.y.z_' pattern out
# of the library dir names, then the $ATLAS_VERSION environment variable, and
# return early *from the except block* with NO_ATLAS_INFO / ATLAS_INFO macros.
# On the success path, parse 'ATLAS version x.y.z' from the probe output; an
# 'undefined symbol: ATL_buildinfo' means a pre-3.3.6 ATLAS (NO_ATLAS_INFO -2).
# The (version, info) pair is cached in _cached_atlas_version before return.
# lapack_opt_info.calc_info() begins: preference order is openblas_lapack,
# MKL, then the ATLAS variants from newest (3.10 threaded) down to plain atlas.
***************************************************** """) dict_append(info, language='f90', define_macros=[('ATLAS_REQUIRES_GFORTRAN', None)]) except Exception: # failed to get version from file -- maybe on Windows # look at directory name for o in library_dirs: m = re.search(r'ATLAS_(?P<version>\d+[.]\d+[.]\d+)_', o) if m: atlas_version = m.group('version') if atlas_version is not None: break # final choice --- look at ATLAS_VERSION environment # variable if atlas_version is None: atlas_version = os.environ.get('ATLAS_VERSION', None) if atlas_version: dict_append(info, define_macros=[( 'ATLAS_INFO', '"\\"%s\\""' % atlas_version) ]) else: dict_append(info, define_macros=[('NO_ATLAS_INFO', -1)]) return atlas_version or '?.?.?', info if not s: m = re.search(r'ATLAS version (?P<version>\d+[.]\d+[.]\d+)', o) if m: atlas_version = m.group('version') if atlas_version is None: if re.search(r'undefined symbol: ATL_buildinfo', o, re.M): atlas_version = '3.2.1_pre3.3.6' else: log.info('Status: %d', s) log.info('Output: %s', o) if atlas_version == '3.2.1_pre3.3.6': dict_append(info, define_macros=[('NO_ATLAS_INFO', -2)]) else: dict_append(info, define_macros=[( 'ATLAS_INFO', '"\\"%s\\""' % atlas_version) ]) result = _cached_atlas_version[key] = atlas_version, info return result class lapack_opt_info(system_info): notfounderror = LapackNotFoundError def calc_info(self): openblas_info = get_info('openblas_lapack') if openblas_info: self.set_info(**openblas_info) return lapack_mkl_info = get_info('lapack_mkl') if lapack_mkl_info: self.set_info(**lapack_mkl_info) return atlas_info = get_info('atlas_3_10_threads') if not atlas_info: atlas_info = get_info('atlas_3_10') if not atlas_info: atlas_info = get_info('atlas_threads') if not atlas_info: atlas_info = get_info('atlas') if sys.platform == 'darwin' and not atlas_info: # Use the system lapack from Accelerate or vecLib under OSX args = [] link_args = [] if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \ 'x86_64'
# lapack_opt_info.calc_info() (continued): on darwin without ATLAS, link the
# Accelerate (or older vecLib) framework, adding -msse3 on intel and
# -faltivec otherwise.  Failing everything, fall back to generic
# lapack/blas libraries and finally to building from lapack_src/blas_src.
# NOTE(review): the locals 'atlas_info', 'lapack_mkl_info' etc. shadow the
# like-named module-level classes -- confusing but harmless inside the method.
in get_platform() or \ 'i386' in platform.platform(): intel = 1 else: intel = 0 if os.path.exists('/System/Library/Frameworks' '/Accelerate.framework/'): if intel: args.extend(['-msse3']) else: args.extend(['-faltivec']) link_args.extend(['-Wl,-framework', '-Wl,Accelerate']) elif os.path.exists('/System/Library/Frameworks' '/vecLib.framework/'): if intel: args.extend(['-msse3']) else: args.extend(['-faltivec']) link_args.extend(['-Wl,-framework', '-Wl,vecLib']) if args: self.set_info(extra_compile_args=args, extra_link_args=link_args, define_macros=[('NO_ATLAS_INFO', 3), ('HAVE_CBLAS', None)]) return need_lapack = 0 need_blas = 0 info = {} if atlas_info: l = atlas_info.get('define_macros', []) if ('ATLAS_WITH_LAPACK_ATLAS', None) in l \ or ('ATLAS_WITHOUT_LAPACK', None) in l: need_lapack = 1 info = atlas_info else: warnings.warn(AtlasNotFoundError.__doc__) need_blas = 1 need_lapack = 1 dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)]) if need_lapack: lapack_info = get_info('lapack') #lapack_info = {} ## uncomment for testing if lapack_info: dict_append(info, **lapack_info) else: warnings.warn(LapackNotFoundError.__doc__) lapack_src_info = get_info('lapack_src') if not lapack_src_info: warnings.warn(LapackSrcNotFoundError.__doc__) return dict_append(info, libraries=[('flapack_src', lapack_src_info)]) if need_blas: blas_info = get_info('blas') if blas_info: dict_append(info, **blas_info) else: warnings.warn(BlasNotFoundError.__doc__) blas_src_info = get_info('blas_src') if not blas_src_info: warnings.warn(BlasSrcNotFoundError.__doc__) return dict_append(info, libraries=[('fblas_src', blas_src_info)]) self.set_info(**info) return class blas_opt_info(system_info): notfounderror = BlasNotFoundError def calc_info(self): blas_mkl_info = get_info('blas_mkl') if blas_mkl_info: self.set_info(**blas_mkl_info) return openblas_info = get_info('openblas') if openblas_info: self.set_info(**openblas_info) return atlas_info = get_info('atlas_3_10_blas_threads') if not
# blas_opt_info.calc_info() (continued): the same cascade for BLAS only --
# MKL, OpenBLAS, ATLAS variants, Accelerate/vecLib on darwin (also adding the
# vecLib headers to the include path), then -lblas, then blas_src sources.
# blas_info.calc_info() begins: plain -lblas lookup; on Windows the language
# is assumed 'f77' because the cblas link-probe needs the same compiler that
# built Python (often unavailable under mingw).
atlas_info: atlas_info = get_info('atlas_3_10_blas') if not atlas_info: atlas_info = get_info('atlas_blas_threads') if not atlas_info: atlas_info = get_info('atlas_blas') if sys.platform == 'darwin' and not atlas_info: # Use the system BLAS from Accelerate or vecLib under OSX args = [] link_args = [] if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \ 'x86_64' in get_platform() or \ 'i386' in platform.platform(): intel = 1 else: intel = 0 if os.path.exists('/System/Library/Frameworks' '/Accelerate.framework/'): if intel: args.extend(['-msse3']) else: args.extend(['-faltivec']) args.extend([ '-I/System/Library/Frameworks/vecLib.framework/Headers']) link_args.extend(['-Wl,-framework', '-Wl,Accelerate']) elif os.path.exists('/System/Library/Frameworks' '/vecLib.framework/'): if intel: args.extend(['-msse3']) else: args.extend(['-faltivec']) args.extend([ '-I/System/Library/Frameworks/vecLib.framework/Headers']) link_args.extend(['-Wl,-framework', '-Wl,vecLib']) if args: self.set_info(extra_compile_args=args, extra_link_args=link_args, define_macros=[('NO_ATLAS_INFO', 3), ('HAVE_CBLAS', None)]) return need_blas = 0 info = {} if atlas_info: info = atlas_info else: warnings.warn(AtlasNotFoundError.__doc__) need_blas = 1 dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)]) if need_blas: blas_info = get_info('blas') if blas_info: dict_append(info, **blas_info) else: warnings.warn(BlasNotFoundError.__doc__) blas_src_info = get_info('blas_src') if not blas_src_info: warnings.warn(BlasSrcNotFoundError.__doc__) return dict_append(info, libraries=[('fblas_src', blas_src_info)]) self.set_info(**info) return class blas_info(system_info): section = 'blas' dir_env_var = 'BLAS' _lib_names = ['blas'] notfounderror = BlasNotFoundError def calc_info(self): lib_dirs = self.get_lib_dirs() blas_libs = self.get_libs('blas_libs', self._lib_names) info = self.check_libs(lib_dirs, blas_libs, []) if info is None: return if platform.system() == 'Windows': # The check for
# NOTE(review): flattened extraction -- each long line below concatenates many
# original source lines; comments are added only between them.
#
# blas_info.calc_info() (continued) and has_cblas(): compile a minimal
# cblas_ddot program in a temp dir to test that cblas.h is reachable, then
# try linking it (continues on the next physical line) against -lcblas and,
# failing that, -lblas; returns the library name that linked, or None/False.
windows is needed because has_cblas uses the # same compiler that was used to compile Python and msvc is # often not installed when mingw is being used. This rough # treatment is not desirable, but windows is tricky. info['language'] = 'f77' # XXX: is it generally true? else: lib = self.has_cblas(info) if lib is not None: info['language'] = 'c' info['libraries'] = [lib] info['define_macros'] = [('HAVE_CBLAS', None)] self.set_info(**info) def has_cblas(self, info): # primitive cblas check by looking for the header and trying to link # cblas or blas res = False c = distutils.ccompiler.new_compiler() tmpdir = tempfile.mkdtemp() s = """#include <cblas.h> int main(int argc, const char *argv[]) { double a[4] = {1,2,3,4}; double b[4] = {5,6,7,8}; return cblas_ddot(4, a, 1, b, 1) > 10; }""" src = os.path.join(tmpdir, 'source.c') try: with open(src, 'wt') as f: f.write(s) try: # check we can compile (find headers) obj = c.compile([src], output_dir=tmpdir, include_dirs=self.get_include_dirs()) # check we can link (find library) # some systems have separate cblas and blas libs. First # check for cblas lib, and if not present check for blas lib.
# has_cblas() link attempts, with the temp dir removed in 'finally'.
# openblas_info.calc_info(): reads the 'libraries' key (falling back to the
# pre-1.8.1 'openblas_libs' key), merges calc_extra_info(), and requires
# check_embedded_lapack() (trivially True in this class) before declaring
# language 'c' with HAVE_CBLAS.
# openblas_lapack_info.check_embedded_lapack(): link-probe for LAPACK symbols
# embedded in OpenBLAS by compiling a program that calls zungqr_.
# NOTE(review): the probe source declares 'void zungqr();' but calls
# 'zungqr_()' -- the call resolves only through C's implicit declaration
# rules; later numpy versions declare 'void zungqr_();'.  Also the bare
# 'except:' around the 'extra_link_args' lookup would be better as
# 'except KeyError:' so real errors are not swallowed.
try: c.link_executable(obj, os.path.join(tmpdir, "a.out"), libraries=["cblas"], library_dirs=info['library_dirs'], extra_postargs=info.get('extra_link_args', [])) res = "cblas" except distutils.ccompiler.LinkError: c.link_executable(obj, os.path.join(tmpdir, "a.out"), libraries=["blas"], library_dirs=info['library_dirs'], extra_postargs=info.get('extra_link_args', [])) res = "blas" except distutils.ccompiler.CompileError: res = None finally: shutil.rmtree(tmpdir) return res class openblas_info(blas_info): section = 'openblas' dir_env_var = 'OPENBLAS' _lib_names = ['openblas'] notfounderror = BlasNotFoundError def check_embedded_lapack(self, info): return True def calc_info(self): lib_dirs = self.get_lib_dirs() openblas_libs = self.get_libs('libraries', self._lib_names) if openblas_libs == self._lib_names: # backward compat with 1.8.0 openblas_libs = self.get_libs('openblas_libs', self._lib_names) info = self.check_libs(lib_dirs, openblas_libs, []) if info is None: return # Add extra info for OpenBLAS extra_info = self.calc_extra_info() dict_append(info, **extra_info) if not self.check_embedded_lapack(info): return info['language'] = 'c' info['define_macros'] = [('HAVE_CBLAS', None)] self.set_info(**info) class openblas_lapack_info(openblas_info): section = 'openblas' dir_env_var = 'OPENBLAS' _lib_names = ['openblas'] notfounderror = BlasNotFoundError def check_embedded_lapack(self, info): res = False c = distutils.ccompiler.new_compiler() tmpdir = tempfile.mkdtemp() s = """void zungqr(); int main(int argc, const char *argv[]) { zungqr_(); return 0; }""" src = os.path.join(tmpdir, 'source.c') out = os.path.join(tmpdir, 'a.out') # Add the additional "extra" arguments try: extra_args = info['extra_link_args'] except: extra_args = [] try: with open(src, 'wt') as f: f.write(s) obj = c.compile([src], output_dir=tmpdir) try: c.link_executable(obj, out, libraries=info['libraries'], library_dirs=info['library_dirs'], extra_postargs=extra_args) res = True except
# openblas_lapack_info.check_embedded_lapack() (end): LinkError means no
# embedded LAPACK; temp dir removed in 'finally'.
# blas_src_info: locates a reference BLAS *source* tree by probing for
# daxpy.f; the blas1/blas2/blas3 tables list every routine to compile.
distutils.ccompiler.LinkError: res = False finally: shutil.rmtree(tmpdir) return res class blas_src_info(system_info): section = 'blas_src' dir_env_var = 'BLAS_SRC' notfounderror = BlasSrcNotFoundError def get_paths(self, section, key): pre_dirs = system_info.get_paths(self, section, key) dirs = [] for d in pre_dirs: dirs.extend([d] + self.combine_paths(d, ['blas'])) return [d for d in dirs if os.path.isdir(d)] def calc_info(self): src_dirs = self.get_src_dirs() src_dir = '' for d in src_dirs: if os.path.isfile(os.path.join(d, 'daxpy.f')): src_dir = d break if not src_dir: #XXX: Get sources from netlib. May be ask first. return blas1 = ''' caxpy csscal dnrm2 dzasum saxpy srotg zdotc ccopy cswap drot dznrm2 scasum srotm zdotu cdotc dasum drotg icamax scnrm2 srotmg zdrot cdotu daxpy drotm idamax scopy sscal zdscal crotg dcabs1 drotmg isamax sdot sswap zrotg cscal dcopy dscal izamax snrm2 zaxpy zscal csrot ddot dswap sasum srot zcopy zswap scabs1 ''' blas2 = ''' cgbmv chpmv ctrsv dsymv dtrsv sspr2 strmv zhemv ztpmv cgemv chpr dgbmv dsyr lsame ssymv strsv zher ztpsv cgerc chpr2 dgemv dsyr2 sgbmv ssyr xerbla zher2 ztrmv cgeru ctbmv dger dtbmv sgemv ssyr2 zgbmv zhpmv ztrsv chbmv ctbsv dsbmv dtbsv sger stbmv zgemv zhpr chemv ctpmv dspmv dtpmv ssbmv stbsv zgerc zhpr2 cher ctpsv dspr dtpsv sspmv stpmv zgeru ztbmv cher2 ctrmv dspr2 dtrmv sspr stpsv zhbmv ztbsv ''' blas3 = ''' cgemm csymm ctrsm dsyrk sgemm strmm zhemm zsyr2k chemm csyr2k dgemm dtrmm ssymm strsm zher2k zsyrk cher2k csyrk dsymm dtrsm ssyr2k zherk ztrmm cherk ctrmm dsyr2k ssyrk zgemm zsymm ztrsm ''' sources = [os.path.join(src_dir, f + '.f') \ for f in (blas1 + blas2 + blas3).split()] #XXX: should we check here actual existence of source files?
# NOTE(review): flattened extraction -- each long line below concatenates many
# original source lines; comments are added only between them.
#
# blas_src_info.calc_info() (end): keep only files that exist, language f77.
# x11_info: X11 library/header lookup seeded from the module's default X11
# dirs (defined earlier in the file); skipped entirely on win32.
# _numpy_info begins: legacy Numeric/numarray/numpy detection -- imports the
# module named by self.modulename, derives include dirs (the module's own
# get_include() when available, plus the Python include dirs and the default
# include dirs), and exposes <NAME> and <NAME>_VERSION macros.
sources = [f for f in sources if os.path.isfile(f)] info = {'sources': sources, 'language': 'f77'} self.set_info(**info) class x11_info(system_info): section = 'x11' notfounderror = X11NotFoundError def __init__(self): system_info.__init__(self, default_lib_dirs=default_x11_lib_dirs, default_include_dirs=default_x11_include_dirs) def calc_info(self): if sys.platform in ['win32']: return lib_dirs = self.get_lib_dirs() include_dirs = self.get_include_dirs() x11_libs = self.get_libs('x11_libs', ['X11']) info = self.check_libs(lib_dirs, x11_libs, []) if info is None: return inc_dir = None for d in include_dirs: if self.combine_paths(d, 'X11/X.h'): inc_dir = d break if inc_dir is not None: dict_append(info, include_dirs=[inc_dir]) self.set_info(**info) class _numpy_info(system_info): section = 'Numeric' modulename = 'Numeric' notfounderror = NumericNotFoundError def __init__(self): include_dirs = [] try: module = __import__(self.modulename) prefix = [] for name in module.__file__.split(os.sep): if name == 'lib': break prefix.append(name) # Ask numpy for its own include path before attempting # anything else try: include_dirs.append(getattr(module, 'get_include')()) except AttributeError: pass include_dirs.append(distutils.sysconfig.get_python_inc( prefix=os.sep.join(prefix))) except ImportError: pass py_incl_dir = distutils.sysconfig.get_python_inc() include_dirs.append(py_incl_dir) py_pincl_dir = distutils.sysconfig.get_python_inc(plat_specific=True) if py_pincl_dir not in include_dirs: include_dirs.append(py_pincl_dir) for d in default_include_dirs: d = os.path.join(d, os.path.basename(py_incl_dir)) if d not in include_dirs: include_dirs.append(d) system_info.__init__(self, default_lib_dirs=[], default_include_dirs=include_dirs) def calc_info(self): try: module = __import__(self.modulename) except ImportError: return info = {} macros = [] for v in ['__version__', 'version']: vrs = getattr(module, v, None) if vrs is None: continue macros = [(self.modulename.upper() +
# _numpy_info.calc_info() (continued): also find the include dir that holds
# <modulename>/arrayobject.h.  numarray_info / Numeric_info / numpy_info just
# rebind section and modulename.
# numerix_info: chooses the numeric backend from $NUMERIX, else defaults to
# numpy, falling back through Numeric and numarray; the msg1/msg2/msg3 import
# errors are only logged when all three imports fail.
'_VERSION', '"\\"%s\\""' % (vrs)), (self.modulename.upper(), None)] break dict_append(info, define_macros=macros) include_dirs = self.get_include_dirs() inc_dir = None for d in include_dirs: if self.combine_paths(d, os.path.join(self.modulename, 'arrayobject.h')): inc_dir = d break if inc_dir is not None: dict_append(info, include_dirs=[inc_dir]) if info: self.set_info(**info) return class numarray_info(_numpy_info): section = 'numarray' modulename = 'numarray' class Numeric_info(_numpy_info): section = 'Numeric' modulename = 'Numeric' class numpy_info(_numpy_info): section = 'numpy' modulename = 'numpy' class numerix_info(system_info): section = 'numerix' def calc_info(self): which = None, None if os.getenv("NUMERIX"): which = os.getenv("NUMERIX"), "environment var" # If all the above fail, default to numpy. if which[0] is None: which = "numpy", "defaulted" try: import numpy which = "numpy", "defaulted" except ImportError: msg1 = str(get_exception()) try: import Numeric which = "numeric", "defaulted" except ImportError: msg2 = str(get_exception()) try: import numarray which = "numarray", "defaulted" except ImportError: msg3 = str(get_exception()) log.info(msg1) log.info(msg2) log.info(msg3) which = which[0].strip().lower(), which[1] if which[0] not in ["numeric", "numarray", "numpy"]: raise ValueError("numerix selector must be either 'Numeric' " "or 'numarray' or 'numpy' but the value obtained" " from the %s was '%s'."
# numerix_info.calc_info() (end): validate the choice, re-export $NUMERIX and
# delegate to the chosen backend's info class.
# f2py_info: points extensions at numpy.f2py's fortranobject.c sources.
# boost_python_info and agg2_info (beginning here) locate *source* checkouts
# (boost*/agg2* dirs holding known marker files) and expose them as
# configuration libraries built from the discovered .cpp files.
% (which[1], which[0])) os.environ['NUMERIX'] = which[0] self.set_info(**get_info(which[0])) class f2py_info(system_info): def calc_info(self): try: import numpy.f2py as f2py except ImportError: return f2py_dir = os.path.join(os.path.dirname(f2py.__file__), 'src') self.set_info(sources=[os.path.join(f2py_dir, 'fortranobject.c')], include_dirs=[f2py_dir]) return class boost_python_info(system_info): section = 'boost_python' dir_env_var = 'BOOST' def get_paths(self, section, key): pre_dirs = system_info.get_paths(self, section, key) dirs = [] for d in pre_dirs: dirs.extend([d] + self.combine_paths(d, ['boost*'])) return [d for d in dirs if os.path.isdir(d)] def calc_info(self): src_dirs = self.get_src_dirs() src_dir = '' for d in src_dirs: if os.path.isfile(os.path.join(d, 'libs', 'python', 'src', 'module.cpp')): src_dir = d break if not src_dir: return py_incl_dirs = [distutils.sysconfig.get_python_inc()] py_pincl_dir = distutils.sysconfig.get_python_inc(plat_specific=True) if py_pincl_dir not in py_incl_dirs: py_incl_dirs.append(py_pincl_dir) srcs_dir = os.path.join(src_dir, 'libs', 'python', 'src') bpl_srcs = glob(os.path.join(srcs_dir, '*.cpp')) bpl_srcs += glob(os.path.join(srcs_dir, '*', '*.cpp')) info = {'libraries': [('boost_python_src', {'include_dirs': [src_dir] + py_incl_dirs, 'sources':bpl_srcs} )], 'include_dirs': [src_dir], } if info: self.set_info(**info) return class agg2_info(system_info): section = 'agg2' dir_env_var = 'AGG2' def get_paths(self, section, key): pre_dirs = system_info.get_paths(self, section, key) dirs = [] for d in pre_dirs: dirs.extend([d] + self.combine_paths(d, ['agg2*'])) return [d for d in dirs if os.path.isdir(d)] def calc_info(self): src_dirs = self.get_src_dirs() src_dir = '' for d in src_dirs: if os.path.isfile(os.path.join(d, 'src', 'agg_affine_matrix.cpp')): src_dir = d break if not src_dir: return if sys.platform == 'win32': agg2_srcs = glob(os.path.join(src_dir, 'src', 'platform', 'win32', 'agg_win32_bmp.cpp')) else:
# NOTE(review): flattened extraction -- each long line below concatenates many
# original source lines; comments are added only between them.
#
# agg2_info.calc_info() (end): platform-specific AGG sources -- the win32
# bitmap backend on Windows, all of src/*.cpp plus the X11 platform-support
# file elsewhere.
# _pkg_config_info begins: base class for sections resolved by shelling out
# to pkg-config (or a dedicated *-config executable); get_config_output()
# returns the command's stdout only when it exited successfully.
# NOTE(review): the 'File not found' warning interpolates config_exe, which
# find_executable() has just returned as falsy at that point -- logging the
# original exe name from get_config_exe() would be more informative.
agg2_srcs = glob(os.path.join(src_dir, 'src', '*.cpp')) agg2_srcs += [os.path.join(src_dir, 'src', 'platform', 'X11', 'agg_platform_support.cpp')] info = {'libraries': [('agg2_src', {'sources': agg2_srcs, 'include_dirs': [os.path.join(src_dir, 'include')], } )], 'include_dirs': [os.path.join(src_dir, 'include')], } if info: self.set_info(**info) return class _pkg_config_info(system_info): section = None config_env_var = 'PKG_CONFIG' default_config_exe = 'pkg-config' append_config_exe = '' version_macro_name = None release_macro_name = None version_flag = '--modversion' cflags_flag = '--cflags' def get_config_exe(self): if self.config_env_var in os.environ: return os.environ[self.config_env_var] return self.default_config_exe def get_config_output(self, config_exe, option): cmd = config_exe + ' ' + self.append_config_exe + ' ' + option s, o = exec_command(cmd, use_tee=0) if not s: return o def calc_info(self): config_exe = find_executable(self.get_config_exe()) if not config_exe: log.warn('File not found: %s. Cannot determine %s info.'
# _pkg_config_info.calc_info() (continued): harvest --modversion into version
# macros (class name upper-cased, plus optional version/release macro names),
# then split the --libs output into -l/-L/other buckets and the --cflags
# output into -I/-D/other buckets before assembling the info dict.
# wx_info and the gdk_pixbuf_xlib_2_info subclass below only override the
# config executable, flags and macro names.
\ % (config_exe, self.section)) return info = {} macros = [] libraries = [] library_dirs = [] include_dirs = [] extra_link_args = [] extra_compile_args = [] version = self.get_config_output(config_exe, self.version_flag) if version: macros.append((self.__class__.__name__.split('.')[-1].upper(), '"\\"%s\\""' % (version))) if self.version_macro_name: macros.append((self.version_macro_name + '_%s' % (version.replace('.', '_')), None)) if self.release_macro_name: release = self.get_config_output(config_exe, '--release') if release: macros.append((self.release_macro_name + '_%s' % (release.replace('.', '_')), None)) opts = self.get_config_output(config_exe, '--libs') if opts: for opt in opts.split(): if opt[:2] == '-l': libraries.append(opt[2:]) elif opt[:2] == '-L': library_dirs.append(opt[2:]) else: extra_link_args.append(opt) opts = self.get_config_output(config_exe, self.cflags_flag) if opts: for opt in opts.split(): if opt[:2] == '-I': include_dirs.append(opt[2:]) elif opt[:2] == '-D': if '=' in opt: n, v = opt[2:].split('=') macros.append((n, v)) else: macros.append((opt[2:], None)) else: extra_compile_args.append(opt) if macros: dict_append(info, define_macros=macros) if libraries: dict_append(info, libraries=libraries) if library_dirs: dict_append(info, library_dirs=library_dirs) if include_dirs: dict_append(info, include_dirs=include_dirs) if extra_link_args: dict_append(info, extra_link_args=extra_link_args) if extra_compile_args: dict_append(info, extra_compile_args=extra_compile_args) if info: self.set_info(**info) return class wx_info(_pkg_config_info): section = 'wx' config_env_var = 'WX_CONFIG' default_config_exe = 'wx-config' append_config_exe = '' version_macro_name = 'WX_VERSION' release_macro_name = 'WX_RELEASE' version_flag = '--version' cflags_flag = '--cxxflags' class gdk_pixbuf_xlib_2_info(_pkg_config_info): section = 'gdk_pixbuf_xlib_2' append_config_exe = 'gdk-pixbuf-xlib-2.0' version_macro_name = 'GDK_PIXBUF_XLIB_VERSION' class
# The remaining pkg-config sections (gdk*, gtk+*, xft, freetype2), followed by
# amd_info and the beginning of umfpack_info: classic AMD/UMFPACK library
# lookups that, when the headers are found, also emit swig -I options and
# SCIPY_AMD_H / SCIPY_UMFPACK_H macros.
gdk_pixbuf_2_info(_pkg_config_info): section = 'gdk_pixbuf_2' append_config_exe = 'gdk-pixbuf-2.0' version_macro_name = 'GDK_PIXBUF_VERSION' class gdk_x11_2_info(_pkg_config_info): section = 'gdk_x11_2' append_config_exe = 'gdk-x11-2.0' version_macro_name = 'GDK_X11_VERSION' class gdk_2_info(_pkg_config_info): section = 'gdk_2' append_config_exe = 'gdk-2.0' version_macro_name = 'GDK_VERSION' class gdk_info(_pkg_config_info): section = 'gdk' append_config_exe = 'gdk' version_macro_name = 'GDK_VERSION' class gtkp_x11_2_info(_pkg_config_info): section = 'gtkp_x11_2' append_config_exe = 'gtk+-x11-2.0' version_macro_name = 'GTK_X11_VERSION' class gtkp_2_info(_pkg_config_info): section = 'gtkp_2' append_config_exe = 'gtk+-2.0' version_macro_name = 'GTK_VERSION' class xft_info(_pkg_config_info): section = 'xft' append_config_exe = 'xft' version_macro_name = 'XFT_VERSION' class freetype2_info(_pkg_config_info): section = 'freetype2' append_config_exe = 'freetype2' version_macro_name = 'FREETYPE2_VERSION' class amd_info(system_info): section = 'amd' dir_env_var = 'AMD' _lib_names = ['amd'] def calc_info(self): lib_dirs = self.get_lib_dirs() amd_libs = self.get_libs('amd_libs', self._lib_names) info = self.check_libs(lib_dirs, amd_libs, []) if info is None: return include_dirs = self.get_include_dirs() inc_dir = None for d in include_dirs: p = self.combine_paths(d, 'amd.h') if p: inc_dir = os.path.dirname(p[0]) break if inc_dir is not None: dict_append(info, include_dirs=[inc_dir], define_macros=[('SCIPY_AMD_H', None)], swig_opts=['-I' + inc_dir]) self.set_info(**info) return class umfpack_info(system_info): section = 'umfpack' dir_env_var = 'UMFPACK' notfounderror = UmfpackNotFoundError _lib_names = ['umfpack'] def calc_info(self): lib_dirs = self.get_lib_dirs() umfpack_libs = self.get_libs('umfpack_libs', self._lib_names) info = self.check_libs(lib_dirs, umfpack_libs, []) if info is None: return include_dirs = self.get_include_dirs() inc_dir = None for d in include_dirs: p
= self.combine_paths(d, ['', 'umfpack'], 'umfpack.h') if p: inc_dir = os.path.dirname(p[0]) break if inc_dir is not None: dict_append(info, include_dirs=[inc_dir], define_macros=[('SCIPY_UMFPACK_H', None)], swig_opts=['-I' + inc_dir]) amd = get_info('amd') dict_append(info, **get_info('amd')) self.set_info(**info) return def combine_paths(*args, **kws): """ Return a list of existing paths composed by all combinations of items from arguments. """ r = [] for a in args: if not a: continue if is_string(a): a = [a] r.append(a) args = r if not args: return [] if len(args) == 1: result = reduce(lambda a, b: a + b, map(glob, args[0]), []) elif len(args) == 2: result = [] for a0 in args[0]: for a1 in args[1]: result.extend(glob(os.path.join(a0, a1))) else: result = combine_paths(*(combine_paths(args[0], args[1]) + args[2:])) verbosity = kws.get('verbosity', 1) log.debug('(paths: %s)', ','.join(result)) return result language_map = {'c': 0, 'c++': 1, 'f77': 2, 'f90': 3} inv_language_map = {0: 'c', 1: 'c++', 2: 'f77', 3: 'f90'} def dict_append(d, **kws): languages = [] for k, v in kws.items(): if k == 'language': languages.append(v) continue if k in d: if k in ['library_dirs', 'include_dirs', 'extra_compile_args', 'extra_link_args', 'runtime_library_dirs', 'define_macros']: [d[k].append(vv) for vv in v if vv not in d[k]] else: d[k].extend(v) else: d[k] = v if languages: l = inv_language_map[max([language_map.get(l, 0) for l in languages])] d['language'] = l return def parseCmdLine(argv=(None,)): import optparse parser = optparse.OptionParser("usage: %prog [-v] [info objs]") parser.add_option('-v', '--verbose', action='store_true', dest='verbose', default=False, help='be verbose and print more messages') opts, args = parser.parse_args(args=argv[1:]) return opts, args def show_all(argv=None): import inspect if argv is None: argv = sys.argv opts, args = parseCmdLine(argv) if opts.verbose: log.set_threshold(log.DEBUG) else: log.set_threshold(log.INFO) show_only = [] for n in 
args: if n[-5:] != '_info': n = n + '_info' show_only.append(n) show_all = not show_only _gdict_ = globals().copy() for name, c in _gdict_.items(): if not inspect.isclass(c): continue if not issubclass(c, system_info) or c is system_info: continue if not show_all: if name not in show_only: continue del show_only[show_only.index(name)] conf = c() conf.verbosity = 2 r = conf.get_info() if show_only: log.info('Info classes not defined: %s', ','.join(show_only)) if __name__ == "__main__": show_all()
codeparrot/github-code-clean
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers import pybindgen.settings import warnings class ErrorHandler(pybindgen.settings.ErrorHandler): def handle_error(self, wrapper, exception, traceback_): warnings.warn("exception %r in wrapper %s" % (exception, wrapper)) return True pybindgen.settings.error_handler = ErrorHandler() import sys def module_init(): root_module = Module('ns.aodv', cpp_namespace='::ns3') return root_module def register_types(module): root_module = module.get_root() ## wifi-mac-header.h (module 'wifi'): ns3::WifiMacType [enumeration] module.add_enum('WifiMacType', ['WIFI_MAC_CTL_RTS', 'WIFI_MAC_CTL_CTS', 'WIFI_MAC_CTL_ACK', 'WIFI_MAC_CTL_BACKREQ', 'WIFI_MAC_CTL_BACKRESP', 'WIFI_MAC_MGT_BEACON', 'WIFI_MAC_MGT_ASSOCIATION_REQUEST', 'WIFI_MAC_MGT_ASSOCIATION_RESPONSE', 'WIFI_MAC_MGT_DISASSOCIATION', 'WIFI_MAC_MGT_REASSOCIATION_REQUEST', 'WIFI_MAC_MGT_REASSOCIATION_RESPONSE', 'WIFI_MAC_MGT_PROBE_REQUEST', 'WIFI_MAC_MGT_PROBE_RESPONSE', 'WIFI_MAC_MGT_AUTHENTICATION', 'WIFI_MAC_MGT_DEAUTHENTICATION', 'WIFI_MAC_MGT_ACTION', 'WIFI_MAC_MGT_ACTION_NO_ACK', 'WIFI_MAC_MGT_MULTIHOP_ACTION', 'WIFI_MAC_DATA', 'WIFI_MAC_DATA_CFACK', 'WIFI_MAC_DATA_CFPOLL', 'WIFI_MAC_DATA_CFACK_CFPOLL', 'WIFI_MAC_DATA_NULL', 'WIFI_MAC_DATA_NULL_CFACK', 'WIFI_MAC_DATA_NULL_CFPOLL', 'WIFI_MAC_DATA_NULL_CFACK_CFPOLL', 'WIFI_MAC_QOSDATA', 'WIFI_MAC_QOSDATA_CFACK', 'WIFI_MAC_QOSDATA_CFPOLL', 'WIFI_MAC_QOSDATA_CFACK_CFPOLL', 'WIFI_MAC_QOSDATA_NULL', 'WIFI_MAC_QOSDATA_NULL_CFPOLL', 'WIFI_MAC_QOSDATA_NULL_CFACK_CFPOLL'], import_from_module='ns.wifi') ## address.h (module 'network'): ns3::Address [class] module.add_class('Address', import_from_module='ns.network') ## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration] module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network') ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class] 
module.add_class('AttributeConstructionList', import_from_module='ns.core') ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct] module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList']) ## buffer.h (module 'network'): ns3::Buffer [class] module.add_class('Buffer', import_from_module='ns.network') ## buffer.h (module 'network'): ns3::Buffer::Iterator [class] module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer']) ## packet.h (module 'network'): ns3::ByteTagIterator [class] module.add_class('ByteTagIterator', import_from_module='ns.network') ## packet.h (module 'network'): ns3::ByteTagIterator::Item [class] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator']) ## byte-tag-list.h (module 'network'): ns3::ByteTagList [class] module.add_class('ByteTagList', import_from_module='ns.network') ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class] module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList']) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator']) ## callback.h (module 'core'): ns3::CallbackBase [class] module.add_class('CallbackBase', import_from_module='ns.core') ## event-id.h (module 'core'): ns3::EventId [class] module.add_class('EventId', import_from_module='ns.core') ## int-to-type.h (module 'core'): ns3::IntToType<0> [struct] module.add_class('IntToType', import_from_module='ns.core', template_parameters=['0']) ## int-to-type.h (module 'core'): ns3::IntToType<0>::v_e [enumeration] module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 0 >'], import_from_module='ns.core') ## int-to-type.h (module 'core'): ns3::IntToType<1> [struct] 
module.add_class('IntToType', import_from_module='ns.core', template_parameters=['1']) ## int-to-type.h (module 'core'): ns3::IntToType<1>::v_e [enumeration] module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 1 >'], import_from_module='ns.core') ## int-to-type.h (module 'core'): ns3::IntToType<2> [struct] module.add_class('IntToType', import_from_module='ns.core', template_parameters=['2']) ## int-to-type.h (module 'core'): ns3::IntToType<2>::v_e [enumeration] module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 2 >'], import_from_module='ns.core') ## int-to-type.h (module 'core'): ns3::IntToType<3> [struct] module.add_class('IntToType', import_from_module='ns.core', template_parameters=['3']) ## int-to-type.h (module 'core'): ns3::IntToType<3>::v_e [enumeration] module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 3 >'], import_from_module='ns.core') ## int-to-type.h (module 'core'): ns3::IntToType<4> [struct] module.add_class('IntToType', import_from_module='ns.core', template_parameters=['4']) ## int-to-type.h (module 'core'): ns3::IntToType<4>::v_e [enumeration] module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 4 >'], import_from_module='ns.core') ## int-to-type.h (module 'core'): ns3::IntToType<5> [struct] module.add_class('IntToType', import_from_module='ns.core', template_parameters=['5']) ## int-to-type.h (module 'core'): ns3::IntToType<5>::v_e [enumeration] module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 5 >'], import_from_module='ns.core') ## int-to-type.h (module 'core'): ns3::IntToType<6> [struct] module.add_class('IntToType', import_from_module='ns.core', template_parameters=['6']) ## int-to-type.h (module 'core'): ns3::IntToType<6>::v_e [enumeration] module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 6 >'], import_from_module='ns.core') ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class] 
module.add_class('Ipv4Address', import_from_module='ns.network') ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class] root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address']) ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress [class] module.add_class('Ipv4InterfaceAddress', import_from_module='ns.internet') ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e [enumeration] module.add_enum('InterfaceAddressScope_e', ['HOST', 'LINK', 'GLOBAL'], outer_class=root_module['ns3::Ipv4InterfaceAddress'], import_from_module='ns.internet') ## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class] module.add_class('Ipv4Mask', import_from_module='ns.network') ## ipv4-routing-helper.h (module 'internet'): ns3::Ipv4RoutingHelper [class] module.add_class('Ipv4RoutingHelper', allow_subclassing=True, import_from_module='ns.internet') ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class] module.add_class('Ipv6Address', import_from_module='ns.network') ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class] root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address']) ## ipv6-interface-address.h (module 'internet'): ns3::Ipv6InterfaceAddress [class] module.add_class('Ipv6InterfaceAddress', import_from_module='ns.internet') ## ipv6-interface-address.h (module 'internet'): ns3::Ipv6InterfaceAddress::State_e [enumeration] module.add_enum('State_e', ['TENTATIVE', 'DEPRECATED', 'PREFERRED', 'PERMANENT', 'HOMEADDRESS', 'TENTATIVE_OPTIMISTIC', 'INVALID'], outer_class=root_module['ns3::Ipv6InterfaceAddress'], import_from_module='ns.internet') ## ipv6-interface-address.h (module 'internet'): ns3::Ipv6InterfaceAddress::Scope_e [enumeration] module.add_enum('Scope_e', ['HOST', 'LINKLOCAL', 'GLOBAL'], outer_class=root_module['ns3::Ipv6InterfaceAddress'], import_from_module='ns.internet') ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class] 
module.add_class('Ipv6Prefix', import_from_module='ns.network') ## mac48-address.h (module 'network'): ns3::Mac48Address [class] module.add_class('Mac48Address', import_from_module='ns.network') ## mac48-address.h (module 'network'): ns3::Mac48Address [class] root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address']) ## node-container.h (module 'network'): ns3::NodeContainer [class] module.add_class('NodeContainer', import_from_module='ns.network') ## object-base.h (module 'core'): ns3::ObjectBase [class] module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core') ## object.h (module 'core'): ns3::ObjectDeleter [struct] module.add_class('ObjectDeleter', import_from_module='ns.core') ## object-factory.h (module 'core'): ns3::ObjectFactory [class] module.add_class('ObjectFactory', import_from_module='ns.core') ## packet-metadata.h (module 'network'): ns3::PacketMetadata [class] module.add_class('PacketMetadata', import_from_module='ns.network') ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata']) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration] module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network') ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class] module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata']) ## packet.h (module 'network'): ns3::PacketTagIterator [class] module.add_class('PacketTagIterator', import_from_module='ns.network') ## packet.h (module 'network'): ns3::PacketTagIterator::Item [class] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator']) ## packet-tag-list.h (module 'network'): ns3::PacketTagList [class] 
module.add_class('PacketTagList', import_from_module='ns.network') ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct] module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList']) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simulator.h (module 'core'): ns3::Simulator [class] module.add_class('Simulator', destructor_visibility='private', import_from_module='ns.core') ## tag.h (module 'network'): ns3::Tag [class] module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase']) ## tag-buffer.h (module 'network'): ns3::TagBuffer [class] module.add_class('TagBuffer', import_from_module='ns.network') ## timer.h (module 'core'): ns3::Timer [class] module.add_class('Timer', import_from_module='ns.core') ## timer.h (module 'core'): ns3::Timer::DestroyPolicy [enumeration] module.add_enum('DestroyPolicy', ['CANCEL_ON_DESTROY', 'REMOVE_ON_DESTROY', 'CHECK_ON_DESTROY'], outer_class=root_module['ns3::Timer'], import_from_module='ns.core') ## timer.h (module 'core'): ns3::Timer::State [enumeration] module.add_enum('State', ['RUNNING', 'EXPIRED', 'SUSPENDED'], outer_class=root_module['ns3::Timer'], import_from_module='ns.core') ## timer-impl.h (module 'core'): ns3::TimerImpl [class] module.add_class('TimerImpl', allow_subclassing=True, import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId [class] module.add_class('TypeId', import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration] 
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct] module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct] module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## empty.h (module 'core'): ns3::empty [class] module.add_class('empty', import_from_module='ns.core') ## int64x64-double.h (module 'core'): ns3::int64x64_t [class] module.add_class('int64x64_t', import_from_module='ns.core') ## aodv-helper.h (module 'aodv'): ns3::AodvHelper [class] module.add_class('AodvHelper', parent=root_module['ns3::Ipv4RoutingHelper']) ## chunk.h (module 'network'): ns3::Chunk [class] module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase']) ## header.h (module 'network'): ns3::Header [class] module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk']) ## ipv4-header.h (module 'internet'): ns3::Ipv4Header [class] module.add_class('Ipv4Header', import_from_module='ns.internet', parent=root_module['ns3::Header']) ## ipv4-header.h (module 'internet'): ns3::Ipv4Header::DscpType [enumeration] module.add_enum('DscpType', ['DscpDefault', 'CS1', 'AF11', 'AF12', 'AF13', 'CS2', 'AF21', 'AF22', 'AF23', 'CS3', 'AF31', 'AF32', 'AF33', 'CS4', 'AF41', 'AF42', 'AF43', 'CS5', 'EF', 'CS6', 'CS7'], outer_class=root_module['ns3::Ipv4Header'], import_from_module='ns.internet') ## ipv4-header.h (module 'internet'): ns3::Ipv4Header::EcnType [enumeration] module.add_enum('EcnType', ['NotECT', 'ECT1', 'ECT0', 'CE'], outer_class=root_module['ns3::Ipv4Header'], import_from_module='ns.internet') ## ipv6-header.h (module 'internet'): ns3::Ipv6Header [class] 
module.add_class('Ipv6Header', import_from_module='ns.internet', parent=root_module['ns3::Header']) ## ipv6-header.h (module 'internet'): ns3::Ipv6Header::NextHeader_e [enumeration] module.add_enum('NextHeader_e', ['IPV6_EXT_HOP_BY_HOP', 'IPV6_IPV4', 'IPV6_TCP', 'IPV6_UDP', 'IPV6_IPV6', 'IPV6_EXT_ROUTING', 'IPV6_EXT_FRAGMENTATION', 'IPV6_EXT_CONFIDENTIALITY', 'IPV6_EXT_AUTHENTIFICATION', 'IPV6_ICMPV6', 'IPV6_EXT_END', 'IPV6_EXT_DESTINATION', 'IPV6_SCTP', 'IPV6_EXT_MOBILITY', 'IPV6_UDP_LITE'], outer_class=root_module['ns3::Ipv6Header'], import_from_module='ns.internet') ## object.h (module 'core'): ns3::Object [class] module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >']) ## object.h (module 'core'): ns3::Object::AggregateIterator [class] module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object']) ## random-variable-stream.h (module 'core'): ns3::RandomVariableStream [class] module.add_class('RandomVariableStream', import_from_module='ns.core', parent=root_module['ns3::Object']) ## random-variable-stream.h (module 'core'): ns3::SequentialRandomVariable [class] module.add_class('SequentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, 
ns3::DefaultDeleter<ns3::AttributeChecker> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): 
ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Ipv4MulticastRoute', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Ipv4MulticastRoute>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Ipv4Route', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Ipv4Route>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::OutputStreamWrapper', 'ns3::empty', 'ns3::DefaultDeleter<ns3::OutputStreamWrapper>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', 
peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## socket.h (module 'network'): ns3::Socket [class] module.add_class('Socket', import_from_module='ns.network', parent=root_module['ns3::Object']) ## socket.h (module 'network'): ns3::Socket::SocketErrno [enumeration] module.add_enum('SocketErrno', ['ERROR_NOTERROR', 'ERROR_ISCONN', 'ERROR_NOTCONN', 'ERROR_MSGSIZE', 'ERROR_AGAIN', 'ERROR_SHUTDOWN', 'ERROR_OPNOTSUPP', 'ERROR_AFNOSUPPORT', 'ERROR_INVAL', 'ERROR_BADF', 'ERROR_NOROUTETOHOST', 'ERROR_NODEV', 'ERROR_ADDRNOTAVAIL', 'ERROR_ADDRINUSE', 'SOCKET_ERRNO_LAST'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network') ## socket.h (module 'network'): ns3::Socket::SocketType [enumeration] module.add_enum('SocketType', ['NS3_SOCK_STREAM', 'NS3_SOCK_SEQPACKET', 'NS3_SOCK_DGRAM', 'NS3_SOCK_RAW'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network') ## socket.h (module 'network'): ns3::SocketAddressTag [class] module.add_class('SocketAddressTag', 
import_from_module='ns.network', parent=root_module['ns3::Tag']) ## socket.h (module 'network'): ns3::SocketIpTtlTag [class] module.add_class('SocketIpTtlTag', import_from_module='ns.network', parent=root_module['ns3::Tag']) ## socket.h (module 'network'): ns3::SocketSetDontFragmentTag [class] module.add_class('SocketSetDontFragmentTag', import_from_module='ns.network', parent=root_module['ns3::Tag']) ## nstime.h (module 'core'): ns3::Time [class] module.add_class('Time', import_from_module='ns.core') ## nstime.h (module 'core'): ns3::Time::Unit [enumeration] module.add_enum('Unit', ['S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core') ## nstime.h (module 'core'): ns3::Time [class] root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t']) ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class] module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >']) ## trailer.h (module 'network'): ns3::Trailer [class] module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk']) ## random-variable-stream.h (module 'core'): ns3::TriangularRandomVariable [class] module.add_class('TriangularRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## random-variable-stream.h (module 'core'): ns3::UniformRandomVariable [class] module.add_class('UniformRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## random-variable-stream.h (module 'core'): ns3::WeibullRandomVariable [class] module.add_class('WeibullRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## wifi-mac-header.h (module 'wifi'): ns3::WifiMacHeader [class] module.add_class('WifiMacHeader', import_from_module='ns.wifi', 
parent=root_module['ns3::Header']) ## wifi-mac-header.h (module 'wifi'): ns3::WifiMacHeader::QosAckPolicy [enumeration] module.add_enum('QosAckPolicy', ['NORMAL_ACK', 'NO_ACK', 'NO_EXPLICIT_ACK', 'BLOCK_ACK'], outer_class=root_module['ns3::WifiMacHeader'], import_from_module='ns.wifi') ## wifi-mac-header.h (module 'wifi'): ns3::WifiMacHeader::AddressType [enumeration] module.add_enum('AddressType', ['ADDR1', 'ADDR2', 'ADDR3', 'ADDR4'], outer_class=root_module['ns3::WifiMacHeader'], import_from_module='ns.wifi') ## random-variable-stream.h (module 'core'): ns3::ZetaRandomVariable [class] module.add_class('ZetaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## random-variable-stream.h (module 'core'): ns3::ZipfRandomVariable [class] module.add_class('ZipfRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## arp-cache.h (module 'internet'): ns3::ArpCache [class] module.add_class('ArpCache', import_from_module='ns.internet', parent=root_module['ns3::Object']) ## arp-cache.h (module 'internet'): ns3::ArpCache::Entry [class] module.add_class('Entry', import_from_module='ns.internet', outer_class=root_module['ns3::ArpCache']) ## attribute.h (module 'core'): ns3::AttributeAccessor [class] module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >']) ## attribute.h (module 'core'): ns3::AttributeChecker [class] module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >']) ## attribute.h (module 'core'): ns3::AttributeValue [class] module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', 
parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >']) ## callback.h (module 'core'): ns3::CallbackChecker [class] module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## callback.h (module 'core'): ns3::CallbackImplBase [class] module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >']) ## callback.h (module 'core'): ns3::CallbackValue [class] module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## random-variable-stream.h (module 'core'): ns3::ConstantRandomVariable [class] module.add_class('ConstantRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## random-variable-stream.h (module 'core'): ns3::DeterministicRandomVariable [class] module.add_class('DeterministicRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## random-variable-stream.h (module 'core'): ns3::EmpiricalRandomVariable [class] module.add_class('EmpiricalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## attribute.h (module 'core'): ns3::EmptyAttributeValue [class] module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## enum.h (module 'core'): ns3::EnumChecker [class] module.add_class('EnumChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## enum.h (module 'core'): ns3::EnumValue [class] module.add_class('EnumValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## random-variable-stream.h (module 'core'): ns3::ErlangRandomVariable [class] module.add_class('ErlangRandomVariable', import_from_module='ns.core', 
parent=root_module['ns3::RandomVariableStream']) ## event-impl.h (module 'core'): ns3::EventImpl [class] module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >']) ## random-variable-stream.h (module 'core'): ns3::ExponentialRandomVariable [class] module.add_class('ExponentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## random-variable-stream.h (module 'core'): ns3::GammaRandomVariable [class] module.add_class('GammaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## ip-l4-protocol.h (module 'internet'): ns3::IpL4Protocol [class] module.add_class('IpL4Protocol', import_from_module='ns.internet', parent=root_module['ns3::Object']) ## ip-l4-protocol.h (module 'internet'): ns3::IpL4Protocol::RxStatus [enumeration] module.add_enum('RxStatus', ['RX_OK', 'RX_CSUM_FAILED', 'RX_ENDPOINT_CLOSED', 'RX_ENDPOINT_UNREACH'], outer_class=root_module['ns3::IpL4Protocol'], import_from_module='ns.internet') ## ipv4.h (module 'internet'): ns3::Ipv4 [class] module.add_class('Ipv4', import_from_module='ns.internet', parent=root_module['ns3::Object']) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class] module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class] module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv4-interface.h (module 'internet'): ns3::Ipv4Interface [class] module.add_class('Ipv4Interface', import_from_module='ns.internet', parent=root_module['ns3::Object']) ## ipv4-l3-protocol.h (module 'internet'): ns3::Ipv4L3Protocol [class] module.add_class('Ipv4L3Protocol', import_from_module='ns.internet', parent=root_module['ns3::Ipv4']) ## ipv4-l3-protocol.h (module 
'internet'): ns3::Ipv4L3Protocol::DropReason [enumeration] module.add_enum('DropReason', ['DROP_TTL_EXPIRED', 'DROP_NO_ROUTE', 'DROP_BAD_CHECKSUM', 'DROP_INTERFACE_DOWN', 'DROP_ROUTE_ERROR', 'DROP_FRAGMENT_TIMEOUT'], outer_class=root_module['ns3::Ipv4L3Protocol'], import_from_module='ns.internet') ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class] module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class] module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute [class] module.add_class('Ipv4MulticastRoute', import_from_module='ns.internet', parent=root_module['ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >']) ## ipv4-route.h (module 'internet'): ns3::Ipv4Route [class] module.add_class('Ipv4Route', import_from_module='ns.internet', parent=root_module['ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >']) ## ipv4-routing-protocol.h (module 'internet'): ns3::Ipv4RoutingProtocol [class] module.add_class('Ipv4RoutingProtocol', import_from_module='ns.internet', parent=root_module['ns3::Object']) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class] module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class] module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv6-interface.h (module 'internet'): ns3::Ipv6Interface [class] module.add_class('Ipv6Interface', import_from_module='ns.internet', parent=root_module['ns3::Object']) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class] module.add_class('Ipv6PrefixChecker', 
import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class] module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## random-variable-stream.h (module 'core'): ns3::LogNormalRandomVariable [class] module.add_class('LogNormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker [class] module.add_class('Mac48AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## mac48-address.h (module 'network'): ns3::Mac48AddressValue [class] module.add_class('Mac48AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## net-device.h (module 'network'): ns3::NetDevice [class] module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object']) ## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration] module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network') ## nix-vector.h (module 'network'): ns3::NixVector [class] module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >']) ## node.h (module 'network'): ns3::Node [class] module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object']) ## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable [class] module.add_class('NormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class] 
module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class] module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper [class] module.add_class('OutputStreamWrapper', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >']) ## packet.h (module 'network'): ns3::Packet [class] module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >']) ## random-variable-stream.h (module 'core'): ns3::ParetoRandomVariable [class] module.add_class('ParetoRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## nstime.h (module 'core'): ns3::TimeChecker [class] module.add_class('TimeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## nstime.h (module 'core'): ns3::TimeValue [class] module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## type-id.h (module 'core'): ns3::TypeIdChecker [class] module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## type-id.h (module 'core'): ns3::TypeIdValue [class] module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## address.h (module 'network'): ns3::AddressChecker [class] module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## address.h (module 'network'): ns3::AddressValue [class] module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) 
    ## NOTE(review): this file appears to be PyBindGen-generated ns-3 binding
    ## code (module.add_class / add_cpp_namespace API) -- do not hand-edit;
    ## regenerate from the ns-3 API scan instead. TODO confirm generator.
    module.add_container('std::list< ns3::Ptr< ns3::Packet > >', 'ns3::Ptr< ns3::Packet >', container_type='list')
    module.add_container('std::map< unsigned int, unsigned int >', ('unsigned int', 'unsigned int'), container_type='map')
    ## Register a nested module for the namespace FatalImpl
    nested_module = module.add_cpp_namespace('FatalImpl')
    register_types_ns3_FatalImpl(nested_module)
    ## Register a nested module for the namespace aodv
    nested_module = module.add_cpp_namespace('aodv')
    register_types_ns3_aodv(nested_module)

def register_types_ns3_FatalImpl(module):
    """Register types of the ns3::FatalImpl namespace.

    No additional types are registered here; the generator still emits the
    root-module lookup for uniformity with the other namespace hooks.
    """
    root_module = module.get_root()

def register_types_ns3_aodv(module):
    """Register the enums, classes and containers of the ns3::aodv namespace.

    Types must be registered before register_methods() runs, since the
    method-registration calls look the classes up by their C++ name.
    """
    root_module = module.get_root()
    ## aodv-packet.h (module 'aodv'): ns3::aodv::MessageType [enumeration]
    module.add_enum('MessageType', ['AODVTYPE_RREQ', 'AODVTYPE_RREP', 'AODVTYPE_RERR', 'AODVTYPE_RREP_ACK'])
    ## aodv-rtable.h (module 'aodv'): ns3::aodv::RouteFlags [enumeration]
    module.add_enum('RouteFlags', ['VALID', 'INVALID', 'IN_SEARCH'])
    ## aodv-dpd.h (module 'aodv'): ns3::aodv::DuplicatePacketDetection [class]
    module.add_class('DuplicatePacketDetection')
    ## aodv-id-cache.h (module 'aodv'): ns3::aodv::IdCache [class]
    module.add_class('IdCache')
    ## aodv-neighbor.h (module 'aodv'): ns3::aodv::Neighbors [class]
    module.add_class('Neighbors')
    ## aodv-neighbor.h (module 'aodv'): ns3::aodv::Neighbors::Neighbor [struct]
    module.add_class('Neighbor', outer_class=root_module['ns3::aodv::Neighbors'])
    ## aodv-rqueue.h (module 'aodv'): ns3::aodv::QueueEntry [class]
    module.add_class('QueueEntry')
    ## aodv-rqueue.h (module 'aodv'): ns3::aodv::RequestQueue [class]
    module.add_class('RequestQueue')
    ## aodv-packet.h (module 'aodv'): ns3::aodv::RerrHeader [class]
    module.add_class('RerrHeader', parent=root_module['ns3::Header'])
    ## aodv-routing-protocol.h (module 'aodv'): ns3::aodv::RoutingProtocol [class]
    module.add_class('RoutingProtocol', parent=root_module['ns3::Ipv4RoutingProtocol'])
    ## aodv-rtable.h (module 'aodv'): ns3::aodv::RoutingTable [class]
    module.add_class('RoutingTable')
    ## aodv-rtable.h (module 'aodv'): ns3::aodv::RoutingTableEntry [class]
    module.add_class('RoutingTableEntry')
    ## aodv-packet.h (module 'aodv'): ns3::aodv::RrepAckHeader [class]
    module.add_class('RrepAckHeader', parent=root_module['ns3::Header'])
    ## aodv-packet.h (module 'aodv'): ns3::aodv::RrepHeader [class]
    module.add_class('RrepHeader', parent=root_module['ns3::Header'])
    ## aodv-packet.h (module 'aodv'): ns3::aodv::RreqHeader [class]
    module.add_class('RreqHeader', parent=root_module['ns3::Header'])
    ## aodv-packet.h (module 'aodv'): ns3::aodv::TypeHeader [class]
    module.add_class('TypeHeader', parent=root_module['ns3::Header'])
    module.add_container('std::map< ns3::Ipv4Address, unsigned int >', ('ns3::Ipv4Address', 'unsigned int'), container_type='map')
    module.add_container('std::vector< ns3::Ipv4Address >', 'ns3::Ipv4Address', container_type='vector')

def register_methods(root_module):
    """Register the methods/operators of every previously-registered type.

    Each helper receives the root module and the class wrapper it fills in;
    the call order mirrors the generated type-registration order.
    """
    register_Ns3Address_methods(root_module, root_module['ns3::Address'])
    register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
    register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
    register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
    register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
    register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
    register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
    register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
    register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
    register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
    register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
    register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
    ## NOTE(review): interior of the generated register_methods() dispatch
    ## list -- one call per bound type, keyed by fully-qualified C++ name.
    register_Ns3IntToType__0_methods(root_module, root_module['ns3::IntToType< 0 >'])
    register_Ns3IntToType__1_methods(root_module, root_module['ns3::IntToType< 1 >'])
    register_Ns3IntToType__2_methods(root_module, root_module['ns3::IntToType< 2 >'])
    register_Ns3IntToType__3_methods(root_module, root_module['ns3::IntToType< 3 >'])
    register_Ns3IntToType__4_methods(root_module, root_module['ns3::IntToType< 4 >'])
    register_Ns3IntToType__5_methods(root_module, root_module['ns3::IntToType< 5 >'])
    register_Ns3IntToType__6_methods(root_module, root_module['ns3::IntToType< 6 >'])
    register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
    register_Ns3Ipv4InterfaceAddress_methods(root_module, root_module['ns3::Ipv4InterfaceAddress'])
    register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
    register_Ns3Ipv4RoutingHelper_methods(root_module, root_module['ns3::Ipv4RoutingHelper'])
    register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
    register_Ns3Ipv6InterfaceAddress_methods(root_module, root_module['ns3::Ipv6InterfaceAddress'])
    register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
    register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address'])
    register_Ns3NodeContainer_methods(root_module, root_module['ns3::NodeContainer'])
    register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
    register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
    register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
    register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
    register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
    register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
    register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
    register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
    register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
    register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
    register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
    register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
    register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
    register_Ns3Timer_methods(root_module, root_module['ns3::Timer'])
    register_Ns3TimerImpl_methods(root_module, root_module['ns3::TimerImpl'])
    register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
    register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
    register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
    register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
    register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
    register_Ns3AodvHelper_methods(root_module, root_module['ns3::AodvHelper'])
    register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
    register_Ns3Header_methods(root_module, root_module['ns3::Header'])
    register_Ns3Ipv4Header_methods(root_module, root_module['ns3::Ipv4Header'])
    register_Ns3Ipv6Header_methods(root_module, root_module['ns3::Ipv6Header'])
    register_Ns3Object_methods(root_module, root_module['ns3::Object'])
    register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
    register_Ns3RandomVariableStream_methods(root_module, root_module['ns3::RandomVariableStream'])
    register_Ns3SequentialRandomVariable_methods(root_module, root_module['ns3::SequentialRandomVariable'])
    register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    register_Ns3SimpleRefCount__Ns3Ipv4MulticastRoute_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4MulticastRoute__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >'])
    register_Ns3SimpleRefCount__Ns3Ipv4Route_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4Route__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >'])
    register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
    register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
    register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
    register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    register_Ns3Socket_methods(root_module, root_module['ns3::Socket'])
    register_Ns3SocketAddressTag_methods(root_module, root_module['ns3::SocketAddressTag'])
    register_Ns3SocketIpTtlTag_methods(root_module, root_module['ns3::SocketIpTtlTag'])
    register_Ns3SocketSetDontFragmentTag_methods(root_module, root_module['ns3::SocketSetDontFragmentTag'])
    register_Ns3Time_methods(root_module, root_module['ns3::Time'])
    register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
    register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
    register_Ns3TriangularRandomVariable_methods(root_module, root_module['ns3::TriangularRandomVariable'])
    register_Ns3UniformRandomVariable_methods(root_module, root_module['ns3::UniformRandomVariable'])
    register_Ns3WeibullRandomVariable_methods(root_module, root_module['ns3::WeibullRandomVariable'])
    register_Ns3WifiMacHeader_methods(root_module, root_module['ns3::WifiMacHeader'])
    register_Ns3ZetaRandomVariable_methods(root_module, root_module['ns3::ZetaRandomVariable'])
    register_Ns3ZipfRandomVariable_methods(root_module, root_module['ns3::ZipfRandomVariable'])
    register_Ns3ArpCache_methods(root_module, root_module['ns3::ArpCache'])
    register_Ns3ArpCacheEntry_methods(root_module, root_module['ns3::ArpCache::Entry'])
    register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
    register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
    register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
    register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
    register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
    register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
    register_Ns3ConstantRandomVariable_methods(root_module, root_module['ns3::ConstantRandomVariable'])
    register_Ns3DeterministicRandomVariable_methods(root_module, root_module['ns3::DeterministicRandomVariable'])
    register_Ns3EmpiricalRandomVariable_methods(root_module, root_module['ns3::EmpiricalRandomVariable'])
    register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
    register_Ns3EnumChecker_methods(root_module, root_module['ns3::EnumChecker'])
    register_Ns3EnumValue_methods(root_module, root_module['ns3::EnumValue'])
    register_Ns3ErlangRandomVariable_methods(root_module, root_module['ns3::ErlangRandomVariable'])
    register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
    register_Ns3ExponentialRandomVariable_methods(root_module, root_module['ns3::ExponentialRandomVariable'])
    register_Ns3GammaRandomVariable_methods(root_module, root_module['ns3::GammaRandomVariable'])
    register_Ns3IpL4Protocol_methods(root_module, root_module['ns3::IpL4Protocol'])
    register_Ns3Ipv4_methods(root_module, root_module['ns3::Ipv4'])
    register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
    register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
    register_Ns3Ipv4Interface_methods(root_module, root_module['ns3::Ipv4Interface'])
    register_Ns3Ipv4L3Protocol_methods(root_module, root_module['ns3::Ipv4L3Protocol'])
    register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
    register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
    register_Ns3Ipv4MulticastRoute_methods(root_module, root_module['ns3::Ipv4MulticastRoute'])
    register_Ns3Ipv4Route_methods(root_module, root_module['ns3::Ipv4Route'])
    register_Ns3Ipv4RoutingProtocol_methods(root_module, root_module['ns3::Ipv4RoutingProtocol'])
    register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
    register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
    register_Ns3Ipv6Interface_methods(root_module, root_module['ns3::Ipv6Interface'])
    register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
    register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
    register_Ns3LogNormalRandomVariable_methods(root_module, root_module['ns3::LogNormalRandomVariable'])
    register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker'])
    register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue'])
    register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
    register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
    register_Ns3Node_methods(root_module, root_module['ns3::Node'])
    register_Ns3NormalRandomVariable_methods(root_module, root_module['ns3::NormalRandomVariable'])
    register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
    register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
    register_Ns3OutputStreamWrapper_methods(root_module, root_module['ns3::OutputStreamWrapper'])
    register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
    register_Ns3ParetoRandomVariable_methods(root_module, root_module['ns3::ParetoRandomVariable'])
    register_Ns3TimeChecker_methods(root_module, root_module['ns3::TimeChecker'])
register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue']) register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker']) register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue']) register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker']) register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue']) register_Ns3AodvDuplicatePacketDetection_methods(root_module, root_module['ns3::aodv::DuplicatePacketDetection']) register_Ns3AodvIdCache_methods(root_module, root_module['ns3::aodv::IdCache']) register_Ns3AodvNeighbors_methods(root_module, root_module['ns3::aodv::Neighbors']) register_Ns3AodvNeighborsNeighbor_methods(root_module, root_module['ns3::aodv::Neighbors::Neighbor']) register_Ns3AodvQueueEntry_methods(root_module, root_module['ns3::aodv::QueueEntry']) register_Ns3AodvRequestQueue_methods(root_module, root_module['ns3::aodv::RequestQueue']) register_Ns3AodvRerrHeader_methods(root_module, root_module['ns3::aodv::RerrHeader']) register_Ns3AodvRoutingProtocol_methods(root_module, root_module['ns3::aodv::RoutingProtocol']) register_Ns3AodvRoutingTable_methods(root_module, root_module['ns3::aodv::RoutingTable']) register_Ns3AodvRoutingTableEntry_methods(root_module, root_module['ns3::aodv::RoutingTableEntry']) register_Ns3AodvRrepAckHeader_methods(root_module, root_module['ns3::aodv::RrepAckHeader']) register_Ns3AodvRrepHeader_methods(root_module, root_module['ns3::aodv::RrepHeader']) register_Ns3AodvRreqHeader_methods(root_module, root_module['ns3::aodv::RreqHeader']) register_Ns3AodvTypeHeader_methods(root_module, root_module['ns3::aodv::TypeHeader']) return def register_Ns3Address_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_binary_comparison_operator('<') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## address.h (module 'network'): ns3::Address::Address() [constructor] cls.add_constructor([]) ## address.h 
(module 'network'): ns3::Address::Address(uint8_t type, uint8_t const * buffer, uint8_t len) [constructor] cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')]) ## address.h (module 'network'): ns3::Address::Address(ns3::Address const & address) [copy constructor] cls.add_constructor([param('ns3::Address const &', 'address')]) ## address.h (module 'network'): bool ns3::Address::CheckCompatible(uint8_t type, uint8_t len) const [member function] cls.add_method('CheckCompatible', 'bool', [param('uint8_t', 'type'), param('uint8_t', 'len')], is_const=True) ## address.h (module 'network'): uint32_t ns3::Address::CopyAllFrom(uint8_t const * buffer, uint8_t len) [member function] cls.add_method('CopyAllFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')]) ## address.h (module 'network'): uint32_t ns3::Address::CopyAllTo(uint8_t * buffer, uint8_t len) const [member function] cls.add_method('CopyAllTo', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint8_t', 'len')], is_const=True) ## address.h (module 'network'): uint32_t ns3::Address::CopyFrom(uint8_t const * buffer, uint8_t len) [member function] cls.add_method('CopyFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')]) ## address.h (module 'network'): uint32_t ns3::Address::CopyTo(uint8_t * buffer) const [member function] cls.add_method('CopyTo', 'uint32_t', [param('uint8_t *', 'buffer')], is_const=True) ## address.h (module 'network'): void ns3::Address::Deserialize(ns3::TagBuffer buffer) [member function] cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'buffer')]) ## address.h (module 'network'): uint8_t ns3::Address::GetLength() const [member function] cls.add_method('GetLength', 'uint8_t', [], is_const=True) ## address.h (module 'network'): uint32_t ns3::Address::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## address.h 
(module 'network'): bool ns3::Address::IsInvalid() const [member function] cls.add_method('IsInvalid', 'bool', [], is_const=True) ## address.h (module 'network'): bool ns3::Address::IsMatchingType(uint8_t type) const [member function] cls.add_method('IsMatchingType', 'bool', [param('uint8_t', 'type')], is_const=True) ## address.h (module 'network'): static uint8_t ns3::Address::Register() [member function] cls.add_method('Register', 'uint8_t', [], is_static=True) ## address.h (module 'network'): void ns3::Address::Serialize(ns3::TagBuffer buffer) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'buffer')], is_const=True) return def register_Ns3AttributeConstructionList_methods(root_module, cls): ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')]) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor] cls.add_constructor([]) ## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function] cls.add_method('Add', 'void', [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')]) ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function] cls.add_method('Begin', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True) ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function] 
    cls.add_method('End', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True)
    ## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('Find', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True)
    return
# NOTE(review): this file appears to be PyBindGen-generated ns-3 API definitions
# (apidefs); prefer regenerating via the bindings scanner over hand-editing --
# TODO confirm with the module build system.
# Register Python bindings for the nested ns3::AttributeConstructionList::Item
# struct: default/copy constructors plus its three public data members.
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
    cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
    return
# Register Python bindings for ns3::Buffer (constructors, add/remove bytes at
# either end, iterator access, copy/serialize helpers).
def register_Ns3Buffer_methods(root_module, cls):
    ## buffer.h (module 'network'): ns3::Buffer::Buffer() [constructor]
    cls.add_constructor([])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize) [constructor]
    cls.add_constructor([param('uint32_t', 'dataSize')])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor]
    cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(ns3::Buffer const & o) [copy constructor]
    cls.add_constructor([param('ns3::Buffer const &', 'o')])
    ## buffer.h (module 'network'): bool ns3::Buffer::AddAtEnd(uint32_t end) [member function]
    cls.add_method('AddAtEnd', 'bool', [param('uint32_t', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function]
    cls.add_method('AddAtEnd', 'void', [param('ns3::Buffer const &', 'o')])
    ## buffer.h (module 'network'): bool ns3::Buffer::AddAtStart(uint32_t start) [member function]
    cls.add_method('AddAtStart', 'bool', [param('uint32_t', 'start')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function]
    cls.add_method('Begin', 'ns3::Buffer::Iterator', [], is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function]
    cls.add_method('CopyData', 'void', [param('std::ostream *', 'os'), param('uint32_t', 'size')], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function]
    cls.add_method('CopyData', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')], is_const=True)
    ## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function]
    cls.add_method('CreateFragment', 'ns3::Buffer', [param('uint32_t', 'start'), param('uint32_t', 'length')], is_const=True)
    ## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFullCopy() const [member function]
    cls.add_method('CreateFullCopy', 'ns3::Buffer', [], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::End() const [member function]
    cls.add_method('End', 'ns3::Buffer::Iterator', [], is_const=True)
    ## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentEndOffset() const [member function]
    cls.add_method('GetCurrentEndOffset', 'int32_t', [], is_const=True)
    ## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentStartOffset() const [member function]
    cls.add_method('GetCurrentStartOffset', 'int32_t', [], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSize() const [member function]
    cls.add_method('GetSize', 'uint32_t', [], is_const=True)
    ## buffer.h (module 'network'): uint8_t const * ns3::Buffer::PeekData() const [member function]
    cls.add_method('PeekData', 'uint8_t const *', [], is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function]
    cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtStart(uint32_t start) [member function]
    cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'start')])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True)
    return
# Register Python bindings for ns3::Buffer::Iterator: cursor movement,
# checksum helper, and endian-aware read/write accessors.
def register_Ns3BufferIterator_methods(root_module, cls):
    ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator(ns3::Buffer::Iterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator() [constructor]
    cls.add_constructor([])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size) [member function]
    cls.add_method('CalculateIpChecksum', 'uint16_t', [param('uint16_t', 'size')])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size, uint32_t initialChecksum) [member function]
    cls.add_method('CalculateIpChecksum', 'uint16_t', [param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetDistanceFrom(ns3::Buffer::Iterator const & o) const [member function]
    cls.add_method('GetDistanceFrom', 'uint32_t', [param('ns3::Buffer::Iterator const &', 'o')], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetSize() const [member function]
    cls.add_method('GetSize', 'uint32_t', [], is_const=True)
    ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsEnd() const [member function]
    cls.add_method('IsEnd', 'bool', [], is_const=True)
    ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsStart() const [member function]
    cls.add_method('IsStart', 'bool', [], is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next() [member function]
    cls.add_method('Next', 'void', [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next(uint32_t delta) [member function]
    cls.add_method('Next', 'void', [param('uint32_t', 'delta')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev() [member function]
    cls.add_method('Prev', 'void', [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev(uint32_t delta) [member function]
    cls.add_method('Prev', 'void', [param('uint32_t', 'delta')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(uint8_t * buffer, uint32_t size) [member function]
    cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadLsbtohU16() [member function]
    cls.add_method('ReadLsbtohU16', 'uint16_t', [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadLsbtohU32() [member function]
    cls.add_method('ReadLsbtohU32', 'uint32_t', [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadLsbtohU64() [member function]
    cls.add_method('ReadLsbtohU64', 'uint64_t', [])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadNtohU16() [member function]
    cls.add_method('ReadNtohU16', 'uint16_t', [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadNtohU32() [member function]
    cls.add_method('ReadNtohU32', 'uint32_t', [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadNtohU64() [member function]
    cls.add_method('ReadNtohU64', 'uint64_t', [])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadU16() [member function]
    cls.add_method('ReadU16', 'uint16_t', [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadU32() [member function]
    cls.add_method('ReadU32', 'uint32_t', [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadU64() [member function]
    cls.add_method('ReadU64', 'uint64_t', [])
    ## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::ReadU8() [member function]
    cls.add_method('ReadU8', 'uint8_t', [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
    cls.add_method('Write', 'void', [param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU16(uint16_t data) [member function]
    cls.add_method('WriteHtolsbU16', 'void', [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU32(uint32_t data) [member function]
    cls.add_method('WriteHtolsbU32', 'void', [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU64(uint64_t data) [member function]
    cls.add_method('WriteHtolsbU64', 'void', [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU16(uint16_t data) [member function]
    cls.add_method('WriteHtonU16', 'void', [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU32(uint32_t data) [member function]
    cls.add_method('WriteHtonU32', 'void', [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU64(uint64_t data) [member function]
    cls.add_method('WriteHtonU64', 'void', [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU16(uint16_t data) [member function]
    cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU32(uint32_t data) [member function]
    cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU64(uint64_t data) [member function]
    cls.add_method('WriteU64', 'void', [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data) [member function]
    cls.add_method('WriteU8', 'void', [param('uint8_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data, uint32_t len) [member function]
    cls.add_method('WriteU8', 'void', [param('uint8_t', 'data'), param('uint32_t', 'len')])
    return
# Register Python bindings for ns3::ByteTagIterator (HasNext/Next iteration).
def register_Ns3ByteTagIterator_methods(root_module, cls):
    ## packet.h (module 'network'): ns3::ByteTagIterator::ByteTagIterator(ns3::ByteTagIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')])
    ## packet.h (module 'network'): bool ns3::ByteTagIterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## packet.h (module 'network'): ns3::ByteTagIterator::Item ns3::ByteTagIterator::Next() [member function]
    cls.add_method('Next', 'ns3::ByteTagIterator::Item', [])
    return
# Register Python bindings for ns3::ByteTagIterator::Item accessors.
def register_Ns3ByteTagIteratorItem_methods(root_module, cls):
    ## packet.h (module 'network'): ns3::ByteTagIterator::Item::Item(ns3::ByteTagIterator::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')])
    ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetEnd() const [member function]
    cls.add_method('GetEnd', 'uint32_t', [], is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetStart() const [member function]
    cls.add_method('GetStart', 'uint32_t', [], is_const=True)
    ## packet.h (module 'network'): void ns3::ByteTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
    cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True)
    ## packet.h (module 'network'): ns3::TypeId ns3::ByteTagIterator::Item::GetTypeId() const [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    return
# Register Python bindings for ns3::ByteTagList (tag add/adjust/iterate).
def register_Ns3ByteTagList_methods(root_module, cls):
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList() [constructor]
    cls.add_constructor([])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList(ns3::ByteTagList const & o) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagList const &', 'o')])
    ## byte-tag-list.h (module 'network'): ns3::TagBuffer ns3::ByteTagList::Add(ns3::TypeId tid, uint32_t bufferSize, int32_t start, int32_t end) [member function]
    cls.add_method('Add', 'ns3::TagBuffer', [param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Add(ns3::ByteTagList const & o) [member function]
    cls.add_method('Add', 'void', [param('ns3::ByteTagList const &', 'o')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtEnd(int32_t adjustment, int32_t appendOffset) [member function]
    cls.add_method('AddAtEnd', 'void', [param('int32_t', 'adjustment'), param('int32_t', 'appendOffset')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtStart(int32_t adjustment, int32_t prependOffset) [member function]
    cls.add_method('AddAtStart', 'void', [param('int32_t', 'adjustment'), param('int32_t', 'prependOffset')])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator ns3::ByteTagList::Begin(int32_t offsetStart, int32_t offsetEnd) const [member function]
    cls.add_method('Begin', 'ns3::ByteTagList::Iterator', [param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')], is_const=True)
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::RemoveAll() [member function]
    cls.add_method('RemoveAll', 'void', [])
    return
# Register Python bindings for ns3::ByteTagList::Iterator.
def register_Ns3ByteTagListIterator_methods(root_module, cls):
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Iterator(ns3::ByteTagList::Iterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')])
    ## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Iterator::GetOffsetStart() const [member function]
    cls.add_method('GetOffsetStart', 'uint32_t', [], is_const=True)
    ## byte-tag-list.h (module 'network'): bool ns3::ByteTagList::Iterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item ns3::ByteTagList::Iterator::Next() [member function]
    cls.add_method('Next', 'ns3::ByteTagList::Iterator::Item', [])
    return
# Register Python bindings for ns3::ByteTagList::Iterator::Item: constructors
# plus its public data members (buf, end, size, start, tid).
def register_Ns3ByteTagListIteratorItem_methods(root_module, cls):
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::ByteTagList::Iterator::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::TagBuffer buf) [constructor]
    cls.add_constructor([param('ns3::TagBuffer', 'buf')])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::buf [variable]
    cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::end [variable]
    cls.add_instance_attribute('end', 'int32_t', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::size [variable]
    cls.add_instance_attribute('size', 'uint32_t', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::start [variable]
    cls.add_instance_attribute('start', 'int32_t', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::tid [variable]
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return
# Register Python bindings for ns3::CallbackBase; note the protected
# constructor/Demangle are exposed with visibility='protected'.
def register_Ns3CallbackBase_methods(root_module, cls):
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
    cls.add_method('GetImpl', 'ns3::Ptr< ns3::CallbackImplBase >', [], is_const=True)
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')], visibility='protected')
    ## callback.h (module 'core'): static std::string ns3::CallbackBase::Demangle(std::string const & mangled) [member function]
    cls.add_method('Demangle', 'std::string', [param('std::string const &', 'mangled')], is_static=True, visibility='protected')
    return
# Register Python bindings for ns3::EventId, including ==/!= comparison
# operators on the Python side.
def register_Ns3EventId_methods(root_module, cls):
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('==')
    ## event-id.h (module 'core'): ns3::EventId::EventId(ns3::EventId const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EventId const &', 'arg0')])
    ## event-id.h (module 'core'): ns3::EventId::EventId() [constructor]
    cls.add_constructor([])
    ## event-id.h (module 'core'): ns3::EventId::EventId(ns3::Ptr<ns3::EventImpl> const & impl, uint64_t ts, uint32_t context, uint32_t uid) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'), param('uint64_t', 'ts'), param('uint32_t', 'context'), param('uint32_t', 'uid')])
    ## event-id.h (module 'core'): void ns3::EventId::Cancel() [member function]
    cls.add_method('Cancel', 'void', [])
    ## event-id.h (module 'core'): uint32_t ns3::EventId::GetContext() const [member function]
    cls.add_method('GetContext', 'uint32_t', [], is_const=True)
    ## event-id.h (module 'core'): uint64_t ns3::EventId::GetTs() const [member function]
    cls.add_method('GetTs', 'uint64_t', [], is_const=True)
    ## event-id.h (module 'core'): uint32_t ns3::EventId::GetUid() const [member function]
    cls.add_method('GetUid', 'uint32_t', [], is_const=True)
    ## event-id.h (module 'core'): bool ns3::EventId::IsExpired() const [member function]
    cls.add_method('IsExpired', 'bool', [], is_const=True)
    ## event-id.h (module 'core'): bool ns3::EventId::IsRunning() const [member function]
    cls.add_method('IsRunning', 'bool', [], is_const=True)
    ## event-id.h (module 'core'): ns3::EventImpl * ns3::EventId::PeekEventImpl() const [member function]
    cls.add_method('PeekEventImpl', 'ns3::EventImpl *', [], is_const=True)
    return
# Register Python bindings for the ns3::IntToType<N> template instantiations
# (empty tag types; only default/copy constructors are exposed).
def register_Ns3IntToType__0_methods(root_module, cls):
    ## int-to-type.h (module 'core'): ns3::IntToType<0>::IntToType() [constructor]
    cls.add_constructor([])
    ## int-to-type.h (module 'core'): ns3::IntToType<0>::IntToType(ns3::IntToType<0> const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::IntToType< 0 > const &', 'arg0')])
    return
def register_Ns3IntToType__1_methods(root_module, cls):
    ## int-to-type.h (module 'core'): ns3::IntToType<1>::IntToType() [constructor]
    cls.add_constructor([])
## int-to-type.h (module 'core'): ns3::IntToType<1>::IntToType(ns3::IntToType<1> const & arg0) [copy constructor] cls.add_constructor([param('ns3::IntToType< 1 > const &', 'arg0')]) return def register_Ns3IntToType__2_methods(root_module, cls): ## int-to-type.h (module 'core'): ns3::IntToType<2>::IntToType() [constructor] cls.add_constructor([]) ## int-to-type.h (module 'core'): ns3::IntToType<2>::IntToType(ns3::IntToType<2> const & arg0) [copy constructor] cls.add_constructor([param('ns3::IntToType< 2 > const &', 'arg0')]) return def register_Ns3IntToType__3_methods(root_module, cls): ## int-to-type.h (module 'core'): ns3::IntToType<3>::IntToType() [constructor] cls.add_constructor([]) ## int-to-type.h (module 'core'): ns3::IntToType<3>::IntToType(ns3::IntToType<3> const & arg0) [copy constructor] cls.add_constructor([param('ns3::IntToType< 3 > const &', 'arg0')]) return def register_Ns3IntToType__4_methods(root_module, cls): ## int-to-type.h (module 'core'): ns3::IntToType<4>::IntToType() [constructor] cls.add_constructor([]) ## int-to-type.h (module 'core'): ns3::IntToType<4>::IntToType(ns3::IntToType<4> const & arg0) [copy constructor] cls.add_constructor([param('ns3::IntToType< 4 > const &', 'arg0')]) return def register_Ns3IntToType__5_methods(root_module, cls): ## int-to-type.h (module 'core'): ns3::IntToType<5>::IntToType() [constructor] cls.add_constructor([]) ## int-to-type.h (module 'core'): ns3::IntToType<5>::IntToType(ns3::IntToType<5> const & arg0) [copy constructor] cls.add_constructor([param('ns3::IntToType< 5 > const &', 'arg0')]) return def register_Ns3IntToType__6_methods(root_module, cls): ## int-to-type.h (module 'core'): ns3::IntToType<6>::IntToType() [constructor] cls.add_constructor([]) ## int-to-type.h (module 'core'): ns3::IntToType<6>::IntToType(ns3::IntToType<6> const & arg0) [copy constructor] cls.add_constructor([param('ns3::IntToType< 6 > const &', 'arg0')]) return def register_Ns3Ipv4Address_methods(root_module, cls): 
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('<')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(ns3::Ipv4Address const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(uint32_t address) [constructor]
    cls.add_constructor([param('uint32_t', 'address')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(char const * address) [constructor]
    cls.add_constructor([param('char const *', 'address')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::CombineMask(ns3::Ipv4Mask const & mask) const [member function]
    cls.add_method('CombineMask', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::ConvertFrom(ns3::Address const & address) [member function]
    cls.add_method('ConvertFrom', 'ns3::Ipv4Address', [param('ns3::Address const &', 'address')], is_static=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::Deserialize(uint8_t const * buf) [member function]
    cls.add_method('Deserialize', 'ns3::Ipv4Address', [param('uint8_t const *', 'buf')], is_static=True)
    ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Address::Get() const [member function]
    cls.add_method('Get', 'uint32_t', [], is_const=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetAny() [member function]
    cls.add_method('GetAny', 'ns3::Ipv4Address', [], is_static=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetBroadcast() [member function]
    cls.add_method('GetBroadcast', 'ns3::Ipv4Address', [], is_static=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetLoopback() [member function]
    cls.add_method('GetLoopback', 'ns3::Ipv4Address', [], is_static=True)
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::GetSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
    cls.add_method('GetSubnetDirectedBroadcast', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetZero() [member function]
    cls.add_method('GetZero', 'ns3::Ipv4Address', [], is_static=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsBroadcast() const [member function]
    cls.add_method('IsBroadcast', 'bool', [], is_const=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsEqual(ns3::Ipv4Address const & other) const [member function]
    cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Address const &', 'other')], is_const=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalMulticast() const [member function]
    cls.add_method('IsLocalMulticast', 'bool', [], is_const=True)
    ## ipv4-address.h (module 'network'): static bool ns3::Ipv4Address::IsMatchingType(ns3::Address const & address) [member function]
    cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsMulticast() const [member function]
    cls.add_method('IsMulticast', 'bool', [], is_const=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
    cls.add_method('IsSubnetDirectedBroadcast', 'bool', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Serialize(uint8_t * buf) const [member function]
    cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')], is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(uint32_t address) [member function]
    cls.add_method('Set', 'void', [param('uint32_t', 'address')])
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(char const * address) [member function]
    cls.add_method('Set', 'void', [param('char const *', 'address')])
    return
# Register Python bindings for ns3::Ipv4InterfaceAddress: local/mask/broadcast
# accessors plus scope and primary/secondary flags.
def register_Ns3Ipv4InterfaceAddress_methods(root_module, cls):
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::Ipv4InterfaceAddress() [constructor]
    cls.add_constructor([])
    ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::Ipv4InterfaceAddress(ns3::Ipv4Address local, ns3::Ipv4Mask mask) [constructor]
    cls.add_constructor([param('ns3::Ipv4Address', 'local'), param('ns3::Ipv4Mask', 'mask')])
    ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::Ipv4InterfaceAddress(ns3::Ipv4InterfaceAddress const & o) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4InterfaceAddress const &', 'o')])
    ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4InterfaceAddress::GetBroadcast() const [member function]
    cls.add_method('GetBroadcast', 'ns3::Ipv4Address', [], is_const=True)
    ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4InterfaceAddress::GetLocal() const [member function]
    cls.add_method('GetLocal', 'ns3::Ipv4Address', [], is_const=True)
    ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4Mask ns3::Ipv4InterfaceAddress::GetMask() const [member function]
    cls.add_method('GetMask', 'ns3::Ipv4Mask', [], is_const=True)
    ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e ns3::Ipv4InterfaceAddress::GetScope() const [member function]
    cls.add_method('GetScope', 'ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e', [], is_const=True)
    ## ipv4-interface-address.h (module 'internet'): bool ns3::Ipv4InterfaceAddress::IsSecondary() const [member function]
    cls.add_method('IsSecondary', 'bool', [], is_const=True)
    ## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetBroadcast(ns3::Ipv4Address broadcast) [member function]
    cls.add_method('SetBroadcast', 'void', [param('ns3::Ipv4Address', 'broadcast')])
    ## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetLocal(ns3::Ipv4Address local) [member function]
    cls.add_method('SetLocal', 'void', [param('ns3::Ipv4Address', 'local')])
    ## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetMask(ns3::Ipv4Mask mask) [member function]
    cls.add_method('SetMask', 'void', [param('ns3::Ipv4Mask', 'mask')])
    ## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetPrimary() [member function]
    cls.add_method('SetPrimary', 'void', [])
    ## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetScope(ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e scope) [member function]
    cls.add_method('SetScope', 'void', [param('ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e', 'scope')])
    ## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetSecondary() [member function]
    cls.add_method('SetSecondary', 'void', [])
    return
# Register Python bindings for ns3::Ipv4Mask: operators, constructors, and
# mask/prefix accessors. (Registration continues past this chunk boundary.)
def register_Ns3Ipv4Mask_methods(root_module, cls):
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(ns3::Ipv4Mask const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(uint32_t mask) [constructor]
    cls.add_constructor([param('uint32_t', 'mask')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(char const * mask) [constructor]
    cls.add_constructor([param('char const *', 'mask')])
    ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::Get() const [member function]
    cls.add_method('Get', 'uint32_t', [], is_const=True)
    ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::GetInverse() const [member function]
    cls.add_method('GetInverse', 'uint32_t', [], is_const=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetLoopback() [member function]
    cls.add_method('GetLoopback', 'ns3::Ipv4Mask', [], is_static=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetOnes() [member function]
    cls.add_method('GetOnes', 'ns3::Ipv4Mask', [], is_static=True)
    ## ipv4-address.h (module 'network'): uint16_t ns3::Ipv4Mask::GetPrefixLength() const [member function]
    cls.add_method('GetPrefixLength', 'uint16_t', [], is_const=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetZero() [member function]
    cls.add_method('GetZero', 'ns3::Ipv4Mask', [], is_static=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsEqual(ns3::Ipv4Mask other) const [member function]
    cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Mask', 'other')], is_const=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsMatch(ns3::Ipv4Address a, ns3::Ipv4Address b) const [member function]
    cls.add_method('IsMatch', 'bool', [param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')], is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Set(uint32_t mask) [member function]
    cls.add_method('Set', 'void', [param('uint32_t', 'mask')])
    return

def register_Ns3Ipv4RoutingHelper_methods(root_module, cls):
    """Register ns3::Ipv4RoutingHelper bindings (pybindgen scan output; do not edit by hand)."""
    ## ipv4-routing-helper.h (module 'internet'): ns3::Ipv4RoutingHelper::Ipv4RoutingHelper() [constructor]
    cls.add_constructor([])
    ## ipv4-routing-helper.h (module 'internet'): ns3::Ipv4RoutingHelper::Ipv4RoutingHelper(ns3::Ipv4RoutingHelper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4RoutingHelper const &', 'arg0')])
    ## ipv4-routing-helper.h (module 'internet'): ns3::Ipv4RoutingHelper * ns3::Ipv4RoutingHelper::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ipv4RoutingHelper *',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## ipv4-routing-helper.h (module 'internet'): ns3::Ptr<ns3::Ipv4RoutingProtocol> ns3::Ipv4RoutingHelper::Create(ns3::Ptr<ns3::Node> node) const [member function]
    cls.add_method('Create',
                   'ns3::Ptr< ns3::Ipv4RoutingProtocol >',
                   [param('ns3::Ptr< ns3::Node >', 'node')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## ipv4-routing-helper.h (module 'internet'): void ns3::Ipv4RoutingHelper::PrintRoutingTableAllAt(ns3::Time printTime, ns3::Ptr<ns3::OutputStreamWrapper> stream) const [member function]
    cls.add_method('PrintRoutingTableAllAt',
                   'void',
                   [param('ns3::Time', 'printTime'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')],
                   is_const=True)
    ## ipv4-routing-helper.h (module 'internet'): void ns3::Ipv4RoutingHelper::PrintRoutingTableAllEvery(ns3::Time printInterval, ns3::Ptr<ns3::OutputStreamWrapper> stream) const [member function]
    cls.add_method('PrintRoutingTableAllEvery',
                   'void',
                   [param('ns3::Time', 'printInterval'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')],
                   is_const=True)
    ## ipv4-routing-helper.h (module 'internet'): void ns3::Ipv4RoutingHelper::PrintRoutingTableAt(ns3::Time printTime, ns3::Ptr<ns3::Node> node, ns3::Ptr<ns3::OutputStreamWrapper> stream) const [member function]
    cls.add_method('PrintRoutingTableAt',
                   'void',
                   [param('ns3::Time', 'printTime'), param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')],
                   is_const=True)
    ## ipv4-routing-helper.h (module 'internet'): void ns3::Ipv4RoutingHelper::PrintRoutingTableEvery(ns3::Time printInterval, ns3::Ptr<ns3::Node> node, ns3::Ptr<ns3::OutputStreamWrapper> stream) const [member function]
    cls.add_method('PrintRoutingTableEvery',
                   'void',
                   [param('ns3::Time', 'printInterval'), param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')],
                   is_const=True)
    return

def register_Ns3Ipv6Address_methods(root_module, cls):
    """Register ns3::Ipv6Address bindings (pybindgen scan output; do not edit by hand)."""
    # Comparison / stream operators exposed on the Python side.
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('<')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(char const * address) [constructor]
    cls.add_constructor([param('char const *', 'address')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(uint8_t * address) [constructor]
    cls.add_constructor([param('uint8_t *', 'address')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const & addr) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const * addr) [constructor]
    cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6Address::CombinePrefix(ns3::Ipv6Prefix const & prefix) [member function]
    cls.add_method('CombinePrefix',
                   'ns3::Ipv6Address',
                   [param('ns3::Ipv6Prefix const &', 'prefix')])
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::ConvertFrom(ns3::Address const & address) [member function]
    cls.add_method('ConvertFrom',
                   'ns3::Ipv6Address',
                   [param('ns3::Address const &', 'address')],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::Deserialize(uint8_t const * buf) [member function]
    cls.add_method('Deserialize',
                   'ns3::Ipv6Address',
                   [param('uint8_t const *', 'buf')],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllHostsMulticast() [member function]
    cls.add_method('GetAllHostsMulticast',
                   'ns3::Ipv6Address',
                   [],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllNodesMulticast() [member function]
    cls.add_method('GetAllNodesMulticast',
                   'ns3::Ipv6Address',
                   [],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllRoutersMulticast() [member function]
    cls.add_method('GetAllRoutersMulticast',
                   'ns3::Ipv6Address',
                   [],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAny() [member function]
    cls.add_method('GetAny',
                   'ns3::Ipv6Address',
                   [],
                   is_static=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::GetBytes(uint8_t * buf) const [member function]
    cls.add_method('GetBytes',
                   'void',
                   [param('uint8_t *', 'buf')],
                   is_const=True)
    ## ipv6-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv6Address::GetIpv4MappedAddress() const [member function]
    cls.add_method('GetIpv4MappedAddress',
                   'ns3::Ipv4Address',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetLoopback() [member function]
    cls.add_method('GetLoopback',
                   'ns3::Ipv6Address',
                   [],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetOnes() [member function]
    cls.add_method('GetOnes',
                   'ns3::Ipv6Address',
                   [],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetZero() [member function]
    cls.add_method('GetZero',
                   'ns3::Ipv6Address',
                   [],
                   is_static=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllHostsMulticast() const [member function]
    cls.add_method('IsAllHostsMulticast',
                   'bool',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllNodesMulticast() const [member function]
    cls.add_method('IsAllNodesMulticast',
                   'bool',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllRoutersMulticast() const [member function]
    cls.add_method('IsAllRoutersMulticast',
                   'bool',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAny() const [member function]
    cls.add_method('IsAny',
                   'bool',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsEqual(ns3::Ipv6Address const & other) const [member function]
    cls.add_method('IsEqual',
                   'bool',
                   [param('ns3::Ipv6Address const &', 'other')],
                   is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsIpv4MappedAddress() [member function]
    cls.add_method('IsIpv4MappedAddress',
                   'bool',
                   [])
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocal() const [member function]
    cls.add_method('IsLinkLocal',
                   'bool',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocalMulticast() const [member function]
    cls.add_method('IsLinkLocalMulticast',
                   'bool',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLocalhost() const [member function]
    cls.add_method('IsLocalhost',
                   'bool',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): static bool ns3::Ipv6Address::IsMatchingType(ns3::Address const & address) [member function]
    cls.add_method('IsMatchingType',
                   'bool',
                   [param('ns3::Address const &', 'address')],
                   is_static=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsMulticast() const [member function]
    cls.add_method('IsMulticast',
                   'bool',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsSolicitedMulticast() const [member function]
    cls.add_method('IsSolicitedMulticast',
                   'bool',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac48Address addr, ns3::Ipv6Address prefix) [member function]
    cls.add_method('MakeAutoconfiguredAddress',
                   'ns3::Ipv6Address',
                   [param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac48Address mac) [member function]
    cls.add_method('MakeAutoconfiguredLinkLocalAddress',
                   'ns3::Ipv6Address',
                   [param('ns3::Mac48Address', 'mac')],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeIpv4MappedAddress(ns3::Ipv4Address addr) [member function]
    cls.add_method('MakeIpv4MappedAddress',
                   'ns3::Ipv6Address',
                   [param('ns3::Ipv4Address', 'addr')],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeSolicitedAddress(ns3::Ipv6Address addr) [member function]
    cls.add_method('MakeSolicitedAddress',
                   'ns3::Ipv6Address',
                   [param('ns3::Ipv6Address', 'addr')],
                   is_static=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Serialize(uint8_t * buf) const [member function]
    cls.add_method('Serialize',
                   'void',
                   [param('uint8_t *', 'buf')],
                   is_const=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(char const * address) [member function]
    cls.add_method('Set',
                   'void',
                   [param('char const *', 'address')])
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(uint8_t * address) [member function]
    cls.add_method('Set',
                   'void',
                   [param('uint8_t *', 'address')])
    return

def register_Ns3Ipv6InterfaceAddress_methods(root_module, cls):
    # Body of register_Ns3Ipv6InterfaceAddress_methods (pybindgen scan output; do not edit by hand).
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ipv6-interface-address.h (module 'internet'): ns3::Ipv6InterfaceAddress::Ipv6InterfaceAddress() [constructor]
    cls.add_constructor([])
    ## ipv6-interface-address.h (module 'internet'): ns3::Ipv6InterfaceAddress::Ipv6InterfaceAddress(ns3::Ipv6Address address) [constructor]
    cls.add_constructor([param('ns3::Ipv6Address', 'address')])
    ## ipv6-interface-address.h (module 'internet'): ns3::Ipv6InterfaceAddress::Ipv6InterfaceAddress(ns3::Ipv6Address address, ns3::Ipv6Prefix prefix) [constructor]
    cls.add_constructor([param('ns3::Ipv6Address', 'address'), param('ns3::Ipv6Prefix', 'prefix')])
    ## ipv6-interface-address.h (module 'internet'): ns3::Ipv6InterfaceAddress::Ipv6InterfaceAddress(ns3::Ipv6InterfaceAddress const & o) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6InterfaceAddress const &', 'o')])
    ## ipv6-interface-address.h (module 'internet'): ns3::Ipv6Address ns3::Ipv6InterfaceAddress::GetAddress() const [member function]
    cls.add_method('GetAddress',
                   'ns3::Ipv6Address',
                   [],
                   is_const=True)
    ## ipv6-interface-address.h (module 'internet'): uint32_t ns3::Ipv6InterfaceAddress::GetNsDadUid() const [member function]
    cls.add_method('GetNsDadUid',
                   'uint32_t',
                   [],
                   is_const=True)
    ## ipv6-interface-address.h (module 'internet'): ns3::Ipv6Prefix ns3::Ipv6InterfaceAddress::GetPrefix() const [member function]
    cls.add_method('GetPrefix',
                   'ns3::Ipv6Prefix',
                   [],
                   is_const=True)
    ## ipv6-interface-address.h (module 'internet'): ns3::Ipv6InterfaceAddress::Scope_e ns3::Ipv6InterfaceAddress::GetScope() const [member function]
    cls.add_method('GetScope',
                   'ns3::Ipv6InterfaceAddress::Scope_e',
                   [],
                   is_const=True)
    ## ipv6-interface-address.h (module 'internet'): ns3::Ipv6InterfaceAddress::State_e ns3::Ipv6InterfaceAddress::GetState() const [member function]
    cls.add_method('GetState',
                   'ns3::Ipv6InterfaceAddress::State_e',
                   [],
                   is_const=True)
    ## ipv6-interface-address.h (module 'internet'): void ns3::Ipv6InterfaceAddress::SetAddress(ns3::Ipv6Address address) [member function]
    cls.add_method('SetAddress',
                   'void',
                   [param('ns3::Ipv6Address', 'address')])
    ## ipv6-interface-address.h (module 'internet'): void ns3::Ipv6InterfaceAddress::SetNsDadUid(uint32_t uid) [member function]
    cls.add_method('SetNsDadUid',
                   'void',
                   [param('uint32_t', 'uid')])
    ## ipv6-interface-address.h (module 'internet'): void ns3::Ipv6InterfaceAddress::SetScope(ns3::Ipv6InterfaceAddress::Scope_e scope) [member function]
    cls.add_method('SetScope',
                   'void',
                   [param('ns3::Ipv6InterfaceAddress::Scope_e', 'scope')])
    ## ipv6-interface-address.h (module 'internet'): void ns3::Ipv6InterfaceAddress::SetState(ns3::Ipv6InterfaceAddress::State_e state) [member function]
    cls.add_method('SetState',
                   'void',
                   [param('ns3::Ipv6InterfaceAddress::State_e', 'state')])
    return

def register_Ns3Ipv6Prefix_methods(root_module, cls):
    """Register ns3::Ipv6Prefix bindings (pybindgen scan output; do not edit by hand)."""
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix) [constructor]
    cls.add_constructor([param('uint8_t *', 'prefix')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix) [constructor]
    cls.add_constructor([param('char const *', 'prefix')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t prefix) [constructor]
    cls.add_constructor([param('uint8_t', 'prefix')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const & prefix) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const * prefix) [constructor]
    cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')])
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::GetBytes(uint8_t * buf) const [member function]
    cls.add_method('GetBytes',
                   'void',
                   [param('uint8_t *', 'buf')],
                   is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetLoopback() [member function]
    cls.add_method('GetLoopback',
                   'ns3::Ipv6Prefix',
                   [],
                   is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetOnes() [member function]
    cls.add_method('GetOnes',
                   'ns3::Ipv6Prefix',
                   [],
                   is_static=True)
    ## ipv6-address.h (module 'network'): uint8_t ns3::Ipv6Prefix::GetPrefixLength() const [member function]
    cls.add_method('GetPrefixLength',
                   'uint8_t',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetZero() [member function]
    cls.add_method('GetZero',
                   'ns3::Ipv6Prefix',
                   [],
                   is_static=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsEqual(ns3::Ipv6Prefix const & other) const [member function]
    cls.add_method('IsEqual',
                   'bool',
                   [param('ns3::Ipv6Prefix const &', 'other')],
                   is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsMatch(ns3::Ipv6Address a, ns3::Ipv6Address b) const [member function]
    cls.add_method('IsMatch',
                   'bool',
                   [param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')],
                   is_const=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    return

def register_Ns3Mac48Address_methods(root_module, cls):
    """Register ns3::Mac48Address bindings (pybindgen scan output; do not edit by hand)."""
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('<')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(ns3::Mac48Address const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Mac48Address const &', 'arg0')])
    ## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address() [constructor]
    cls.add_constructor([])
    ## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(char const * str) [constructor]
    cls.add_constructor([param('char const *', 'str')])
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::Allocate() [member function]
    cls.add_method('Allocate',
                   'ns3::Mac48Address',
                   [],
                   is_static=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::ConvertFrom(ns3::Address const & address) [member function]
    cls.add_method('ConvertFrom',
                   'ns3::Mac48Address',
                   [param('ns3::Address const &', 'address')],
                   is_static=True)
    ## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyFrom(uint8_t const * buffer) [member function]
    cls.add_method('CopyFrom',
                   'void',
                   [param('uint8_t const *', 'buffer')])
    ## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyTo(uint8_t * buffer) const [member function]
    cls.add_method('CopyTo',
                   'void',
                   [param('uint8_t *', 'buffer')],
                   is_const=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetBroadcast() [member function]
    cls.add_method('GetBroadcast',
                   'ns3::Mac48Address',
                   [],
                   is_static=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv4Address address) [member function]
    cls.add_method('GetMulticast',
                   'ns3::Mac48Address',
                   [param('ns3::Ipv4Address', 'address')],
                   is_static=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv6Address address) [member function]
    cls.add_method('GetMulticast',
                   'ns3::Mac48Address',
                   [param('ns3::Ipv6Address', 'address')],
                   is_static=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast6Prefix() [member function]
    cls.add_method('GetMulticast6Prefix',
                   'ns3::Mac48Address',
                   [],
                   is_static=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticastPrefix() [member function]
    cls.add_method('GetMulticastPrefix',
                   'ns3::Mac48Address',
                   [],
                   is_static=True)
    ## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsBroadcast() const [member function]
    cls.add_method('IsBroadcast',
                   'bool',
                   [],
                   is_const=True)
    ## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsGroup() const [member function]
    cls.add_method('IsGroup',
                   'bool',
                   [],
                   is_const=True)
    ## mac48-address.h (module 'network'): static bool ns3::Mac48Address::IsMatchingType(ns3::Address const & address) [member function]
    cls.add_method('IsMatchingType',
                   'bool',
                   [param('ns3::Address const &', 'address')],
                   is_static=True)
    return

def register_Ns3NodeContainer_methods(root_module, cls):
    # NOTE(review): this function is truncated in the available chunk; the
    # remainder of its body lies beyond this view.
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'arg0')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer() [constructor]
    cls
codeparrot/github-code-clean
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

import logging
import time

from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.translate import _

import openerp.addons.decimal_precision as dp
import openerp.addons.product.product

_logger = logging.getLogger(__name__)


class pos_config(osv.osv):
    """Configuration of one physical Point of Sale (old-API OpenERP model)."""
    _name = 'pos.config'

    # Lifecycle states of a POS configuration; first entry is the default.
    POS_CONFIG_STATE = [
        ('active', 'Active'),
        ('inactive', 'Inactive'),
        ('deprecated', 'Deprecated')
    ]

    def _get_currency(self, cr, uid, ids, fieldnames, args, context=None):
        # Currency comes from the sale journal when one is set, otherwise
        # from the current user's company.
        result = dict.fromkeys(ids, False)
        for pos_config in self.browse(cr, uid, ids, context=context):
            if pos_config.journal_id:
                currency_id = pos_config.journal_id.currency.id or pos_config.journal_id.company_id.currency_id.id
            else:
                currency_id = self.pool['res.users'].browse(cr, uid, uid, context=context).company_id.currency_id.id
            result[pos_config.id] = currency_id
        return result

    _columns = {
        'name' : fields.char('Point of Sale Name', select=1,
             required=True, help="An internal identification of the point of sale"),
        'journal_ids' : fields.many2many('account.journal', 'pos_config_journal_rel',
             'pos_config_id', 'journal_id', 'Available Payment Methods',
             domain="[('journal_user', '=', True ), ('type', 'in', ['bank', 'cash'])]",),
        'picking_type_id': fields.many2one('stock.picking.type', 'Picking Type'),
        'stock_location_id': fields.many2one('stock.location', 'Stock Location', domain=[('usage', '=', 'internal')], required=True),
        'journal_id' : fields.many2one('account.journal', 'Sale Journal',
             domain=[('type', '=', 'sale')],
             help="Accounting journal used to post sales entries."),
        'currency_id' : fields.function(_get_currency, type="many2one", string="Currency", relation="res.currency"),
        'iface_self_checkout' : fields.boolean('Self Checkout Mode', # FIXME : this field is obsolete
             help="Check this if this point of sale should open by default in a self checkout mode. If unchecked, Odoo uses the normal cashier mode by default."),
        'iface_cashdrawer' : fields.boolean('Cashdrawer', help="Automatically open the cashdrawer"),
        'iface_payment_terminal' : fields.boolean('Payment Terminal', help="Enables Payment Terminal integration"),
        'iface_electronic_scale' : fields.boolean('Electronic Scale', help="Enables Electronic Scale integration"),
        'iface_vkeyboard' : fields.boolean('Virtual KeyBoard', help="Enables an integrated Virtual Keyboard"),
        'iface_print_via_proxy' : fields.boolean('Print via Proxy', help="Bypass browser printing and prints via the hardware proxy"),
        'iface_scan_via_proxy' : fields.boolean('Scan via Proxy', help="Enable barcode scanning with a remotely connected barcode scanner"),
        'iface_invoicing': fields.boolean('Invoicing',help='Enables invoice generation from the Point of Sale'),
        'iface_big_scrollbars': fields.boolean('Large Scrollbars',help='For imprecise industrial touchscreens'),
        'receipt_header': fields.text('Receipt Header',help="A short text that will be inserted as a header in the printed receipt"),
        'receipt_footer': fields.text('Receipt Footer',help="A short text that will be inserted as a footer in the printed receipt"),
        'proxy_ip': fields.char('IP Address', help='The hostname or ip address of the hardware proxy, Will be autodetected if left empty', size=45),
        'state' : fields.selection(POS_CONFIG_STATE, 'Status', required=True, readonly=True, copy=False),
        'sequence_id' : fields.many2one('ir.sequence', 'Order IDs Sequence', readonly=True,
            help="This sequence is automatically created by Odoo but you can change it "\
                "to customize the reference numbers of your orders.", copy=False),
        'session_ids': fields.one2many('pos.session', 'config_id', 'Sessions'),
        'group_by' : fields.boolean('Group Journal Items', help="Check this if you want to group the Journal Items by Product while closing a Session"),
        'pricelist_id': fields.many2one('product.pricelist','Pricelist', required=True),
        'company_id': fields.many2one('res.company', 'Company', required=True),
        'barcode_product': fields.char('Product Barcodes', size=64, help='The pattern that identifies product barcodes'),
        'barcode_cashier': fields.char('Cashier Barcodes', size=64, help='The pattern that identifies cashier login barcodes'),
        'barcode_customer': fields.char('Customer Barcodes',size=64, help='The pattern that identifies customer\'s client card barcodes'),
        'barcode_price': fields.char('Price Barcodes', size=64, help='The pattern that identifies a product with a barcode encoded price'),
        'barcode_weight': fields.char('Weight Barcodes', size=64, help='The pattern that identifies a product with a barcode encoded weight'),
        'barcode_discount': fields.char('Discount Barcodes', size=64, help='The pattern that identifies a product with a barcode encoded discount'),
    }

    def _check_cash_control(self, cr, uid, ids, context=None):
        # At most one cash-control journal per POS (constraint below).
        return all(
            (sum(int(journal.cash_control) for journal in record.journal_ids) <= 1)
            for record in self.browse(cr, uid, ids, context=context)
        )

    def _check_company_location(self, cr, uid, ids, context=None):
        # Stock location (when company-bound) must belong to the POS company.
        for config in self.browse(cr, uid, ids, context=context):
            if config.stock_location_id.company_id and config.stock_location_id.company_id.id != config.company_id.id:
                return False
        return True

    def _check_company_journal(self, cr, uid, ids, context=None):
        # Sale journal (when set) must belong to the POS company.
        for config in self.browse(cr, uid, ids, context=context):
            if config.journal_id and config.journal_id.company_id.id != config.company_id.id:
                return False
        return True

    def _check_company_payment(self, cr, uid, ids, context=None):
        # Every payment-method journal must belong to the POS company.
        for config in self.browse(cr, uid, ids, context=context):
            journal_ids = [j.id for j in config.journal_ids]
            if self.pool['account.journal'].search(cr, uid, [
                    ('id', 'in', journal_ids),
                    ('company_id', '!=', config.company_id.id)
                ], count=True, context=context):
                return False
        return True

    _constraints = [
        (_check_cash_control, "You cannot have two cash controls in one Point Of Sale !", ['journal_ids']),
        (_check_company_location, "The company of the stock location is different than the one of point of sale", ['company_id', 'stock_location_id']),
        (_check_company_journal, "The company of the sale journal is different than the one of point of sale", ['company_id', 'journal_id']),
        (_check_company_payment, "The company of a payment method is different than the one of point of sale", ['company_id', 'journal_ids']),
    ]

    def name_get(self, cr, uid, ids, context=None):
        # Display name is suffixed with '(not used)' or with the name of the
        # user responsible for the most recent (first, given _order) session.
        result = []
        states = {
            'opening_control': _('Opening Control'),
            'opened': _('In Progress'),
            'closing_control': _('Closing Control'),
            'closed': _('Closed & Posted'),
        }
        for record in self.browse(cr, uid, ids, context=context):
            if (not record.session_ids) or (record.session_ids[0].state=='closed'):
                result.append((record.id, record.name+' ('+_('not used')+')'))
                continue
            session = record.session_ids[0]
            result.append((record.id, record.name + ' ('+session.user_id.name+')')) #, '+states[session.state]+')'))
        return result

    def _default_sale_journal(self, cr, uid, context=None):
        # First sale journal of the current user's company, if any.
        company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
        res = self.pool.get('account.journal').search(cr, uid, [('type', '=', 'sale'), ('company_id', '=', company_id)], limit=1, context=context)
        return res and res[0] or False

    def _default_pricelist(self, cr, uid, context=None):
        # First sale pricelist found, if any.
        res = self.pool.get('product.pricelist').search(cr, uid, [('type', '=', 'sale')], limit=1, context=context)
        return res and res[0] or False

    def _get_default_location(self, cr, uid, context=None):
        # Stock location of the first warehouse of the user's company.
        wh_obj = self.pool.get('stock.warehouse')
        user = self.pool.get('res.users').browse(cr, uid, uid, context)
        res = wh_obj.search(cr, uid, [('company_id', '=', user.company_id.id)], limit=1, context=context)
        if res and res[0]:
            return wh_obj.browse(cr, uid, res[0], context=context).lot_stock_id.id
        return False

    def _get_default_company(self, cr, uid, context=None):
        company_id = self.pool.get('res.users')._get_company(cr, uid, context=context)
        return company_id

    _defaults = {
        'state' : POS_CONFIG_STATE[0][0],
        'journal_id': _default_sale_journal,
        'group_by' : True,
        'pricelist_id': _default_pricelist,
        'iface_invoicing': True,
        'stock_location_id': _get_default_location,
        'company_id': _get_default_company,
        'barcode_product': '*',
        'barcode_cashier': '041*',
        'barcode_customer':'042*',
        'barcode_weight': '21xxxxxNNDDD',
        'barcode_discount':'22xxxxxxxxNN',
        'barcode_price': '23xxxxxNNNDD',
    }

    def onchange_picking_type_id(self, cr, uid, ids, picking_type_id, context=None):
        # Propose the picking type's source location when it is an
        # internal -> customer operation type.
        p_type_obj = self.pool.get("stock.picking.type")
        p_type = p_type_obj.browse(cr, uid, picking_type_id, context=context)
        if p_type.default_location_src_id and p_type.default_location_src_id.usage == 'internal' and p_type.default_location_dest_id and p_type.default_location_dest_id.usage == 'customer':
            return {'value': {'stock_location_id': p_type.default_location_src_id.id}}
        return False

    def set_active(self, cr, uid, ids, context=None):
        return self.write(cr, uid, ids, {'state' : 'active'}, context=context)

    def set_inactive(self, cr, uid, ids, context=None):
        return self.write(cr, uid, ids, {'state' : 'inactive'}, context=context)

    def set_deprecate(self, cr, uid, ids, context=None):
        return self.write(cr, uid, ids, {'state' : 'deprecated'}, context=context)

    def create(self, cr, uid, values, context=None):
        ir_sequence = self.pool.get('ir.sequence')
        # force sequence_id field to new pos.order sequence
        values['sequence_id'] = ir_sequence.create(cr, uid, {
            'name': 'POS Order %s' % values['name'],
            'padding': 4,
            'prefix': "%s/" % values['name'],
            'code': "pos.order",
            'company_id': values.get('company_id', False),
        }, context=context)

        # TODO master: add field sequence_line_id on model
        # this make sure we always have one available per company
        ir_sequence.create(cr, uid, {
            'name': 'POS order line %s' % values['name'],
            'padding': 4,
            'prefix': "%s/" % values['name'],
            'code': "pos.order.line",
            'company_id': values.get('company_id', False),
        }, context=context)

        return super(pos_config, self).create(cr, uid, values, context=context)

    def unlink(self, cr, uid, ids, context=None):
        # Remove the per-config order sequence along with the config.
        for obj in self.browse(cr, uid, ids, context=context):
            if obj.sequence_id:
                obj.sequence_id.unlink()
        return super(pos_config, self).unlink(cr, uid, ids, context=context)


class pos_session(osv.osv):
    """One cashier session on a Point of Sale (old-API OpenERP model)."""
    _name = 'pos.session'
    _order = 'id desc'

    POS_SESSION_STATE = [
        ('opening_control', 'Opening Control'),  # Signal open
        ('opened', 'In Progress'),               # Signal closing
        ('closing_control', 'Closing Control'),  # Signal close
        ('closed', 'Closed & Posted'),
    ]

    def _compute_cash_all(self, cr, uid, ids, fieldnames, args, context=None):
        # Multi-field function: derive the cash journal / register from the
        # (at most one) statement whose journal has cash control enabled.
        result = dict()
        for record in self.browse(cr, uid, ids, context=context):
            result[record.id] = {
                'cash_journal_id' : False,
                'cash_register_id' : False,
                'cash_control' : False,
            }
            for st in record.statement_ids:
                if st.journal_id.cash_control == True:
                    result[record.id]['cash_control'] = True
                    result[record.id]['cash_journal_id'] = st.journal_id.id
                    result[record.id]['cash_register_id'] = st.id
        return result

    _columns = {
        'config_id' : fields.many2one('pos.config', 'Point of Sale',
                                      help="The physical point of sale you will use.",
                                      required=True,
                                      select=1,
                                      domain="[('state', '=', 'active')]",
                                     ),
        'name' : fields.char('Session ID', required=True, readonly=True),
        'user_id' : fields.many2one('res.users', 'Responsible',
                                    required=True,
                                    select=1,
                                    readonly=True,
                                    states={'opening_control' : [('readonly', False)]}
                                   ),
        # NOTE(review): "Currnecy" is a typo in the user-facing label; left
        # untouched here because changing it alters a translatable term.
        'currency_id' : fields.related('config_id', 'currency_id', type="many2one", relation='res.currency', string="Currnecy"),
        'start_at' : fields.datetime('Opening Date', readonly=True),
        'stop_at' : fields.datetime('Closing Date', readonly=True),
        'state' : fields.selection(POS_SESSION_STATE, 'Status',
                required=True, readonly=True,
                select=1, copy=False),
        'sequence_number': fields.integer('Order Sequence Number', help='A sequence number that is incremented with each order'),
        'login_number': fields.integer('Login Sequence Number', help='A sequence number that is incremented each time a user resumes the pos session'),
        'cash_control' : fields.function(_compute_cash_all,
                                         multi='cash',
                                         type='boolean', string='Has Cash Control'),
        'cash_journal_id' : fields.function(_compute_cash_all,
                                            multi='cash',
                                            type='many2one', relation='account.journal',
                                            string='Cash Journal', store=True),
        'cash_register_id' : fields.function(_compute_cash_all,
                                             multi='cash',
                                             type='many2one', relation='account.bank.statement',
                                             string='Cash Register', store=True),
        'opening_details_ids' : fields.related('cash_register_id', 'opening_details_ids',
                type='one2many', relation='account.cashbox.line',
                string='Opening Cash Control'),
        'details_ids' : fields.related('cash_register_id', 'details_ids',
                type='one2many', relation='account.cashbox.line',
                string='Cash Control'),
        'cash_register_balance_end_real' : fields.related('cash_register_id', 'balance_end_real',
                type='float',
                digits_compute=dp.get_precision('Account'),
                string="Ending Balance",
                help="Total of closing cash control lines.",
                readonly=True),
        'cash_register_balance_start' : fields.related('cash_register_id', 'balance_start',
                type='float',
                digits_compute=dp.get_precision('Account'),
                string="Starting Balance",
                help="Total of opening cash control lines.",
                readonly=True),
        'cash_register_total_entry_encoding' : fields.related('cash_register_id', 'total_entry_encoding',
                string='Total Cash Transaction',
                readonly=True,
                help="Total of all paid sale orders"),
        'cash_register_balance_end' : fields.related('cash_register_id', 'balance_end',
                type='float',
                digits_compute=dp.get_precision('Account'),
                string="Theoretical Closing Balance",
                help="Sum of opening balance and transactions.",
                readonly=True),
        'cash_register_difference' : fields.related('cash_register_id', 'difference',
                type='float',
                string='Difference',
                help="Difference between the theoretical closing balance and the real closing balance.",
                readonly=True),
        'journal_ids' : fields.related('config_id', 'journal_ids',
                                       type='many2many',
                                       readonly=True,
                                       relation='account.journal',
                                       string='Available Payment Methods'),
        'order_ids' : fields.one2many('pos.order', 'session_id', 'Orders'),
        'statement_ids' : fields.one2many('account.bank.statement', 'pos_session_id', 'Bank Statement', readonly=True),
    }

    _defaults = {
        'name' : '/',
        'user_id' : lambda obj, cr, uid, context: uid,
        'state' : 'opening_control',
        'sequence_number': 1,
        'login_number': 0,
    }

    _sql_constraints = [
        ('uniq_name', 'unique(name)', "The name of this POS Session must be unique !"),
    ]

    def _check_unicity(self, cr, uid, ids, context=None):
        for session in self.browse(cr, uid, ids, context=None):
            # open if there is no session in 'opening_control', 'opened', 'closing_control' for one user
            domain = [
                ('state', 'not in', ('closed','closing_control')),
                ('user_id', '=', session.user_id.id)
            ]
            count = self.search_count(cr, uid, domain, context=context)
            if count>1:
                return False
        return True

    def _check_pos_config(self, cr, uid, ids, context=None):
        # Only one non-closed session per POS configuration.
        for session in self.browse(cr, uid, ids, context=None):
            domain = [
                ('state', '!=', 'closed'),
                ('config_id', '=', session.config_id.id)
            ]
            count = self.search_count(cr, uid, domain, context=context)
            if count>1:
                return False
        return True

    _constraints = [
        (_check_unicity, "You cannot create two active sessions with the same responsible!", ['user_id', 'state']),
        (_check_pos_config, "You cannot create two active sessions related to the same point of sale!", ['config_id']),
    ]

    def create(self, cr, uid, values, context=None):
        context = dict(context or {})
        config_id = values.get('config_id', False) or context.get('default_config_id', False)
        if not config_id:
            raise osv.except_osv( _('Error!'),
                _("You should assign a Point of Sale to your session."))

        # journal_id is not required on the pos_config because it does not
        # exists at the installation. If nothing is configured at the
        # installation we do the minimal configuration. Impossible to do in
        # the .xml files as the CoA is not yet installed.
        jobj = self.pool.get('pos.config')
        pos_config = jobj.browse(cr, uid, config_id, context=context)
        context.update({'company_id': pos_config.company_id.id})
        if not pos_config.journal_id:
            jid = jobj.default_get(cr, uid, ['journal_id'], context=context)['journal_id']
            if jid:
                jobj.write(cr, openerp.SUPERUSER_ID, [pos_config.id], {'journal_id': jid}, context=context)
            else:
                raise osv.except_osv( _('error!'),
                    _("Unable to open the session. You have to assign a sale journal to your point of sale."))

        # define some cash journal if no payment method exists
        if not pos_config.journal_ids:
            journal_proxy = self.pool.get('account.journal')
            cashids = journal_proxy.search(cr, uid, [('journal_user', '=', True), ('type','=','cash')], context=context)
            if not cashids:
                cashids = journal_proxy.search(cr, uid, [('type', '=', 'cash')], context=context)
                if not cashids:
                    cashids = journal_proxy.search(cr, uid, [('journal_user','=',True)], context=context)

            journal_proxy.write(cr, openerp.SUPERUSER_ID, cashids, {'journal_user': True})
            jobj.write(cr, openerp.SUPERUSER_ID, [pos_config.id], {'journal_ids': [(6,0, cashids)]})

        # One bank statement per configured payment-method journal.
        pos_config = jobj.browse(cr, uid, config_id, context=context)
        bank_statement_ids = []
        for journal in pos_config.journal_ids:
            bank_values = {
                'journal_id' : journal.id,
                'user_id' : uid,
                'company_id' : pos_config.company_id.id
            }
            statement_id = self.pool.get('account.bank.statement').create(cr, uid, bank_values, context=context)
            bank_statement_ids.append(statement_id)

        values.update({
            'name': self.pool['ir.sequence'].get(cr, uid, 'pos.session'),
            'statement_ids' : [(6, 0, bank_statement_ids)],
            'config_id': config_id
        })

        return super(pos_session, self).create(cr, uid, values, context=context)

    def unlink(self, cr, uid, ids, context=None):
        # Cascade-delete the session's bank statements first.
        for obj in self.browse(cr, uid, ids, context=context):
            for statement in obj.statement_ids:
                statement.unlink(context=context)
        return super(pos_session, self).unlink(cr, uid, ids, context=context)

    def open_cb(self, cr, uid, ids, context=None):
        """
        call the Point Of Sale interface and set the pos.session to 'opened' (in progress)
        """
        if context is None:
            context = dict()

        if isinstance(ids, (int, long)):
            ids = [ids]

        this_record = self.browse(cr, uid, ids[0], context=context)
        this_record.signal_workflow('open')

        context.update(active_id=this_record.id)

        return {
            'type' : 'ir.actions.act_url',
            'url' : '/pos/web/',
            'target': 'self',
        }

    def login(self, cr, uid, ids, context=None):
        # Bump the login counter each time a user resumes the session.
        this_record = self.browse(cr, uid, ids[0], context=context)
        this_record.write({
            'login_number': this_record.login_number+1,
        })

    def wkf_action_open(self, cr, uid, ids, context=None):
        # second browse because we need to refetch the data from the DB for cash_register_id
        for record in self.browse(cr, uid, ids, context=context):
            values = {}
            if not record.start_at:
                values['start_at'] = time.strftime('%Y-%m-%d %H:%M:%S')
            values['state'] = 'opened'
            record.write(values)
            for st in record.statement_ids:
                st.button_open()

        return self.open_frontend_cb(cr, uid, ids, context=context)

    def wkf_action_opening_control(self, cr, uid, ids, context=None):
        return self.write(cr, uid, ids, {'state' : 'opening_control'}, context=context)

    def wkf_action_closing_control(self, cr, uid, ids, context=None):
        # Align the real ending balance of non-cash statements with the
        # theoretical one before entering closing control.
        for session in self.browse(cr, uid, ids, context=context):
            for statement in session.statement_ids:
                if (statement != session.cash_register_id) and (statement.balance_end != statement.balance_end_real):
                    self.pool.get('account.bank.statement').write(cr, uid, [statement.id], {'balance_end_real': statement.balance_end})
        return self.write(cr, uid, ids, {'state' : 'closing_control', 'stop_at' : time.strftime('%Y-%m-%d %H:%M:%S')}, context=context)

    def wkf_action_close(self, cr, uid, ids, context=None):
        # NOTE(review): method truncated in the available chunk; remainder
        # lies beyond this view.
        # Close CashBox
        for record in self.browse(cr, uid, ids, context=context):
            for st in record.statement_ids:
                if abs(st.difference) > st.journal_id.amount_authorized_diff:
                    # The pos manager can close statements with maximums.
                    if not self.pool.get('ir.model.access').check_groups(cr, uid, "point_of_sale.group_pos_manager"):
                        raise osv.except_osv( _('Error!'),
                            _("Your ending balance is too different from the theoretical cash closing (%.2f), the maximum allowed is: %.2f.
You can contact your manager to force it.") % (st.difference, st.journal_id.amount_authorized_diff)) if (st.journal_id.type not in ['bank', 'cash']): raise osv.except_osv(_('Error!'), _("The type of the journal for your payment method should be bank or cash ")) getattr(st, 'button_confirm_%s' % st.journal_id.type)(context=context) self._confirm_orders(cr, uid, ids, context=context) self.write(cr, uid, ids, {'state' : 'closed'}, context=context) obj = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'point_of_sale', 'menu_point_root')[1] return { 'type' : 'ir.actions.client', 'name' : 'Point of Sale Menu', 'tag' : 'reload', 'params' : {'menu_id': obj}, } def _confirm_orders(self, cr, uid, ids, context=None): account_move_obj = self.pool.get('account.move') pos_order_obj = self.pool.get('pos.order') for session in self.browse(cr, uid, ids, context=context): local_context = dict(context or {}, force_company=session.config_id.journal_id.company_id.id) order_ids = [order.id for order in session.order_ids if order.state == 'paid'] move_id = account_move_obj.create(cr, uid, {'ref' : session.name, 'journal_id' : session.config_id.journal_id.id, }, context=local_context) pos_order_obj._create_account_move_line(cr, uid, order_ids, session, move_id, context=local_context) for order in session.order_ids: if order.state == 'done': continue if order.state not in ('paid', 'invoiced'): raise osv.except_osv( _('Error!'), _("You cannot confirm all orders of this session, because they have not the 'paid' status")) else: pos_order_obj.signal_workflow(cr, uid, [order.id], 'done') return True def open_frontend_cb(self, cr, uid, ids, context=None): if not context: context = {} if not ids: return {} for session in self.browse(cr, uid, ids, context=context): if session.user_id.id != uid: raise osv.except_osv( _('Error!'), _("You cannot use the session of another users. This session is owned by %s. Please first close this one to use this point of sale." 
% session.user_id.name)) context.update({'active_id': ids[0]}) return { 'type' : 'ir.actions.act_url', 'target': 'self', 'url': '/pos/web/', } class pos_order(osv.osv): _name = "pos.order" _description = "Point of Sale" _order = "id desc" def _order_fields(self, cr, uid, ui_order, context=None): return { 'name': ui_order['name'], 'user_id': ui_order['user_id'] or False, 'session_id': ui_order['pos_session_id'], 'lines': ui_order['lines'], 'pos_reference':ui_order['name'], 'partner_id': ui_order['partner_id'] or False, } def _payment_fields(self, cr, uid, ui_paymentline, context=None): return { 'amount': ui_paymentline['amount'] or 0.0, 'payment_date': ui_paymentline['name'], 'statement_id': ui_paymentline['statement_id'], 'payment_name': ui_paymentline.get('note',False), 'journal': ui_paymentline['journal_id'], } def _process_order(self, cr, uid, order, context=None): order_id = self.create(cr, uid, self._order_fields(cr, uid, order, context=context),context) for payments in order['statement_ids']: self.add_payment(cr, uid, order_id, self._payment_fields(cr, uid, payments[2], context=context), context=context) session = self.pool.get('pos.session').browse(cr, uid, order['pos_session_id'], context=context) if session.sequence_number <= order['sequence_number']: session.write({'sequence_number': order['sequence_number'] + 1}) session.refresh() if order['amount_return']: cash_journal = session.cash_journal_id if not cash_journal: cash_journal_ids = filter(lambda st: st.journal_id.type=='cash', session.statement_ids) if not len(cash_journal_ids): raise osv.except_osv( _('error!'), _("No cash statement found for this session. 
Unable to record returned cash.")) cash_journal = cash_journal_ids[0].journal_id self.add_payment(cr, uid, order_id, { 'amount': -order['amount_return'], 'payment_date': time.strftime('%Y-%m-%d %H:%M:%S'), 'payment_name': _('return'), 'journal': cash_journal.id, }, context=context) return order_id def create_from_ui(self, cr, uid, orders, context=None): # Keep only new orders submitted_references = [o['data']['name'] for o in orders] existing_order_ids = self.search(cr, uid, [('pos_reference', 'in', submitted_references)], context=context) existing_orders = self.read(cr, uid, existing_order_ids, ['pos_reference'], context=context) existing_references = set([o['pos_reference'] for o in existing_orders]) orders_to_save = [o for o in orders if o['data']['name'] not in existing_references] order_ids = [] for tmp_order in orders_to_save: to_invoice = tmp_order['to_invoice'] order = tmp_order['data'] order_id = self._process_order(cr, uid, order, context=context) order_ids.append(order_id) try: self.signal_workflow(cr, uid, [order_id], 'paid') except Exception as e: _logger.error('Could not fully process the POS Order: %s', tools.ustr(e)) if to_invoice: self.action_invoice(cr, uid, [order_id], context) order_obj = self.browse(cr, uid, order_id, context) self.pool['account.invoice'].signal_workflow(cr, uid, [order_obj.invoice_id.id], 'invoice_open') return order_ids def write(self, cr, uid, ids, vals, context=None): res = super(pos_order, self).write(cr, uid, ids, vals, context=context) #If you change the partner of the PoS order, change also the partner of the associated bank statement lines partner_obj = self.pool.get('res.partner') bsl_obj = self.pool.get("account.bank.statement.line") if 'partner_id' in vals: for posorder in self.browse(cr, uid, ids, context=context): if posorder.invoice_id: raise osv.except_osv( _('Error!'), _("You cannot change the partner of a POS order for which an invoice has already been issued.")) if vals['partner_id']: p_id = 
partner_obj.browse(cr, uid, vals['partner_id'], context=context) part_id = partner_obj._find_accounting_partner(p_id).id else: part_id = False bsl_ids = [x.id for x in posorder.statement_ids] bsl_obj.write(cr, uid, bsl_ids, {'partner_id': part_id}, context=context) return res def unlink(self, cr, uid, ids, context=None): for rec in self.browse(cr, uid, ids, context=context): if rec.state not in ('draft','cancel'): raise osv.except_osv(_('Unable to Delete!'), _('In order to delete a sale, it must be new or cancelled.')) return super(pos_order, self).unlink(cr, uid, ids, context=context) def onchange_partner_id(self, cr, uid, ids, part=False, context=None): if not part: return {'value': {}} pricelist = self.pool.get('res.partner').browse(cr, uid, part, context=context).property_product_pricelist.id return {'value': {'pricelist_id': pricelist}} def _amount_all(self, cr, uid, ids, name, args, context=None): cur_obj = self.pool.get('res.currency') res = {} for order in self.browse(cr, uid, ids, context=context): res[order.id] = { 'amount_paid': 0.0, 'amount_return':0.0, 'amount_tax':0.0, } val1 = val2 = 0.0 cur = order.pricelist_id.currency_id for payment in order.statement_ids: res[order.id]['amount_paid'] += payment.amount res[order.id]['amount_return'] += (payment.amount < 0 and payment.amount or 0) for line in order.lines: val1 += line.price_subtotal_incl val2 += line.price_subtotal res[order.id]['amount_tax'] = cur_obj.round(cr, uid, cur, val1-val2) res[order.id]['amount_total'] = cur_obj.round(cr, uid, cur, val1) return res _columns = { 'name': fields.char('Order Ref', required=True, readonly=True, copy=False), 'company_id':fields.many2one('res.company', 'Company', required=True, readonly=True), 'date_order': fields.datetime('Order Date', readonly=True, select=True), 'user_id': fields.many2one('res.users', 'Salesman', help="Person who uses the the cash register. 
It can be a reliever, a student or an interim employee."), 'amount_tax': fields.function(_amount_all, string='Taxes', digits_compute=dp.get_precision('Account'), multi='all'), 'amount_total': fields.function(_amount_all, string='Total', digits_compute=dp.get_precision('Account'), multi='all'), 'amount_paid': fields.function(_amount_all, string='Paid', states={'draft': [('readonly', False)]}, readonly=True, digits_compute=dp.get_precision('Account'), multi='all'), 'amount_return': fields.function(_amount_all, 'Returned', digits_compute=dp.get_precision('Account'), multi='all'), 'lines': fields.one2many('pos.order.line', 'order_id', 'Order Lines', states={'draft': [('readonly', False)]}, readonly=True, copy=True), 'statement_ids': fields.one2many('account.bank.statement.line', 'pos_statement_id', 'Payments', states={'draft': [('readonly', False)]}, readonly=True), 'pricelist_id': fields.many2one('product.pricelist', 'Pricelist', required=True, states={'draft': [('readonly', False)]}, readonly=True), 'partner_id': fields.many2one('res.partner', 'Customer', change_default=True, select=1, states={'draft': [('readonly', False)], 'paid': [('readonly', False)]}), 'sequence_number': fields.integer('Sequence Number', help='A session-unique sequence number for the order'), 'session_id' : fields.many2one('pos.session', 'Session', #required=True, select=1, domain="[('state', '=', 'opened')]", states={'draft' : [('readonly', False)]}, readonly=True), 'state': fields.selection([('draft', 'New'), ('cancel', 'Cancelled'), ('paid', 'Paid'), ('done', 'Posted'), ('invoiced', 'Invoiced')], 'Status', readonly=True, copy=False), 'invoice_id': fields.many2one('account.invoice', 'Invoice', copy=False), 'account_move': fields.many2one('account.move', 'Journal Entry', readonly=True, copy=False), 'picking_id': fields.many2one('stock.picking', 'Picking', readonly=True, copy=False), 'picking_type_id': fields.related('session_id', 'config_id', 'picking_type_id', string="Picking Type", 
type='many2one', relation='stock.picking.type'), 'location_id': fields.related('session_id', 'config_id', 'stock_location_id', string="Location", type='many2one', store=True, relation='stock.location'), 'note': fields.text('Internal Notes'), 'nb_print': fields.integer('Number of Print', readonly=True, copy=False), 'pos_reference': fields.char('Receipt Ref', readonly=True, copy=False), 'sale_journal': fields.related('session_id', 'config_id', 'journal_id', relation='account.journal', type='many2one', string='Sale Journal', store=True, readonly=True), } def _default_session(self, cr, uid, context=None): so = self.pool.get('pos.session') session_ids = so.search(cr, uid, [('state','=', 'opened'), ('user_id','=',uid)], context=context) return session_ids and session_ids[0] or False def _default_pricelist(self, cr, uid, context=None): session_ids = self._default_session(cr, uid, context) if session_ids: session_record = self.pool.get('pos.session').browse(cr, uid, session_ids, context=context) return session_record.config_id.pricelist_id and session_record.config_id.pricelist_id.id or False return False def _get_out_picking_type(self, cr, uid, context=None): return self.pool.get('ir.model.data').xmlid_to_res_id( cr, uid, 'point_of_sale.picking_type_posout', context=context) _defaults = { 'user_id': lambda self, cr, uid, context: uid, 'state': 'draft', 'name': '/', 'date_order': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'), 'nb_print': 0, 'sequence_number': 1, 'session_id': _default_session, 'company_id': lambda self,cr,uid,c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.id, 'pricelist_id': _default_pricelist, } def create(self, cr, uid, values, context=None): if values.get('session_id'): # set name based on the sequence specified on the config session = self.pool['pos.session'].browse(cr, uid, values['session_id'], context=context) values['name'] = session.config_id.sequence_id._next() else: # fallback on any pos.order sequence values['name'] = 
self.pool.get('ir.sequence').get_id(cr, uid, 'pos.order', 'code', context=context) return super(pos_order, self).create(cr, uid, values, context=context) def test_paid(self, cr, uid, ids, context=None): """A Point of Sale is paid when the sum @return: True """ for order in self.browse(cr, uid, ids, context=context): if order.lines and not order.amount_total: return True if (not order.lines) or (not order.statement_ids) or \ (abs(order.amount_total-order.amount_paid) > 0.00001): return False return True def create_picking(self, cr, uid, ids, context=None): """Create a picking for each order and validate it.""" picking_obj = self.pool.get('stock.picking') partner_obj = self.pool.get('res.partner') move_obj = self.pool.get('stock.move') for order in self.browse(cr, uid, ids, context=context): addr = order.partner_id and partner_obj.address_get(cr, uid, [order.partner_id.id], ['delivery']) or {} picking_type = order.picking_type_id picking_id = False if picking_type: picking_id = picking_obj.create(cr, uid, { 'origin': order.name, 'partner_id': addr.get('delivery',False), 'picking_type_id': picking_type.id, 'company_id': order.company_id.id, 'move_type': 'direct', 'note': order.note or "", 'invoice_state': 'none', }, context=context) self.write(cr, uid, [order.id], {'picking_id': picking_id}, context=context) location_id = order.location_id.id if order.partner_id: destination_id = order.partner_id.property_stock_customer.id elif picking_type: if not picking_type.default_location_dest_id: raise osv.except_osv(_('Error!'), _('Missing source or destination location for picking type %s. Please configure those fields and try again.' 
% (picking_type.name,))) destination_id = picking_type.default_location_dest_id.id else: destination_id = partner_obj.default_get(cr, uid, ['property_stock_customer'], context=context)['property_stock_customer'] move_list = [] for line in order.lines: if line.product_id and line.product_id.type == 'service': continue move_list.append(move_obj.create(cr, uid, { 'name': line.name, 'product_uom': line.product_id.uom_id.id, 'product_uos': line.product_id.uom_id.id, 'picking_id': picking_id, 'picking_type_id': picking_type.id, 'product_id': line.product_id.id, 'product_uos_qty': abs(line.qty), 'product_uom_qty': abs(line.qty), 'state': 'draft', 'location_id': location_id if line.qty >= 0 else destination_id, 'location_dest_id': destination_id if line.qty >= 0 else location_id, }, context=context)) if picking_id: picking_obj.action_confirm(cr, uid, [picking_id], context=context) picking_obj.force_assign(cr, uid, [picking_id], context=context) picking_obj.action_done(cr, uid, [picking_id], context=context) elif move_list: move_obj.action_confirm(cr, uid, move_list, context=context) move_obj.force_assign(cr, uid, move_list, context=context) move_obj.action_done(cr, uid, move_list, context=context) return True def cancel_order(self, cr, uid, ids, context=None): """ Changes order state to cancel @return: True """ stock_picking_obj = self.pool.get('stock.picking') for order in self.browse(cr, uid, ids, context=context): stock_picking_obj.action_cancel(cr, uid, [order.picking_id.id]) if stock_picking_obj.browse(cr, uid, order.picking_id.id, context=context).state <> 'cancel': raise osv.except_osv(_('Error!'), _('Unable to cancel the picking.')) self.write(cr, uid, ids, {'state': 'cancel'}, context=context) return True def add_payment(self, cr, uid, order_id, data, context=None): """Create a new payment for the order""" context = dict(context or {}) statement_line_obj = self.pool.get('account.bank.statement.line') property_obj = self.pool.get('ir.property') order = 
self.browse(cr, uid, order_id, context=context) args = { 'amount': data['amount'], 'date': data.get('payment_date', time.strftime('%Y-%m-%d')), 'name': order.name + ': ' + (data.get('payment_name', '') or ''), 'partner_id': order.partner_id and self.pool.get("res.partner")._find_accounting_partner(order.partner_id).id or False, } journal_id = data.get('journal', False) statement_id = data.get('statement_id', False) assert journal_id or statement_id, "No statement_id or journal_id passed to the method!" journal = self.pool['account.journal'].browse(cr, uid, journal_id, context=context) # use the company of the journal and not of the current user company_cxt = dict(context, force_company=journal.company_id.id) account_def = property_obj.get(cr, uid, 'property_account_receivable', 'res.partner', context=company_cxt) args['account_id'] = (order.partner_id and order.partner_id.property_account_receivable \ and order.partner_id.property_account_receivable.id) or (account_def and account_def.id) or False if not args['account_id']: if not args['partner_id']: msg = _('There is no receivable account defined to make payment.') else: msg = _('There is no receivable account defined to make payment for the partner: "%s" (id:%d).') % (order.partner_id.name, order.partner_id.id,) raise osv.except_osv(_('Configuration Error!'), msg) context.pop('pos_session_id', False) for statement in order.session_id.statement_ids: if statement.id == statement_id: journal_id = statement.journal_id.id break elif statement.journal_id.id == journal_id: statement_id = statement.id break if not statement_id: raise osv.except_osv(_('Error!'), _('You have to open at least one cashbox.')) args.update({ 'statement_id': statement_id, 'pos_statement_id': order_id, 'journal_id': journal_id, 'ref': order.session_id.name, }) statement_line_obj.create(cr, uid, args, context=context) return statement_id def refund(self, cr, uid, ids, context=None): """Create a copy of order for refund order""" clone_list = [] 
line_obj = self.pool.get('pos.order.line') for order in self.browse(cr, uid, ids, context=context): current_session_ids = self.pool.get('pos.session').search(cr, uid, [ ('state', '!=', 'closed'), ('user_id', '=', uid)], context=context) if not current_session_ids: raise osv.except_osv(_('Error!'), _('To return product(s), you need to open a session that will be used to register the refund.')) clone_id = self.copy(cr, uid, order.id, { 'name': order.name + ' REFUND', # not used, name forced by create 'session_id': current_session_ids[0], 'date_order': time.strftime('%Y-%m-%d %H:%M:%S'), }, context=context) clone_list.append(clone_id) for clone in self.browse(cr, uid, clone_list, context=context): for order_line in clone.lines: line_obj.write(cr, uid, [order_line.id], { 'qty': -order_line.qty }, context=context) abs = { 'name': _('Return Products'), 'view_type': 'form', 'view_mode': 'form', 'res_model': 'pos.order', 'res_id':clone_list[0], 'view_id': False, 'context':context, 'type': 'ir.actions.act_window', 'nodestroy': True, 'target': 'current', } return abs def action_invoice_state(self, cr, uid, ids, context=None): return self.write(cr, uid, ids, {'state':'invoiced'}, context=context) def action_invoice(self, cr, uid, ids, context=None): inv_ref = self.pool.get('account.invoice') inv_line_ref = self.pool.get('account.invoice.line') product_obj = self.pool.get('product.product') inv_ids = [] for order in self.pool.get('pos.order').browse(cr, uid, ids, context=context): if order.invoice_id: inv_ids.append(order.invoice_id.id) continue if not order.partner_id: raise osv.except_osv(_('Error!'), _('Please provide a partner for the sale.')) acc = order.partner_id.property_account_receivable.id inv = { 'name': order.name, 'origin': order.name, 'account_id': acc, 'journal_id': order.sale_journal.id or None, 'type': 'out_invoice', 'reference': order.name, 'partner_id': order.partner_id.id, 'comment': order.note or '', 'currency_id': order.pricelist_id.currency_id.id, # 
considering partner's sale pricelist's currency } inv.update(inv_ref.onchange_partner_id(cr, uid, [], 'out_invoice', order.partner_id.id)['value']) if not inv.get('account_id', None): inv['account_id'] = acc inv_id = inv_ref.create(cr, uid, inv, context=context) self.write(cr, uid, [order.id], {'invoice_id': inv_id, 'state': 'invoiced'}, context=context) inv_ids.append(inv_id) for line in order.lines: inv_line = { 'invoice_id': inv_id, 'product_id': line.product_id.id, 'quantity': line.qty, } inv_name = product_obj.name_get(cr, uid, [line.product_id.id], context=context)[0][1] inv_line.update(inv_line_ref.product_id_change(cr, uid, [], line.product_id.id, line.product_id.uom_id.id, line.qty, partner_id = order.partner_id.id, fposition_id=order.partner_id.property_account_position.id)['value']) inv_line['price_unit'] = line.price_unit inv_line['discount'] = line.discount inv_line['name'] = inv_name inv_line['invoice_line_tax_id'] = [(6, 0, [x.id for x in line.product_id.taxes_id] )] inv_line_ref.create(cr, uid, inv_line, context=context) inv_ref.button_reset_taxes(cr, uid, [inv_id], context=context) self.signal_workflow(cr, uid, [order.id], 'invoice') inv_ref.signal_workflow(cr, uid, [inv_id], 'validate') if not inv_ids: return {} mod_obj = self.pool.get('ir.model.data') res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_form') res_id = res and res[1] or False return { 'name': _('Customer Invoice'), 'view_type': 'form', 'view_mode': 'form', 'view_id': [res_id], 'res_model': 'account.invoice', 'context': "{'type':'out_invoice'}", 'type': 'ir.actions.act_window', 'nodestroy': True, 'target': 'current', 'res_id': inv_ids and inv_ids[0] or False, } def create_account_move(self, cr, uid, ids, context=None): return self._create_account_move_line(cr, uid, ids, None, None, context=context) def _prepare_analytic_account(self, cr, uid, line, context=None): '''This method is designed to be inherited in a custom module''' return False def 
_create_account_move_line(self, cr, uid, ids, session=None, move_id=None, context=None): # Tricky, via the workflow, we only have one id in the ids variable """Create a account move line of order grouped by products or not.""" account_move_obj = self.pool.get('account.move') account_period_obj = self.pool.get('account.period') account_tax_obj = self.pool.get('account.tax') property_obj = self.pool.get('ir.property') cur_obj = self.pool.get('res.currency') #session_ids = set(order.session_id for order in self.browse(cr, uid, ids, context=context)) if session and not all(session.id == order.session_id.id for order in self.browse(cr, uid, ids, context=context)): raise osv.except_osv(_('Error!'), _('Selected orders do not have the same session!')) grouped_data = {} have_to_group_by = session and session.config_id.group_by or False def compute_tax(amount, tax, line): if amount > 0: tax_code_id = tax['base_code_id'] tax_amount = line.price_subtotal * tax['base_sign'] else: tax_code_id = tax['ref_base_code_id'] tax_amount = line.price_subtotal * tax['ref_base_sign'] return (tax_code_id, tax_amount,) for order in self.browse(cr, uid, ids, context=context): if order.account_move: continue if order.state != 'paid': continue current_company = order.sale_journal.company_id group_tax = {} account_def = property_obj.get(cr, uid, 'property_account_receivable', 'res.partner', context=context) order_account = order.partner_id and \ order.partner_id.property_account_receivable and \ order.partner_id.property_account_receivable.id or \ account_def and account_def.id or current_company.account_receivable.id if move_id is None: # Create an entry for the sale move_id = account_move_obj.create(cr, uid, { 'ref' : order.name, 'journal_id': order.sale_journal.id, }, context=context) def insert_data(data_type, values): # if have_to_group_by: sale_journal_id = order.sale_journal.id period = account_period_obj.find(cr, uid, context=dict(context or {}, company_id=current_company.id))[0] # 
'quantity': line.qty, # 'product_id': line.product_id.id, values.update({ 'date': order.date_order[:10], 'ref': order.name, 'partner_id': order.partner_id and self.pool.get("res.partner")._find_accounting_partner(order.partner_id).id or False, 'journal_id' : sale_journal_id, 'period_id' : period, 'move_id' : move_id, 'company_id': current_company.id, }) if data_type == 'product': key = ('product', values['partner_id'], values['product_id'], values['analytic_account_id'], values['debit'] > 0) elif data_type == 'tax': key = ('tax', values['partner_id'], values['tax_code_id'], values['debit'] > 0) elif data_type == 'counter_part': key = ('counter_part', values['partner_id'], values['account_id'], values['debit'] > 0) else: return grouped_data.setdefault(key, []) # if not have_to_group_by or (not grouped_data[key]): # grouped_data[key].append(values) # else: # pass if have_to_group_by: if not grouped_data[key]: grouped_data[key].append(values) else: current_value = grouped_data[key][0] current_value['quantity'] = current_value.get('quantity', 0.0) + values.get('quantity', 0.0) current_value['credit'] = current_value.get('credit', 0.0) + values.get('credit', 0.0) current_value['debit'] = current_value.get('debit', 0.0) + values.get('debit', 0.0) current_value['tax_amount'] = current_value.get('tax_amount', 0.0) + values.get('tax_amount', 0.0) else: grouped_data[key].append(values) #because of the weird way the pos order is written, we need to make sure there is at least one line, #because just after the 'for' loop there are references to 'line' and 'income_account' variables (that #are set inside the for loop) #TOFIX: a deep refactoring of this method (and class!) 
is needed in order to get rid of this stupid hack assert order.lines, _('The POS order must have lines when calling this method') # Create an move for each order line cur = order.pricelist_id.currency_id for line in order.lines: tax_amount = 0 taxes = [] for t in line.product_id.taxes_id: if t.company_id.id == current_company.id: taxes.append(t) computed_taxes = account_tax_obj.compute_all(cr, uid, taxes, line.price_unit * (100.0-line.discount) / 100.0, line.qty)['taxes'] for tax in computed_taxes: tax_amount += cur_obj.round(cr, uid, cur, tax['amount']) group_key = (tax['tax_code_id'], tax['base_code_id'], tax['account_collected_id'], tax['id']) group_tax.setdefault(group_key, 0) group_tax[group_key] += cur_obj.round(cr, uid, cur, tax['amount']) amount = line.price_subtotal # Search for the income account if line.product_id.property_account_income.id: income_account = line.product_id.property_account_income.id elif line.product_id.categ_id.property_account_income_categ.id: income_account = line.product_id.categ_id.property_account_income_categ.id else: raise osv.except_osv(_('Error!'), _('Please define income '\ 'account for this product: "%s" (id:%d).') \ % (line.product_id.name, line.product_id.id, )) # Empty the tax list as long as there is no tax code: tax_code_id = False tax_amount = 0 while computed_taxes: tax = computed_taxes.pop(0) tax_code_id, tax_amount = compute_tax(amount, tax, line) # If there is one we stop if tax_code_id: break # Create a move for the line insert_data('product', { 'name': line.product_id.name, 'quantity': line.qty, 'product_id': line.product_id.id, 'account_id': income_account, 'analytic_account_id': self._prepare_analytic_account(cr, uid, line, context=context), 'credit': ((amount>0) and amount) or 0.0, 'debit': ((amount<0) and -amount) or 0.0, 'tax_code_id': tax_code_id, 'tax_amount': tax_amount, 'partner_id': order.partner_id and self.pool.get("res.partner")._find_accounting_partner(order.partner_id).id or False }) # For each 
remaining tax with a code, whe create a move line for tax in computed_taxes: tax_code_id, tax_amount = compute_tax(amount, tax, line) if not tax_code_id: continue insert_data('tax', { 'name': _('Tax'), 'product_id':line.product_id.id, 'quantity': line.qty, 'account_id': income_account, 'credit': 0.0, 'debit': 0.0, 'tax_code_id': tax_code_id, 'tax_amount': tax_amount, 'partner_id': order.partner_id and self.pool.get("res.partner")._find_accounting_partner(order.partner_id).id or False }) # Create a move for each tax group (tax_code_pos, base_code_pos, account_pos, tax_id)= (0, 1, 2, 3) for key, tax_amount in group_tax.items(): tax = self.pool.get('account.tax').browse(cr, uid, key[tax_id], context=context) insert_data('tax', { 'name': _('Tax') + ' ' + tax.name, 'quantity': line.qty, 'product_id': line.product_id.id, 'account_id': key[account_pos] or income_account, 'credit': ((tax_amount>0) and tax_amount) or 0.0, 'debit': ((tax_amount<0) and -tax_amount) or 0.0, 'tax_code_id': key[tax_code_pos], 'tax_amount': tax_amount, 'partner_id': order.partner_id and self.pool.get("res.partner")._find_accounting_partner(order.partner_id).id or False }) # counterpart insert_data('counter_part', { 'name': _("Trade Receivables"), #order.name, 'account_id': order_account, 'credit': ((order.amount_total < 0) and -order.amount_total) or 0.0, 'debit': ((order.amount_total > 0) and order.amount_total) or 0.0, 'partner_id': order.partner_id and self.pool.get("res.partner")._find_accounting_partner(order.partner_id).id or False }) order.write({'state':'done', 'account_move': move_id}) all_lines = [] for group_key, group_data in grouped_data.iteritems(): for value in group_data: all_lines.append((0, 0, value),) if move_id: #In case no order was changed self.pool.get("account.move").write(cr, uid, [move_id], {'line_id':all_lines}, context=context) return True def action_payment(self, cr, uid, ids, context=None): return self.write(cr, uid, ids, {'state': 'payment'}, context=context) def 
action_paid(self, cr, uid, ids, context=None): self.write(cr, uid, ids, {'state': 'paid'}, context=context) self.create_picking(cr, uid, ids, context=context) return True def action_cancel(self, cr, uid, ids, context=None): self.write(cr, uid, ids, {'state': 'cancel'}, context=context) return True def action_done(self, cr, uid, ids, context=None): self.create_account_move(cr, uid, ids, context=context) return True class account_bank_statement(osv.osv): _inherit = 'account.bank.statement' _columns= { 'user_id': fields.many2one('res.users', 'User', readonly=True), } _defaults = { 'user_id': lambda self,cr,uid,c={}: uid } class account_bank_statement_line(osv.osv): _inherit = 'account.bank.statement.line' _columns= { 'pos_statement_id': fields.many2one('pos.order', ondelete='cascade'), } class pos_order_line(osv.osv): _name = "pos.order.line" _description = "Lines of Point of Sale" _rec_name = "product_id" def _amount_line_all(self, cr, uid, ids, field_names, arg, context=None): res = dict([(i, {}) for i in ids]) account_tax_obj = self.pool.get('account.tax') cur_obj = self.pool.get('res.currency') for line in self.browse(cr, uid, ids, context=context): taxes_ids = [ tax for tax in line.product_id.taxes_id if tax.company_id.id == line.order_id.company_id.id ] price = line.price_unit * (1 - (line.discount or 0.0) / 100.0) taxes = account_tax_obj.compute_all(cr, uid, taxes_ids, price, line.qty, product=line.product_id, partner=line.order_id.partner_id or False) cur = line.order_id.pricelist_id.currency_id res[line.id]['price_subtotal'] = cur_obj.round(cr, uid, cur, taxes['total']) res[line.id]['price_subtotal_incl'] = cur_obj.round(cr, uid, cur, taxes['total_included']) return res def onchange_product_id(self, cr, uid, ids, pricelist, product_id, qty=0, partner_id=False, context=None): context = context or {} if not product_id: return {} if not pricelist: raise osv.except_osv(_('No Pricelist!'), _('You have to select a pricelist in the sale form !\n' \ 'Please set one 
before choosing a product.')) price = self.pool.get('product.pricelist').price_get(cr, uid, [pricelist], product_id, qty or 1.0, partner_id)[pricelist] result = self.onchange_qty(cr, uid, ids, product_id, 0.0, qty, price, context=context) result['value']['price_unit'] = price return result def onchange_qty(self, cr, uid, ids, product, discount, qty, price_unit, context=None): result = {} if not product: return result account_tax_obj = self.pool.get('account.tax') cur_obj = self.pool.get('res.currency') prod = self.pool.get('product.product').browse(cr, uid, product, context=context) price = price_unit * (1 - (discount or 0.0) / 100.0) taxes = account_tax_obj.compute_all(cr, uid, prod.taxes_id, price, qty, product=prod, partner=False) result['price_subtotal'] = taxes['total'] result['price_subtotal_incl'] = taxes['total_included'] return {'value': result} _columns = { 'company_id': fields.many2one('res.company', 'Company', required=True), 'name': fields.char('Line No', required=True, copy=False), 'notice': fields.char('Discount Notice'), 'product_id': fields.many2one('product.product', 'Product', domain=[('sale_ok', '=', True)], required=True, change_default=True), 'price_unit': fields.float(string='Unit Price', digits_compute=dp.get_precision('Account')), 'qty': fields.float('Quantity', digits_compute=dp.get_precision('Product UoS')), 'price_subtotal': fields.function(_amount_line_all, multi='pos_order_line_amount', digits_compute=dp.get_precision('Account'), string='Subtotal w/o Tax', store=True), 'price_subtotal_incl': fields.function(_amount_line_all, multi='pos_order_line_amount', digits_compute=dp.get_precision('Account'), string='Subtotal', store=True), 'discount': fields.float('Discount (%)', digits_compute=dp.get_precision('Account')), 'order_id': fields.many2one('pos.order', 'Order Ref', ondelete='cascade'), 'create_date': fields.datetime('Creation Date', readonly=True), } _defaults = { 'name': lambda obj, cr, uid, context: 
obj.pool.get('ir.sequence').get(cr, uid, 'pos.order.line'), 'qty': lambda *a: 1, 'discount': lambda *a: 0.0, 'company_id': lambda self,cr,uid,c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.id, } class ean_wizard(osv.osv_memory): _name = 'pos.ean_wizard' _columns = { 'ean13_pattern': fields.char('Reference', size=13, required=True, translate=True), } def sanitize_ean13(self, cr, uid, ids, context): for r in self.browse(cr,uid,ids): ean13 = openerp.addons.product.product.sanitize_ean13(r.ean13_pattern) m = context.get('active_model') m_id = context.get('active_id') self.pool[m].write(cr,uid,[m_id],{'ean13':ean13}) return { 'type' : 'ir.actions.act_window_close' } class pos_category(osv.osv): _name = "pos.category" _description = "Public Category" _order = "sequence, name" _constraints = [ (osv.osv._check_recursion, 'Error ! You cannot create recursive categories.', ['parent_id']) ] def name_get(self, cr, uid, ids, context=None): res = [] for cat in self.browse(cr, uid, ids, context=context): names = [cat.name] pcat = cat.parent_id while pcat: names.append(pcat.name) pcat = pcat.parent_id res.append((cat.id, ' / '.join(reversed(names)))) return res def _name_get_fnc(self, cr, uid, ids, prop, unknow_none, context=None): res = self.name_get(cr, uid, ids, context=context) return dict(res) def _get_image(self, cr, uid, ids, name, args, context=None): result = dict.fromkeys(ids, False) for obj in self.browse(cr, uid, ids, context=context): result[obj.id] = tools.image_get_resized_images(obj.image) return result def _set_image(self, cr, uid, id, name, value, args, context=None): return self.write(cr, uid, [id], {'image': tools.image_resize_image_big(value)}, context=context) _columns = { 'name': fields.char('Name', required=True, translate=True), 'complete_name': fields.function(_name_get_fnc, type="char", string='Name'), 'parent_id': fields.many2one('pos.category','Parent Category', select=True), 'child_id': fields.one2many('pos.category', 'parent_id', 
string='Children Categories'), 'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of product categories."), # NOTE: there is no 'default image', because by default we don't show thumbnails for categories. However if we have a thumbnail # for at least one category, then we display a default image on the other, so that the buttons have consistent styling. # In this case, the default image is set by the js code. # NOTE2: image: all image fields are base64 encoded and PIL-supported 'image': fields.binary("Image", help="This field holds the image used as image for the cateogry, limited to 1024x1024px."), 'image_medium': fields.function(_get_image, fnct_inv=_set_image, string="Medium-sized image", type="binary", multi="_get_image", store={ 'pos.category': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10), }, help="Medium-sized image of the category. It is automatically "\ "resized as a 128x128px image, with aspect ratio preserved. "\ "Use this field in form views or some kanban views."), 'image_small': fields.function(_get_image, fnct_inv=_set_image, string="Smal-sized image", type="binary", multi="_get_image", store={ 'pos.category': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10), }, help="Small-sized image of the category. It is automatically "\ "resized as a 64x64px image, with aspect ratio preserved. 
"\ "Use this field anywhere a small image is required."), } class product_template(osv.osv): _inherit = 'product.template' _columns = { 'income_pdt': fields.boolean('Point of Sale Cash In', help="Check if, this is a product you can use to put cash into a statement for the point of sale backend."), 'expense_pdt': fields.boolean('Point of Sale Cash Out', help="Check if, this is a product you can use to take cash from a statement for the point of sale backend, example: money lost, transfer to bank, etc."), 'available_in_pos': fields.boolean('Available in the Point of Sale', help='Check if you want this product to appear in the Point of Sale'), 'to_weight' : fields.boolean('To Weigh With Scale', help="Check if the product should be weighted using the hardware scale integration"), 'pos_categ_id': fields.many2one('pos.category','Point of Sale Category', help="Those categories are used to group similar products for point of sale."), } _defaults = { 'to_weight' : False, 'available_in_pos': True, } def unlink(self, cr, uid, ids, context=None): product_ctx = dict(context or {}, active_test=False) if self.search_count(cr, uid, [('id', 'in', ids), ('available_in_pos', '=', True)], context=product_ctx): if self.pool['pos.session'].search_count(cr, uid, [('state', '!=', 'closed')], context=context): raise osv.except_osv(_('Error!'), _('You cannot delete a product saleable in point of sale while a session is still opened.')) return super(product_template, self).unlink(cr, uid, ids, context=context) class res_partner(osv.osv): _inherit = 'res.partner' def create_from_ui(self, cr, uid, partner, context=None): """ create or modify a partner from the point of sale ui. partner contains the partner's fields. 
""" #image is a dataurl, get the data after the comma if partner.get('image',False): img = partner['image'].split(',')[1] partner['image'] = img if partner.get('id',False): # Modifying existing partner partner_id = partner['id'] del partner['id'] self.write(cr, uid, [partner_id], partner, context=context) else: partner_id = self.create(cr, uid, partner, context=context) return partner_id # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
# codeparrot/github-code-clean
import datetime
import pickle
from io import StringIO
from operator import attrgetter

from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core import management
from django.db import DEFAULT_DB_ALIAS, connections, router, transaction
from django.db.models import signals
from django.db.utils import ConnectionRouter
from django.test import SimpleTestCase, TestCase, override_settings

from .models import Book, Person, Pet, Review, UserProfile
from .routers import AuthRouter, TestRouter, WriteRouter


class QueryTestCase(TestCase):
    multi_db = True

    def test_db_selection(self):
        "Querysets will use the default database by default"
        self.assertEqual(Book.objects.db, DEFAULT_DB_ALIAS)
        self.assertEqual(Book.objects.all().db, DEFAULT_DB_ALIAS)
        self.assertEqual(Book.objects.using('other').db, 'other')
        self.assertEqual(Book.objects.db_manager('other').db, 'other')
        self.assertEqual(Book.objects.db_manager('other').all().db, 'other')

    def test_default_creation(self):
        "Objects created on the default database don't leak onto other databases"
        # Create a book on the default database using create()
        Book.objects.create(title="Pro Django", published=datetime.date(2008, 12, 16))

        # Create a book on the default database using a save
        dive = Book()
        dive.title = "Dive into Python"
        dive.published = datetime.date(2009, 5, 4)
        dive.save()

        # Book exists on the default database, but not on other database
        try:
            Book.objects.get(title="Pro Django")
            Book.objects.using('default').get(title="Pro Django")
        except Book.DoesNotExist:
            self.fail('"Pro Django" should exist on default database')
        with self.assertRaises(Book.DoesNotExist):
            Book.objects.using('other').get(title="Pro Django")

        try:
            Book.objects.get(title="Dive into Python")
            Book.objects.using('default').get(title="Dive into Python")
        except Book.DoesNotExist:
            self.fail('"Dive into Python" should exist on default database')
        with self.assertRaises(Book.DoesNotExist):
            Book.objects.using('other').get(title="Dive into Python")

    def test_other_creation(self):
        "Objects created on another database don't leak onto the default database"
        # Create a book on the second database
        Book.objects.using('other').create(title="Pro Django", published=datetime.date(2008, 12, 16))

        # Create a book on the second database using a save
        dive = Book()
        dive.title = "Dive into Python"
        dive.published = datetime.date(2009, 5, 4)
        dive.save(using='other')

        # Book exists on the other database, but not on the default database
        try:
            Book.objects.using('other').get(title="Pro Django")
        except Book.DoesNotExist:
            self.fail('"Pro Django" should exist on other database')
        with self.assertRaises(Book.DoesNotExist):
            Book.objects.get(title="Pro Django")
        with self.assertRaises(Book.DoesNotExist):
            Book.objects.using('default').get(title="Pro Django")

        try:
            Book.objects.using('other').get(title="Dive into Python")
        except Book.DoesNotExist:
            self.fail('"Dive into Python" should exist on other database')
        with self.assertRaises(Book.DoesNotExist):
            Book.objects.get(title="Dive into Python")
        with self.assertRaises(Book.DoesNotExist):
            Book.objects.using('default').get(title="Dive into Python")

    def test_refresh(self):
        dive = Book(title="Dive into Python", published=datetime.date(2009, 5, 4))
        dive.save(using='other')
        dive2 = Book.objects.using('other').get()
        dive2.title = "Dive into Python (on default)"
        dive2.save(using='default')
        dive.refresh_from_db()
        self.assertEqual(dive.title, "Dive into Python")
        dive.refresh_from_db(using='default')
        self.assertEqual(dive.title, "Dive into Python (on default)")
        self.assertEqual(dive._state.db, "default")

    def test_basic_queries(self):
        "Queries are constrained to a single database"
        dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4))

        dive = Book.objects.using('other').get(published=datetime.date(2009, 5, 4))
        self.assertEqual(dive.title, "Dive into Python")
        with self.assertRaises(Book.DoesNotExist):
            Book.objects.using('default').get(published=datetime.date(2009, 5, 4))

        dive = Book.objects.using('other').get(title__icontains="dive")
        self.assertEqual(dive.title, "Dive into Python")
        with self.assertRaises(Book.DoesNotExist):
            Book.objects.using('default').get(title__icontains="dive")

        dive = Book.objects.using('other').get(title__iexact="dive INTO python")
        self.assertEqual(dive.title, "Dive into Python")
        with self.assertRaises(Book.DoesNotExist):
            Book.objects.using('default').get(title__iexact="dive INTO python")

        dive = Book.objects.using('other').get(published__year=2009)
        self.assertEqual(dive.title, "Dive into Python")
        self.assertEqual(dive.published, datetime.date(2009, 5, 4))
        with self.assertRaises(Book.DoesNotExist):
            Book.objects.using('default').get(published__year=2009)

        years = Book.objects.using('other').dates('published', 'year')
        self.assertEqual([o.year for o in years], [2009])
        years = Book.objects.using('default').dates('published', 'year')
        self.assertEqual([o.year for o in years], [])

        months = Book.objects.using('other').dates('published', 'month')
        self.assertEqual([o.month for o in months], [5])
        months = Book.objects.using('default').dates('published', 'month')
        self.assertEqual([o.month for o in months], [])

    def test_m2m_separation(self):
        "M2M fields are constrained to a single database"
        # Create a book and author on the default database
        pro = Book.objects.create(title="Pro Django", published=datetime.date(2008, 12, 16))
        marty = Person.objects.create(name="Marty Alchin")

        # Create a book and author on the other database
        dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4))
        mark = Person.objects.using('other').create(name="Mark Pilgrim")

        # Save the author relations
        pro.authors.set([marty])
        dive.authors.set([mark])

        # Inspect the m2m tables directly.
        # There should be 1 entry in each database
        self.assertEqual(Book.authors.through.objects.using('default').count(), 1)
        self.assertEqual(Book.authors.through.objects.using('other').count(), 1)

        # Queries work across m2m joins
        self.assertEqual(
            list(Book.objects.using('default').filter(authors__name='Marty Alchin').values_list('title', flat=True)),
            ['Pro Django']
        )
        self.assertEqual(
            list(Book.objects.using('other').filter(authors__name='Marty Alchin').values_list('title', flat=True)),
            []
        )
        self.assertEqual(
            list(Book.objects.using('default').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
            []
        )
        self.assertEqual(
            list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
            ['Dive into Python']
        )

        # Reget the objects to clear caches
        dive = Book.objects.using('other').get(title="Dive into Python")
        mark = Person.objects.using('other').get(name="Mark Pilgrim")

        # Retrieve related object by descriptor. Related objects should be database-bound
        self.assertEqual(list(dive.authors.all().values_list('name', flat=True)), ['Mark Pilgrim'])
        self.assertEqual(list(mark.book_set.all().values_list('title', flat=True)), ['Dive into Python'])

    def test_m2m_forward_operations(self):
        "M2M forward manipulations are all constrained to a single DB"
        # Create a book and author on the other database
        dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4))
        mark = Person.objects.using('other').create(name="Mark Pilgrim")

        # Save the author relations
        dive.authors.set([mark])

        # Add a second author
        john = Person.objects.using('other').create(name="John Smith")
        self.assertEqual(
            list(Book.objects.using('other').filter(authors__name='John Smith').values_list('title', flat=True)),
            []
        )

        dive.authors.add(john)
        self.assertEqual(
            list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
            ['Dive into Python']
        )
        self.assertEqual(
            list(Book.objects.using('other').filter(authors__name='John Smith').values_list('title', flat=True)),
            ['Dive into Python']
        )

        # Remove the second author
        dive.authors.remove(john)
        self.assertEqual(
            list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
            ['Dive into Python']
        )
        self.assertEqual(
            list(Book.objects.using('other').filter(authors__name='John Smith').values_list('title', flat=True)),
            []
        )

        # Clear all authors
        dive.authors.clear()
        self.assertEqual(
            list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
            []
        )
        self.assertEqual(
            list(Book.objects.using('other').filter(authors__name='John Smith').values_list('title', flat=True)),
            []
        )

        # Create an author through the m2m interface
        dive.authors.create(name='Jane Brown')
        self.assertEqual(
            list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
            []
        )
        self.assertEqual(
            list(Book.objects.using('other').filter(authors__name='Jane Brown').values_list('title', flat=True)),
            ['Dive into Python']
        )

    def test_m2m_reverse_operations(self):
        "M2M reverse manipulations are all constrained to a single DB"
        # Create a book and author on the other database
        dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4))
        mark = Person.objects.using('other').create(name="Mark Pilgrim")

        # Save the author relations
        dive.authors.set([mark])

        # Create a second book on the other database
        grease = Book.objects.using('other').create(title="Greasemonkey Hacks", published=datetime.date(2005, 11, 1))

        # Add a books to the m2m
        mark.book_set.add(grease)
        self.assertEqual(
            list(Person.objects.using('other').filter(book__title='Dive into Python').values_list('name', flat=True)),
            ['Mark Pilgrim']
        )
        self.assertEqual(
            list(
                Person.objects.using('other').filter(book__title='Greasemonkey Hacks').values_list('name', flat=True)
            ),
            ['Mark Pilgrim']
        )

        # Remove a book from the m2m
        mark.book_set.remove(grease)
        self.assertEqual(
            list(Person.objects.using('other').filter(book__title='Dive into Python').values_list('name', flat=True)),
            ['Mark Pilgrim']
        )
        self.assertEqual(
            list(
                Person.objects.using('other').filter(book__title='Greasemonkey Hacks').values_list('name', flat=True)
            ),
            []
        )

        # Clear the books associated with mark
        mark.book_set.clear()
        self.assertEqual(
            list(Person.objects.using('other').filter(book__title='Dive into Python').values_list('name', flat=True)),
            []
        )
        self.assertEqual(
            list(
                Person.objects.using('other').filter(book__title='Greasemonkey Hacks').values_list('name', flat=True)
            ),
            []
        )

        # Create a book through the m2m interface
        mark.book_set.create(title="Dive into HTML5", published=datetime.date(2020, 1, 1))
        self.assertEqual(
            list(Person.objects.using('other').filter(book__title='Dive into Python').values_list('name', flat=True)),
            []
        )
        self.assertEqual(
            list(Person.objects.using('other').filter(book__title='Dive into HTML5').values_list('name', flat=True)),
            ['Mark Pilgrim']
        )

    def test_m2m_cross_database_protection(self):
        "Operations that involve sharing M2M objects across databases raise an error"
        # Create a book and author on the default database
        pro = Book.objects.create(title="Pro Django", published=datetime.date(2008, 12, 16))
        marty = Person.objects.create(name="Marty Alchin")
        # Create a book and author on the other database
        dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4))
        mark = Person.objects.using('other').create(name="Mark Pilgrim")
        # Set a foreign key set with an object from a different database
        msg = (
            'Cannot assign "<Person: Marty Alchin>": the current database '
            'router prevents this relation.'
        )
        with self.assertRaisesMessage(ValueError, msg):
            with transaction.atomic(using='default'):
                marty.edited.set([pro, dive])

        # Add to an m2m with an object from a different database
        msg = (
            'Cannot add "<Book: Dive into Python>": instance is on '
            'database "default", value is on database "other"'
        )
        with self.assertRaisesMessage(ValueError, msg):
            with transaction.atomic(using='default'):
                marty.book_set.add(dive)

        # Set a m2m with an object from a different database
        with self.assertRaisesMessage(ValueError, msg):
            with transaction.atomic(using='default'):
                marty.book_set.set([pro, dive])

        # Add to a reverse m2m with an object from a different database
        msg = (
            'Cannot add "<Person: Marty Alchin>": instance is on '
            'database "other", value is on database "default"'
        )
        with self.assertRaisesMessage(ValueError, msg):
            with transaction.atomic(using='other'):
                dive.authors.add(marty)

        # Set a reverse m2m with an object from a different database
        with self.assertRaisesMessage(ValueError, msg):
            with transaction.atomic(using='other'):
                dive.authors.set([mark, marty])

    def test_m2m_deletion(self):
        "Cascaded deletions of m2m relations issue queries on the right database"
        # Create a book and author on the other database
        dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4))
        mark = Person.objects.using('other').create(name="Mark Pilgrim")
        dive.authors.set([mark])

        # Check the initial state
        self.assertEqual(Person.objects.using('default').count(), 0)
        self.assertEqual(Book.objects.using('default').count(), 0)
        self.assertEqual(Book.authors.through.objects.using('default').count(), 0)

        self.assertEqual(Person.objects.using('other').count(), 1)
        self.assertEqual(Book.objects.using('other').count(), 1)
        self.assertEqual(Book.authors.through.objects.using('other').count(), 1)

        # Delete the object on the other database
        dive.delete(using='other')

        self.assertEqual(Person.objects.using('default').count(), 0)
        self.assertEqual(Book.objects.using('default').count(), 0)
        self.assertEqual(Book.authors.through.objects.using('default').count(), 0)

        # The person still exists ...
        self.assertEqual(Person.objects.using('other').count(), 1)
        # ... but the book has been deleted
        self.assertEqual(Book.objects.using('other').count(), 0)
        # ... and the relationship object has also been deleted.
        self.assertEqual(Book.authors.through.objects.using('other').count(), 0)

        # Now try deletion in the reverse direction. Set up the relation again
        dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4))
        dive.authors.set([mark])

        # Check the initial state
        self.assertEqual(Person.objects.using('default').count(), 0)
        self.assertEqual(Book.objects.using('default').count(), 0)
        self.assertEqual(Book.authors.through.objects.using('default').count(), 0)

        self.assertEqual(Person.objects.using('other').count(), 1)
        self.assertEqual(Book.objects.using('other').count(), 1)
        self.assertEqual(Book.authors.through.objects.using('other').count(), 1)

        # Delete the object on the other database
        mark.delete(using='other')

        self.assertEqual(Person.objects.using('default').count(), 0)
        self.assertEqual(Book.objects.using('default').count(), 0)
        self.assertEqual(Book.authors.through.objects.using('default').count(), 0)

        # The person has been deleted ...
        self.assertEqual(Person.objects.using('other').count(), 0)
        # ... but the book still exists
        self.assertEqual(Book.objects.using('other').count(), 1)
        # ... and the relationship object has been deleted.
        self.assertEqual(Book.authors.through.objects.using('other').count(), 0)

    def test_foreign_key_separation(self):
        "FK fields are constrained to a single database"
        # Create a book and author on the default database
        pro = Book.objects.create(title="Pro Django", published=datetime.date(2008, 12, 16))
        george = Person.objects.create(name="George Vilches")

        # Create a book and author on the other database
        dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4))
        chris = Person.objects.using('other').create(name="Chris Mills")

        # Save the author's favorite books
        pro.editor = george
        pro.save()

        dive.editor = chris
        dive.save()

        pro = Book.objects.using('default').get(title="Pro Django")
        self.assertEqual(pro.editor.name, "George Vilches")

        dive = Book.objects.using('other').get(title="Dive into Python")
        self.assertEqual(dive.editor.name, "Chris Mills")

        # Queries work across foreign key joins
        self.assertEqual(
            list(Person.objects.using('default').filter(edited__title='Pro Django').values_list('name', flat=True)),
            ['George Vilches']
        )
        self.assertEqual(
            list(Person.objects.using('other').filter(edited__title='Pro Django').values_list('name', flat=True)),
            []
        )
        self.assertEqual(
            list(
                Person.objects.using('default').filter(edited__title='Dive into Python').values_list('name', flat=True)
            ),
            []
        )
        self.assertEqual(
            list(
                Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)
            ),
            ['Chris Mills']
        )

        # Reget the objects to clear caches
        chris = Person.objects.using('other').get(name="Chris Mills")
        dive = Book.objects.using('other').get(title="Dive into Python")

        # Retrieve related object by descriptor. Related objects should be database-bound
        self.assertEqual(list(chris.edited.values_list('title', flat=True)), ['Dive into Python'])

    def test_foreign_key_reverse_operations(self):
        "FK reverse manipulations are all constrained to a single DB"
        dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4))
        chris = Person.objects.using('other').create(name="Chris Mills")

        # Save the author relations
        dive.editor = chris
        dive.save()

        # Add a second book edited by chris
        html5 = Book.objects.using('other').create(title="Dive into HTML5", published=datetime.date(2010, 3, 15))
        self.assertEqual(
            list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)),
            []
        )

        chris.edited.add(html5)
        self.assertEqual(
            list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)),
            ['Chris Mills']
        )
        self.assertEqual(
            list(
                Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)
            ),
            ['Chris Mills']
        )

        # Remove the second editor
        chris.edited.remove(html5)
        self.assertEqual(
            list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)),
            []
        )
        self.assertEqual(
            list(
                Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)
            ),
            ['Chris Mills']
        )

        # Clear all edited books
        chris.edited.clear()
        self.assertEqual(
            list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)),
            []
        )
        self.assertEqual(
            list(
                Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)
            ),
            []
        )

        # Create an author through the m2m interface
        chris.edited.create(title='Dive into Water', published=datetime.date(2010, 3, 15))
        self.assertEqual(
            list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)),
            []
        )
        self.assertEqual(
            list(Person.objects.using('other').filter(edited__title='Dive into Water').values_list('name', flat=True)),
            ['Chris Mills']
        )
        self.assertEqual(
            list(
                Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)
            ),
            []
        )

    def test_foreign_key_cross_database_protection(self):
        "Operations that involve sharing FK objects across databases raise an error"
        # Create a book and author on the default database
        pro = Book.objects.create(title="Pro Django", published=datetime.date(2008, 12, 16))
        marty = Person.objects.create(name="Marty Alchin")

        # Create a book and author on the other database
        dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4))

        # Set a foreign key with an object from a different database
        msg = (
            'Cannot assign "<Person: Marty Alchin>": the current database '
            'router prevents this relation.'
        )
        with self.assertRaisesMessage(ValueError, msg):
            dive.editor = marty

        # Set a foreign key set with an object from a different database
        with self.assertRaisesMessage(ValueError, msg):
            with transaction.atomic(using='default'):
                marty.edited.set([pro, dive])

        # Add to a foreign key set with an object from a different database
        with self.assertRaisesMessage(ValueError, msg):
            with transaction.atomic(using='default'):
                marty.edited.add(dive)

    def test_foreign_key_deletion(self):
        "Cascaded deletions of Foreign Key relations issue queries on the right database"
        mark = Person.objects.using('other').create(name="Mark Pilgrim")
        Pet.objects.using('other').create(name="Fido", owner=mark)

        # Check the initial state
        self.assertEqual(Person.objects.using('default').count(), 0)
        self.assertEqual(Pet.objects.using('default').count(), 0)

        self.assertEqual(Person.objects.using('other').count(), 1)
        self.assertEqual(Pet.objects.using('other').count(), 1)

        # Delete the person object, which will cascade onto the pet
        mark.delete(using='other')

        self.assertEqual(Person.objects.using('default').count(), 0)
        self.assertEqual(Pet.objects.using('default').count(), 0)

        # Both the pet and the person have been deleted from the right database
        self.assertEqual(Person.objects.using('other').count(), 0)
        self.assertEqual(Pet.objects.using('other').count(), 0)

    def test_foreign_key_validation(self):
        "ForeignKey.validate() uses the correct database"
        mickey = Person.objects.using('other').create(name="Mickey")
        pluto = Pet.objects.using('other').create(name="Pluto", owner=mickey)
        self.assertIsNone(pluto.full_clean())

    # Any router that accesses `model` in db_for_read() works here.
    @override_settings(DATABASE_ROUTERS=[AuthRouter()])
    def test_foreign_key_validation_with_router(self):
        """
        ForeignKey.validate() passes `model` to db_for_read() even if
        model_instance=None.
        """
        mickey = Person.objects.create(name="Mickey")
        owner_field = Pet._meta.get_field('owner')
        self.assertEqual(owner_field.clean(mickey.pk, None), mickey.pk)

    def test_o2o_separation(self):
        "OneToOne fields are constrained to a single database"
        # Create a user and profile on the default database
        alice = User.objects.db_manager('default').create_user('alice', 'alice@example.com')
        alice_profile = UserProfile.objects.using('default').create(user=alice, flavor='chocolate')

        # Create a user and profile on the other database
        bob = User.objects.db_manager('other').create_user('bob', 'bob@example.com')
        bob_profile = UserProfile.objects.using('other').create(user=bob, flavor='crunchy frog')

        # Retrieve related objects; queries should be database constrained
        alice = User.objects.using('default').get(username="alice")
        self.assertEqual(alice.userprofile.flavor, "chocolate")

        bob = User.objects.using('other').get(username="bob")
        self.assertEqual(bob.userprofile.flavor, "crunchy frog")

        # Queries work across joins
        self.assertEqual(
            list(
                User.objects.using('default')
                .filter(userprofile__flavor='chocolate').values_list('username', flat=True)
            ),
            ['alice']
        )
        self.assertEqual(
            list(
                User.objects.using('other')
                .filter(userprofile__flavor='chocolate').values_list('username', flat=True)
            ),
            []
        )
        self.assertEqual(
            list(
                User.objects.using('default')
                .filter(userprofile__flavor='crunchy frog').values_list('username', flat=True)
            ),
            []
        )
        self.assertEqual(
            list(
                User.objects.using('other')
                .filter(userprofile__flavor='crunchy frog').values_list('username', flat=True)
            ),
            ['bob']
        )

        # Reget the objects to clear caches
        alice_profile = UserProfile.objects.using('default').get(flavor='chocolate')
        bob_profile = UserProfile.objects.using('other').get(flavor='crunchy frog')

        # Retrieve related object by descriptor. Related objects should be database-bound
        self.assertEqual(alice_profile.user.username, 'alice')
        self.assertEqual(bob_profile.user.username, 'bob')

    def test_o2o_cross_database_protection(self):
        "Operations that involve sharing FK objects across databases raise an error"
        # Create a user and profile on the default database
        alice = User.objects.db_manager('default').create_user('alice', 'alice@example.com')

        # Create a user and profile on the other database
        bob = User.objects.db_manager('other').create_user('bob', 'bob@example.com')

        # Set a one-to-one relation with an object from a different database
        alice_profile = UserProfile.objects.using('default').create(user=alice, flavor='chocolate')
        msg = (
            'Cannot assign "%r": the current database router prevents this '
            'relation.' % alice_profile
        )
        with self.assertRaisesMessage(ValueError, msg):
            bob.userprofile = alice_profile

        # BUT! if you assign a FK object when the base object hasn't
        # been saved yet, you implicitly assign the database for the
        # base object.
        bob_profile = UserProfile.objects.using('other').create(user=bob, flavor='crunchy frog')

        new_bob_profile = UserProfile(flavor="spring surprise")

        # assigning a profile requires an explicit pk as the object isn't saved
        charlie = User(pk=51, username='charlie', email='charlie@example.com')
        charlie.set_unusable_password()

        # initially, no db assigned
        self.assertIsNone(new_bob_profile._state.db)
        self.assertIsNone(charlie._state.db)

        # old object comes from 'other', so the new object is set to use 'other'...
        new_bob_profile.user = bob
        charlie.userprofile = bob_profile
        self.assertEqual(new_bob_profile._state.db, 'other')
        self.assertEqual(charlie._state.db, 'other')

        # ... but it isn't saved yet
        self.assertEqual(list(User.objects.using('other').values_list('username', flat=True)), ['bob'])
        self.assertEqual(list(UserProfile.objects.using('other').values_list('flavor', flat=True)), ['crunchy frog'])

        # When saved (no using required), new objects goes to 'other'
        charlie.save()
        bob_profile.save()
        new_bob_profile.save()
        self.assertEqual(list(User.objects.using('default').values_list('username', flat=True)), ['alice'])
        self.assertEqual(list(User.objects.using('other').values_list('username', flat=True)), ['bob', 'charlie'])
        self.assertEqual(list(UserProfile.objects.using('default').values_list('flavor', flat=True)), ['chocolate'])
        self.assertEqual(
            list(UserProfile.objects.using('other').values_list('flavor', flat=True)),
            ['crunchy frog', 'spring surprise']
        )

        # This also works if you assign the O2O relation in the constructor
        denise = User.objects.db_manager('other').create_user('denise', 'denise@example.com')
        denise_profile = UserProfile(flavor="tofu", user=denise)

        self.assertEqual(denise_profile._state.db, 'other')
        # ...
but it isn't saved yet self.assertEqual(list(UserProfile.objects.using('default').values_list('flavor', flat=True)), ['chocolate']) self.assertEqual( list(UserProfile.objects.using('other').values_list('flavor', flat=True)), ['crunchy frog', 'spring surprise'] ) # When saved, the new profile goes to 'other' denise_profile.save() self.assertEqual(list(UserProfile.objects.using('default').values_list('flavor', flat=True)), ['chocolate']) self.assertEqual( list(UserProfile.objects.using('other').values_list('flavor', flat=True)), ['crunchy frog', 'spring surprise', 'tofu'] ) def test_generic_key_separation(self): "Generic fields are constrained to a single database" # Create a book and author on the default database pro = Book.objects.create(title="Pro Django", published=datetime.date(2008, 12, 16)) review1 = Review.objects.create(source="Python Monthly", content_object=pro) # Create a book and author on the other database dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4)) review2 = Review.objects.using('other').create(source="Python Weekly", content_object=dive) review1 = Review.objects.using('default').get(source="Python Monthly") self.assertEqual(review1.content_object.title, "Pro Django") review2 = Review.objects.using('other').get(source="Python Weekly") self.assertEqual(review2.content_object.title, "Dive into Python") # Reget the objects to clear caches dive = Book.objects.using('other').get(title="Dive into Python") # Retrieve related object by descriptor. 
Related objects should be database-bound self.assertEqual(list(dive.reviews.all().values_list('source', flat=True)), ['Python Weekly']) def test_generic_key_reverse_operations(self): "Generic reverse manipulations are all constrained to a single DB" dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4)) temp = Book.objects.using('other').create(title="Temp", published=datetime.date(2009, 5, 4)) review1 = Review.objects.using('other').create(source="Python Weekly", content_object=dive) review2 = Review.objects.using('other').create(source="Python Monthly", content_object=temp) self.assertEqual( list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)), [] ) self.assertEqual( list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)), ['Python Weekly'] ) # Add a second review dive.reviews.add(review2) self.assertEqual( list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)), [] ) self.assertEqual( list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)), ['Python Monthly', 'Python Weekly'] ) # Remove the second author dive.reviews.remove(review1) self.assertEqual( list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)), [] ) self.assertEqual( list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)), ['Python Monthly'] ) # Clear all reviews dive.reviews.clear() self.assertEqual( list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)), [] ) self.assertEqual( list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)), [] ) # Create an author through the generic interface dive.reviews.create(source='Python Daily') self.assertEqual( list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', 
flat=True)), [] ) self.assertEqual( list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)), ['Python Daily'] ) def test_generic_key_cross_database_protection(self): "Operations that involve sharing generic key objects across databases raise an error" # Create a book and author on the default database pro = Book.objects.create(title="Pro Django", published=datetime.date(2008, 12, 16)) review1 = Review.objects.create(source="Python Monthly", content_object=pro) # Create a book and author on the other database dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4)) Review.objects.using('other').create(source="Python Weekly", content_object=dive) # Set a foreign key with an object from a different database msg = ( 'Cannot assign "<ContentType: book>": the current database router ' 'prevents this relation.' ) with self.assertRaisesMessage(ValueError, msg): review1.content_object = dive # Add to a foreign key set with an object from a different database msg = ( "<Review: Python Monthly> instance isn't saved. " "Use bulk=False or save the object first." ) with self.assertRaisesMessage(ValueError, msg): with transaction.atomic(using='other'): dive.reviews.add(review1) # BUT! if you assign a FK object when the base object hasn't # been saved yet, you implicitly assign the database for the # base object. review3 = Review(source="Python Daily") # initially, no db assigned self.assertIsNone(review3._state.db) # Dive comes from 'other', so review3 is set to use 'other'... review3.content_object = dive self.assertEqual(review3._state.db, 'other') # ... 
but it isn't saved yet self.assertEqual( list(Review.objects.using('default').filter(object_id=pro.pk).values_list('source', flat=True)), ['Python Monthly'] ) self.assertEqual( list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)), ['Python Weekly'] ) # When saved, John goes to 'other' review3.save() self.assertEqual( list(Review.objects.using('default').filter(object_id=pro.pk).values_list('source', flat=True)), ['Python Monthly'] ) self.assertEqual( list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)), ['Python Daily', 'Python Weekly'] ) def test_generic_key_deletion(self): "Cascaded deletions of Generic Key relations issue queries on the right database" dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4)) Review.objects.using('other').create(source="Python Weekly", content_object=dive) # Check the initial state self.assertEqual(Book.objects.using('default').count(), 0) self.assertEqual(Review.objects.using('default').count(), 0) self.assertEqual(Book.objects.using('other').count(), 1) self.assertEqual(Review.objects.using('other').count(), 1) # Delete the Book object, which will cascade onto the pet dive.delete(using='other') self.assertEqual(Book.objects.using('default').count(), 0) self.assertEqual(Review.objects.using('default').count(), 0) # Both the pet and the person have been deleted from the right database self.assertEqual(Book.objects.using('other').count(), 0) self.assertEqual(Review.objects.using('other').count(), 0) def test_ordering(self): "get_next_by_XXX commands stick to a single database" Book.objects.create(title="Pro Django", published=datetime.date(2008, 12, 16)) dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4)) learn = Book.objects.using('other').create(title="Learning Python", published=datetime.date(2008, 7, 16)) 
self.assertEqual(learn.get_next_by_published().title, "Dive into Python") self.assertEqual(dive.get_previous_by_published().title, "Learning Python") def test_raw(self): "test the raw() method across databases" dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4)) val = Book.objects.db_manager("other").raw('SELECT id FROM multiple_database_book') self.assertQuerysetEqual(val, [dive.pk], attrgetter("pk")) val = Book.objects.raw('SELECT id FROM multiple_database_book').using('other') self.assertQuerysetEqual(val, [dive.pk], attrgetter("pk")) def test_select_related(self): "Database assignment is retained if an object is retrieved with select_related()" # Create a book and author on the other database mark = Person.objects.using('other').create(name="Mark Pilgrim") Book.objects.using('other').create( title="Dive into Python", published=datetime.date(2009, 5, 4), editor=mark, ) # Retrieve the Person using select_related() book = Book.objects.using('other').select_related('editor').get(title="Dive into Python") # The editor instance should have a db state self.assertEqual(book.editor._state.db, 'other') def test_subquery(self): """Make sure as_sql works with subqueries and primary/replica.""" sub = Person.objects.using('other').filter(name='fff') qs = Book.objects.filter(editor__in=sub) # When you call __str__ on the query object, it doesn't know about using # so it falls back to the default. If the subquery explicitly uses a # different database, an error should be raised. msg = ( "Subqueries aren't allowed across different databases. Force the " "inner query to be evaluated using `list(inner_query)`." 
) with self.assertRaisesMessage(ValueError, msg): str(qs.query) # Evaluating the query shouldn't work, either with self.assertRaisesMessage(ValueError, msg): for obj in qs: pass def test_related_manager(self): "Related managers return managers, not querysets" mark = Person.objects.using('other').create(name="Mark Pilgrim") # extra_arg is removed by the BookManager's implementation of # create(); but the BookManager's implementation won't get called # unless edited returns a Manager, not a queryset mark.book_set.create(title="Dive into Python", published=datetime.date(2009, 5, 4), extra_arg=True) mark.book_set.get_or_create(title="Dive into Python", published=datetime.date(2009, 5, 4), extra_arg=True) mark.edited.create(title="Dive into Water", published=datetime.date(2009, 5, 4), extra_arg=True) mark.edited.get_or_create(title="Dive into Water", published=datetime.date(2009, 5, 4), extra_arg=True) class ConnectionRouterTestCase(SimpleTestCase): @override_settings(DATABASE_ROUTERS=[ 'multiple_database.tests.TestRouter', 'multiple_database.tests.WriteRouter']) def test_router_init_default(self): connection_router = ConnectionRouter() self.assertEqual([r.__class__.__name__ for r in connection_router.routers], ['TestRouter', 'WriteRouter']) def test_router_init_arg(self): connection_router = ConnectionRouter([ 'multiple_database.tests.TestRouter', 'multiple_database.tests.WriteRouter' ]) self.assertEqual([r.__class__.__name__ for r in connection_router.routers], ['TestRouter', 'WriteRouter']) # Init with instances instead of strings connection_router = ConnectionRouter([TestRouter(), WriteRouter()]) self.assertEqual([r.__class__.__name__ for r in connection_router.routers], ['TestRouter', 'WriteRouter']) # Make the 'other' database appear to be a replica of the 'default' @override_settings(DATABASE_ROUTERS=[TestRouter()]) class RouterTestCase(TestCase): multi_db = True def test_db_selection(self): "Querysets obey the router for db suggestions" 
self.assertEqual(Book.objects.db, 'other') self.assertEqual(Book.objects.all().db, 'other') self.assertEqual(Book.objects.using('default').db, 'default') self.assertEqual(Book.objects.db_manager('default').db, 'default') self.assertEqual(Book.objects.db_manager('default').all().db, 'default') def test_migrate_selection(self): "Synchronization behavior is predictable" self.assertTrue(router.allow_migrate_model('default', User)) self.assertTrue(router.allow_migrate_model('default', Book)) self.assertTrue(router.allow_migrate_model('other', User)) self.assertTrue(router.allow_migrate_model('other', Book)) with override_settings(DATABASE_ROUTERS=[TestRouter(), AuthRouter()]): # Add the auth router to the chain. TestRouter is a universal # synchronizer, so it should have no effect. self.assertTrue(router.allow_migrate_model('default', User)) self.assertTrue(router.allow_migrate_model('default', Book)) self.assertTrue(router.allow_migrate_model('other', User)) self.assertTrue(router.allow_migrate_model('other', Book)) with override_settings(DATABASE_ROUTERS=[AuthRouter(), TestRouter()]): # Now check what happens if the router order is reversed. self.assertFalse(router.allow_migrate_model('default', User)) self.assertTrue(router.allow_migrate_model('default', Book)) self.assertTrue(router.allow_migrate_model('other', User)) self.assertTrue(router.allow_migrate_model('other', Book)) def test_partial_router(self): "A router can choose to implement a subset of methods" dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4)) # First check the baseline behavior. 
self.assertEqual(router.db_for_read(User), 'other') self.assertEqual(router.db_for_read(Book), 'other') self.assertEqual(router.db_for_write(User), 'default') self.assertEqual(router.db_for_write(Book), 'default') self.assertTrue(router.allow_relation(dive, dive)) self.assertTrue(router.allow_migrate_model('default', User)) self.assertTrue(router.allow_migrate_model('default', Book)) with override_settings(DATABASE_ROUTERS=[WriteRouter(), AuthRouter(), TestRouter()]): self.assertEqual(router.db_for_read(User), 'default') self.assertEqual(router.db_for_read(Book), 'other') self.assertEqual(router.db_for_write(User), 'writer') self.assertEqual(router.db_for_write(Book), 'writer') self.assertTrue(router.allow_relation(dive, dive)) self.assertFalse(router.allow_migrate_model('default', User)) self.assertTrue(router.allow_migrate_model('default', Book)) def test_database_routing(self): marty = Person.objects.using('default').create(name="Marty Alchin") pro = Book.objects.using('default').create(title="Pro Django", published=datetime.date(2008, 12, 16), editor=marty) pro.authors.set([marty]) # Create a book and author on the other database Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4)) # An update query will be routed to the default database Book.objects.filter(title='Pro Django').update(pages=200) with self.assertRaises(Book.DoesNotExist): # By default, the get query will be directed to 'other' Book.objects.get(title='Pro Django') # But the same query issued explicitly at a database will work. pro = Book.objects.using('default').get(title='Pro Django') # The update worked. self.assertEqual(pro.pages, 200) # An update query with an explicit using clause will be routed # to the requested database. 
Book.objects.using('other').filter(title='Dive into Python').update(pages=300) self.assertEqual(Book.objects.get(title='Dive into Python').pages, 300) # Related object queries stick to the same database # as the original object, regardless of the router self.assertEqual(list(pro.authors.values_list('name', flat=True)), ['Marty Alchin']) self.assertEqual(pro.editor.name, 'Marty Alchin') # get_or_create is a special case. The get needs to be targeted at # the write database in order to avoid potential transaction # consistency problems book, created = Book.objects.get_or_create(title="Pro Django") self.assertFalse(created) book, created = Book.objects.get_or_create(title="Dive Into Python", defaults={'published': datetime.date(2009, 5, 4)}) self.assertTrue(created) # Check the head count of objects self.assertEqual(Book.objects.using('default').count(), 2) self.assertEqual(Book.objects.using('other').count(), 1) # If a database isn't specified, the read database is used self.assertEqual(Book.objects.count(), 1) # A delete query will also be routed to the default database Book.objects.filter(pages__gt=150).delete() # The default database has lost the book. self.assertEqual(Book.objects.using('default').count(), 1) self.assertEqual(Book.objects.using('other').count(), 1) def test_invalid_set_foreign_key_assignment(self): marty = Person.objects.using('default').create(name="Marty Alchin") dive = Book.objects.using('other').create( title="Dive into Python", published=datetime.date(2009, 5, 4), ) # Set a foreign key set with an object from a different database msg = "<Book: Dive into Python> instance isn't saved. Use bulk=False or save the object first." 
with self.assertRaisesMessage(ValueError, msg): marty.edited.set([dive]) def test_foreign_key_cross_database_protection(self): "Foreign keys can cross databases if they two databases have a common source" # Create a book and author on the default database pro = Book.objects.using('default').create(title="Pro Django", published=datetime.date(2008, 12, 16)) marty = Person.objects.using('default').create(name="Marty Alchin") # Create a book and author on the other database dive = Book.objects.using('other').create(title="Dive into Python", published=datetime.date(2009, 5, 4)) mark = Person.objects.using('other').create(name="Mark Pilgrim") # Set a foreign key with an object from a different database dive.editor = marty # Database assignments of original objects haven't changed... self.assertEqual(marty._state.db, 'default') self.assertEqual(pro._state.db, 'default') self.assertEqual(dive._state.db, 'other') self.assertEqual(mark._state.db, 'other') # ... but they will when the affected object is saved. dive.save() self.assertEqual(dive._state.db, 'default') # ...and the source database now has a copy of any object saved Book.objects.using('default').get(title='Dive into Python').delete() # This isn't a real primary/replica database, so restore the original from other dive = Book.objects.using('other').get(title='Dive into Python') self.assertEqual(dive._state.db, 'other') # Set a foreign key set with an object from a different database marty.edited.set([pro, dive], bulk=False) # Assignment implies a save, so database assignments of original objects have changed... 
self.assertEqual(marty._state.db, 'default') self.assertEqual(pro._state.db, 'default') self.assertEqual(dive._state.db, 'default') self.assertEqual(mark._state.db, 'other') # ...and the source database now has a copy of any object saved Book.objects.using('default').get(title='Dive into Python').delete() # This isn't a real primary/replica database, so restore the original from other dive = Book.objects.using('other').get(title='Dive into Python') self.assertEqual(dive._state.db, 'other') # Add to a foreign key set with an object from a different database marty.edited.add(dive, bulk=False) # Add implies a save, so database assignments of original objects have changed... self.assertEqual(marty._state.db, 'default') self.assertEqual(pro._state.db, 'default') self.assertEqual(dive._state.db, 'default') self.assertEqual(mark._state.db, 'other') # ...and the source database now has a copy of any object saved Book.objects.using('default').get(title='Dive into Python').delete() # This isn't a real primary/replica database, so restore the original from other dive = Book.objects.using('other').get(title='Dive into Python') # If you assign a FK object when the base object hasn't # been saved yet, you implicitly assign the database for the # base object. chris = Person(name="Chris Mills") html5 = Book(title="Dive into HTML5", published=datetime.date(2010, 3, 15)) # initially, no db assigned self.assertIsNone(chris._state.db) self.assertIsNone(html5._state.db) # old object comes from 'other', so the new object is set to use the # source of 'other'... 
self.assertEqual(dive._state.db, 'other') chris.save() dive.editor = chris html5.editor = mark self.assertEqual(dive._state.db, 'other') self.assertEqual(mark._state.db, 'other') self.assertEqual(chris._state.db, 'default') self.assertEqual(html5._state.db, 'default') # This also works if you assign the FK in the constructor water = Book(title="Dive into Water", published=datetime.date(2001, 1, 1), editor=mark) self.assertEqual(water._state.db, 'default') # For the remainder of this test, create a copy of 'mark' in the # 'default' database to prevent integrity errors on backends that # don't defer constraints checks until the end of the transaction mark.save(using='default') # This moved 'mark' in the 'default' database, move it back in 'other' mark.save(using='other') self.assertEqual(mark._state.db, 'other') # If you create an object through a FK relation, it will be # written to the write database, even if the original object # was on the read database cheesecake = mark.edited.create(title='Dive into Cheesecake', published=datetime.date(2010, 3, 15)) self.assertEqual(cheesecake._state.db, 'default') # Same goes for get_or_create, regardless of whether getting or creating cheesecake, created = mark.edited.get_or_create( title='Dive into Cheesecake', published=datetime.date(2010, 3, 15), ) self.assertEqual(cheesecake._state.db, 'default') puddles, created = mark.edited.get_or_create(title='Dive into Puddles', published=datetime.date(2010, 3, 15)) self.assertEqual(puddles._state.db, 'default') def test_m2m_cross_database_protection(self): "M2M relations can cross databases if the database share a source" # Create books and authors on the inverse to the usual database pro = Book.objects.using('other').create(pk=1, title="Pro Django", published=datetime.date(2008, 12, 16)) marty = Person.objects.using('other').create(pk=1, name="Marty Alchin") dive = Book.objects.using('default').create(pk=2, title="Dive into Python", published=datetime.date(2009, 5, 4)) mark = 
Person.objects.using('default').create(pk=2, name="Mark Pilgrim") # Now save back onto the usual database. # This simulates primary/replica - the objects exist on both database, # but the _state.db is as it is for all other tests. pro.save(using='default') marty.save(using='default') dive.save(using='other') mark.save(using='other') # We have 2 of both types of object on both databases self.assertEqual(Book.objects.using('default').count(), 2) self.assertEqual(Book.objects.using('other').count(), 2) self.assertEqual(Person.objects.using('default').count(), 2) self.assertEqual(Person.objects.using('other').count(), 2) # Set a m2m set with an object from a different database marty.book_set.set([pro, dive]) # Database assignments don't change self.assertEqual(marty._state.db, 'default') self.assertEqual(pro._state.db, 'default') self.assertEqual(dive._state.db, 'other') self.assertEqual(mark._state.db, 'other') # All m2m relations should be saved on the default database self.assertEqual(Book.authors.through.objects.using('default').count(), 2) self.assertEqual(Book.authors.through.objects.using('other').count(), 0) # Reset relations Book.authors.through.objects.using('default').delete() # Add to an m2m with an object from a different database marty.book_set.add(dive) # Database assignments don't change self.assertEqual(marty._state.db, 'default') self.assertEqual(pro._state.db, 'default') self.assertEqual(dive._state.db, 'other') self.assertEqual(mark._state.db, 'other') # All m2m relations should be saved on the default database self.assertEqual(Book.authors.through.objects.using('default').count(), 1) self.assertEqual(Book.authors.through.objects.using('other').count(), 0) # Reset relations Book.authors.through.objects.using('default').delete() # Set a reverse m2m with an object from a different database dive.authors.set([mark, marty]) # Database assignments don't change self.assertEqual(marty._state.db, 'default') self.assertEqual(pro._state.db, 'default') 
self.assertEqual(dive._state.db, 'other') self.assertEqual(mark._state.db, 'other') # All m2m relations should be saved on the default database self.assertEqual(Book.authors.through.objects.using('default').count(), 2) self.assertEqual(Book.authors.through.objects.using('other').count(), 0) # Reset relations Book.authors.through.objects.using('default').delete() self.assertEqual(Book.authors.through.objects.using('default').count(), 0) self.assertEqual(Book.authors.through.objects.using('other').count(), 0) # Add to a reverse m2m with an object from a different database dive.authors.add(marty) # Database assignments don't change self.assertEqual(marty._state.db, 'default') self.assertEqual(pro._state.db, 'default') self.assertEqual(dive._state.db, 'other') self.assertEqual(mark._state.db, 'other') # All m2m relations should be saved on the default database self.assertEqual(Book.authors.through.objects.using('default').count(), 1) self.assertEqual(Book.authors.through.objects.using('other').count(), 0) # If you create an object through a M2M relation, it will be # written to the write database, even if the original object # was on the read database alice = dive.authors.create(name='Alice') self.assertEqual(alice._state.db, 'default') # Same goes for get_or_create, regardless of whether getting or creating alice, created = dive.authors.get_or_create(name='Alice') self.assertEqual(alice._state.db, 'default') bob, created = dive.authors.get_or_create(name='Bob') self.assertEqual(bob._state.db, 'default') def test_o2o_cross_database_protection(self): "Operations that involve sharing FK objects across databases raise an error" # Create a user and profile on the default database alice = User.objects.db_manager('default').create_user('alice', 'alice@example.com') # Create a user and profile on the other database bob = User.objects.db_manager('other').create_user('bob', 'bob@example.com') # Set a one-to-one relation with an object from a different database alice_profile = 
UserProfile.objects.create(user=alice, flavor='chocolate') bob.userprofile = alice_profile # Database assignments of original objects haven't changed... self.assertEqual(alice._state.db, 'default') self.assertEqual(alice_profile._state.db, 'default') self.assertEqual(bob._state.db, 'other') # ... but they will when the affected object is saved. bob.save() self.assertEqual(bob._state.db, 'default') def test_generic_key_cross_database_protection(self): "Generic Key operations can span databases if they share a source" # Create a book and author on the default database pro = Book.objects.using( 'default').create(title="Pro Django", published=datetime.date(2008, 12, 16)) review1 = Review.objects.using( 'default').create(source="Python Monthly", content_object=pro) # Create a book and author on the other database dive = Book.objects.using( 'other').create(title="Dive into Python", published=datetime.date(2009, 5, 4)) review2 = Review.objects.using( 'other').create(source="Python Weekly", content_object=dive) # Set a generic foreign key with an object from a different database review1.content_object = dive # Database assignments of original objects haven't changed... self.assertEqual(pro._state.db, 'default') self.assertEqual(review1._state.db, 'default') self.assertEqual(dive._state.db, 'other') self.assertEqual(review2._state.db, 'other') # ... but they will when the affected object is saved. dive.save() self.assertEqual(review1._state.db, 'default') self.assertEqual(dive._state.db, 'default') # ...and the source database now has a copy of any object saved Book.objects.using('default').get(title='Dive into Python').delete() # This isn't a real primary/replica database, so restore the original from other dive = Book.objects.using('other').get(title='Dive into Python') self.assertEqual(dive._state.db, 'other') # Add to a generic foreign key set with an object from a different database dive.reviews.add(review1) # Database assignments of original objects haven't changed... 
# NOTE(review): this span opens mid-way through a test method whose `def` line
# (and enclosing TestCase class header) lies above this chunk. The assertions
# below verify which database alias (`_state.db`) objects end up on when
# related objects are assigned across databases.
        self.assertEqual(pro._state.db, 'default')
        self.assertEqual(review1._state.db, 'default')
        self.assertEqual(dive._state.db, 'other')
        self.assertEqual(review2._state.db, 'other')

        # ... but they will when the affected object is saved.
        dive.save()
        self.assertEqual(dive._state.db, 'default')

        # ...and the source database now has a copy of any object saved
        Book.objects.using('default').get(title='Dive into Python').delete()

        # BUT! if you assign a FK object when the base object hasn't
        # been saved yet, you implicitly assign the database for the
        # base object.
        review3 = Review(source="Python Daily")
        # initially, no db assigned
        self.assertIsNone(review3._state.db)

        # Dive comes from 'other', so review3 is set to use the source of 'other'...
        review3.content_object = dive
        self.assertEqual(review3._state.db, 'default')

        # If you create an object through a M2M relation, it will be
        # written to the write database, even if the original object
        # was on the read database
        dive = Book.objects.using('other').get(title='Dive into Python')
        nyt = dive.reviews.create(source="New York Times", content_object=dive)
        self.assertEqual(nyt._state.db, 'default')

    def test_m2m_managers(self):
        "M2M relations are represented by managers, and can be controlled like managers"
        pro = Book.objects.using('other').create(pk=1, title="Pro Django",
                                                 published=datetime.date(2008, 12, 16))
        marty = Person.objects.using('other').create(pk=1, name="Marty Alchin")

        # Forward M2M manager inherits the instance's db; db_manager() overrides it.
        self.assertEqual(pro.authors.db, 'other')
        self.assertEqual(pro.authors.db_manager('default').db, 'default')
        self.assertEqual(pro.authors.db_manager('default').all().db, 'default')

        # Reverse M2M manager behaves the same way.
        self.assertEqual(marty.book_set.db, 'other')
        self.assertEqual(marty.book_set.db_manager('default').db, 'default')
        self.assertEqual(marty.book_set.db_manager('default').all().db, 'default')

    def test_foreign_key_managers(self):
        "FK reverse relations are represented by managers, and can be controlled like managers"
        marty = Person.objects.using('other').create(pk=1, name="Marty Alchin")
        Book.objects.using('other').create(pk=1, title="Pro Django",
                                           published=datetime.date(2008, 12, 16),
                                           editor=marty)

        self.assertEqual(marty.edited.db, 'other')
        self.assertEqual(marty.edited.db_manager('default').db, 'default')
        self.assertEqual(marty.edited.db_manager('default').all().db, 'default')

    def test_generic_key_managers(self):
        "Generic key relations are represented by managers, and can be controlled like managers"
        pro = Book.objects.using('other').create(title="Pro Django",
                                                 published=datetime.date(2008, 12, 16))
        Review.objects.using('other').create(source="Python Monthly", content_object=pro)

        self.assertEqual(pro.reviews.db, 'other')
        self.assertEqual(pro.reviews.db_manager('default').db, 'default')
        self.assertEqual(pro.reviews.db_manager('default').all().db, 'default')

    def test_subquery(self):
        """Make sure as_sql works with subqueries and primary/replica."""
        # Create a book and author on the other database
        mark = Person.objects.using('other').create(name="Mark Pilgrim")
        Book.objects.using('other').create(title="Dive into Python",
                                           published=datetime.date(2009, 5, 4),
                                           editor=mark)

        sub = Person.objects.filter(name='Mark Pilgrim')
        qs = Book.objects.filter(editor__in=sub)

        # When you call __str__ on the query object, it doesn't know about using
        # so it falls back to the default. Don't let routing instructions
        # force the subquery to an incompatible database.
        str(qs.query)

        # If you evaluate the query, it should work, running on 'other'
        self.assertEqual(list(qs.values_list('title', flat=True)), ['Dive into Python'])

    def test_deferred_models(self):
        # Saving a deferred ("only") instance to a different database must
        # carry over the deferred field's value (here, `published`).
        mark_def = Person.objects.using('default').create(name="Mark Pilgrim")
        mark_other = Person.objects.using('other').create(name="Mark Pilgrim")
        orig_b = Book.objects.using('other').create(title="Dive into Python",
                                                    published=datetime.date(2009, 5, 4),
                                                    editor=mark_other)
        b = Book.objects.using('other').only('title').get(pk=orig_b.pk)
        self.assertEqual(b.published, datetime.date(2009, 5, 4))
        b = Book.objects.using('other').only('title').get(pk=orig_b.pk)
        b.editor = mark_def
        b.save(using='default')
        self.assertEqual(Book.objects.using('default').get(pk=b.pk).published,
                         datetime.date(2009, 5, 4))


@override_settings(DATABASE_ROUTERS=[AuthRouter()])
class AuthTestCase(TestCase):
    # Exercises routing of django.contrib.auth models through AuthRouter.
    multi_db = True

    def test_auth_manager(self):
        "The methods on the auth manager obey database hints"
        # Create one user using default allocation policy
        User.objects.create_user('alice', 'alice@example.com')

        # Create another user, explicitly specifying the database
        User.objects.db_manager('default').create_user('bob', 'bob@example.com')

        # The second user only exists on the other database
        alice = User.objects.using('other').get(username='alice')

        self.assertEqual(alice.username, 'alice')
        self.assertEqual(alice._state.db, 'other')

        with self.assertRaises(User.DoesNotExist):
            User.objects.using('default').get(username='alice')

        # The second user only exists on the default database
        bob = User.objects.using('default').get(username='bob')

        self.assertEqual(bob.username, 'bob')
        self.assertEqual(bob._state.db, 'default')

        with self.assertRaises(User.DoesNotExist):
            User.objects.using('other').get(username='bob')

        # That is... there is one user on each database
        self.assertEqual(User.objects.using('default').count(), 1)
        self.assertEqual(User.objects.using('other').count(), 1)

    def test_dumpdata(self):
        "dumpdata honors allow_migrate restrictions on the router"
        User.objects.create_user('alice', 'alice@example.com')
        User.objects.db_manager('default').create_user('bob', 'bob@example.com')

        # dumping the default database doesn't try to include auth because
        # allow_migrate prohibits auth on default
        new_io = StringIO()
        management.call_command('dumpdata', 'auth', format='json', database='default', stdout=new_io)
        command_output = new_io.getvalue().strip()
        self.assertEqual(command_output, '[]')

        # dumping the other database does include auth
        new_io = StringIO()
        management.call_command('dumpdata', 'auth', format='json', database='other', stdout=new_io)
        command_output = new_io.getvalue().strip()
        self.assertIn('"email": "alice@example.com"', command_output)


class AntiPetRouter:
    # A router that only expresses an opinion on migrate,
    # passing pets to the 'other' database

    def allow_migrate(self, db, app_label, model_name=None, **hints):
        if db == 'other':
            return model_name == 'pet'
        else:
            return model_name != 'pet'


class FixtureTestCase(TestCase):
    # Verifies loading of per-database fixtures under router control.
    multi_db = True
    fixtures = ['multidb-common', 'multidb']

    @override_settings(DATABASE_ROUTERS=[AntiPetRouter()])
    def test_fixture_loading(self):
        "Multi-db fixtures are loaded correctly"
        # "Pro Django" exists on the default database, but not on other database
        Book.objects.get(title="Pro Django")
        Book.objects.using('default').get(title="Pro Django")

        with self.assertRaises(Book.DoesNotExist):
            Book.objects.using('other').get(title="Pro Django")

        # "Dive into Python" exists on the default database, but not on other database
        Book.objects.using('other').get(title="Dive into Python")

        with self.assertRaises(Book.DoesNotExist):
            Book.objects.get(title="Dive into Python")
        with self.assertRaises(Book.DoesNotExist):
            Book.objects.using('default').get(title="Dive into Python")

        # "Definitive Guide" exists on the both databases
        Book.objects.get(title="The Definitive Guide to Django")
        Book.objects.using('default').get(title="The Definitive Guide to Django")
        Book.objects.using('other').get(title="The Definitive Guide to Django")

    @override_settings(DATABASE_ROUTERS=[AntiPetRouter()])
    def test_pseudo_empty_fixtures(self):
        """
        A fixture can contain entries, but lead to nothing in the database;
        this shouldn't raise an error (#14068).
        """
        new_io = StringIO()
        management.call_command('loaddata', 'pets', stdout=new_io, stderr=new_io)
        command_output = new_io.getvalue().strip()
        # No objects will actually be loaded
        self.assertEqual(command_output, "Installed 0 object(s) (of 2) from 1 fixture(s)")


class PickleQuerySetTestCase(TestCase):
    multi_db = True

    def test_pickling(self):
        # A pickled/unpickled queryset must remember which database it ran on.
        for db in connections:
            Book.objects.using(db).create(title='Dive into Python', published=datetime.date(2009, 5, 4))

        qs = Book.objects.all()
        self.assertEqual(qs.db, pickle.loads(pickle.dumps(qs)).db)


class DatabaseReceiver:
    """
    Used in the tests for the database argument in signals (#13552)
    """
    def __call__(self, signal, sender, **kwargs):
        # Record the `using` alias the signal was dispatched with.
        self._database = kwargs['using']


class WriteToOtherRouter:
    """
    A router that sends all writes to the other database.
    """
    def db_for_write(self, model, **hints):
        return "other"


class SignalTests(TestCase):
    # Checks that model/m2m signals report the database actually written to.
    multi_db = True

    def override_router(self):
        return override_settings(DATABASE_ROUTERS=[WriteToOtherRouter()])

    def test_database_arg_save_and_delete(self):
        """
        The pre/post_save signal contains the correct database.
        """
        # Make some signal receivers
        pre_save_receiver = DatabaseReceiver()
        post_save_receiver = DatabaseReceiver()

        pre_delete_receiver = DatabaseReceiver()
        post_delete_receiver = DatabaseReceiver()

        # Make model and connect receivers
        signals.pre_save.connect(sender=Person, receiver=pre_save_receiver)
        signals.post_save.connect(sender=Person, receiver=post_save_receiver)
        signals.pre_delete.connect(sender=Person, receiver=pre_delete_receiver)
        signals.post_delete.connect(sender=Person, receiver=post_delete_receiver)

        p = Person.objects.create(name='Darth Vader')

        # Save and test receivers got calls
        p.save()
        self.assertEqual(pre_save_receiver._database, DEFAULT_DB_ALIAS)
        self.assertEqual(post_save_receiver._database, DEFAULT_DB_ALIAS)

        # Delete, and test
        p.delete()
        self.assertEqual(pre_delete_receiver._database, DEFAULT_DB_ALIAS)
        self.assertEqual(post_delete_receiver._database, DEFAULT_DB_ALIAS)

        # Save again to a different database
        p.save(using="other")
        self.assertEqual(pre_save_receiver._database, "other")
        self.assertEqual(post_save_receiver._database, "other")

        # Delete, and test
        p.delete(using="other")
        self.assertEqual(pre_delete_receiver._database, "other")
        self.assertEqual(post_delete_receiver._database, "other")

        # Clean up: disconnect everything we connected above.
        signals.pre_save.disconnect(sender=Person, receiver=pre_save_receiver)
        signals.post_save.disconnect(sender=Person, receiver=post_save_receiver)
        signals.pre_delete.disconnect(sender=Person, receiver=pre_delete_receiver)
        signals.post_delete.disconnect(sender=Person, receiver=post_delete_receiver)

    def test_database_arg_m2m(self):
        """
        The m2m_changed signal has a correct database arg.
        """
        # Make a receiver
        receiver = DatabaseReceiver()
        # Connect it
        signals.m2m_changed.connect(receiver=receiver)

        # Create the models that will be used for the tests
        b = Book.objects.create(title="Pro Django",
                                published=datetime.date(2008, 12, 16))
        p = Person.objects.create(name="Marty Alchin")

        # Create a copy of the models on the 'other' database to prevent
        # integrity errors on backends that don't defer constraints checks
        Book.objects.using('other').create(pk=b.pk, title=b.title,
                                           published=b.published)
        Person.objects.using('other').create(pk=p.pk, name=p.name)

        # Test addition
        b.authors.add(p)
        self.assertEqual(receiver._database, DEFAULT_DB_ALIAS)

        with self.override_router():
            b.authors.add(p)
        self.assertEqual(receiver._database, "other")

        # Test removal
        b.authors.remove(p)
        self.assertEqual(receiver._database, DEFAULT_DB_ALIAS)

        with self.override_router():
            b.authors.remove(p)
        self.assertEqual(receiver._database, "other")

        # Test addition in reverse
        p.book_set.add(b)
        self.assertEqual(receiver._database, DEFAULT_DB_ALIAS)

        with self.override_router():
            p.book_set.add(b)
        self.assertEqual(receiver._database, "other")

        # Test clearing
        b.authors.clear()
        self.assertEqual(receiver._database, DEFAULT_DB_ALIAS)

        with self.override_router():
            b.authors.clear()
        self.assertEqual(receiver._database, "other")


class AttributeErrorRouter:
    "A router to test the exception handling of ConnectionRouter"
    def db_for_read(self, model, **hints):
        raise AttributeError

    def db_for_write(self, model, **hints):
        raise AttributeError


class RouterAttributeErrorTestCase(TestCase):
    # Router exceptions must propagate, not be swallowed by ConnectionRouter.
    multi_db = True

    def override_router(self):
        return override_settings(DATABASE_ROUTERS=[AttributeErrorRouter()])

    def test_attribute_error_read(self):
        "The AttributeError from AttributeErrorRouter bubbles up"
        b = Book.objects.create(title="Pro Django",
                                published=datetime.date(2008, 12, 16))
        with self.override_router():
            with self.assertRaises(AttributeError):
                Book.objects.get(pk=b.pk)

    def test_attribute_error_save(self):
        "The AttributeError from AttributeErrorRouter bubbles up"
        dive = Book()
        dive.title = "Dive into Python"
        dive.published = datetime.date(2009, 5, 4)
        with self.override_router():
            with self.assertRaises(AttributeError):
                dive.save()

    def test_attribute_error_delete(self):
        "The AttributeError from AttributeErrorRouter bubbles up"
        b = Book.objects.create(title="Pro Django",
                                published=datetime.date(2008, 12, 16))
        p = Person.objects.create(name="Marty Alchin")
        b.authors.set([p])
        b.editor = p
        with self.override_router():
            with self.assertRaises(AttributeError):
                b.delete()

    def test_attribute_error_m2m(self):
        "The AttributeError from AttributeErrorRouter bubbles up"
        b = Book.objects.create(title="Pro Django",
                                published=datetime.date(2008, 12, 16))
        p = Person.objects.create(name="Marty Alchin")
        with self.override_router():
            with self.assertRaises(AttributeError):
                b.authors.set([p])


class ModelMetaRouter:
    "A router to ensure model arguments are real model classes"
    def db_for_write(self, model, **hints):
        # A real model class always has `_meta`; anything else is a bug.
        if not hasattr(model, '_meta'):
            raise ValueError


@override_settings(DATABASE_ROUTERS=[ModelMetaRouter()])
class RouterModelArgumentTestCase(TestCase):
    multi_db = True

    def test_m2m_collection(self):
        b = Book.objects.create(title="Pro Django",
                                published=datetime.date(2008, 12, 16))
        p = Person.objects.create(name="Marty Alchin")
        # test add
        b.authors.add(p)
        # test remove
        b.authors.remove(p)
        # test clear
        b.authors.clear()
        # test setattr
        b.authors.set([p])
        # test M2M collection
        b.delete()

    def test_foreignkey_collection(self):
        person = Person.objects.create(name='Bob')
        Pet.objects.create(owner=person, name='Wart')
        # test related FK collection
        person.delete()


class SyncOnlyDefaultDatabaseRouter:
    def allow_migrate(self, db, app_label, **hints):
        return db == DEFAULT_DB_ALIAS


class MigrateTestCase(TestCase):

    # Limit memory usage when calling 'migrate'.
    available_apps = [
        'multiple_database',
        'django.contrib.auth',
        'django.contrib.contenttypes'
    ]
    multi_db = True

    def test_migrate_to_other_database(self):
        """Regression test for #16039: migrate with --database option."""
        cts = ContentType.objects.using('other').filter(app_label='multiple_database')

        count = cts.count()
        self.assertGreater(count, 0)

        cts.delete()
        management.call_command('migrate', verbosity=0, interactive=False, database='other')
        self.assertEqual(cts.count(), count)

    def test_migrate_to_other_database_with_router(self):
        """Regression test for #16039: migrate with --database option."""
        cts = ContentType.objects.using('other').filter(app_label='multiple_database')

        cts.delete()
        with override_settings(DATABASE_ROUTERS=[SyncOnlyDefaultDatabaseRouter()]):
            # Router forbids migrating anything to 'other', so nothing is recreated.
            management.call_command('migrate', verbosity=0, interactive=False, database='other')

        self.assertEqual(cts.count(), 0)


class RouterUsed(Exception):
    # Raised by WriteCheckRouter to capture the exact routing call arguments.
    WRITE = 'write'

    def __init__(self, mode, model, hints):
        self.mode = mode
        self.model = model
        self.hints = hints


class RouteForWriteTestCase(TestCase):
    # Each test triggers one write path and asserts which model and hints
    # were passed to db_for_write().
    multi_db = True

    class WriteCheckRouter:
        def db_for_write(self, model, **hints):
            raise RouterUsed(mode=RouterUsed.WRITE, model=model, hints=hints)

    def override_router(self):
        return override_settings(DATABASE_ROUTERS=[RouteForWriteTestCase.WriteCheckRouter()])

    def test_fk_delete(self):
        owner = Person.objects.create(name='Someone')
        pet = Pet.objects.create(name='fido', owner=owner)
        with self.assertRaises(RouterUsed) as cm:
            with self.override_router():
                pet.owner.delete()
        e = cm.exception
        self.assertEqual(e.mode, RouterUsed.WRITE)
        self.assertEqual(e.model, Person)
        self.assertEqual(e.hints, {'instance': owner})

    def test_reverse_fk_delete(self):
        owner = Person.objects.create(name='Someone')
        to_del_qs = owner.pet_set.all()
        with self.assertRaises(RouterUsed) as cm:
            with self.override_router():
                to_del_qs.delete()
        e = cm.exception
        self.assertEqual(e.mode, RouterUsed.WRITE)
        self.assertEqual(e.model, Pet)
        self.assertEqual(e.hints, {'instance': owner})

    def test_reverse_fk_get_or_create(self):
        owner = Person.objects.create(name='Someone')
        with self.assertRaises(RouterUsed) as cm:
            with self.override_router():
                owner.pet_set.get_or_create(name='fido')
        e = cm.exception
        self.assertEqual(e.mode, RouterUsed.WRITE)
        self.assertEqual(e.model, Pet)
        self.assertEqual(e.hints, {'instance': owner})

    def test_reverse_fk_update(self):
        owner = Person.objects.create(name='Someone')
        Pet.objects.create(name='fido', owner=owner)
        with self.assertRaises(RouterUsed) as cm:
            with self.override_router():
                owner.pet_set.update(name='max')
        e = cm.exception
        self.assertEqual(e.mode, RouterUsed.WRITE)
        self.assertEqual(e.model, Pet)
        self.assertEqual(e.hints, {'instance': owner})

    def test_m2m_add(self):
        auth = Person.objects.create(name='Someone')
        book = Book.objects.create(title="Pro Django",
                                   published=datetime.date(2008, 12, 16))
        with self.assertRaises(RouterUsed) as cm:
            with self.override_router():
                book.authors.add(auth)
        e = cm.exception
        self.assertEqual(e.mode, RouterUsed.WRITE)
        self.assertEqual(e.model, Book.authors.through)
        self.assertEqual(e.hints, {'instance': book})

    def test_m2m_clear(self):
        auth = Person.objects.create(name='Someone')
        book = Book.objects.create(title="Pro Django",
                                   published=datetime.date(2008, 12, 16))
        book.authors.add(auth)
        with self.assertRaises(RouterUsed) as cm:
            with self.override_router():
                book.authors.clear()
        e = cm.exception
        self.assertEqual(e.mode, RouterUsed.WRITE)
        self.assertEqual(e.model, Book.authors.through)
        self.assertEqual(e.hints, {'instance': book})

    def test_m2m_delete(self):
        auth = Person.objects.create(name='Someone')
        book = Book.objects.create(title="Pro Django",
                                   published=datetime.date(2008, 12, 16))
        book.authors.add(auth)
        with self.assertRaises(RouterUsed) as cm:
            with self.override_router():
                book.authors.all().delete()
        e = cm.exception
        self.assertEqual(e.mode, RouterUsed.WRITE)
        self.assertEqual(e.model, Person)
        self.assertEqual(e.hints, {'instance': book})

    def test_m2m_get_or_create(self):
        Person.objects.create(name='Someone')
        book = Book.objects.create(title="Pro Django",
                                   published=datetime.date(2008, 12, 16))
        with self.assertRaises(RouterUsed) as cm:
            with self.override_router():
                book.authors.get_or_create(name='Someone else')
        e = cm.exception
        self.assertEqual(e.mode, RouterUsed.WRITE)
        self.assertEqual(e.model, Book)
        self.assertEqual(e.hints, {'instance': book})

    def test_m2m_remove(self):
        auth = Person.objects.create(name='Someone')
        book = Book.objects.create(title="Pro Django",
                                   published=datetime.date(2008, 12, 16))
        book.authors.add(auth)
        with self.assertRaises(RouterUsed) as cm:
            with self.override_router():
                book.authors.remove(auth)
        e = cm.exception
        self.assertEqual(e.mode, RouterUsed.WRITE)
        self.assertEqual(e.model, Book.authors.through)
        self.assertEqual(e.hints, {'instance': book})

    def test_m2m_update(self):
        auth = Person.objects.create(name='Someone')
        book = Book.objects.create(title="Pro Django",
                                   published=datetime.date(2008, 12, 16))
        book.authors.add(auth)
        with self.assertRaises(RouterUsed) as cm:
            with self.override_router():
                book.authors.all().update(name='Different')
        e = cm.exception
        self.assertEqual(e.mode, RouterUsed.WRITE)
        self.assertEqual(e.model, Person)
        self.assertEqual(e.hints, {'instance': book})

    def test_reverse_m2m_add(self):
        auth = Person.objects.create(name='Someone')
        book = Book.objects.create(title="Pro Django",
                                   published=datetime.date(2008, 12, 16))
        with self.assertRaises(RouterUsed) as cm:
            with self.override_router():
                auth.book_set.add(book)
        e = cm.exception
        self.assertEqual(e.mode, RouterUsed.WRITE)
        self.assertEqual(e.model, Book.authors.through)
        self.assertEqual(e.hints, {'instance': auth})

    def test_reverse_m2m_clear(self):
        auth = Person.objects.create(name='Someone')
        book = Book.objects.create(title="Pro Django",
                                   published=datetime.date(2008, 12, 16))
        book.authors.add(auth)
        with self.assertRaises(RouterUsed) as cm:
            with self.override_router():
                auth.book_set.clear()
        e = cm.exception
        self.assertEqual(e.mode, RouterUsed.WRITE)
        self.assertEqual(e.model, Book.authors.through)
        self.assertEqual(e.hints, {'instance': auth})

    def test_reverse_m2m_delete(self):
        auth = Person.objects.create(name='Someone')
        book = Book.objects.create(title="Pro Django",
                                   published=datetime.date(2008, 12, 16))
        book.authors.add(auth)
        with self.assertRaises(RouterUsed) as cm:
            with self.override_router():
                auth.book_set.all().delete()
        e = cm.exception
        self.assertEqual(e.mode, RouterUsed.WRITE)
        self.assertEqual(e.model, Book)
        self.assertEqual(e.hints, {'instance': auth})

    def test_reverse_m2m_get_or_create(self):
        auth = Person.objects.create(name='Someone')
        Book.objects.create(title="Pro Django",
                            published=datetime.date(2008, 12, 16))
        with self.assertRaises(RouterUsed) as cm:
            with self.override_router():
                auth.book_set.get_or_create(title="New Book", published=datetime.datetime.now())
        e = cm.exception
        self.assertEqual(e.mode, RouterUsed.WRITE)
        self.assertEqual(e.model, Person)
        self.assertEqual(e.hints, {'instance': auth})

    def test_reverse_m2m_remove(self):
        auth = Person.objects.create(name='Someone')
        book = Book.objects.create(title="Pro Django",
                                   published=datetime.date(2008, 12, 16))
        book.authors.add(auth)
        with self.assertRaises(RouterUsed) as cm:
            with self.override_router():
                auth.book_set.remove(book)
        e = cm.exception
        self.assertEqual(e.mode, RouterUsed.WRITE)
        self.assertEqual(e.model, Book.authors.through)
        self.assertEqual(e.hints, {'instance': auth})

    def test_reverse_m2m_update(self):
        auth = Person.objects.create(name='Someone')
        book = Book.objects.create(title="Pro Django",
                                   published=datetime.date(2008, 12, 16))
        book.authors.add(auth)
        with self.assertRaises(RouterUsed) as cm:
            with self.override_router():
                auth.book_set.all().update(title='Different')
        e = cm.exception
        self.assertEqual(e.mode, RouterUsed.WRITE)
        self.assertEqual(e.model, Book)
        self.assertEqual(e.hints, {'instance': auth})
codeparrot/github-code-clean
# region Description
"""
test_network.py: Unit tests for Raw-packet network classes
Author: Vladimir Ivanov
License: MIT
Copyright 2020, Raw-packet Project
"""
# endregion

# region Import
from sys import path
from os.path import dirname, abspath
import unittest
# endregion

# region Authorship information
__author__ = 'Vladimir Ivanov'
__copyright__ = 'Copyright 2020, Raw-packet Project'
__credits__ = ['']
__license__ = 'MIT'
__version__ = '0.2.1'
__maintainer__ = 'Vladimir Ivanov'
__email__ = 'ivanov.vladimir.mail@gmail.com'
__status__ = 'Development'
# endregion


# region Main class - NetworkTest
class NetworkTest(unittest.TestCase):
    # NOTE(review): this class continues past the end of this chunk; the last
    # test method below is truncated mid-call and completed further down.

    # region Properties
    # Class-level setup: extend sys.path so the project package imports resolve,
    # then build one shared instance of each packet helper for all tests.
    path.append(dirname(dirname(dirname(dirname(dirname(abspath(__file__)))))))
    from raw_packet.Utils.base import Base
    from raw_packet.Utils.network import RawEthernet, RawARP, RawIPv4, RawUDP, RawDNS, RawICMPv4, RawDHCPv4
    from raw_packet.Utils.network import RawIPv6, RawICMPv6, RawDHCPv6
    base: Base = Base()
    ethernet: RawEthernet = RawEthernet()
    arp: RawARP = RawARP()
    ipv4: RawIPv4 = RawIPv4()
    ipv6: RawIPv6 = RawIPv6()
    udp: RawUDP = RawUDP()
    dns: RawDNS = RawDNS()
    icmpv4: RawICMPv4 = RawICMPv4()
    dhcpv4: RawDHCPv4 = RawDHCPv4()
    icmpv6: RawICMPv6 = RawICMPv6()
    dhcpv6: RawDHCPv6 = RawDHCPv6()
    # endregion

    # region Test RawEthernet methods
    def test_ethernet_init(self):
        self.assertIn('00:18:de', self.ethernet.macs)

    def test_ethernet_make_random_mac(self):
        self.assertTrue(self.base.mac_address_validation(self.ethernet.make_random_mac()))

    def test_ethernet_convert_mac(self):
        # NOTE(review): trailing int args (41, 42, ...) look like exit codes
        # paired with an exit-on-failure bool — confirm against RawEthernet.
        # Convert string MAC address to bytes
        self.assertEqual(self.ethernet.convert_mac('30:31:32:33:34:35', True, 41), b'012345')
        # Convert bytes MAC address to string
        self.assertEqual(self.ethernet.convert_mac(b'012345', True, 41), '30:31:32:33:34:35')
        # Bad MAC address string
        self.assertIsNone(self.ethernet.convert_mac('30:31:32:33:34:356', False, 41))
        # Bad MAC address string
        self.assertIsNone(self.ethernet.convert_mac('30:31:32:33:34567', False, 41))
        # Bad MAC address bytes
        self.assertIsNone(self.ethernet.convert_mac(b'01234', False, 41))

    def test_ethernet_get_mac_prefix(self):
        # Prefix from MAC address string
        self.assertEqual(self.ethernet.get_mac_prefix('ab:cd:ef:01:23:45', 3, True, 42), 'ABCDEF')
        # Prefix from MAC address bytes
        self.assertEqual(self.ethernet.get_mac_prefix(b'012345', 3, True, 42), '303132')
        # Bad MAC address string
        self.assertIsNone(self.ethernet.get_mac_prefix('30:31:32:33:34:356', 3, False, 42))
        # Bad MAC address string
        self.assertIsNone(self.ethernet.get_mac_prefix('30:31:32:33:34567', 3, False, 42))
        # Bad MAC address bytes
        self.assertIsNone(self.ethernet.get_mac_prefix(b'01234', 3, False, 42))

    def test_ethernet_parse_header(self):
        # Normal packet
        self.assertEqual(self.ethernet.parse_header(b'6789@A012345\x08\x00', True, 43),
                         {'destination': '36:37:38:39:40:41',
                          'source': '30:31:32:33:34:35',
                          'type': 2048})
        # Bad packet
        self.assertIsNone(self.ethernet.parse_header(b'6789@A012345\x08\x00\x01', False, 43))

    def test_ethernet_make_header(self):
        # MAC addresses string
        self.assertEqual(self.ethernet.make_header('30:31:32:33:34:35', '36:37:38:39:40:41', 2048, True, 44),
                         b'6789@A012345\x08\x00')
        # Bad first MAC address bytes
        self.assertIsNone(self.ethernet.make_header('30:31:32:33:34567', '36:37:38:39:40:41', 2048, False, 44))
        # Bad second MAC address bytes
        self.assertIsNone(self.ethernet.make_header('30:31:32:33:34:56', '36:37:38:39:40123', 2048, False, 44))
        # Bad network type
        self.assertIsNone(self.ethernet.make_header('30:31:32:33:34:56', '36:37:38:39:40:41', 123123, False, 44))
    # endregion

    # region Test RawARP methods
    def test_arp_parse_packet(self):
        # Normal packet
        self.assertEqual(self.arp.parse_packet(b'\x00\x01\x08\x00\x06\x04\x00\x01\x01#Eg\x89\n\xc0\xa8\x01\x01\x01' +
                                               b'#Eg\x89\x0b\xc0\xa8\x01\x02', True, 45),
                         {'hardware-type': 1, 'protocol-type': 2048, 'hardware-size': 6, 'protocol-size': 4,
                          'opcode': 1, 'sender-mac': '01:23:45:67:89:0a', 'sender-ip': '192.168.1.1',
                          'target-mac': '01:23:45:67:89:0b', 'target-ip': '192.168.1.2'})
        # Bad packet
        self.assertIsNone(self.arp.parse_packet(b'\x00\x01\x08\x00\x06\x04\x00\x01\x01#Eg\x89\n\xc0\xa8\x01\x01\x01' +
                                                b'#Eg\x89\x0b\xc0\xa8\x01\x02\x03', False, 45))

    def test_arp_make_packet(self):
        # Normal
        self.assertEqual(self.arp.make_packet('01:23:45:67:89:0a', '01:23:45:67:89:0b', '01:23:45:67:89:0a',
                                              '192.168.1.1', '01:23:45:67:89:0b', '192.168.1.2',
                                              1, 1, 2048, 6, 4, True, 46),
                         b'\x01#Eg\x89\x0b\x01#Eg\x89\n\x08\x06\x00\x01\x08\x00\x06\x04\x00\x01\x01#Eg\x89\n\xc0\xa8' +
                         b'\x01\x01\x01#Eg\x89\x0b\xc0\xa8\x01\x02')
        # Bad ethernet src MAC address
        self.assertIsNone(self.arp.make_packet('01:23:45:67:890ab', '01:23:45:67:89:0b', '01:23:45:67:89:0a',
                                               '192.168.1.1', '01:23:45:67:89:0b', '192.168.1.2',
                                               1, 1, 2048, 6, 4, False, 46))
        # Bad ethernet dst MAC address
        self.assertIsNone(self.arp.make_packet('01:23:45:67:89:0a', '01:23:45:67:890ab', '01:23:45:67:89:0a',
                                               '192.168.1.1', '01:23:45:67:89:0b', '192.168.1.2',
                                               1, 1, 2048, 6, 4, False, 46))
        # Bad sender MAC address
        self.assertIsNone(self.arp.make_packet('01:23:45:67:89:0a', '01:23:45:67:89:0a', '01:23:45:67:890ab',
                                               '192.168.1.1', '01:23:45:67:89:0b', '192.168.1.2',
                                               1, 1, 2048, 6, 4, False, 46))
        # Bad target MAC address
        self.assertIsNone(self.arp.make_packet('01:23:45:67:89:0a', '01:23:45:67:89:0a', '01:23:45:67:89:0a',
                                               '192.168.1.1', '01:23:45:67:890ab', '192.168.1.2',
                                               1, 1, 2048, 6, 4, False, 46))
        # Bad sender IP address
        self.assertIsNone(self.arp.make_packet('01:23:45:67:89:0a', '01:23:45:67:89:0b', '01:23:45:67:89:0a',
                                               '192.168.1.300', '01:23:45:67:89:0b', '192.168.1.2',
                                               1, 1, 2048, 6, 4, False, 46))
        # Bad target IP address
        self.assertIsNone(self.arp.make_packet('01:23:45:67:89:0a', '01:23:45:67:89:0b', '01:23:45:67:89:0a',
                                               '192.168.1.1', '01:23:45:67:89:0b', '192.168.1.400',
                                               1, 1, 2048, 6, 4, False, 46))
        # Bad ARP opcode
        self.assertIsNone(self.arp.make_packet('01:23:45:67:89:0a', '01:23:45:67:89:0b', '01:23:45:67:89:0a',
                                               '192.168.1.1', '01:23:45:67:89:0b', '192.168.1.2',
                                               123123, 1, 2048, 6, 4, False, 46))
        # Bad hardware type
        self.assertIsNone(self.arp.make_packet('01:23:45:67:89:0a', '01:23:45:67:89:0b', '01:23:45:67:89:0a',
                                               '192.168.1.1', '01:23:45:67:89:0b', '192.168.1.2',
                                               1, 123123, 2048, 6, 4, False, 46))
        # Bad protocol type
        self.assertIsNone(self.arp.make_packet('01:23:45:67:89:0a', '01:23:45:67:89:0b', '01:23:45:67:89:0a',
                                               '192.168.1.1', '01:23:45:67:89:0b', '192.168.1.2',
                                               1, 1, 123123, 6, 4, False, 46))
        # Bad hardware size
        self.assertIsNone(self.arp.make_packet('01:23:45:67:89:0a', '01:23:45:67:89:0b', '01:23:45:67:89:0a',
                                               '192.168.1.1', '01:23:45:67:89:0b', '192.168.1.2',
                                               1, 1, 2048, 123123, 4, False, 46))
        # Bad protocol size
        self.assertIsNone(self.arp.make_packet('01:23:45:67:89:0a', '01:23:45:67:89:0b', '01:23:45:67:89:0a',
                                               '192.168.1.1', '01:23:45:67:89:0b', '192.168.1.2',
                                               1, 1, 2048, 6, 123123, False, 46))

    def test_arp_make_request(self):
        # Normal
        self.assertEqual(self.arp.make_request('01:23:45:67:89:0a', '01:23:45:67:89:0b', '01:23:45:67:89:0a',
                                               '192.168.1.1', '01:23:45:67:89:0b', '192.168.1.2', True, 47),
                         b'\x01#Eg\x89\x0b\x01#Eg\x89\n\x08\x06\x00\x01\x08\x00\x06\x04\x00\x01\x01#Eg\x89\n' +
                         b'\xc0\xa8\x01\x01\x01#Eg\x89\x0b\xc0\xa8\x01\x02')

    def test_arp_make_response(self):
        # Normal
        self.assertEqual(self.arp.make_response('01:23:45:67:89:0a', '01:23:45:67:89:0b', '01:23:45:67:89:0a',
                                                '192.168.1.1', '01:23:45:67:89:0b', '192.168.1.2', True, 48),
                         b'\x01#Eg\x89\x0b\x01#Eg\x89\n\x08\x06\x00\x01\x08\x00\x06\x04\x00\x02\x01#Eg\x89\n' +
                         b'\xc0\xa8\x01\x01\x01#Eg\x89\x0b\xc0\xa8\x01\x02')
    # endregion

    # region Test RawIPv4 methods
    def test_ipv4_make_random_ip(self):
        self.assertTrue(self.base.ip_address_validation(self.ipv4.make_random_ip()))

    def test_ipv4_parse_header(self):
        # Normal
        self.assertEqual(self.ipv4.parse_header(b'E\x00\x00\x1c\x01\x00\x00\x00@\x11\xf6}' +
                                                b'\xc0\xa8\x01\x01\xc0\xa8\x01\x02', True, 49),
                         {'version': 4, 'length': 5, 'dscp_ecn': 0, 'total-length': 28, 'identification': 256,
                          'flags': 0, 'fragment-offset': 0, 'time-to-live': 64, 'protocol': 17, 'checksum': 63101,
                          'source-ip': '192.168.1.1', 'destination-ip': '192.168.1.2'})
        # Bad packet
        self.assertIsNone(self.ipv4.parse_header(b'\x61\x00\x00\x1c\x8d/\x00\x00@\x11jN' +
                                                 b'\xc0\xa8\x01\x01\xc0\xa8\x01\x02', False, 49))
        # Bad packet
        self.assertIsNone(self.ipv4.parse_header(b'\x61\x00\x00\x1c\x8d/\x00\x00@\x11jN' +
                                                 b'\xc0\xa8\x01\x01\xc0\xa8\x01', False, 49))

    def test_ipv4_make_header(self):
        # Normal
        self.assertEqual(self.ipv4.make_header(source_ip='192.168.1.1', destination_ip='192.168.1.2', data_len=0,
                                               transport_protocol_len=8, transport_protocol_type=17, ttl=64,
                                               identification=1, exit_on_failure=True, exit_code=50),
                         b'E\x00\x00\x1c\x01\x00\x00\x00@\x11\xf6}\xc0\xa8\x01\x01\xc0\xa8\x01\x02')
        # Bad source IP
        self.assertIsNone(self.ipv4.make_header(source_ip='192.168.1.300', destination_ip='192.168.1.2', data_len=0,
                                                transport_protocol_len=8, transport_protocol_type=17, ttl=64,
                                                identification=1, exit_on_failure=False, exit_code=50))
        # Bad destination IP
        self.assertIsNone(self.ipv4.make_header(source_ip='192.168.1.1', destination_ip='192.168.1.400', data_len=0,
                                                transport_protocol_len=8, transport_protocol_type=17, ttl=64,
                                                identification=1, exit_on_failure=False, exit_code=50))
        # Bad identification
        self.assertIsNone(self.ipv4.make_header(source_ip='192.168.1.1', destination_ip='192.168.1.2', data_len=0,
                                                transport_protocol_len=8, transport_protocol_type=17, ttl=64,
                                                identification=123123, exit_on_failure=False, exit_code=50))
        # Bad data length
        self.assertIsNone(self.ipv4.make_header(source_ip='192.168.1.1', destination_ip='192.168.1.2',
                                                data_len=123123, transport_protocol_len=8,
                                                transport_protocol_type=17, ttl=64,
                                                identification=1, exit_on_failure=False, exit_code=50))
        # Bad transport protocol header length
        self.assertIsNone(self.ipv4.make_header(source_ip='192.168.1.1', destination_ip='192.168.1.2', data_len=0,
                                                transport_protocol_len=123123, transport_protocol_type=17, ttl=64,
                                                identification=1, exit_on_failure=False, exit_code=50))
        # Bad transport protocol type
        self.assertIsNone(self.ipv4.make_header(source_ip='192.168.1.1', destination_ip='192.168.1.2', data_len=0,
                                                transport_protocol_len=8, transport_protocol_type=123123, ttl=64,
                                                identification=1, exit_on_failure=False, exit_code=50))
        # Bad ttl
        self.assertIsNone(self.ipv4.make_header(source_ip='192.168.1.1', destination_ip='192.168.1.2', data_len=0,
                                                transport_protocol_len=8, transport_protocol_type=17, ttl=123123,
                                                identification=1, exit_on_failure=False, exit_code=50))
    # endregion

    # region Test RawIPv6 methods
    def test_ipv6_make_random_ip(self):
        # Normal
        self.assertTrue(self.base.ipv6_address_validation(self.ipv6.make_random_ip(octets=3,
                                                                                   prefix='fd00::',
                                                                                   exit_on_failure=True,
                                                                                   exit_code=51)))
        # Bad prefix
        self.assertIsNone(self.ipv6.make_random_ip(octets=1, prefix='fd00:::', exit_on_failure=False, exit_code=51))
        # Bad octets count
        self.assertIsNone(self.ipv6.make_random_ip(octets=123, prefix='fd00::', exit_on_failure=False, exit_code=51))

    def test_ipv6_pack_addr(self):
        # Normal
        self.assertEqual(self.ipv6.pack_addr(ipv6_address='3132:3334::1', exit_on_failure=True, exit_code=52),
                         b'1234\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01')
        # Bad IPv6 address
        self.assertIsNone(self.ipv6.pack_addr(ipv6_address='fd00:::1', exit_on_failure=False, exit_code=52))

    def test_ipv6_parse_header(self):
        # Normal
        self.assertEqual(self.ipv6.parse_header(b'`\x00\x00\x00\x00\x08\x11@\xfd\x00\x00\x00\x00\x00\x00\x00' +
                                                b'\x00\x00\x00\x00\x00\x00\x00\x01\xfd\x00\x00\x00\x00\x00\x00' +
                                                b'\x00\x00\x00\x00\x00\x00\x00\x00\x02', True, 53),
                         {'version': 6, 'traffic-class': 0, 'flow-label': 0, 'payload-length': 8, 'next-header': 17,
                          'hop-limit': 64, 'source-ip': 'fd00::1', 'destination-ip': 'fd00::2'})
        # Bad packet
        self.assertIsNone(self.ipv6.parse_header(b'`\x00\x00\x00\x00\x08\x11@\xfd\x00\x00\x00\x00\x00\x00\x00' +
                                                 b'\x00\x00\x00\x00\x00\x00\x00\x01\xfd\x00\x00\x00\x00\x00\x00' +
                                                 b'\x00\x00\x00\x00\x00\x00\x00\x00', False, 53))
        # Bad packet
        self.assertIsNone(self.ipv6.parse_header(b'E\x00\x00\x00\x00\x08\x11@\xfd\x00\x00\x00\x00\x00\x00\x00' +
                                                 b'\x00\x00\x00\x00\x00\x00\x00\x01\xfd\x00\x00\x00\x00\x00\x00' +
                                                 b'\x00\x00\x00\x00\x00\x00\x00\x00\x02', False, 53))

    def test_ipv6_make_header(self):
        # Normal
        self.assertEqual(self.ipv6.make_header(source_ip='fd00::1', destination_ip='fd00::2', traffic_class=0,
                                               flow_label=0, payload_len=8, next_header=17, hop_limit=64,
                                               exit_on_failure=True, exit_code=54),
                         b'`\x00\x00\x00\x00\x08\x11@\xfd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' +
                         b'\x00\x01\xfd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02')
        # Bad source IP
        self.assertIsNone(self.ipv6.make_header(source_ip='fd00:::1', destination_ip='fd00::2', traffic_class=0,
                                                flow_label=0, payload_len=8, next_header=17, hop_limit=64,
                                                exit_on_failure=False, exit_code=54))
        # Bad destination IP
        self.assertIsNone(self.ipv6.make_header(source_ip='fd00::1', destination_ip='fd00:::2', traffic_class=0,
                                                flow_label=0, payload_len=8, next_header=17, hop_limit=64,
                                                exit_on_failure=False, exit_code=54))
        # Bad traffic class
        self.assertIsNone(self.ipv6.make_header(source_ip='fd00::1', destination_ip='fd00::2',
                                                traffic_class=123123123, flow_label=0, payload_len=8,
                                                next_header=17, hop_limit=64,
                                                exit_on_failure=False, exit_code=54))
        # Bad flow label
        self.assertIsNone(self.ipv6.make_header(source_ip='fd00::1', destination_ip='fd00::2', traffic_class=0,
                                                flow_label=123123123123, payload_len=8, next_header=17, hop_limit=64,
                                                exit_on_failure=False, exit_code=54))
        # Bad payload len
        self.assertIsNone(self.ipv6.make_header(source_ip='fd00::1', destination_ip='fd00::2', traffic_class=0,
                                                flow_label=0, payload_len=123123123123, next_header=17, hop_limit=64,
                                                exit_on_failure=False, exit_code=54))
        # Bad next header
        self.assertIsNone(self.ipv6.make_header(source_ip='fd00::1', destination_ip='fd00::2', traffic_class=0,
                                                flow_label=0, payload_len=8, next_header=123123123123123,
                                                hop_limit=64, exit_on_failure=False, exit_code=54))
        # Bad hop limit
        self.assertIsNone(self.ipv6.make_header(source_ip='fd00::1', destination_ip='fd00::2', traffic_class=0,
                                                flow_label=0, payload_len=8, next_header=17,
                                                hop_limit=123123123123123, exit_on_failure=False, exit_code=54))
    # endregion

    # region Test RawUDP methods
    def test_udp_parse_header(self):
        # Normal
        self.assertEqual(self.udp.parse_header(packet=b'\x14\xe9\x14\xe9\x00\x08\xdc\x07', exit_on_failure=True,
                                               exit_code=55),
                         {'source-port': 5353, 'destination-port': 5353, 'length': 8, 'checksum': 56327})
        # Bad packet length
        self.assertIsNone(self.udp.parse_header(packet=b'\x14\xe9\x14\xe9\x00\x08\xdc', exit_on_failure=False,
                                                exit_code=55))

    def test_udp_make_header(self):
        # Normal
        self.assertEqual(self.udp.make_header(source_port=5353, destination_port=5353, data_length=0,
                                              exit_on_failure=True, exit_code=56),
                         b'\x14\xe9\x14\xe9\x00\x08\x00\x00')
        # Bad source port
        self.assertIsNone(self.udp.make_header(source_port=123123, destination_port=5353, data_length=0,
                                               exit_on_failure=False, exit_code=56))
        # Bad destination port
        self.assertIsNone(self.udp.make_header(source_port=5353, destination_port=123123, data_length=0,
                                               exit_on_failure=False, exit_code=56))
        # Bad data length
        self.assertIsNone(self.udp.make_header(source_port=5353, destination_port=5353, data_length=123123,
                                               exit_on_failure=False, exit_code=56))

    def test_udp_make_header_with_ipv6_checksum(self):
        # Normal
        self.assertEqual(self.udp.make_header_with_ipv6_checksum(ipv6_src='fd00::1', ipv6_dst='fd00::2',
                                                                 port_src=5353, port_dst=5353, payload_len=0,
                                                                 payload_data=b'', exit_on_failure=True,
                                                                 exit_code=57),
                         b'\x14\xe9\x14\xe9\x00\x08\xdc\x07')
        # Bad source IPv6 address
        self.assertIsNone(self.udp.make_header_with_ipv6_checksum(ipv6_src='fd00:::1', ipv6_dst='fd00::2',
                                                                  port_src=5353, port_dst=5353, payload_len=0,
                                                                  payload_data=b'', exit_on_failure=False,
                                                                  exit_code=57))
        # Bad destination IPv6 address
        # NOTE(review): this call is truncated here; its remaining arguments
        # continue beyond the end of this chunk.
        self.assertIsNone(self.udp.make_header_with_ipv6_checksum(ipv6_src='fd00::1', ipv6_dst='fd00:::2',
port_src=5353, port_dst=5353, payload_len=0, payload_data=b'', exit_on_failure=False, exit_code=57)) # Bad source port self.assertIsNone(self.udp.make_header_with_ipv6_checksum(ipv6_src='fd00::1', ipv6_dst='fd00::2', port_src=123123, port_dst=5353, payload_len=0, payload_data=b'', exit_on_failure=False, exit_code=57)) # Bad destination port self.assertIsNone(self.udp.make_header_with_ipv6_checksum(ipv6_src='fd00::1', ipv6_dst='fd00::2', port_src=5353, port_dst=123123, payload_len=0, payload_data=b'', exit_on_failure=False, exit_code=57)) # Bad payload length self.assertIsNone(self.udp.make_header_with_ipv6_checksum(ipv6_src='fd00::1', ipv6_dst='fd00::2', port_src=5353, port_dst=5353, payload_len=123123, payload_data=b'', exit_on_failure=False, exit_code=57)) # endregion # region Test RawDNS methods def test_dns_get_top_level_domain(self): # Normal self.assertEqual(self.dns.get_top_level_domain(name='www.test.com'), 'test.com') # Bad name self.assertEqual(self.dns.get_top_level_domain(name='test'), 'test') def test_dns_pack_dns_name(self): # Normal self.assertEqual(self.dns.pack_dns_name(name='test.com', exit_on_failure=True, exit_code=65), b'\x04test\x03com\x00') # Bad name self.assertIsNone(self.dns.pack_dns_name(name='AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA' + 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA' + 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA' + 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA' + 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA' + 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA' + 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA' + 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA' + 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA' + 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA' + '.com', exit_on_failure=False, exit_code=65)) def test_dns_parse_packet(self): 
self.assertEqual(self.dns.parse_packet(packet=b'\x00\x01\x81\x80\x00\x01\x00\x01\x00\x00\x00\x00' + b'\x04test\x03com\x00\x00\x01\x00\x01\x04test\x03com\x00' + b'\x00\x01\x00\x01\x00\x00\xff\xff\x00\x04\xc0\xa8\x01\x01', exit_on_failure=True, exit_code=67), {'additional-rrs': 0, 'answer-rrs': 1, 'authority-rrs': 0, 'questions': 1, 'flags': 33152, 'transaction-id': 1, 'answers': [ {'address': '192.168.1.1', 'class': 1, 'name': 'test.com.', 'ttl': 65535, 'type': 1}], 'queries': [{'class': 1, 'name': 'test.com.', 'type': 1}]}) def test_dns_unpack_dns_name(self): self.assertEqual(self.dns.unpack_dns_name(packed_name=b'\x03www\x04test\x03com\x00'), 'www.test.com.') self.assertEqual(self.dns.unpack_dns_name(packed_name=b'\x04mail\xc0\x11', name='pop3.test.com'), 'mail.test.com') self.assertEqual(self.dns.unpack_dns_name(packed_name=b'\xc0\x10', name='test.com'), 'test.com') self.assertEqual(self.dns.unpack_dns_name(packed_name=b'\x03www\xc0\x0c', name='test.com'), 'www.test.com') def test_dns_make_ipv4_request_packet(self): # Normal self.assertEqual(self.dns.make_ipv4_request_packet(ethernet_src_mac='01:23:45:67:89:0a', ethernet_dst_mac='01:23:45:67:89:0b', ip_src='192.168.1.1', ip_dst='192.168.1.2', ip_ttl=64, ip_ident=1, udp_src_port=5353, udp_dst_port=53, transaction_id=1, queries=[{'type': 1, 'class': 1, 'name': 'test.com'}], flags=0), b'\x01#Eg\x89\x0b\x01#Eg\x89\n\x08\x00E\x00\x006\x01\x00\x00\x00@\x11\xf6c\xc0\xa8\x01\x01' + b'\xc0\xa8\x01\x02\x14\xe9\x005\x00"\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00' + b'\x04test\x03com\x00\x00\x01\x00\x01') # Bad source MAC address self.assertIsNone(self.dns.make_ipv4_request_packet(ethernet_src_mac='01:23:45:67:890ab', ethernet_dst_mac='01:23:45:67:89:0b', ip_src='192.168.1.1', ip_dst='192.168.1.2', ip_ttl=64, ip_ident=1, udp_src_port=5353, udp_dst_port=53, transaction_id=1, queries=[{'type': 1, 'class': 1, 'name': 'test.com'}], flags=0)) # Bad destination MAC address 
self.assertIsNone(self.dns.make_ipv4_request_packet(ethernet_src_mac='01:23:45:67:89:0a', ethernet_dst_mac='01:23:45:67:890ab', ip_src='192.168.1.1', ip_dst='192.168.1.2', ip_ttl=64, ip_ident=1, udp_src_port=5353, udp_dst_port=53, transaction_id=1, queries=[{'type': 1, 'class': 1, 'name': 'test.com'}], flags=0)) # Bad source IPv4 address self.assertIsNone(self.dns.make_ipv4_request_packet(ethernet_src_mac='01:23:45:67:89:0a', ethernet_dst_mac='01:23:45:67:89:0b', ip_src='192.168.1.300', ip_dst='192.168.1.2', ip_ttl=64, ip_ident=1, udp_src_port=5353, udp_dst_port=53, transaction_id=1, queries=[{'type': 1, 'class': 1, 'name': 'test.com'}], flags=0)) # Bad destination IPv4 address self.assertIsNone(self.dns.make_ipv4_request_packet(ethernet_src_mac='01:23:45:67:89:0a', ethernet_dst_mac='01:23:45:67:89:0b', ip_src='192.168.1.1', ip_dst='192.168.1.400', ip_ttl=64, ip_ident=1, udp_src_port=5353, udp_dst_port=53, transaction_id=1, queries=[{'type': 1, 'class': 1, 'name': 'test.com'}], flags=0)) # Bad source UDP port self.assertIsNone(self.dns.make_ipv4_request_packet(ethernet_src_mac='01:23:45:67:89:0a', ethernet_dst_mac='01:23:45:67:89:0b', ip_src='192.168.1.1', ip_dst='192.168.1.2', ip_ttl=64, ip_ident=1, udp_src_port=123123, udp_dst_port=53, transaction_id=1, queries=[{'type': 1, 'class': 1, 'name': 'test.com'}], flags=0)) # Bad destination UDP port self.assertIsNone(self.dns.make_ipv4_request_packet(ethernet_src_mac='01:23:45:67:89:0a', ethernet_dst_mac='01:23:45:67:89:0b', ip_src='192.168.1.1', ip_dst='192.168.1.2', ip_ttl=64, ip_ident=1, udp_src_port=5353, udp_dst_port=123123, transaction_id=1, queries=[{'type': 1, 'class': 1, 'name': 'test.com'}], flags=0)) # Bad transaction id self.assertIsNone(self.dns.make_ipv4_request_packet(ethernet_src_mac='01:23:45:67:89:0a', ethernet_dst_mac='01:23:45:67:89:0b', ip_src='192.168.1.1', ip_dst='192.168.1.2', ip_ttl=64, ip_ident=1, udp_src_port=5353, udp_dst_port=53, transaction_id=123123123, queries=[{'type': 1, 'class': 1, 
'name': 'test.com'}], flags=0)) # Bad query type self.assertIsNone(self.dns.make_ipv4_request_packet(ethernet_src_mac='01:23:45:67:89:0a', ethernet_dst_mac='01:23:45:67:89:0b', ip_src='192.168.1.1', ip_dst='192.168.1.2', ip_ttl=64, ip_ident=1, udp_src_port=5353, udp_dst_port=53, transaction_id=1, queries=[{'type': 123123, 'class': 1, 'name': 'test.com'}], flags=0)) # Bad query class self.assertIsNone(self.dns.make_ipv4_request_packet(ethernet_src_mac='01:23:45:67:89:0a', ethernet_dst_mac='01:23:45:67:89:0b', ip_src='192.168.1.1', ip_dst='192.168.1.2', ip_ttl=64, ip_ident=1, udp_src_port=5353, udp_dst_port=53, transaction_id=1, queries=[{'type': 1, 'class': 123123, 'name': 'test.com'}], flags=0)) # Bad flags self.assertIsNone(self.dns.make_ipv4_request_packet(ethernet_src_mac='01:23:45:67:89:0a', ethernet_dst_mac='01:23:45:67:89:0b', ip_src='192.168.1.1', ip_dst='192.168.1.2', ip_ttl=64, ip_ident=1, udp_src_port=5353, udp_dst_port=53, transaction_id=1, queries=[{'type': 1, 'class': 1, 'name': 'test.com'}], flags=123123)) # Bad queries self.assertIsNone(self.dns.make_ipv4_request_packet(ethernet_src_mac='01:23:45:67:89:0a', ethernet_dst_mac='01:23:45:67:89:0b', ip_src='192.168.1.1', ip_dst='192.168.1.2', ip_ttl=64, ip_ident=1, udp_src_port=5353, udp_dst_port=53, transaction_id=1, queries=[{'type': 1, 'name': 'test.com'}], flags=0)) # Bad queries self.assertIsNone(self.dns.make_ipv4_request_packet(ethernet_src_mac='01:23:45:67:89:0a', ethernet_dst_mac='01:23:45:67:89:0b', ip_src='192.168.1.1', ip_dst='192.168.1.2', ip_ttl=64, ip_ident=1, udp_src_port=5353, udp_dst_port=53, transaction_id=1, queries=[{'class': 1, 'name': 'test.com'}], flags=0)) # Bad queries self.assertIsNone(self.dns.make_ipv4_request_packet(ethernet_src_mac='01:23:45:67:89:0a', ethernet_dst_mac='01:23:45:67:89:0b', ip_src='192.168.1.1', ip_dst='192.168.1.2', ip_ttl=64, ip_ident=1, udp_src_port=5353, udp_dst_port=53, transaction_id=1, queries=[{'class': 1, 'type': 1}], flags=0)) def 
test_dns_make_ipv6_request_packet(self): # Normal self.assertEqual(self.dns.make_ipv6_request_packet(ethernet_src_mac='01:23:45:67:89:0a', ethernet_dst_mac='01:23:45:67:89:0b', ip_src='fd00::1', ip_dst='fd00::2', ip_ttl=64, udp_src_port=5353, udp_dst_port=53, transaction_id=1, queries=[{'type': 1, 'class': 1, 'name': 'test.com'}], flags=0), b'\x01#Eg\x89\x0b\x01#Eg\x89\n\x86\xdd`\x00\x00\x00\x00"\x11@\xfd\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x01\xfd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x02\x14\xe9\x005\x00"B)\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00' + b'\x04test\x03com\x00\x00\x01\x00\x01') # Bad source IPv6 address self.assertIsNone(self.dns.make_ipv6_request_packet(ethernet_src_mac='01:23:45:67:89:0a', ethernet_dst_mac='01:23:45:67:89:0b', ip_src='fd00:::1', ip_dst='fd00::2', ip_ttl=64, udp_src_port=5353, udp_dst_port=53, transaction_id=1, queries=[{'type': 1, 'class': 1, 'name': 'test.com'}], flags=0)) # Bad destination IPv6 address self.assertIsNone(self.dns.make_ipv6_request_packet(ethernet_src_mac='01:23:45:67:89:0a', ethernet_dst_mac='01:23:45:67:89:0b', ip_src='fd00::1', ip_dst='fd00:::2', ip_ttl=64, udp_src_port=5353, udp_dst_port=53, transaction_id=1, queries=[{'type': 1, 'class': 1, 'name': 'test.com'}], flags=0)) def test_dns_make_response_packet(self): # Normal IPv4 response self.assertEqual(self.dns.make_response_packet(ethernet_src_mac='01:23:45:67:89:0a', ethernet_dst_mac='01:23:45:67:89:0b', ip_src='192.168.1.1', ip_dst='192.168.1.2', ip_ttl=64, ip_ident=1, udp_src_port=53, udp_dst_port=5353, transaction_id=1, flags=0x8180, queries=[{'type': 1, 'class': 1, 'name': 'test.com'}], answers_address=[{'name': 'test.com', 'type': 1, 'class': 1, 'ttl': 65535, 'address': '192.168.1.1'}], name_servers={}, exit_on_failure=True), b'\x01#Eg\x89\x0b\x01#Eg\x89\n\x08\x00E\x00\x00F\x01\x00\x00\x00@\x11\xf6S\xc0\xa8\x01\x01' + 
b'\xc0\xa8\x01\x02\x005\x14\xe9\x002\xb5{\x00\x01\x81\x80\x00\x01\x00\x01\x00\x00\x00\x00' + b'\x04test\x03com\x00\x00\x01\x00\x01\xc0\x0c\x00\x01\x00\x01\x00\x00\xff\xff\x00\x04\xc0' + b'\xa8\x01\x01') # Normal IPv6 response self.assertEqual(self.dns.make_response_packet(ethernet_src_mac='01:23:45:67:89:0a', ethernet_dst_mac='01:23:45:67:89:0b', ip_src='fd00::1', ip_dst='fd00::2', ip_ttl=64, ip_ident=1, udp_src_port=53, udp_dst_port=5353, transaction_id=1, flags=0x8180, queries=[{'type': 1, 'class': 1, 'name': 'test.com'}], answers_address=[{'name': 'test.com', 'type': 28, 'class': 1, 'ttl': 65535, 'address': 'fd00::1'}], name_servers={}, exit_on_failure=True), b'\x01#Eg\x89\x0b\x01#Eg\x89\n\x86\xdd`\x00\x00\x00\x00>\x11@\xfd\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x01\xfd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x02\x005\x14\xe9\x00>\x034\x00\x01\x81\x80\x00\x01\x00\x01\x00\x00\x00\x00\x04' + b'test\x03com\x00\x00\x01\x00\x01\xc0\x0c\x00\x1c\x00\x01\x00\x00\xff\xff\x00\x10\xfd\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01') # Bad MAC address self.assertIsNone(self.dns.make_response_packet(ethernet_src_mac='01:23:45:67:890ab', ethernet_dst_mac='01:23:45:67:89:0b', ip_src='fd00::1', ip_dst='fd00::2', ip_ttl=64, ip_ident=1, udp_src_port=53, udp_dst_port=5353, transaction_id=1, flags=0, queries=[{'type': 1, 'class': 1, 'name': 'test.com'}], answers_address=[{'name': 'test.com', 'type': 28, 'class': 1, 'ttl': 65535, 'address': 'fd00::1'}], name_servers={}, exit_on_failure=False)) # Bad IP address self.assertIsNone(self.dns.make_response_packet(ethernet_src_mac='01:23:45:67:89:0a', ethernet_dst_mac='01:23:45:67:89:0b', ip_src='fd00:::1', ip_dst='fd00::2', ip_ttl=64, ip_ident=1, udp_src_port=53, udp_dst_port=5353, transaction_id=1, flags=0, queries=[{'type': 1, 'class': 1, 'name': 'test.com'}], answers_address=[{'name': 'test.com', 'type': 28, 'class': 1, 'ttl': 65535, 'address': 'fd00::1'}], 
name_servers={}, exit_on_failure=False)) # Bad UDP port self.assertIsNone(self.dns.make_response_packet(ethernet_src_mac='01:23:45:67:89:0a', ethernet_dst_mac='01:23:45:67:89:0b', ip_src='fd00::1', ip_dst='fd00::2', ip_ttl=64, ip_ident=1, udp_src_port=123123, udp_dst_port=5353, transaction_id=1, flags=0, queries=[{'type': 1, 'class': 1, 'name': 'test.com'}], answers_address=[{'name': 'test.com', 'type': 28, 'class': 1, 'ttl': 65535, 'address': 'fd00::1'}], name_servers={}, exit_on_failure=False)) # Bad IPv4 address in answer self.assertIsNone(self.dns.make_response_packet(ethernet_src_mac='01:23:45:67:89:0a', ethernet_dst_mac='01:23:45:67:89:0b', ip_src='fd00::1', ip_dst='fd00::2', ip_ttl=64, ip_ident=1, udp_src_port=53, udp_dst_port=5353, transaction_id=1, flags=0, queries=[{'type': 1, 'class': 1, 'name': 'test.com'}], answers_address=[{'name': 'test.com', 'type': 1, 'class': 1, 'ttl': 65535, 'address': '192.168.1.300'}], name_servers={}, exit_on_failure=False)) # Bad IPv6 address in answer self.assertIsNone(self.dns.make_response_packet(ethernet_src_mac='01:23:45:67:89:0a', ethernet_dst_mac='01:23:45:67:89:0b', ip_src='fd00::1', ip_dst='fd00::2', ip_ttl=64, ip_ident=1, udp_src_port=53, udp_dst_port=5353, transaction_id=1, flags=0, queries=[{'type': 1, 'class': 1, 'name': 'test.com'}], answers_address=[{'name': 'test.com', 'type': 28, 'class': 1, 'ttl': 65535, 'address': 'fd00:::1'}], name_servers={}, exit_on_failure=False)) # endregion # endregion # region Test RawICMPv4 methods def test_icmpv4_make_host_unreachable_packet(self): # Normal self.assertEqual(self.icmpv4.make_host_unreachable_packet(ethernet_src_mac='01:23:45:67:89:0a', ethernet_dst_mac='01:23:45:67:89:0b', ip_src='192.168.0.1', ip_dst='192.168.0.2', ip_ident=1), b'\x01#Eg\x89\x0b\x01#Eg\x89\n\x08\x00E\x00\x000\x01\x00\x00\x00@\x01\xf8y\xc0\xa8\x00' + b'\x01\xc0\xa8\x00\x02\x03\x01\xfc\xfe\x00\x00\x00\x00E\x00\x00\x1c\x01\x00\x00\x00@\x01' + b'\xf8\x8d\xc0\xa8\x00\x02\xc0\xa8\x00\x01') # Bad MAC 
address self.assertIsNone(self.icmpv4.make_host_unreachable_packet(ethernet_src_mac='01:23:45:67:89:0ab', ethernet_dst_mac='01:23:45:67:89:0b', ip_src='192.168.0.1', ip_dst='192.168.0.2', ip_ident=1)) # Bad IP address self.assertIsNone(self.icmpv4.make_host_unreachable_packet(ethernet_src_mac='01:23:45:67:89:0a', ethernet_dst_mac='01:23:45:67:89:0b', ip_src='192.168.0.1111', ip_dst='192.168.0.2', ip_ident=1)) def test_icmpv4_make_udp_port_unreachable_packet(self): # Normal self.assertEqual(self.icmpv4.make_udp_port_unreachable_packet(ethernet_src_mac='01:23:45:67:89:0a', ethernet_dst_mac='01:23:45:67:89:0b', ip_src='192.168.0.1', ip_dst='192.168.0.2', udp_src_port=5353, udp_dst_port=5353, ip_ident=1), b'\x01#Eg\x89\x0b\x01#Eg\x89\n\x08\x00E\x00\x008\x01\x00\x00\x00@\x01\xf8q\xc0\xa8\x00\x01' + b'\xc0\xa8\x00\x02\x03\x03\xd3"\x00\x00\x00\x00E\x00\x00$\x01\x00\x00\x00@\x11\xf8u\xc0\xa8' + b'\x00\x02\xc0\xa8\x00\x01\x14\xe9\x14\xe9\x00\x08\x00\x00') def test_icmpv4_make_ping_request_packet(self): # Normal self.assertEqual(self.icmpv4.make_ping_request_packet(ethernet_src_mac='01:23:45:67:89:0a', ethernet_dst_mac='01:23:45:67:89:0b', ip_src='192.168.0.1', ip_dst='192.168.0.2', ip_ident=1, data=b'0123456789'), b'\x01#Eg\x89\x0b\x01#Eg\x89\n\x08\x00E\x00\x00&\x01\x00\x00\x00@\x01\xf8\x83\xc0\xa8\x00' + b'\x01\xc0\xa8\x00\x02\x08\x00\xf2\xf5\x00\x00\x00\x000123456789') def test_icmpv4_make_redirect_packet(self): # Normal self.assertEqual(self.icmpv4.make_redirect_packet(ethernet_src_mac='01:23:45:67:89:0a', ethernet_dst_mac='01:23:45:67:89:0b', ip_src='192.168.0.1', ip_dst='192.168.0.2', ip_ttl=64, ip_ident=1, gateway_address='192.168.0.1', payload_ip_src='192.168.0.1', payload_ip_dst='192.168.0.2'), b'\x01#Eg\x89\x0b\x01#Eg\x89\n\x08\x00E\x00\x008\x01\x00\x00\x00@\x01\xf8q\xc0\xa8\x00\x01' + b'\xc0\xa8\x00\x02\x05\x019\xe3\xc0\xa8\x00\x01E\x00\x00\x1c\x01\x00\x00\x00@\x11\xf8}\xc0' + b'\xa8\x00\x01\xc0\xa8\x00\x02\x005\x005\x00\x08\x00\x00') # Bad gateway address 
self.assertIsNone(self.icmpv4.make_redirect_packet(ethernet_src_mac='01:23:45:67:89:0a', ethernet_dst_mac='01:23:45:67:89:0b', ip_src='192.168.0.1', ip_dst='192.168.0.2', ip_ttl=64, ip_ident=1, gateway_address='192.168.0.1111', payload_ip_src='192.168.0.1', payload_ip_dst='192.168.0.2')) # Bad payload IP address self.assertIsNone(self.icmpv4.make_redirect_packet(ethernet_src_mac='01:23:45:67:89:0a', ethernet_dst_mac='01:23:45:67:89:0b', ip_src='192.168.0.1', ip_dst='192.168.0.2', ip_ttl=64, ip_ident=1, gateway_address='192.168.0.1', payload_ip_src='192.168.0.1111', payload_ip_dst='192.168.0.2')) # endregion # region Test RawDHCPv4 methods def test_dhcpv4_discover_packet(self): # Normal self.assertEqual(self.dhcpv4.make_discover_packet(ethernet_src_mac='01:23:45:67:89:0a', client_mac='01:23:45:67:89:0a', ip_ident=1, transaction_id=1, host_name='dhcp.discover.test', exit_on_failure=True, exit_code=76), b'\xff\xff\xff\xff\xff\xff\x01#Eg\x89\n\x08\x00E\x00\x02<\x01\x00\x00\x00@\x11w\xb2\x00' + b'\x00\x00\x00\xff\xff\xff\xff\x00D\x00C\x02(\x00\x00\x01\x01\x06\x00\x00\x00\x00\x01\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01#Eg\x89' + b'\n\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + 
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00c\x82Sc5\x01\x01\x0c\x12dhcp.discover.test7\xfe\x01\x02\x03\x04\x05' + b'\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c' + b'\x1d\x1e\x1f !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefg' + b'hijklmnopqrstuvwxyz{|}~\x7f\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e' + b'\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4' + b'\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba' + b'\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0' + b'\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6' + b'\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc' + b'\xfd\xfe\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00') def test_dhcpv4_make_request_packet(self): # Normal self.assertEqual(self.dhcpv4.make_request_packet(ethernet_src_mac='01:23:45:67:89:0a', client_mac='01:23:45:67:89:0a', ip_ident=1, transaction_id=1, requested_ip='192.168.1.1', host_name='dhcp.request.test', exit_on_failure=True, exit_code=77), b'\xff\xff\xff\xff\xff\xff\x01#Eg\x89\n\x08\x00E\x00\x01J\x01\x00\x00\x00@\x11x\xa4\x00' + b'\x00\x00\x00\xff\xff\xff\xff\x00D\x00C\x016\x00\x00\x01\x01\x06\x00\x00\x00\x00\x01\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01#Eg\x89' + b'\n\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' 
+ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00c\x82Sc5\x01\x032\x04\xc0\xa8\x01\x01\x0c\x11dhcp.request.test7\x07' + b'\x01\x02\x03\x06\x1c\x0f\x1a\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00') def test_dhcpv4_make_response_packet(self): # DHCPv4 Offer self.assertEqual(self.dhcpv4.make_response_packet(ethernet_src_mac='01:23:45:67:89:0a', ethernet_dst_mac='01:23:45:67:89:0b', ip_src='192.168.1.1', ip_ident=1, transaction_id=1, dhcp_message_type=2, your_client_ip='192.168.1.2', exit_on_failure=True, exit_code=78), b'\x01#Eg\x89\x0b\x01#Eg\x89\n\x08\x00E\x00\x01F\x01\x00\x00\x00@\x11\xb6\xfe\xc0\xa8\x01' + b'\x01\xff\xff\xff\xff\x00C\x00D\x012\x00\x00\x02\x01\x06\x00\x00\x00\x00\x01\x00\x00\x00' + b'\x00\x00\x00\x00\x00\xc0\xa8\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x01#Eg\x89\x0b\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + 
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00c\x82Sc5\x01\x026\x04\xc0\xa8\x01\x013\x04\x00\x00\xff\xff\x01\x04\xff\xff\xff' + b'\x00\x03\x04\xc0\xa8\x01\x01\x06\x04\xc0\xa8\x01\x01\xff\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00') # DHCPv4 ACK self.assertEqual(self.dhcpv4.make_response_packet(ethernet_src_mac='01:23:45:67:89:0a', ethernet_dst_mac='01:23:45:67:89:0b', ip_src='192.168.1.1', ip_ident=1, transaction_id=1, dhcp_message_type=5, your_client_ip='192.168.1.2', exit_on_failure=True, exit_code=78), b'\x01#Eg\x89\x0b\x01#Eg\x89\n\x08\x00E\x00\x01F\x01\x00\x00\x00@\x11\xb6\xfe\xc0\xa8\x01' + b'\x01\xff\xff\xff\xff\x00C\x00D\x012\x00\x00\x02\x01\x06\x00\x00\x00\x00\x01\x00\x00\x00' + b'\x00\x00\x00\x00\x00\xc0\xa8\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x01#Eg\x89\x0b\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00c\x82Sc5\x01\x056\x04\xc0\xa8\x01\x013\x04\x00\x00\xff\xff\x01\x04\xff\xff\xff' + 
b'\x00\x03\x04\xc0\xa8\x01\x01\x06\x04\xc0\xa8\x01\x01\xff\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00') # endregion # region Test RawICMPv6 methods def test_icmpv6_make_option(self): # Normal self.assertEqual(self.icmpv6.make_option(option_type=1, option_value=b'test_option_value'), b'\x01\x03\x00\x00\x00\x00\x00test_option_value') def test_icmpv6_make_router_solicit_packet(self): # Normal self.assertEqual(self.icmpv6.make_router_solicit_packet(ethernet_src_mac='01:23:45:67:89:0a', ethernet_dst_mac='33:33:00:00:00:02', ipv6_src='fd00::1', ipv6_dst='fd00::2', ipv6_flow=0x835d1, need_source_link_layer_address=True, source_link_layer_address=None), b'33\x00\x00\x00\x02\x01#Eg\x89\n\x86\xdd`\x085\xd1\x00\x10:\xff\xfd\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\xfd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x02\x85\x00\xb0\x1a\x00\x00\x00\x00\x01\x01\x01#Eg\x89\n') def test_icmpv6_make_router_advertisement_packet(self): # Normal self.assertEqual(self.icmpv6.make_router_advertisement_packet(ethernet_src_mac='01:23:45:67:89:0a', ethernet_dst_mac='01:23:45:67:89:0b', ipv6_src='fd00::1', ipv6_dst='fd00::2', dns_address='fd00::1', domain_search='test.local', prefix='fd00::/64'), b'\x01#Eg\x89\x0b\x01#Eg\x89\n\x86\xdd`\x0bGU\x00\x80:\xff\xfd\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x01\xfd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x02\x86\x00\xb3>@\xc0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x04@\xc0\xff\xff' + b'\xff\xff\xff\xff\xff\xff\x00\x00\x00\x00\xfd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x01\x01\x01#Eg\x89\n\x05\x01\x00\x00\x00\x00\x05\xdc\x19\x03\x00\x00\x00' + b'\x00\x17p\xfd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x1f\x04\x00\x00' + b'\x00\x00\x17p\x04test\x05local\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\x01' + b'\x00\x00\x00\x00\xea`') def 
test_icmpv6_make_neighbor_solicitation_packet(self): # Normal self.assertEqual(self.icmpv6.make_neighbor_solicitation_packet(ethernet_src_mac='01:23:45:67:89:0a', ipv6_src='fd00::1'), b'33\x00\x00\x00\x01\x01#Eg\x89\n\x86\xdd`\x00\x00\x00\x00 :\xff\xfd\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\xff\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x01\x87\x00\xac\x05\x00\x00\x00\x00\xff\x02\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x01\x02\x01\x01#Eg\x89\n') def test_icmpv6_make_neighbor_advertisement_packet(self): # Normal self.assertEqual(self.icmpv6.make_neighbor_advertisement_packet(ethernet_src_mac='01:23:45:67:89:0a', ipv6_src='fd00::1', target_ipv6_address='fd00::2'), b'33\x00\x00\x00\x01\x01#Eg\x89\n\x86\xdd`\x00\x00\x00\x00 :\xff\xfd\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\xff\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x01\x88\x00\x8d\x06 \x00\x00\x00\xfd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x02\x02\x01\x01#Eg\x89\n') def test_icmpv6_make_echo_request_packet(self): # Normal self.assertEqual(self.icmpv6.make_echo_request_packet(ethernet_src_mac='01:23:45:67:89:0a', ethernet_dst_mac='01:23:45:67:89:0b', ipv6_src='fd00::1', ipv6_dst='fd00::2', id=1, sequence=1), b'\x01#Eg\x89\x0b\x01#Eg\x89\n\x86\xdd`\x00\x00\x00\x00@:\xff\xfd\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x01\xfd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x02\x80\x00\x8ek\x00\x01\x00\x01\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c' + b'\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f ' + b'!"#$%&\'()*+,-./01234567') def test_icmpv6_make_echo_reply_packet(self): # Normal self.assertEqual(self.icmpv6.make_echo_reply_packet(ethernet_src_mac='01:23:45:67:89:0a', ethernet_dst_mac='01:23:45:67:89:0b', ipv6_src='fd00::1', ipv6_dst='fd00::2', id=1, sequence=1), 
b'\x01#Eg\x89\x0b\x01#Eg\x89\n\x86\xdd`\x00\x00\x00\x00@:\xff\xfd\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x01\xfd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x02\x81\x00\x8dk\x00\x01\x00\x01\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c' + b'\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f ' + b'!"#$%&\'()*+,-./01234567') # endregion # region Test RawDHCPv6 methods def test_dhcpv6_make_option(self): # Normal self.assertEqual(self.dhcpv6._make_duid(mac_address='01:23:45:67:89:0a'), b'\x00\x03\x00\x01\x01#Eg\x89\n') def test_dhcpv6_make_solicit_packet(self): # Normal self.assertEqual(self.dhcpv6.make_solicit_packet(ethernet_src_mac='01:23:45:67:89:0a', ipv6_src='fd00::1', transaction_id=1, client_mac_address='01:23:45:67:89:0a', option_request_list=[23, 24]), b'33\x00\x01\x00\x02\x01#Eg\x89\n\x86\xdd`\x00\x00\x00\x00H\x11@\xfd\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\xff\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x01\x00\x02\x02"\x02#\x00H.\x01\x01\x00\x00\x01\x00\x03\x00\x18\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0e\x00' + b'\x00\x00\x08\x00\x02\x00\x00\x00\x01\x00\n\x00\x03\x00\x01\x01#Eg\x89\n\x00\x06\x00\x04' + b'\x00\x17\x00\x18') def test_dhcpv6_make_relay_forw_packet(self): # Normal self.assertEqual(self.dhcpv6.make_relay_forw_packet(ethernet_src_mac='01:23:45:67:89:0a', ethernet_dst_mac='01:23:45:67:89:0b', ipv6_src='fd00::1', ipv6_dst='fd00::2', ipv6_flow=1, hop_count=10, link_addr='fd00::2', peer_addr='fd00::3'), b'\x01#Eg\x89\x0b\x01#Eg\x89\n\x86\xdd`\x00\x00\x01\x00*\x11@\xfd\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x01\xfd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x02\x02"\x02#\x00*\xfb?\x0c\n\xfd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x02\xfd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03') def 
test_dhcpv6_make_advertise_packet(self): # Normal self.assertEqual(self.dhcpv6.make_advertise_packet(ethernet_src_mac='01:23:45:67:89:0a', ethernet_dst_mac='01:23:45:67:89:0b', ipv6_src='fd00::1', ipv6_dst='fd00::2', transaction_id=1, dns_address='fd00::1', domain_search='test.local', ipv6_address='fd00::2'), b'\x01#Eg\x89\x0b\x01#Eg\x89\n\x86\xdd`\n\x1b\x82\x00\x84\x11@\xfd\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x01\xfd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x02\x02#\x02"\x00\x84n\xf4\x02\x00\x00\x01\x00\x01\x00\n\x00\x03\x00\x01\x01#Eg' + b'\x89\x0b\x00\x02\x00\n\x00\x03\x00\x01\x01#Eg\x89\n\x00\x14\x00\x00\x00\x17\x00\x10\xfd' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x18\x00\x0c\x04test\x05' + b'local\x00\x00R\x00\x04\x00\x00\x00<\x00\x03\x00(\x00\x00\x00\x01\x00\x00T`\x00\x00\x87' + b'\x00\x00\x05\x00\x18\xfd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xff' + b'\xff\xff\xff\xff\xff\xff\xff') def test_dhcpv6_make_reply_packet(self): # Normal self.assertEqual(self.dhcpv6.make_reply_packet(ethernet_src_mac='01:23:45:67:89:0a', ethernet_dst_mac='01:23:45:67:89:0b', ipv6_src='fd00::1', ipv6_dst='fd00::2', transaction_id=1, dns_address='fd00::1', domain_search='test.local', ipv6_address='fd00::2'), b'\x01#Eg\x89\x0b\x01#Eg\x89\n\x86\xdd`\n\x1b\x82\x00\x84\x11@\xfd\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x01\xfd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x02\x02#\x02"\x00\x84i\xf4\x07\x00\x00\x01\x00\x01\x00\n\x00\x03\x00\x01\x01#Eg' + b'\x89\x0b\x00\x02\x00\n\x00\x03\x00\x01\x01#Eg\x89\n\x00\x14\x00\x00\x00\x17\x00\x10\xfd' + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x18\x00\x0c\x04test\x05' + b'local\x00\x00R\x00\x04\x00\x00\x00<\x00\x03\x00(\x00\x00\x00\x01\x00\x00T`\x00\x00\x87' + b'\x00\x00\x05\x00\x18\xfd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xff' + b'\xff\xff\xff\xff\xff\xff\xff') # 
endregion # endregion
codeparrot/github-code-clean
# -*- coding: utf-8 -*- # # Copyright (c) 2009-2021 Tom Keffer <tkeffer@gmail.com> # # See the file LICENSE.txt for your full rights. # """Classes and functions for interfacing with a Davis VantagePro, VantagePro2, or VantageVue weather station""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import datetime import logging import struct import sys import time import six from six import int2byte, indexbytes, byte2int from six.moves import map from six.moves import zip import weeutil.weeutil import weewx.drivers import weewx.engine import weewx.units from weeutil.weeutil import to_int, to_sorted_string from weewx.crc16 import crc16 log = logging.getLogger(__name__) DRIVER_NAME = 'Vantage' DRIVER_VERSION = '3.2.3' def loader(config_dict, engine): return VantageService(engine, config_dict) def configurator_loader(config_dict): # @UnusedVariable return VantageConfigurator() def confeditor_loader(): return VantageConfEditor() # A few handy constants: _ack = b'\x06' _resend = b'\x15' # NB: The Davis documentation gives this code as 0x21, but it's actually decimal 21 #=============================================================================== # class BaseWrapper #=============================================================================== class BaseWrapper(object): """Base class for (Serial|Ethernet)Wrapper""" def __init__(self, wait_before_retry, command_delay): self.wait_before_retry = wait_before_retry self.command_delay = command_delay def read(self, nbytes=1): raise NotImplementedError def write(self, buf): raise NotImplementedError def flush_input(self): raise NotImplementedError #=============================================================================== # Primitives for working with the Davis Console #=============================================================================== def wakeup_console(self, max_tries=3): """Wake up a Davis Vantage console. This call has three purposes: 1. 
Wake up a sleeping console; 2. Cancel pending LOOP data (if any); 3. Flush the input buffer Note: a flushed buffer is important before sending a command; we want to make sure the next received character is the expected ACK. If unsuccessful, an exception of type weewx.WakeupError is thrown""" for count in range(max_tries): try: # Wake up console and cancel pending LOOP data. # First try a gentle wake up self.write(b'\n') _resp = self.read(2) if _resp == b'\n\r': # LF, CR = 0x0a, 0x0d # We're done; the console accepted our cancel LOOP command; nothing to flush log.debug("Gentle wake up of console successful") return # That didn't work. Try a rude wake up. # Flush any pending LOOP packets self.flush_input() # Look for the acknowledgment of the sent '\n' _resp = self.read(2) if _resp == b'\n\r': log.debug("Rude wake up of console successful") return except weewx.WeeWxIOError: pass log.debug("Retry #%d failed", count) print("Unable to wake up console... sleeping") time.sleep(self.wait_before_retry) print("Unable to wake up console... retrying") log.error("Unable to wake up console") raise weewx.WakeupError("Unable to wake up Vantage console") def send_data(self, data): """Send data to the Davis console, waiting for an acknowledging <ACK> If the <ACK> is not received, no retry is attempted. Instead, an exception of type weewx.WeeWxIOError is raised data: The data to send, as a byte string""" self.write(data) # Look for the acknowledging ACK character _resp = self.read() if _resp != _ack: log.error("No <ACK> received from console") raise weewx.WeeWxIOError("No <ACK> received from Vantage console") def send_data_with_crc16(self, data, max_tries=3): """Send data to the Davis console along with a CRC check, waiting for an acknowledging <ack>. If none received, resend up to max_tries times. 
data: The data to send, as a byte string""" # Calculate the crc for the data: _crc = crc16(data) # ...and pack that on to the end of the data in big-endian order: _data_with_crc = data + struct.pack(">H", _crc) # Retry up to max_tries times: for count in range(max_tries): try: self.write(_data_with_crc) # Look for the acknowledgment. _resp = self.read() if _resp == _ack: return except weewx.WeeWxIOError: pass log.debug("send_data_with_crc16; try #%d", count + 1) log.error("Unable to pass CRC16 check while sending data") raise weewx.CRCError("Unable to pass CRC16 check while sending data to Vantage console") def send_command(self, command, max_tries=3): """Send a command to the console, then look for the byte string 'OK' in the response. Any response from the console is split on \n\r characters and returned as a list.""" for count in range(max_tries): try: self.wakeup_console(max_tries=max_tries) self.write(command) # Takes some time for the Vantage to react and fill up the buffer. Sleep for a bit: time.sleep(self.command_delay) # Can't use function serial.readline() because the VP responds with \n\r, not just \n. # So, instead find how many bytes are waiting and fetch them all nc = self.queued_bytes() _buffer = self.read(nc) # Split the buffer on the newlines _buffer_list = _buffer.strip().split(b'\n\r') # The first member should be the 'OK' in the VP response if _buffer_list[0] == b'OK': # Return the rest: return _buffer_list[1:] except weewx.WeeWxIOError: # Caught an error. Keep trying... pass log.debug("send_command; try #%d failed", count + 1) log.error("Max retries exceeded while sending command %s", command) raise weewx.RetriesExceeded("Max retries exceeded while sending command %s" % command) def get_data_with_crc16(self, nbytes, prompt=None, max_tries=3): """Get a packet of data and do a CRC16 check on it, asking for retransmit if necessary. It is guaranteed that the length of the returned data will be of the requested length. 
An exception of type CRCError will be thrown if the data cannot pass the CRC test in the requested number of retries. nbytes: The number of bytes (including the 2 byte CRC) to get. prompt: Any string to be sent before requesting the data. Default=None max_tries: Number of tries before giving up. Default=3 returns: the packet data as a byte string. The last 2 bytes will be the CRC""" if prompt: self.write(prompt) first_time = True _buffer = b'' for count in range(max_tries): try: if not first_time: self.write(_resend) _buffer = self.read(nbytes) if crc16(_buffer) == 0: return _buffer log.debug("Get_data_with_crc16; try #%d failed. CRC error", count + 1) except weewx.WeeWxIOError as e: log.debug("Get_data_with_crc16; try #%d failed: %s", count + 1, e) first_time = False if _buffer: log.error("Unable to pass CRC16 check while getting data") raise weewx.CRCError("Unable to pass CRC16 check while getting data") else: log.debug("Timeout in get_data_with_crc16") raise weewx.WeeWxIOError("Timeout in get_data_with_crc16") #=============================================================================== # class Serial Wrapper #=============================================================================== def guard_termios(fn): """Decorator function that converts termios exceptions into weewx exceptions.""" # Some functions in the module 'serial' can raise undocumented termios # exceptions. This catches them and converts them to weewx exceptions. 
def guard_termios(fn):
    """Decorator function that converts termios exceptions into weewx exceptions."""
    # Some functions in the module 'serial' can raise undocumented termios
    # exceptions. This catches them and converts them to weewx exceptions.
    try:
        import termios

        def guarded_fn(*args, **kwargs):
            try:
                return fn(*args, **kwargs)
            except termios.error as e:
                raise weewx.WeeWxIOError(e)
    except ImportError:
        # No termios on this platform (e.g. Windows): nothing to guard against.
        def guarded_fn(*args, **kwargs):
            return fn(*args, **kwargs)
    return guarded_fn


class SerialWrapper(BaseWrapper):
    """Wraps a serial connection returned from package serial"""

    def __init__(self, port, baudrate, timeout, wait_before_retry, command_delay):
        super(SerialWrapper, self).__init__(wait_before_retry=wait_before_retry,
                                            command_delay=command_delay)
        self.port = port
        self.baudrate = baudrate
        self.timeout = timeout

    @guard_termios
    def flush_input(self):
        self.serial_port.flushInput()

    @guard_termios
    def flush_output(self):
        self.serial_port.flushOutput()

    @guard_termios
    def queued_bytes(self):
        return self.serial_port.inWaiting()

    def read(self, chars=1):
        """Read exactly 'chars' bytes; raise weewx.WeeWxIOError on a short read."""
        import serial
        try:
            _buffer = self.serial_port.read(chars)
        except serial.serialutil.SerialException as e:
            log.error("SerialException on read.")
            log.error("   ****  %s", e)
            log.error("   ****  Is there a competing process running??")
            # Reraise as a Weewx error I/O error:
            raise weewx.WeeWxIOError(e)
        N = len(_buffer)
        if N != chars:
            raise weewx.WeeWxIOError("Expected to read %d chars; got %d instead"
                                     % (chars, N))
        return _buffer

    def write(self, data):
        """Write 'data'; raise weewx.WeeWxIOError on failure or short write."""
        import serial
        try:
            N = self.serial_port.write(data)
        except serial.serialutil.SerialException as e:
            log.error("SerialException on write.")
            log.error("   ****  %s", e)
            # Reraise as a Weewx error I/O error:
            raise weewx.WeeWxIOError(e)
        # Python version 2.5 and earlier returns 'None', so it cannot be used
        # to test for completion.
        if N is not None and N != len(data):
            raise weewx.WeeWxIOError("Expected to write %d chars; sent %d instead"
                                     % (len(data), N))

    def openPort(self):
        import serial
        # Open up the port and store it
        self.serial_port = serial.Serial(self.port, self.baudrate, timeout=self.timeout)
        log.debug("Opened up serial port %s; baud %d; timeout %.2f",
                  self.port, self.baudrate, self.timeout)

    def closePort(self):
        try:
            # This will cancel any pending loop:
            self.write(b'\n')
        except Exception:
            # Best effort only: the port is going away regardless. Narrowed
            # from a bare 'except:', which would also have swallowed
            # KeyboardInterrupt/SystemExit.
            pass
        self.serial_port.close()


#===============================================================================
#                           class EthernetWrapper
#===============================================================================

class EthernetWrapper(BaseWrapper):
    """Wrap a socket"""

    def __init__(self, host, port, timeout, tcp_send_delay, wait_before_retry, command_delay):
        super(EthernetWrapper, self).__init__(wait_before_retry=wait_before_retry,
                                              command_delay=command_delay)
        self.host = host
        self.port = port
        self.timeout = timeout
        self.tcp_send_delay = tcp_send_delay

    def openPort(self):
        import socket
        try:
            self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.socket.settimeout(self.timeout)
            self.socket.connect((self.host, self.port))
        except (socket.error, socket.timeout, socket.herror) as ex:
            log.error("Socket error while opening port %d to ethernet host %s.",
                      self.port, self.host)
            # Reraise as a weewx I/O error:
            raise weewx.WeeWxIOError(ex)
        except:
            # Anything else: log it, then re-raise unchanged.
            log.error("Unable to connect to ethernet host %s on port %d.",
                      self.host, self.port)
            raise
        log.debug("Opened up ethernet host %s on port %d. timeout=%s, tcp_send_delay=%s",
                  self.host, self.port, self.timeout, self.tcp_send_delay)

    def closePort(self):
        import socket
        try:
            # This will cancel any pending loop:
            self.write(b'\n')
        except Exception:
            # Best effort only: the socket is going down regardless. Narrowed
            # from a bare 'except:', which would also have swallowed
            # KeyboardInterrupt/SystemExit.
            pass
        self.socket.shutdown(socket.SHUT_RDWR)
        self.socket.close()

    def flush_input(self):
        """Flush the input buffer from WeatherLinkIP"""
        import socket
        try:
            # This is a bit of a hack, but there is no analogue to pyserial's
            # flushInput(). Set socket timeout to 0 to get immediate result:
            self.socket.settimeout(0)
            self.socket.recv(4096)
        except (socket.timeout, socket.error):
            pass
        finally:
            # set socket timeout back to original value
            self.socket.settimeout(self.timeout)

    def flush_output(self):
        """Flush the output buffer to WeatherLinkIP

        This function does nothing as there should never be anything left in
        the buffer when using socket.sendall()"""
        pass

    def queued_bytes(self):
        """Determine how many bytes are in the buffer"""
        import socket
        length = 0
        try:
            # Peek without consuming, with an immediate timeout:
            self.socket.settimeout(0)
            length = len(self.socket.recv(8192, socket.MSG_PEEK))
        except socket.error:
            pass
        finally:
            self.socket.settimeout(self.timeout)
        return length

    def read(self, chars=1):
        """Read bytes from WeatherLinkIP"""
        import socket
        _buffer = b''
        _remaining = chars
        while _remaining:
            _N = min(4096, _remaining)
            try:
                _recv = self.socket.recv(_N)
            except (socket.timeout, socket.error) as ex:
                log.error("ip-read error: %s", ex)
                # Reraise as a weewx I/O error:
                raise weewx.WeeWxIOError(ex)
            _nread = len(_recv)
            if _nread == 0:
                raise weewx.WeeWxIOError("Expected %d characters; got zero instead" % (_N,))
            _buffer += _recv
            _remaining -= _nread
        return _buffer

    def write(self, data):
        """Write to a WeatherLinkIP"""
        import socket
        try:
            self.socket.sendall(data)
            # A delay of 0.0 gives socket write error; 0.01 gives no ack error;
            # 0.05 is OK for weewx program
            # Note: a delay of 0.5 s is required for wee_device --logger=logger_info
            time.sleep(self.tcp_send_delay)
        except (socket.timeout, socket.error) as ex:
            log.error("ip-write error: %s", ex)
            # Reraise as a weewx I/O error:
            raise weewx.WeeWxIOError(ex)
Default is 0.5 seconds] max_tries: How many times to try again before giving up. [Optional. Default is 4] iss_id: The station number of the ISS [Optional. Default is 1] model_type: Vantage Pro model type. 1=Vantage Pro; 2=Vantage Pro2 [Optional. Default is 2] loop_request: Requested packet type. 1=LOOP; 2=LOOP2; 3=both. """ log.debug('Driver version is %s', DRIVER_VERSION) self.hardware_type = None # These come from the configuration dictionary: self.max_tries = to_int(vp_dict.get('max_tries', 4)) self.iss_id = to_int(vp_dict.get('iss_id')) self.model_type = to_int(vp_dict.get('model_type', 2)) if self.model_type not in list(range(1, 3)): raise weewx.UnsupportedFeature("Unknown model_type (%d)" % self.model_type) self.loop_request = to_int(vp_dict.get('loop_request', 1)) log.debug("Option loop_request=%d", self.loop_request) self.save_day_rain = None self.max_dst_jump = 7200 # Get an appropriate port, depending on the connection type: self.port = Vantage._port_factory(vp_dict) # Open it up: self.port.openPort() # Read the EEPROM and fill in properties in this instance self._setup() log.debug("Hardware name: %s", self.hardware_name) def openPort(self): """Open up the connection to the console""" self.port.openPort() def closePort(self): """Close the connection to the console. """ self.port.closePort() def genLoopPackets(self): """Generator function that returns loop packets""" while True: # Get LOOP packets in big batches This is necessary because there is # an undocumented limit to how many LOOP records you can request # on the VP (somewhere around 220). for _loop_packet in self.genDavisLoopPackets(200): yield _loop_packet def genDavisLoopPackets(self, N=1): """Generator function to return N loop packets from a Vantage console N: The number of packets to generate [default is 1] yields: up to N loop packets (could be less in the event of a read or CRC error). 
""" log.debug("Requesting %d LOOP packets.", N) self.port.wakeup_console(self.max_tries) if self.loop_request == 1: # If asking for old-fashioned LOOP1 data, send the older command in case the # station does not support the LPS command: self.port.send_data(b"LOOP %d\n" % N) else: # Request N packets of type "loop_request": self.port.send_data(b"LPS %d %d\n" % (self.loop_request, N)) for loop in range(N): for count in range(self.max_tries): try: loop_packet = self._get_packet() except weewx.WeeWxIOError as e: log.error("LOOP try #%d; error: %s", count + 1, e) else: yield loop_packet break else: log.error("LOOP max tries (%d) exceeded.", self.max_tries) raise weewx.RetriesExceeded("Max tries exceeded while getting LOOP data.") def _get_packet(self): """Get a single LOOP packet""" # Fetch a packet... _buffer = self.port.read(99) # ... see if it passes the CRC test ... crc = crc16(_buffer) if crc: if weewx.debug > 1: log.error("LOOP buffer failed CRC check. Calculated CRC=%d" % crc) if six.PY2: log.error("Buffer: " + "".join("\\x%02x" % ord(c) for c in _buffer)) else: log.error("Buffer: %s", _buffer) raise weewx.CRCError("LOOP buffer failed CRC check") # ... decode it ... loop_packet = self._unpackLoopPacket(_buffer[:95]) # .. then return it return loop_packet def genArchiveRecords(self, since_ts): """A generator function to return archive packets from a Davis Vantage station. since_ts: A timestamp. All data since (but not including) this time will be returned. Pass in None for all data yields: a sequence of dictionaries containing the data """ count = 0 while count < self.max_tries: try: for _record in self.genDavisArchiveRecords(since_ts): # Successfully retrieved record. Set count back to zero. count = 0 since_ts = _record['dateTime'] yield _record # The generator loop exited. We're done. return except weewx.WeeWxIOError as e: # Problem. 
Increment retry count count += 1 log.error("DMPAFT try #%d; error: %s", count, e) log.error("DMPAFT max tries (%d) exceeded.", self.max_tries) raise weewx.RetriesExceeded("Max tries exceeded while getting archive data.") def genDavisArchiveRecords(self, since_ts): """A generator function to return archive records from a Davis Vantage station. This version does not catch any exceptions.""" if since_ts: since_tt = time.localtime(since_ts) # NB: note that some of the Davis documentation gives the year offset as 1900. # From experimentation, 2000 seems to be right, at least for the newer models: _vantageDateStamp = since_tt[2] + (since_tt[1] << 5) + ((since_tt[0] - 2000) << 9) _vantageTimeStamp = since_tt[3] * 100 + since_tt[4] log.debug('Getting archive packets since %s', weeutil.weeutil.timestamp_to_string(since_ts)) else: _vantageDateStamp = _vantageTimeStamp = 0 log.debug('Getting all archive packets') # Pack the date and time into a string, little-endian order _datestr = struct.pack("<HH", _vantageDateStamp, _vantageTimeStamp) # Save the last good time: _last_good_ts = since_ts if since_ts else 0 # Get the starting page and index. First, wake up the console... self.port.wakeup_console(self.max_tries) # ... request a dump... self.port.send_data(b'DMPAFT\n') # ... from the designated date (allow only one try because that's all the console allows): self.port.send_data_with_crc16(_datestr, max_tries=1) # Get the response with how many pages and starting index and decode it. Again, allow only one try: _buffer = self.port.get_data_with_crc16(6, max_tries=1) (_npages, _start_index) = struct.unpack("<HH", _buffer[:4]) log.debug("Retrieving %d page(s); starting index= %d", _npages, _start_index) # Cycle through the pages... for ipage in range(_npages): # ... 
get a page of archive data _page = self.port.get_data_with_crc16(267, prompt=_ack, max_tries=1) # Now extract each record from the page for _index in range(_start_index, 5): # Get the record string buffer for this index: _record_string = _page[1 + 52 * _index:53 + 52 * _index] # If the console has been recently initialized, there will # be unused records, which are filled with 0xff. Detect this # by looking at the first 4 bytes (the date and time): if _record_string[0:4] == 4 * b'\xff' or _record_string[0:4] == 4 * b'\x00': # This record has never been used. We're done. log.debug("Empty record page %d; index %d", ipage, _index) return # Unpack the archive packet from the string buffer: _record = self._unpackArchivePacket(_record_string) # Check to see if the time stamps are declining, which would # signal that we are done. if _record['dateTime'] is None or _record['dateTime'] <= _last_good_ts - self.max_dst_jump: # The time stamp is declining. We're done. log.debug("DMPAFT complete: page timestamp %s less than final timestamp %s", weeutil.weeutil.timestamp_to_string(_record['dateTime']), weeutil.weeutil.timestamp_to_string(_last_good_ts)) log.debug("Catch up complete.") return # Set the last time to the current time, and yield the packet _last_good_ts = _record['dateTime'] yield _record # The starting index for pages other than the first is always zero _start_index = 0 def genArchiveDump(self, progress_fn=None): """ A generator function to return all archive packets in the memory of a Davis Vantage station. Args: progress_fn: A function that will be called before every page request. It should have a single argument: the page number. If set to None, no progress will be reported. Yields: a sequence of dictionaries containing the data """ import weewx.wxformulas # Wake up the console... self.port.wakeup_console(self.max_tries) # ... request a dump... self.port.send_data(b'DMP\n') log.debug("Dumping all records.") # Cycle through the pages... 
for ipage in range(512): # If requested, provide users with some feedback: if progress_fn: progress_fn(ipage) # ... get a page of archive data _page = self.port.get_data_with_crc16(267, prompt=_ack, max_tries=self.max_tries) # Now extract each record from the page for _index in range(5): # Get the record string buffer for this index: _record_string = _page[1 + 52 * _index:53 + 52 * _index] # If the console has been recently initialized, there will # be unused records, which are filled with 0xff. Detect this # by looking at the first 4 bytes (the date and time): if _record_string[0:4] == 4 * b'\xff' or _record_string[0:4] == 4 * b'\x00': # This record has never been used. Skip it log.debug("Empty record page %d; index %d", ipage, _index) continue # Unpack the raw archive packet: _record = self._unpackArchivePacket(_record_string) # Because the dump command does not go through the normal weewx # engine pipeline, we have to add these important software derived # variables here. try: T = _record['outTemp'] R = _record['outHumidity'] W = _record['windSpeed'] _record['dewpoint'] = weewx.wxformulas.dewpointF(T, R) _record['heatindex'] = weewx.wxformulas.heatindexF(T, R) _record['windchill'] = weewx.wxformulas.windchillF(T, W) except KeyError: pass yield _record def genLoggerSummary(self): """A generator function to return a summary of each page in the logger. yields: A 8-way tuple containing (page, index, year, month, day, hour, minute, timestamp) """ # Wake up the console... self.port.wakeup_console(self.max_tries) # ... request a dump... self.port.send_data(b'DMP\n') log.debug("Starting logger summary.") # Cycle through the pages... for _ipage in range(512): # ... 
get a page of archive data _page = self.port.get_data_with_crc16(267, prompt=_ack, max_tries=self.max_tries) # Now extract each record from the page for _index in range(5): # Get the record string buffer for this index: _record_string = _page[1 + 52 * _index:53 + 52 * _index] # If the console has been recently initialized, there will # be unused records, which are filled with 0xff. Detect this # by looking at the first 4 bytes (the date and time): if _record_string[0:4] == 4 * b'\xff' or _record_string[0:4] == 4 * b'\x00': # This record has never been used. y = mo = d = h = mn = time_ts = None else: # Extract the date and time from the raw buffer: datestamp, timestamp = struct.unpack("<HH", _record_string[0:4]) time_ts = _archive_datetime(datestamp, timestamp) y = (0xfe00 & datestamp) >> 9 # year mo = (0x01e0 & datestamp) >> 5 # month d = (0x001f & datestamp) # day h = timestamp // 100 # hour mn = timestamp % 100 # minute yield (_ipage, _index, y, mo, d, h, mn, time_ts) log.debug("Vantage: Finished logger summary.") def getTime(self): """Get the current time from the console, returning it as timestamp""" time_dt = self.getConsoleTime() return time.mktime(time_dt.timetuple()) def getConsoleTime(self): """Return the raw time on the console, uncorrected for DST or timezone.""" # Try up to max_tries times: for unused_count in range(self.max_tries): try: # Wake up the console... self.port.wakeup_console(max_tries=self.max_tries) # ... request the time... self.port.send_data(b'GETTIME\n') # ... get the binary data. No prompt, only one try: _buffer = self.port.get_data_with_crc16(8, max_tries=1) (sec, minute, hr, day, mon, yr, unused_crc) = struct.unpack("<bbbbbbH", _buffer) return datetime.datetime(yr + 1900, mon, day, hr, minute, sec) except weewx.WeeWxIOError: # Caught an error. Keep retrying... 
continue log.error("Max retries exceeded while getting time") raise weewx.RetriesExceeded("While getting console time") def setTime(self): """Set the clock on the Davis Vantage console""" for unused_count in range(self.max_tries): try: # Wake the console and begin the setTime command self.port.wakeup_console(max_tries=self.max_tries) self.port.send_data(b'SETTIME\n') # Unfortunately, clock resolution is only 1 second, and transmission takes a # little while to complete, so round up the clock up. 0.5 for clock resolution # and 0.25 for transmission delay newtime_tt = time.localtime(int(time.time() + 0.75)) # The Davis expects the time in reversed order, and the year is since 1900 _buffer = struct.pack("<bbbbbb", newtime_tt[5], newtime_tt[4], newtime_tt[3], newtime_tt[2], newtime_tt[1], newtime_tt[0] - 1900) # Complete the setTime command self.port.send_data_with_crc16(_buffer, max_tries=1) log.info("Clock set to %s", weeutil.weeutil.timestamp_to_string(time.mktime(newtime_tt))) return except weewx.WeeWxIOError: # Caught an error. Keep retrying... continue log.error("Max retries exceeded while setting time") raise weewx.RetriesExceeded("While setting console time") def setDST(self, dst='auto'): """Turn DST on or off, or set it to auto. dst: One of 'auto', 'on' or 'off' """ _dst = dst.strip().lower() if _dst not in ['auto', 'on', 'off']: raise weewx.ViolatedPrecondition("Invalid DST setting %s" % dst) # Set flag whether DST is auto or manual: man_auto = 0 if _dst == 'auto' else 1 self.port.send_data(b"EEBWR 12 01\n") self.port.send_data_with_crc16(int2byte(man_auto)) # If DST is manual, set it on or off: if _dst in ['on', 'off']: on_off = 0 if _dst == 'off' else 1 self.port.send_data(b"EEBWR 13 01\n") self.port.send_data_with_crc16(int2byte(on_off)) def setTZcode(self, code): """Set the console's time zone code. 
See the Davis Vantage manual for the table of preset time zones.""" if code < 0 or code > 46: raise weewx.ViolatedPrecondition("Invalid time zone code %d" % code) # Set the GMT_OR_ZONE byte to use TIME_ZONE value self.port.send_data(b"EEBWR 16 01\n") self.port.send_data_with_crc16(int2byte(0)) # Set the TIME_ZONE value self.port.send_data(b"EEBWR 11 01\n") self.port.send_data_with_crc16(int2byte(code)) def setTZoffset(self, offset): """Set the console's time zone to a custom offset. offset: Offset. This is an integer in hundredths of hours. E.g., -175 would be 1h45m negative offset.""" # Set the GMT_OR_ZONE byte to use GMT_OFFSET value self.port.send_data(b"EEBWR 16 01\n") self.port.send_data_with_crc16(int2byte(1)) # Set the GMT_OFFSET value self.port.send_data(b"EEBWR 14 02\n") self.port.send_data_with_crc16(struct.pack("<h", offset)) def setWindCupType(self, new_wind_cup_code): """Set the wind cup type. new_windCup_code: The new wind cup type. Must be one of 0 or 1 """ if new_wind_cup_code not in (0, 1): raise weewx.ViolatedPrecondition("Invalid wind cup code %d" % new_wind_cup_code) old_setup_bits = self._getEEPROM_value(0x2B)[0] new_setup_bits = (old_setup_bits & 0xF7) | (new_wind_cup_code << 3) # Tell the console to put one byte in hex location 0x2B self.port.send_data(b"EEBWR 2B 01\n") # Follow it up with the data: self.port.send_data_with_crc16(int2byte(new_setup_bits), max_tries=1) # Then call NEWSETUP to get it to stick: self.port.send_data(b"NEWSETUP\n") self._setup() log.info("Wind cup type set to %d (%s)", self.wind_cup_type, self.wind_cup_size) def setBucketType(self, new_bucket_code): """Set the rain bucket type. new_bucket_code: The new bucket type. 
Must be one of 0, 1, or 2 """ if new_bucket_code not in (0, 1, 2): raise weewx.ViolatedPrecondition("Invalid bucket code %d" % new_bucket_code) old_setup_bits = self._getEEPROM_value(0x2B)[0] new_setup_bits = (old_setup_bits & 0xCF) | (new_bucket_code << 4) # Tell the console to put one byte in hex location 0x2B self.port.send_data(b"EEBWR 2B 01\n") # Follow it up with the data: self.port.send_data_with_crc16(int2byte(new_setup_bits), max_tries=1) # Then call NEWSETUP to get it to stick: self.port.send_data(b"NEWSETUP\n") self._setup() log.info("Rain bucket type set to %d (%s)", self.rain_bucket_type, self.rain_bucket_size) def setRainYearStart(self, new_rain_year_start): """Set the start of the rain season. new_rain_year_start: Must be in the closed range 1...12 """ if not 1 <= new_rain_year_start <= 12: raise weewx.ViolatedPrecondition("Invalid rain season start %d" % (new_rain_year_start,)) # Tell the console to put one byte in hex location 0x2C self.port.send_data(b"EEBWR 2C 01\n") # Follow it up with the data: self.port.send_data_with_crc16(int2byte(new_rain_year_start), max_tries=1) self._setup() log.info("Rain year start set to %d", self.rain_year_start) def setBarData(self, new_barometer_inHg, new_altitude_foot): """Set the internal barometer calibration and altitude settings in the console. new_barometer_inHg: The local, reference barometric pressure in inHg. new_altitude_foot: The new altitude in feet.""" new_barometer = int(new_barometer_inHg * 1000.0) new_altitude = int(new_altitude_foot) command = b"BAR=%d %d\n" % (new_barometer, new_altitude) self.port.send_command(command) self._setup() log.info("Set barometer calibration.") def setLatitude(self, latitude_dg): """Set the stations latitude. 
latitude_dg: Must be in the closed range -90.0...90.0 """ latitude = int(round((latitude_dg * 10), 0)) if not -900 <= latitude <= 900: raise weewx.ViolatedPrecondition("Invalid latitude %.1f degree" % (latitude_dg,)) # Tell the console to put one byte in hex location 0x0B self.port.send_data(b"EEBWR 0B 02\n") # Follow it up with the data: self.port.send_data_with_crc16(struct.pack('<BB', latitude & 0xff, (latitude // 256) & 0xff), max_tries=1) # Then call NEWSETUP to get it to stick: self.port.send_data(b"NEWSETUP\n") log.info("Station latitude set to %.1f degree", latitude_dg) def setLongitude(self, longitude_dg): """Set the stations longitude. longitude_dg: Must be in the closed range -180.0...180.0 """ longitude = int(round((longitude_dg * 10), 0)) if not -1800 <= longitude <= 1800: raise weewx.ViolatedPrecondition("Invalid longitude %.1f degree" % (longitude_dg,)) # Tell the console to put one byte in hex location 0x0D self.port.send_data(b"EEBWR 0D 02\n") # Follow it up with the data: self.port.send_data_with_crc16(struct.pack('<BB', longitude & 0xff, (longitude // 256) & 0xff), max_tries = 1) # Then call NEWSETUP to get it to stick: self.port.send_data(b"NEWSETUP\n") log.info("Station longitude set to %.1f degree", longitude_dg) def setArchiveInterval(self, archive_interval_seconds): """Set the archive interval of the Vantage. archive_interval_seconds: The new interval to use in seconds. Must be one of 60, 300, 600, 900, 1800, 3600, or 7200 """ if archive_interval_seconds not in (60, 300, 600, 900, 1800, 3600, 7200): raise weewx.ViolatedPrecondition("Invalid archive interval (%d)" % (archive_interval_seconds,)) # The console expects the interval in minutes. Divide by 60. 
command = b'SETPER %d\n' % int(archive_interval_seconds // 60) self.port.send_command(command, max_tries=self.max_tries) self._setup() log.info("Archive interval set to %d seconds", archive_interval_seconds) def setLamp(self, onoff='OFF'): """Set the lamp on or off""" try: _setting = {'off': b'0', 'on': b'1'}[onoff.lower()] except KeyError: raise ValueError("Unknown lamp setting '%s'" % onoff) _command = b"LAMPS %s\n" % _setting self.port.send_command(_command, max_tries=self.max_tries) log.info("Lamp set to '%s'", onoff) def setTransmitterType(self, new_channel, new_transmitter_type, new_extra_temp, new_extra_hum, new_repeater): """Set the transmitter type for one of the eight channels.""" # Default value just for tidiness. new_temp_hum_bits = 0xFF # Check arguments are consistent. if new_channel not in list(range(1, 9)): raise weewx.ViolatedPrecondition("Invalid channel %d" % new_channel) if new_repeater not in list(range(0, 9)): raise weewx.ViolatedPrecondition("Invalid repeater %d" % new_repeater) if new_transmitter_type not in list(range(0, 11)): raise weewx.ViolatedPrecondition("Invalid transmitter type %d" % new_transmitter_type) if self.transmitter_type_dict[new_transmitter_type] in ['temp', 'temp_hum']: if new_extra_temp not in list(range(1, 9)): raise weewx.ViolatedPrecondition("Invalid extra temperature number %d" % new_extra_temp) # Extra temp is origin 0. new_temp_hum_bits = new_temp_hum_bits & 0xF0 | (new_extra_temp - 1) if self.transmitter_type_dict[new_transmitter_type] in ['hum', 'temp_hum']: if new_extra_hum not in list(range(1, 9)): raise weewx.ViolatedPrecondition("Invalid extra humidity number %d" % new_extra_hum) # Extra humidity is origin 1. new_temp_hum_bits = new_temp_hum_bits & 0x0F | (new_extra_hum << 4) if new_repeater == 0: new_type_bits = (new_transmitter_type & 0x0F) else: new_type_bits = ((new_repeater + 7) << 4) | (new_transmitter_type & 0x0F) # A transmitter type of 10 indicates that channel does not have a transmitter. 
# So, turn off its usetx bit as well. Otherwise, turn it on. usetx = 1 if new_transmitter_type != 10 else 0 old_usetx_bits = self._getEEPROM_value(0x17)[0] new_usetx_bits = old_usetx_bits & ~(1 << (new_channel - 1)) | usetx * (1 << (new_channel - 1)) # Each channel uses two bytes. Find the correct starting byte for this channel start_byte = 0x19 + (new_channel - 1) * 2 # Tell the console to put two bytes in that location. self.port.send_data(b"EEBWR %X 02\n" % start_byte) # Follow it up with the two bytes of data, little-endian order: self.port.send_data_with_crc16(struct.pack('<BB', new_type_bits, new_temp_hum_bits), max_tries=1) # Now tell the console to put the one byte "usetx" in hex location 0x17 self.port.send_data(b"EEBWR 17 01\n") # Follow it up with the usetx data: self.port.send_data_with_crc16(struct.pack('>B', new_usetx_bits), max_tries=1) # Then call NEWSETUP to get it all to stick: self.port.send_data(b"NEWSETUP\n") self._setup() log.info("Transmitter type for channel %d set to %d (%s), repeater: %s, %s", new_channel, new_transmitter_type, self.transmitter_type_dict[new_transmitter_type], self.repeater_dict[new_repeater], self.listen_dict[usetx]) def setRetransmit(self, new_channel): """Set console retransmit channel.""" # Tell the console to put one byte in hex location 0x18 self.port.send_data(b"EEBWR 18 01\n") # Follow it up with the data: self.port.send_data_with_crc16(int2byte(new_channel), max_tries=1) # Then call NEWSETUP to get it to stick: self.port.send_data(b"NEWSETUP\n") self._setup() if new_channel != 0: log.info("Retransmit set to 'ON' at channel: %d", new_channel) else: log.info("Retransmit set to 'OFF'") def setTempLogging(self, new_tempLogging='AVERAGE'): """Set console temperature logging to 'AVERAGE' or 'LAST'.""" try: _setting = {'LAST': 1, 'AVERAGE': 0}[new_tempLogging.upper()] except KeyError: raise ValueError("Unknown console temperature logging setting '%s'" % new_tempLogging.upper()) # Tell the console to put one byte in hex 
location 0x2B self.port.send_data(b"EEBWR FFC 01\n") # Follow it up with the data: self.port.send_data_with_crc16(int2byte(_setting), max_tries=1) # Then call NEWSETUP to get it to stick: self.port.send_data(b"NEWSETUP\n") log.info("Console temperature logging set to '%s'", new_tempLogging.upper()) def setCalibrationWindDir(self, offset): """Set the on-board wind direction calibration.""" if not -359 <= offset <= 359: raise weewx.ViolatedPrecondition("Offset %d out of range [-359, 359]." % offset) # Tell the console to put two bytes in hex location 0x4D self.port.send_data(b"EEBWR 4D 02\n") # Follow it up with the data: self.port.send_data_with_crc16(struct.pack("<h", offset), max_tries=1) log.info("Wind calibration set to %d", offset) def setCalibrationTemp(self, variable, offset): """Set an on-board temperature calibration.""" # Offset is in tenths of degree Fahrenheit. if not -12.8 <= offset <= 12.7: raise weewx.ViolatedPrecondition("Offset %.1f out of range [-12.8, 12.7]." % offset) byte = struct.pack("b", int(round(offset * 10))) variable_dict = { 'outTemp': 0x34 } for i in range(1, 8): variable_dict['extraTemp%d' % i] = 0x34 + i for i in range(1, 5): variable_dict['soilTemp%d' % i] = 0x3B + i for i in range(1, 5): variable_dict['leafTemp%d' % i] = 0x3F + i if variable == "inTemp": # Inside temp is special, needs ones' complement in next byte. complement_byte = struct.pack("B", ~int(round(offset * 10)) & 0xFF) self.port.send_data(b"EEBWR 32 02\n") self.port.send_data_with_crc16(byte + complement_byte, max_tries=1) elif variable in variable_dict: # Other variables are just sent as-is. 
self.port.send_data(b"EEBWR %X 01\n" % variable_dict[variable]) self.port.send_data_with_crc16(byte, max_tries=1) else: raise weewx.ViolatedPrecondition("Variable name %s not known" % variable) log.info("Temperature calibration %s set to %.1f", variable, offset) def setCalibrationHumid(self, variable, offset): """Set an on-board humidity calibration.""" # Offset is in percentage points. if not -100 <= offset <= 100: raise weewx.ViolatedPrecondition("Offset %d out of range [-100, 100]." % offset) byte = struct.pack("b", offset) variable_dict = { 'inHumid': 0x44, 'outHumid': 0x45 } for i in range(1, 8): variable_dict['extraHumid%d' % i] = 0x45 + i if variable in variable_dict: self.port.send_data(b"EEBWR %X 01\n" % variable_dict[variable]) self.port.send_data_with_crc16(byte, max_tries=1) else: raise weewx.ViolatedPrecondition("Variable name %s not known" % variable) log.info("Humidity calibration %s set to %d", variable, offset) def clearLog(self): """Clear the internal archive memory in the Vantage.""" for unused_count in range(self.max_tries): try: self.port.wakeup_console(max_tries=self.max_tries) self.port.send_data(b"CLRLOG\n") log.info("Archive memory cleared.") return except weewx.WeeWxIOError: # Caught an error. Keey trying... continue log.error("Max retries exceeded while clearing log") raise weewx.RetriesExceeded("While clearing log") def getRX(self): """Returns reception statistics from the console. Returns a tuple with 5 values: (# of packets, # of missed packets, # of resynchronizations, the max # of packets received w/o an error, the # of CRC errors detected.)""" rx_list = self.port.send_command(b'RXCHECK\n') if weewx.debug: assert(len(rx_list) == 1) # The following is a list of the reception statistics, but the elements are byte strings rx_list_str = rx_list[0].split() # Convert to numbers and return as a tuple: rx_list = tuple(int(x) for x in rx_list_str) return rx_list def getBarData(self): """Gets barometer calibration data. 
Returns as a 9 element list.""" _bardata = self.port.send_command(b"BARDATA\n") _barometer = float(_bardata[0].split()[1])/1000.0 _altitude = float(_bardata[1].split()[1]) _dewpoint = float(_bardata[2].split()[2]) _virt_temp = float(_bardata[3].split()[2]) _c = float(_bardata[4].split()[1])/10.0 _r = float(_bardata[5].split()[1])/1000.0 _barcal = float(_bardata[6].split()[1])/1000.0 _gain = float(_bardata[7].split()[1]) _offset = float(_bardata[8].split()[1]) return (_barometer, _altitude, _dewpoint, _virt_temp, _c, _r, _barcal, _gain, _offset) def getFirmwareDate(self): """Return the firmware date as a string. """ return self.port.send_command(b'VER\n')[0] def getFirmwareVersion(self): """Return the firmware version as a string.""" return self.port.send_command(b'NVER\n')[0] def getStnInfo(self): """Return lat / lon, time zone, etc.""" (stnlat, stnlon) = self._getEEPROM_value(0x0B, "<2h") stnlat /= 10.0 stnlon /= 10.0 man_or_auto = "MANUAL" if self._getEEPROM_value(0x12)[0] else "AUTO" dst = "ON" if self._getEEPROM_value(0x13)[0] else "OFF" gmt_or_zone = "GMT_OFFSET" if self._getEEPROM_value(0x16)[0] else "ZONE_CODE" zone_code = self._getEEPROM_value(0x11)[0] gmt_offset = self._getEEPROM_value(0x14, "<h")[0] / 100.0 tempLogging = "LAST" if self._getEEPROM_value(0xffc)[0] else "AVERAGE" retransmit_channel = self._getEEPROM_value(0x18)[0] return (stnlat, stnlon, man_or_auto, dst, gmt_or_zone, zone_code, gmt_offset, tempLogging, retransmit_channel) def getStnTransmitters(self): """ Get the types of transmitters on the eight channels.""" transmitters = [ ] use_tx = self._getEEPROM_value(0x17)[0] transmitter_data = self._getEEPROM_value(0x19, "16B") for transmitter_id in range(8): transmitter_type = self.transmitter_type_dict[transmitter_data[transmitter_id * 2] & 0x0F] repeater = 0 repeater = transmitter_data[transmitter_id * 2] & 0xF0 repeater = (repeater >> 4) - 7 if repeater > 127 else 0 transmitter = {"transmitter_type": transmitter_type, "repeater": 
self.repeater_dict[repeater], "listen": self.listen_dict[(use_tx >> transmitter_id) & 1] } if transmitter_type in ['temp', 'temp_hum']: # Extra temperature is origin 0. transmitter['temp'] = (transmitter_data[transmitter_id * 2 + 1] & 0xF) + 1 if transmitter_type in ['hum', 'temp_hum']: # Extra humidity is origin 1. transmitter['hum'] = transmitter_data[transmitter_id * 2 + 1] >> 4 transmitters.append(transmitter) return transmitters def getStnCalibration(self): """ Get the temperature/humidity/wind calibrations built into the console. """ (inTemp, inTempComp, outTemp, extraTemp1, extraTemp2, extraTemp3, extraTemp4, extraTemp5, extraTemp6, extraTemp7, soilTemp1, soilTemp2, soilTemp3, soilTemp4, leafTemp1, leafTemp2, leafTemp3, leafTemp4, inHumid, outHumid, extraHumid1, extraHumid2, extraHumid3, extraHumid4, extraHumid5, extraHumid6, extraHumid7, wind) = self._getEEPROM_value(0x32, "<27bh") # inTempComp is 1's complement of inTemp. if inTemp + inTempComp != -1: log.error("Inconsistent EEPROM calibration values") return None # Temperatures are in tenths of a degree F; Humidity in 1 percent. 
return { "inTemp": inTemp / 10.0, "outTemp": outTemp / 10.0, "extraTemp1": extraTemp1 / 10.0, "extraTemp2": extraTemp2 / 10.0, "extraTemp3": extraTemp3 / 10.0, "extraTemp4": extraTemp4 / 10.0, "extraTemp5": extraTemp5 / 10.0, "extraTemp6": extraTemp6 / 10.0, "extraTemp7": extraTemp7 / 10.0, "soilTemp1": soilTemp1 / 10.0, "soilTemp2": soilTemp2 / 10.0, "soilTemp3": soilTemp3 / 10.0, "soilTemp4": soilTemp4 / 10.0, "leafTemp1": leafTemp1 / 10.0, "leafTemp2": leafTemp2 / 10.0, "leafTemp3": leafTemp3 / 10.0, "leafTemp4": leafTemp4 / 10.0, "inHumid": inHumid, "outHumid": outHumid, "extraHumid1": extraHumid1, "extraHumid2": extraHumid2, "extraHumid3": extraHumid3, "extraHumid4": extraHumid4, "extraHumid5": extraHumid5, "extraHumid6": extraHumid6, "extraHumid7": extraHumid7, "wind": wind } def startLogger(self): self.port.send_command(b"START\n") def stopLogger(self): self.port.send_command(b'STOP\n') #=========================================================================== # Davis Vantage utility functions #=========================================================================== @property def hardware_name(self): if self.hardware_type == 16: if self.model_type == 1: return "Vantage Pro" else: return "Vantage Pro2" elif self.hardware_type == 17: return "Vantage Vue" else: raise weewx.UnsupportedFeature("Unknown hardware type %d" % self.hardware_type) @property def archive_interval(self): return self.archive_interval_ def _determine_hardware(self): # Determine the type of hardware: for count in range(self.max_tries): try: self.port.send_data(b"WRD\x12\x4d\n") self.hardware_type = byte2int(self.port.read()) log.debug("Hardware type is %d", self.hardware_type) # 16 = Pro, Pro2, 17 = Vue return self.hardware_type except weewx.WeeWxIOError as e: log.error("_determine_hardware; retry #%d: '%s'", count, e) log.error("Unable to read hardware type; raise WeeWxIOError") raise weewx.WeeWxIOError("Unable to read hardware type") def _setup(self): """Retrieve the EEPROM data block 
from a VP2 and use it to set various properties""" self.port.wakeup_console(max_tries=self.max_tries) # Get hardware type, if not done yet. if self.hardware_type is None: self.hardware_type = self._determine_hardware() # Overwrite model_type if we have Vantage Vue. if self.hardware_type == 17: self.model_type = 2 unit_bits = self._getEEPROM_value(0x29)[0] setup_bits = self._getEEPROM_value(0x2B)[0] self.rain_year_start = self._getEEPROM_value(0x2C)[0] self.archive_interval_ = self._getEEPROM_value(0x2D)[0] * 60 self.altitude = self._getEEPROM_value(0x0F, "<h")[0] self.altitude_vt = weewx.units.ValueTuple(self.altitude, "foot", "group_altitude") barometer_unit_code = unit_bits & 0x03 temperature_unit_code = (unit_bits & 0x0C) >> 2 altitude_unit_code = (unit_bits & 0x10) >> 4 rain_unit_code = (unit_bits & 0x20) >> 5 wind_unit_code = (unit_bits & 0xC0) >> 6 self.wind_cup_type = (setup_bits & 0x08) >> 3 self.rain_bucket_type = (setup_bits & 0x30) >> 4 self.barometer_unit = Vantage.barometer_unit_dict[barometer_unit_code] self.temperature_unit = Vantage.temperature_unit_dict[temperature_unit_code] self.altitude_unit = Vantage.altitude_unit_dict[altitude_unit_code] self.rain_unit = Vantage.rain_unit_dict[rain_unit_code] self.wind_unit = Vantage.wind_unit_dict[wind_unit_code] self.wind_cup_size = Vantage.wind_cup_dict[self.wind_cup_type] self.rain_bucket_size = Vantage.rain_bucket_dict[self.rain_bucket_type] # Try to guess the ISS ID for gauging reception strength. if self.iss_id is None: stations = self.getStnTransmitters() # Wind retransmitter is best candidate. for station_id in range(0, 8): if stations[station_id]['transmitter_type'] == 'wind': self.iss_id = station_id + 1 # Origin 1. break else: # ISS is next best candidate. for station_id in range(0, 8): if stations[station_id]['transmitter_type'] == 'iss': self.iss_id = station_id + 1 # Origin 1. 
break else: # On Vue, can use VP2 ISS, which reports as "rain" for station_id in range(0, 8): if stations[station_id]['transmitter_type'] == 'rain': self.iss_id = station_id + 1 # Origin 1. break else: self.iss_id = 1 # Pick a reasonable default. log.debug("ISS ID is %s", self.iss_id) def _getEEPROM_value(self, offset, v_format="B"): """Return a list of values from the EEPROM starting at a specified offset, using a specified format""" nbytes = struct.calcsize(v_format) # Don't bother waking up the console for the first try. It's probably # already awake from opening the port. However, if we fail, then do a # wakeup. firsttime = True command = b"EEBRD %X %X\n" % (offset, nbytes) for unused_count in range(self.max_tries): try: if not firsttime: self.port.wakeup_console(max_tries=self.max_tries) firsttime = False self.port.send_data(command) _buffer = self.port.get_data_with_crc16(nbytes + 2, max_tries=1) _value = struct.unpack(v_format, _buffer[:-2]) return _value except weewx.WeeWxIOError: continue log.error("Max retries exceeded while getting EEPROM data at address 0x%X", offset) raise weewx.RetriesExceeded("While getting EEPROM data value at address 0x%X" % offset) @staticmethod def _port_factory(vp_dict): """Produce a serial or ethernet port object""" timeout = float(vp_dict.get('timeout', 4.0)) wait_before_retry = float(vp_dict.get('wait_before_retry', 1.2)) command_delay = float(vp_dict.get('command_delay', 0.5)) # Get the connection type. 
If it is not specified, assume 'serial': connection_type = vp_dict.get('type', 'serial').lower() if connection_type == "serial": port = vp_dict['port'] baudrate = int(vp_dict.get('baudrate', 19200)) return SerialWrapper(port, baudrate, timeout, wait_before_retry, command_delay) elif connection_type == "ethernet": hostname = vp_dict['host'] tcp_port = int(vp_dict.get('tcp_port', 22222)) tcp_send_delay = float(vp_dict.get('tcp_send_delay', 0.5)) return EthernetWrapper(hostname, tcp_port, timeout, tcp_send_delay, wait_before_retry, command_delay) raise weewx.UnsupportedFeature(vp_dict['type']) def _unpackLoopPacket(self, raw_loop_buffer): """Decode a raw Davis LOOP packet, returning the results as a dictionary in physical units. raw_loop_buffer: The loop packet data buffer, passed in as a string (Python 2), or a byte array (Python 3). returns: A dictionary. The key will be an observation type, the value will be the observation in physical units.""" # Get the packet type. It's in byte 4. packet_type = indexbytes(raw_loop_buffer, 4) if packet_type == 0: loop_struct = loop1_struct loop_types = loop1_types elif packet_type == 1: loop_struct = loop2_struct loop_types = loop2_types else: raise weewx.WeeWxIOError("Unknown LOOP packet type %s" % packet_type) # Unpack the data, using the appropriate compiled stuct.Struct buffer. # The result will be a long tuple with just the raw values from the console. data_tuple = loop_struct.unpack(raw_loop_buffer) # Combine it with the data types. The result will be a long iterable of 2-way # tuples: (type, raw-value) raw_loop_tuples = zip(loop_types, data_tuple) # Convert to a dictionary: raw_loop_packet = dict(raw_loop_tuples) # Add the bucket type. It's needed to decode rain bucket tips. raw_loop_packet['bucket_type'] = self.rain_bucket_type loop_packet = { 'dateTime': int(time.time() + 0.5), 'usUnits' : weewx.US } # Now we need to map the raw values to physical units. 
for _type in raw_loop_packet: # Get the mapping function for this type. If there is # no such function, supply a lambda function that returns None func = _loop_map.get(_type, lambda p, k: None) # Apply the function val = func(raw_loop_packet, _type) # Ignore None values: if val is not None: loop_packet[_type] = val # Adjust sunrise and sunset: start_of_day = weeutil.weeutil.startOfDay(loop_packet['dateTime']) if 'sunrise' in loop_packet: loop_packet['sunrise'] += start_of_day if 'sunset' in loop_packet: loop_packet['sunset'] += start_of_day # Because the Davis stations do not offer bucket tips in LOOP data, we # must calculate it by looking for changes in rain totals. This won't # work for the very first rain packet. if self.save_day_rain is None: delta = None else: delta = loop_packet['dayRain'] - self.save_day_rain # If the difference is negative, we're at the beginning of a month. if delta < 0: delta = None loop_packet['rain'] = delta self.save_day_rain = loop_packet['dayRain'] return loop_packet def _unpackArchivePacket(self, raw_archive_buffer): """Decode a Davis archive packet, returning the results as a dictionary. raw_archive_buffer: The archive record data buffer, passed in as a string (Python 2), or a byte array (Python 3). returns: A dictionary. The key will be an observation type, the value will be the observation in physical units.""" # Get the record type. It's in byte 42. 
record_type = indexbytes(raw_archive_buffer, 42) if record_type == 0xff: # Rev A packet type: rec_struct = rec_A_struct rec_types = rec_types_A elif record_type == 0x00: # Rev B packet type: rec_struct = rec_B_struct rec_types = rec_types_B else: raise weewx.UnknownArchiveType("Unknown archive type = 0x%x" % (record_type,)) data_tuple = rec_struct.unpack(raw_archive_buffer) raw_archive_record = dict(zip(rec_types, data_tuple)) raw_archive_record['bucket_type'] = self.rain_bucket_type archive_record = { 'dateTime': _archive_datetime(raw_archive_record['date_stamp'], raw_archive_record['time_stamp']), 'usUnits': weewx.US, # Divide archive interval by 60 to keep consistent with wview 'interval': int(self.archive_interval // 60), } archive_record['rxCheckPercent'] = _rxcheck(self.model_type, archive_record['interval'], self.iss_id, raw_archive_record['wind_samples']) for _type in raw_archive_record: # Get the mapping function for this type. If there is no such # function, supply a lambda function that will just return None func = _archive_map.get(_type, lambda p, k: None) # Call the function: val = func(raw_archive_record, _type) # Skip all null values if val is not None: archive_record[_type] = val return archive_record #=============================================================================== # LOOP packet #=============================================================================== # A list of all the types held in a Vantage LOOP packet in their native order. 
loop1_schema = [ ('loop', '3s'), ('rev_type', 'b'), ('packet_type', 'B'), ('next_record', 'H'), ('barometer', 'H'), ('inTemp', 'h'), ('inHumidity', 'B'), ('outTemp', 'h'), ('windSpeed', 'B'), ('windSpeed10', 'B'), ('windDir', 'H'), ('extraTemp1', 'B'), ('extraTemp2', 'B'), ('extraTemp3', 'B'), ('extraTemp4', 'B'), ('extraTemp5', 'B'), ('extraTemp6', 'B'), ('extraTemp7', 'B'), ('soilTemp1', 'B'), ('soilTemp2', 'B'), ('soilTemp3', 'B'), ('soilTemp4', 'B'), ('leafTemp1', 'B'), ('leafTemp2', 'B'), ('leafTemp3', 'B'), ('leafTemp4', 'B'), ('outHumidity', 'B'), ('extraHumid1', 'B'), ('extraHumid2', 'B'), ('extraHumid3', 'B'), ('extraHumid4', 'B'), ('extraHumid5', 'B'), ('extraHumid6', 'B'), ('extraHumid7', 'B'), ('rainRate', 'H'), ('UV', 'B'), ('radiation', 'H'), ('stormRain', 'H'), ('stormStart', 'H'), ('dayRain', 'H'), ('monthRain', 'H'), ('yearRain', 'H'), ('dayET', 'H'), ('monthET', 'H'), ('yearET', 'H'), ('soilMoist1', 'B'), ('soilMoist2', 'B'), ('soilMoist3', 'B'), ('soilMoist4', 'B'), ('leafWet1', 'B'), ('leafWet2', 'B'), ('leafWet3', 'B'), ('leafWet4', 'B'), ('insideAlarm', 'B'), ('rainAlarm', 'B'), ('outsideAlarm1', 'B'), ('outsideAlarm2', 'B'), ('extraAlarm1', 'B'), ('extraAlarm2', 'B'), ('extraAlarm3', 'B'), ('extraAlarm4', 'B'), ('extraAlarm5', 'B'), ('extraAlarm6', 'B'), ('extraAlarm7', 'B'), ('extraAlarm8', 'B'), ('soilLeafAlarm1', 'B'), ('soilLeafAlarm2', 'B'), ('soilLeafAlarm3', 'B'), ('soilLeafAlarm4', 'B'), ('txBatteryStatus', 'B'), ('consBatteryVoltage', 'H'), ('forecastIcon', 'B'), ('forecastRule', 'B'), ('sunrise', 'H'), ('sunset', 'H') ] loop2_schema = [ ('loop', '3s'), ('trendIcon', 'b'), ('packet_type', 'B'), ('_unused', 'H'), ('barometer', 'H'), ('inTemp', 'h'), ('inHumidity', 'B'), ('outTemp', 'h'), ('windSpeed', 'B'), ('_unused', 'B'), ('windDir', 'H'), ('windSpeed10', 'H'), ('windSpeed2', 'H'), ('windGust10', 'H'), ('windGustDir10', 'H'), ('_unused', 'H'), ('_unused', 'H'), ('dewpoint', 'h'), ('_unused', 'B'), ('outHumidity', 'B'), ('_unused', 
'B'), ('heatindex', 'h'), ('windchill', 'h'), ('THSW', 'h'), ('rainRate', 'H'), ('UV', 'B'), ('radiation', 'H'), ('stormRain', 'H'), ('stormStart', 'H'), ('dayRain', 'H'), ('rain15', 'H'), ('hourRain', 'H'), ('dayET', 'H'), ('rain24', 'H'), ('bar_reduction', 'B'), ('bar_offset', 'h'), ('bar_calibration', 'h'), ('pressure_raw', 'H'), ('pressure', 'H'), ('altimeter', 'H'), ('_unused', 'B'), ('_unused', 'B'), ('_unused_graph', 'B'), ('_unused_graph', 'B'), ('_unused_graph', 'B'), ('_unused_graph', 'B'), ('_unused_graph', 'B'), ('_unused_graph', 'B'), ('_unused_graph', 'B'), ('_unused_graph', 'B'), ('_unused_graph', 'B'), ('_unused_graph', 'B'), ('_unused', 'H'), ('_unused', 'H'), ('_unused', 'H'), ('_unused', 'H'), ('_unused', 'H'), ('_unused', 'H') ] # Extract the types and struct.Struct formats for the two types of LOOP packets loop1_types, loop1_code = list(zip(*loop1_schema)) loop1_struct = struct.Struct('<' + ''.join(loop1_code)) loop2_types, loop2_code = list(zip(*loop2_schema)) loop2_struct = struct.Struct('<' + ''.join(loop2_code)) #=============================================================================== # archive packet #=============================================================================== rec_A_schema =[ ('date_stamp', 'H'), ('time_stamp', 'H'), ('outTemp', 'h'), ('highOutTemp', 'h'), ('lowOutTemp', 'h'), ('rain', 'H'), ('rainRate', 'H'), ('barometer', 'H'), ('radiation', 'H'), ('wind_samples', 'H'), ('inTemp', 'h'), ('inHumidity', 'B'), ('outHumidity', 'B'), ('windSpeed', 'B'), ('windGust', 'B'), ('windGustDir', 'B'), ('windDir', 'B'), ('UV', 'B'), ('ET', 'B'), ('invalid_data', 'B'), ('soilMoist1', 'B'), ('soilMoist2', 'B'), ('soilMoist3', 'B'), ('soilMoist4', 'B'), ('soilTemp1', 'B'), ('soilTemp2', 'B'), ('soilTemp3', 'B'), ('soilTemp4', 'B'), ('leafWet1', 'B'), ('leafWet2', 'B'), ('leafWet3', 'B'), ('leafWet4', 'B'), ('extraTemp1', 'B'), ('extraTemp2', 'B'), ('extraHumid1', 'B'), ('extraHumid2','B'), ('readClosed', 'H'), ('readOpened', 
'H'), ('unused', 'B') ] rec_B_schema = [ ('date_stamp', 'H'), ('time_stamp', 'H'), ('outTemp', 'h'), ('highOutTemp', 'h'), ('lowOutTemp', 'h'), ('rain', 'H'), ('rainRate', 'H'), ('barometer', 'H'), ('radiation', 'H'), ('wind_samples', 'H'), ('inTemp', 'h'), ('inHumidity', 'B'), ('outHumidity', 'B'), ('windSpeed', 'B'), ('windGust', 'B'), ('windGustDir', 'B'), ('windDir', 'B'), ('UV', 'B'), ('ET', 'B'), ('highRadiation', 'H'), ('highUV', 'B'), ('forecastRule', 'B'), ('leafTemp1', 'B'), ('leafTemp2', 'B'), ('leafWet1', 'B'), ('leafWet2', 'B'), ('soilTemp1', 'B'), ('soilTemp2', 'B'), ('soilTemp3', 'B'), ('soilTemp4', 'B'), ('download_record_type', 'B'), ('extraHumid1', 'B'), ('extraHumid2','B'), ('extraTemp1', 'B'), ('extraTemp2', 'B'), ('extraTemp3', 'B'), ('soilMoist1', 'B'), ('soilMoist2', 'B'), ('soilMoist3', 'B'), ('soilMoist4', 'B') ] # Extract the types and struct.Struct formats for the two types of archive packets: rec_types_A, fmt_A = list(zip(*rec_A_schema)) rec_types_B, fmt_B = list(zip(*rec_B_schema)) rec_A_struct = struct.Struct('<' + ''.join(fmt_A)) rec_B_struct = struct.Struct('<' + ''.join(fmt_B)) def _rxcheck(model_type, interval, iss_id, number_of_wind_samples): """Gives an estimate of the fraction of packets received. Ref: Vantage Serial Protocol doc, V2.1.0, released 25-Jan-05; p42""" # The formula for the expected # of packets varies with model number. 
if model_type == 1: _expected_packets = float(interval * 60) / ( 2.5 + (iss_id-1) / 16.0) -\ float(interval * 60) / (50.0 + (iss_id-1) * 1.25) elif model_type == 2: _expected_packets = 960.0 * interval / float(41 + iss_id - 1) else: return None _frac = number_of_wind_samples * 100.0 / _expected_packets if _frac > 100.0: _frac = 100.0 return _frac #=============================================================================== # Decoding routines #=============================================================================== def _archive_datetime(datestamp, timestamp): """Returns the epoch time of the archive packet.""" try: # Construct a time tuple from Davis time. Unfortunately, as timestamps come # off the Vantage logger, there is no way of telling whether or not DST is # in effect. So, have the operating system guess by using a '-1' in the last # position of the time tuple. It's the best we can do... time_tuple = (((0xfe00 & datestamp) >> 9) + 2000, # year (0x01e0 & datestamp) >> 5, # month (0x001f & datestamp), # day timestamp // 100, # hour timestamp % 100, # minute 0, # second 0, 0, -1) # have OS guess DST # Convert to epoch time: ts = int(time.mktime(time_tuple)) except (OverflowError, ValueError, TypeError): ts = None return ts def _loop_date(p, k): """Returns the epoch time stamp of a time encoded in the LOOP packet, which, for some reason, uses a different encoding scheme than the archive packet. Also, the Davis documentation isn't clear whether "bit 0" refers to the least-significant bit, or the most-significant bit. 
I'm assuming the former, which is the usual in little-endian machines.""" v = p[k] if v == 0xffff: return None time_tuple = ((0x007f & v) + 2000, # year (0xf000 & v) >> 12, # month (0x0f80 & v) >> 7, # day 0, 0, 0, # h, m, s 0, 0, -1) # Convert to epoch time: try: ts = int(time.mktime(time_tuple)) except (OverflowError, ValueError): ts = None return ts def _decode_rain(p, k): if p['bucket_type'] == 0: # 0.01 inch bucket return p[k] / 100.0 elif p['bucket_type'] == 1: # 0.2 mm bucket return p[k] * 0.0078740157 elif p['bucket_type'] == 2: # 0.1 mm bucket return p[k] * 0.00393700787 else: log.warning("Unknown bucket type $s" % p['bucket_type']) def _decode_windSpeed_H(p, k): """Decode 10-min average wind speed. It is encoded slightly differently between type 0 and type 1 LOOP packets.""" if p['packet_type'] == 0: return float(p[k]) if p[k] != 0xff else None elif p['packet_type'] == 1: return float(p[k]) / 10.0 if p[k] != 0xffff else None else: log.warning("Unknown LOOP packet type %s" % p['packet_type']) # This dictionary maps a type key to a function. The function should be able to # decode a sensor value held in the LOOP packet in the internal, Davis form into US # units and return it. 
# Sentinel conventions used below (Davis "dashed"/missing values):
#   0xff   - missing byte value; 0x7fff - missing signed word; 0xffff - missing word.
#   Temperatures held in a single byte are stored with a +90 offset
#   (presumably degrees F, per the "US units" convention above - confirm
#   against the Davis serial protocol spec).
_loop_map = {
    'altimeter'         : lambda p, k: float(p[k]) / 1000.0 if p[k] else None,  # thousandths of inHg
    'bar_calibration'   : lambda p, k: float(p[k]) / 1000.0 if p[k] else None,
    'bar_offset'        : lambda p, k: float(p[k]) / 1000.0 if p[k] else None,
    'bar_reduction'     : lambda p, k: p[k],
    'barometer'         : lambda p, k: float(p[k]) / 1000.0 if p[k] else None,
    # Console battery: raw ADC value scaled to volts.
    'consBatteryVoltage': lambda p, k: float((p[k] * 300) >> 9) / 100.0,
    'dayET'             : lambda p, k: float(p[k]) / 1000.0,
    'dayRain'           : _decode_rain,
    'dewpoint'          : lambda p, k: float(p[k]) if p[k] & 0xff != 0xff else None,
    # Alarm fields are passed through as raw bit masks:
    'extraAlarm1'       : lambda p, k: p[k],
    'extraAlarm2'       : lambda p, k: p[k],
    'extraAlarm3'       : lambda p, k: p[k],
    'extraAlarm4'       : lambda p, k: p[k],
    'extraAlarm5'       : lambda p, k: p[k],
    'extraAlarm6'       : lambda p, k: p[k],
    'extraAlarm7'       : lambda p, k: p[k],
    'extraAlarm8'       : lambda p, k: p[k],
    'extraHumid1'       : lambda p, k: float(p[k]) if p[k] != 0xff else None,
    'extraHumid2'       : lambda p, k: float(p[k]) if p[k] != 0xff else None,
    'extraHumid3'       : lambda p, k: float(p[k]) if p[k] != 0xff else None,
    'extraHumid4'       : lambda p, k: float(p[k]) if p[k] != 0xff else None,
    'extraHumid5'       : lambda p, k: float(p[k]) if p[k] != 0xff else None,
    'extraHumid6'       : lambda p, k: float(p[k]) if p[k] != 0xff else None,
    'extraHumid7'       : lambda p, k: float(p[k]) if p[k] != 0xff else None,
    # One-byte temperatures: remove the +90 offset.
    'extraTemp1'        : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
    'extraTemp2'        : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
    'extraTemp3'        : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
    'extraTemp4'        : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
    'extraTemp5'        : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
    'extraTemp6'        : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
    'extraTemp7'        : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
    'forecastIcon'      : lambda p, k: p[k],
    'forecastRule'      : lambda p, k: p[k],
    'heatindex'         : lambda p, k: float(p[k]) if p[k] & 0xff != 0xff else None,
    'hourRain'          : _decode_rain,
    'inHumidity'        : lambda p, k: float(p[k]) if p[k] != 0xff else None,
    'insideAlarm'       : lambda p, k: p[k],
    # Word temperatures are stored in tenths of a degree.
    'inTemp'            : lambda p, k: float(p[k]) / 10.0 if p[k] != 0x7fff else None,
    'leafTemp1'         : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
    'leafTemp2'         : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
    'leafTemp3'         : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
    'leafTemp4'         : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
    'leafWet1'          : lambda p, k: float(p[k]) if p[k] != 0xff else None,
    'leafWet2'          : lambda p, k: float(p[k]) if p[k] != 0xff else None,
    'leafWet3'          : lambda p, k: float(p[k]) if p[k] != 0xff else None,
    'leafWet4'          : lambda p, k: float(p[k]) if p[k] != 0xff else None,
    'monthET'           : lambda p, k: float(p[k]) / 100.0,
    'monthRain'         : _decode_rain,
    'outHumidity'       : lambda p, k: float(p[k]) if p[k] != 0xff else None,
    'outsideAlarm1'     : lambda p, k: p[k],
    'outsideAlarm2'     : lambda p, k: p[k],
    'outTemp'           : lambda p, k: float(p[k]) / 10.0 if p[k] != 0x7fff else None,
    'pressure'          : lambda p, k: float(p[k]) / 1000.0 if p[k] else None,
    'pressure_raw'      : lambda p, k: float(p[k]) / 1000.0 if p[k] else None,
    'radiation'         : lambda p, k: float(p[k]) if p[k] != 0x7fff else None,
    'rain15'            : _decode_rain,
    'rain24'            : _decode_rain,
    'rainAlarm'         : lambda p, k: p[k],
    'rainRate'          : _decode_rain,
    'soilLeafAlarm1'    : lambda p, k: p[k],
    'soilLeafAlarm2'    : lambda p, k: p[k],
    'soilLeafAlarm3'    : lambda p, k: p[k],
    'soilLeafAlarm4'    : lambda p, k: p[k],
    'soilMoist1'        : lambda p, k: float(p[k]) if p[k] != 0xff else None,
    'soilMoist2'        : lambda p, k: float(p[k]) if p[k] != 0xff else None,
    'soilMoist3'        : lambda p, k: float(p[k]) if p[k] != 0xff else None,
    'soilMoist4'        : lambda p, k: float(p[k]) if p[k] != 0xff else None,
    'soilTemp1'         : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
    'soilTemp2'         : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
    'soilTemp3'         : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
    'soilTemp4'         : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
    'stormRain'         : _decode_rain,
    'stormStart'        : _loop_date,
    # Sunrise/sunset arrive as HHMM packed in a decimal word; convert to
    # seconds since midnight.
    'sunrise'           : lambda p, k: 3600 * (p[k] // 100) + 60 * (p[k] % 100),
    'sunset'            : lambda p, k: 3600 * (p[k] // 100) + 60 * (p[k] % 100),
    'THSW'              : lambda p, k: float(p[k]) if p[k] & 0xff != 0xff else None,
    'trendIcon'         : lambda p, k: p[k],
    'txBatteryStatus'   : lambda p, k: int(p[k]),
    'UV'                : lambda p, k: float(p[k]) / 10.0 if p[k] != 0xff else None,
    'windchill'         : lambda p, k: float(p[k]) if p[k] & 0xff != 0xff else None,
    # Wind direction: 360 means north and is normalized to 0; zero itself is
    # the "no data" value.
    'windDir'           : lambda p, k: (float(p[k]) if p[k] != 360 else 0) if p[k] and p[k] != 0x7fff else None,
    'windGust10'        : lambda p, k: float(p[k]) if p[k] != 0xff else None,
    'windGustDir10'     : lambda p, k: (float(p[k]) if p[k] != 360 else 0) if p[k] and p[k] != 0x7fff else None,
    'windSpeed'         : lambda p, k: float(p[k]) if p[k] != 0xff else None,
    'windSpeed10'       : _decode_windSpeed_H,
    'windSpeed2'        : _decode_windSpeed_H,
    'yearET'            : lambda p, k: float(p[k]) / 100.0,
    'yearRain'          : _decode_rain,
}

# This dictionary maps a type key to a function. The function should be able to
# decode a sensor value held in the archive packet in the internal, Davis form into US
# units and return it.
# Same sentinel conventions as the LOOP map: 0xff missing byte, 0x7fff missing
# word, one-byte temperatures offset by +90.
_archive_map = {
    'barometer'    : lambda p, k: float(p[k]) / 1000.0 if p[k] else None,
    'ET'           : lambda p, k: float(p[k]) / 1000.0,
    'extraHumid1'  : lambda p, k: float(p[k]) if p[k] != 0xff else None,
    'extraHumid2'  : lambda p, k: float(p[k]) if p[k] != 0xff else None,
    'extraTemp1'   : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
    'extraTemp2'   : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
    'extraTemp3'   : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
    # 193 marks "no rule" in archive records.
    'forecastRule' : lambda p, k: p[k] if p[k] != 193 else None,
    # NOTE(review): highOutTemp uses the signed sentinel -32768 while
    # lowOutTemp uses 0x7fff - this asymmetry is as found in the source;
    # presumably it reflects how each field is unpacked. Confirm before
    # "fixing".
    'highOutTemp'  : lambda p, k: float(p[k] / 10.0) if p[k] != -32768 else None,
    'highRadiation': lambda p, k: float(p[k]) if p[k] != 0x7fff else None,
    'highUV'       : lambda p, k: float(p[k]) / 10.0 if p[k] != 0xff else None,
    'inHumidity'   : lambda p, k: float(p[k]) if p[k] != 0xff else None,
    'inTemp'       : lambda p, k: float(p[k]) / 10.0 if p[k] != 0x7fff else None,
    'leafTemp1'    : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
    'leafTemp2'    : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
    'leafWet1'     : lambda p, k: float(p[k]) if p[k] != 0xff else None,
    'leafWet2'     : lambda p, k: float(p[k]) if p[k] != 0xff else None,
    'leafWet3'     : lambda p, k: float(p[k]) if p[k] != 0xff else None,
    'leafWet4'     : lambda p, k: float(p[k]) if p[k] != 0xff else None,
    'lowOutTemp'   : lambda p, k: float(p[k]) / 10.0 if p[k] != 0x7fff else None,
    'outHumidity'  : lambda p, k: float(p[k]) if p[k] != 0xff else None,
    'outTemp'      : lambda p, k: float(p[k]) / 10.0 if p[k] != 0x7fff else None,
    'radiation'    : lambda p, k: float(p[k]) if p[k] != 0x7fff else None,
    'rain'         : _decode_rain,
    'rainRate'     : _decode_rain,
    'readClosed'   : lambda p, k: p[k],
    'readOpened'   : lambda p, k: p[k],
    'soilMoist1'   : lambda p, k: float(p[k]) if p[k] != 0xff else None,
    'soilMoist2'   : lambda p, k: float(p[k]) if p[k] != 0xff else None,
    'soilMoist3'   : lambda p, k: float(p[k]) if p[k] != 0xff else None,
    'soilMoist4'   : lambda p, k: float(p[k]) if p[k] != 0xff else None,
    'soilTemp1'    : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
    'soilTemp2'    : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
    'soilTemp3'    : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
    'soilTemp4'    : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
    'UV'           : lambda p, k: float(p[k]) / 10.0 if p[k] != 0xff else None,
    'wind_samples' : lambda p, k: float(p[k]) if p[k] else None,
    # Archive wind directions are a 16-sector compass index; scale to degrees.
    'windDir'      : lambda p, k: float(p[k]) * 22.5 if p[k] != 0xff else None,
    'windGust'     : lambda p, k: float(p[k]),
    'windGustDir'  : lambda p, k: float(p[k]) * 22.5 if p[k] != 0xff else None,
    'windSpeed'    : lambda p, k: float(p[k]) if p[k] != 0xff else None,
}

#===============================================================================
#                      class VantageService
#===============================================================================

# This class uses multiple inheritance:

class VantageService(Vantage, weewx.engine.StdService):
    """Weewx service for the Vantage weather stations.

    Combines the Vantage driver with the weewx StdService machinery so the
    service can track the maximum wind gust within an archive interval and
    carry the console battery status into archive records.
    """

    def __init__(self, engine, config_dict):
        """Initialize both superclasses and subscribe to weewx events.

        engine: the weewx engine this service is attached to.
        config_dict: the full weewx configuration; the [Vantage] section
        (DRIVER_NAME) is passed to the driver constructor.
        """
        Vantage.__init__(self, **config_dict[DRIVER_NAME])
        weewx.engine.StdService.__init__(self, engine, config_dict)

        # Running maximum gust (and its direction) since the start of the
        # current archive period:
        self.max_loop_gust = 0.0
        self.max_loop_gustdir = None

        self.bind(weewx.STARTUP, self.startup)
        self.bind(weewx.NEW_LOOP_PACKET, self.new_loop_packet)
        self.bind(weewx.END_ARCHIVE_PERIOD, self.end_archive_period)
        self.bind(weewx.NEW_ARCHIVE_RECORD, self.new_archive_record)

    def startup(self, event):  # @UnusedVariable
        """Reset gust tracking and battery-status cache at engine startup."""
        self.max_loop_gust = 0.0
        self.max_loop_gustdir = None
        # Latest battery readings seen in LOOP data; merged into each
        # archive record by new_archive_record().
        self.loop_data = {'txBatteryStatus': None,
                          'consBatteryVoltage': None}

    def closePort(self):
        # Now close my superclass's port:
        Vantage.closePort(self)

    def new_loop_packet(self, event):
        """Calculate the max gust seen since the last archive record."""
        # Calculate the max gust seen since the start of this archive record
        # and put it in the packet.
        windSpeed = event.packet.get('windSpeed')
        windDir = event.packet.get('windDir')
        if windSpeed is not None and windSpeed > self.max_loop_gust:
            self.max_loop_gust = windSpeed
            self.max_loop_gustdir = windDir
        event.packet['windGust'] = self.max_loop_gust
        event.packet['windGustDir'] = self.max_loop_gustdir

        # Save the battery statuses:
        for k in self.loop_data:
            self.loop_data[k] = event.packet.get(k)

    def end_archive_period(self, event):  # @UnusedVariable
        """Zero out the max gust seen since the start of the record"""
        self.max_loop_gust = 0.0
        self.max_loop_gustdir = None

    def new_archive_record(self, event):
        """Add the battery status to the archive record."""
        # Add the last battery status:
        event.record.update(self.loop_data)


#===============================================================================
#                      Class VantageConfigurator
#===============================================================================

class VantageConfigurator(weewx.drivers.AbstractConfigurator):
    @property
    def description(self):
        # Short description shown by the weewx configuration tool.
        return "Configures the Davis Vantage weather station."
    @property
    def usage(self):
        # Usage text displayed by optparse; one line per supported action.
        return """%prog --help
       %prog --info [config_file]
       %prog --current [config_file]
       %prog --clear-memory [config_file] [-y]
       %prog --set-interval=MINUTES [config_file] [-y]
       %prog --set-latitude=DEGREE [config_file] [-y]
       %prog --set-longitude=DEGREE [config_file] [-y]
       %prog --set-altitude=FEET [config_file] [-y]
       %prog --set-barometer=inHg [config_file] [-y]
       %prog --set-wind-cup=CODE [config_file] [-y]
       %prog --set-bucket=CODE [config_file] [-y]
       %prog --set-rain-year-start=MM [config_file] [-y]
       %prog --set-offset=VARIABLE,OFFSET [config_file] [-y]
       %prog --set-transmitter-type=CHANNEL,TYPE,TEMP,HUM,REPEATER_ID [config_file] [-y]
       %prog --set-retransmit=[OFF|ON|ON,CHANNEL] [config_file] [-y]
       %prog --set-temperature-logging=[LAST|AVERAGE] [config_file] [-y]
       %prog --set-time [config_file] [-y]
       %prog --set-dst=[AUTO|ON|OFF] [config_file] [-y]
       %prog --set-tz-code=TZCODE [config_file] [-y]
       %prog --set-tz-offset=HHMM [config_file] [-y]
       %prog --set-lamp=[ON|OFF] [config_file]
       %prog --dump [--batch-size=BATCH_SIZE] [config_file] [-y]
       %prog --logger-summary=FILE [config_file] [-y]
       %prog [--start | --stop] [config_file]"""

    def add_options(self, parser):
        """Register all Vantage-specific command-line options on parser."""
        super(VantageConfigurator, self).add_options(parser)
        parser.add_option("--info", action="store_true", dest="info",
                          help="To print configuration, reception, and barometer "
                               "calibration information about your weather station.")
        parser.add_option("--current", action="store_true",
                          help="To print current LOOP information.")
        parser.add_option("--clear-memory", action="store_true", dest="clear_memory",
                          help="To clear the memory of your weather station.")
        parser.add_option("--set-interval", type=int, dest="set_interval",
                          metavar="MINUTES",
                          help="Sets the archive interval to the specified number of minutes. "
                               "Valid values are 1, 5, 10, 15, 30, 60, or 120.")
        parser.add_option("--set-latitude", type=float, dest="set_latitude",
                          metavar="DEGREE",
                          help="Sets the latitude of the station to the specified number of tenth degree.")
        parser.add_option("--set-longitude", type=float, dest="set_longitude",
                          metavar="DEGREE",
                          help="Sets the longitude of the station to the specified number of tenth degree.")
        parser.add_option("--set-altitude", type=float, dest="set_altitude",
                          metavar="FEET",
                          help="Sets the altitude of the station to the specified number of feet.")
        parser.add_option("--set-barometer", type=float, dest="set_barometer",
                          metavar="inHg",
                          help="Sets the barometer reading of the station to a known correct "
                               "value in inches of mercury. Specify 0 (zero) to have the console "
                               "pick a sensible value.")
        parser.add_option("--set-wind-cup", type=int, dest="set_wind_cup",
                          metavar="CODE",
                          help="Set the type of wind cup. Specify '0' for small size; '1' for large size")
        parser.add_option("--set-bucket", type=int, dest="set_bucket",
                          metavar="CODE",
                          help="Set the type of rain bucket. Specify '0' for 0.01 inches; "
                               "'1' for 0.2 mm; '2' for 0.1 mm")
        parser.add_option("--set-rain-year-start", type=int, dest="set_rain_year_start",
                          metavar="MM",
                          help="Set the rain year start (1=Jan, 2=Feb, etc.).")
        parser.add_option("--set-offset", type=str, dest="set_offset",
                          metavar="VARIABLE,OFFSET",
                          help="Set the onboard offset for VARIABLE inTemp, outTemp, extraTemp[1-7], "
                               "inHumid, outHumid, extraHumid[1-7], soilTemp[1-4], leafTemp[1-4], windDir) "
                               "to OFFSET (Fahrenheit, %, degrees)")
        parser.add_option("--set-transmitter-type", type=str, dest="set_transmitter_type",
                          metavar="CHANNEL,TYPE,TEMP,HUM,REPEATER_ID",
                          help="Set the transmitter type for CHANNEL (1-8), TYPE (0=iss, 1=temp, 2=hum, "
                               "3=temp_hum, 4=wind, 5=rain, 6=leaf, 7=soil, 8=leaf_soil, 9=sensorlink, 10=none), "
                               "as extra TEMP station and extra HUM station (both 1-7, if applicable), "
                               "REPEATER_ID ('A'-'H', if used)")
        parser.add_option("--set-retransmit", type=str, dest="set_retransmit",
                          metavar="OFF|ON|ON,CHANNEL",
                          help="Turn console retransmit function 'ON' or 'OFF'.")
        parser.add_option("--set-temperature-logging", dest="set_temp_logging",
                          metavar="LAST|AVERAGE",
                          help="Set console temperature logging to either 'LAST' or 'AVERAGE'.")
        parser.add_option("--set-time", action="store_true", dest="set_time",
                          help="Set the onboard clock to the current time.")
        parser.add_option("--set-dst", dest="set_dst",
                          metavar="AUTO|ON|OFF",
                          help="Set DST to 'ON', 'OFF', or 'AUTO'")
        parser.add_option("--set-tz-code", type=int, dest="set_tz_code",
                          metavar="TZCODE",
                          help="Set timezone code to TZCODE. See your Vantage manual for "
                               "valid codes.")
        parser.add_option("--set-tz-offset", dest="set_tz_offset",
                          help="Set timezone offset to HHMM. E.g. '-0800' for U.S. Pacific Time.",
                          metavar="HHMM")
        parser.add_option("--set-lamp", dest="set_lamp",
                          metavar="ON|OFF",
                          help="Turn the console lamp 'ON' or 'OFF'.")
        parser.add_option("--dump", action="store_true",
                          help="Dump all data to the archive. "
                               "NB: This may result in many duplicate primary key errors.")
        parser.add_option("--batch-size", type=int, default=1, metavar="BATCH_SIZE",
                          help="Use with option --dump. Pages are read off the console in batches "
                               "of BATCH_SIZE. A BATCH_SIZE of zero means dump all data first, "
                               "then put it in the database. This can improve performance in "
                               "high-latency environments, but requires sufficient memory to "
                               "hold all station data. Default is 1 (one).")
        parser.add_option("--logger-summary", type="string", dest="logger_summary",
                          metavar="FILE",
                          help="Save diagnostic summary to FILE (for debugging the logger).")
        parser.add_option("--start", action="store_true",
                          help="Start the logger.")
        parser.add_option("--stop", action="store_true",
                          help="Stop the logger.")

    def do_options(self, options, parser, config_dict, prompt):
        """Dispatch each requested action to its static handler.

        Mutually exclusive combinations are rejected first; then a Vantage
        driver is instantiated and each selected option is applied in turn
        (several actions may be requested in one invocation).
        """
        if options.start and options.stop:
            parser.error("Cannot specify both --start and --stop")
        if options.set_tz_code and options.set_tz_offset:
            parser.error("Cannot specify both --set-tz-code and --set-tz-offset")

        station = Vantage(**config_dict[DRIVER_NAME])
        if options.info:
            self.show_info(station)
        if options.current:
            self.current(station)
        if options.set_interval is not None:
            self.set_interval(station, options.set_interval, options.noprompt)
        if options.set_latitude is not None:
            self.set_latitude(station, options.set_latitude, options.noprompt)
        if options.set_longitude is not None:
            self.set_longitude(station, options.set_longitude, options.noprompt)
        if options.set_altitude is not None:
            self.set_altitude(station, options.set_altitude, options.noprompt)
        if options.set_barometer is not None:
            self.set_barometer(station, options.set_barometer, options.noprompt)
        if options.clear_memory:
            self.clear_memory(station, options.noprompt)
        if options.set_wind_cup is not None:
            self.set_wind_cup(station, options.set_wind_cup, options.noprompt)
        if options.set_bucket is not None:
            self.set_bucket(station, options.set_bucket, options.noprompt)
        if options.set_rain_year_start is not None:
            self.set_rain_year_start(station, options.set_rain_year_start, options.noprompt)
        if options.set_offset is not None:
            self.set_offset(station, options.set_offset, options.noprompt)
        if options.set_transmitter_type is not None:
            self.set_transmitter_type(station, options.set_transmitter_type, options.noprompt)
        if options.set_retransmit is not None:
            self.set_retransmit(station, options.set_retransmit, options.noprompt)
        if options.set_temp_logging is not None:
            self.set_temp_logging(station, options.set_temp_logging, options.noprompt)
        if options.set_time:
            self.set_time(station)
        if options.set_dst:
            self.set_dst(station, options.set_dst)
        if options.set_tz_code:
            self.set_tz_code(station, options.set_tz_code)
        if options.set_tz_offset:
            self.set_tz_offset(station, options.set_tz_offset)
        if options.set_lamp:
            self.set_lamp(station, options.set_lamp)
        if options.dump:
            self.dump_logger(station, config_dict, options.noprompt, options.batch_size)
        if options.logger_summary:
            self.logger_summary(station, options.logger_summary)
        if options.start:
            self.start_logger(station)
        if options.stop:
            self.stop_logger(station)

    @staticmethod
    def show_info(station, dest=sys.stdout):
        """Query the configuration of the Vantage, printing out status information"""
        print("Querying...")
        try:
            _firmware_date = station.getFirmwareDate().decode('ascii')
        except weewx.RetriesExceeded:
            _firmware_date = "<Unavailable>"
        try:
            _firmware_version = station.getFirmwareVersion().decode('ascii')
        except weewx.RetriesExceeded:
            _firmware_version = '<Unavailable>'

        console_time = station.getConsoleTime()
        altitude_converted = weewx.units.convert(station.altitude_vt, station.altitude_unit)[0]

        print("""Davis Vantage EEPROM settings:

    CONSOLE TYPE:                   %s

    CONSOLE FIRMWARE:
      Date:                         %s
      Version:                      %s

    CONSOLE SETTINGS:
      Archive interval:             %d (seconds)
      Altitude:                     %d (%s)
      Wind cup type:                %s
      Rain bucket type:             %s
      Rain year start:              %d
      Onboard time:                 %s

    CONSOLE DISPLAY UNITS:
      Barometer:                    %s
      Temperature:                  %s
      Rain:                         %s
      Wind:                         %s
      """ % (station.hardware_name, _firmware_date, _firmware_version,
             station.archive_interval,
             altitude_converted, station.altitude_unit,
             station.wind_cup_size, station.rain_bucket_size,
             station.rain_year_start, console_time,
             station.barometer_unit, station.temperature_unit,
             station.rain_unit, station.wind_unit), file=dest)

        try:
            (stnlat, stnlon, man_or_auto, dst, gmt_or_zone, zone_code, gmt_offset,
             tempLogging, retransmit_channel) = station.getStnInfo()
            if man_or_auto == 'AUTO':
                dst = 'N/A'
            if gmt_or_zone == 'ZONE_CODE':
                gmt_offset_str = 'N/A'
            else:
                gmt_offset_str = "%+.1f hours" % gmt_offset
                zone_code = 'N/A'
            on_off = "ON" if retransmit_channel else "OFF"
            print("""    CONSOLE STATION INFO:
      Latitude (onboard):           %+0.1f
      Longitude (onboard):          %+0.1f
      Use manual or auto DST?       %s
      DST setting:                  %s
      Use GMT offset or zone code?  %s
      Time zone code:               %s
      GMT offset:                   %s
      Temperature logging:          %s
      Retransmit channel:           %s (%d)
        """ % (stnlat, stnlon, man_or_auto, dst, gmt_or_zone, zone_code,
               gmt_offset_str, tempLogging, on_off, retransmit_channel), file=dest)
        except weewx.RetriesExceeded:
            # Station did not answer; skip this section of the report.
            pass

        # Add transmitter types for each channel, if we can:
        transmitter_list = None
        try:
            transmitter_list = station.getStnTransmitters()
            print("    TRANSMITTERS: ", file=dest)
            print("      Channel   Receive   Repeater  Type", file=dest)
            for transmitter_id in range(0, 8):
                comment = ""
                transmitter_type = transmitter_list[transmitter_id]["transmitter_type"]
                repeater = transmitter_list[transmitter_id]["repeater"]
                listen = transmitter_list[transmitter_id]["listen"]
                if transmitter_type == 'temp_hum':
                    comment = "(as extra temperature %d and extra humidity %d)" % \
                        (transmitter_list[transmitter_id]["temp"],
                         transmitter_list[transmitter_id]["hum"])
                elif transmitter_type == 'temp':
                    comment = "(as extra temperature %d)" % transmitter_list[transmitter_id]["temp"]
                elif transmitter_type == 'hum':
                    comment = "(as extra humidity %d)" % transmitter_list[transmitter_id]["hum"]
                elif transmitter_type == 'none':
                    transmitter_type = "(N/A)"
                print("         %d      %-8s  %-4s      %s %s"
                      % (transmitter_id + 1, listen, repeater, transmitter_type, comment),
                      file=dest)
            print("", file=dest)
        except weewx.RetriesExceeded:
            pass

        # Add reception statistics if we can:
        try:
            _rx_list = station.getRX()
            print("""    RECEPTION STATS:
      Total packets received:       %d
      Total packets missed:         %d
      Number of resynchronizations: %d
      Longest good stretch:         %d
      Number of CRC errors:         %d
      """ % _rx_list, file=dest)
        except:
            # NOTE(review): bare except deliberately left as-is (best-effort
            # report section), but it also hides unexpected errors.
            pass

        # Add barometer calibration data if we can.
        try:
            _bar_list = station.getBarData()
            print("""    BAROMETER CALIBRATION DATA:
      Current barometer reading:    %.3f inHg
      Altitude:                     %.0f feet
      Dew point:                    %.0f F
      Virtual temperature:          %.0f F
      Humidity correction factor:   %.1f
      Correction ratio:             %.3f
      Correction constant:          %+.3f inHg
      Gain:                         %.3f
      Offset:                       %.3f
      """ % _bar_list, file=dest)
        except weewx.RetriesExceeded:
            pass

        # Add temperature/humidity/wind calibration if we can.
        calibration_dict = station.getStnCalibration()
        print("""    OFFSETS:
      Wind direction:               %(wind)+.0f deg
      Inside Temperature:           %(inTemp)+.1f F
      Inside Humidity:              %(inHumid)+.0f %%
      Outside Temperature:          %(outTemp)+.1f F
      Outside Humidity:             %(outHumid)+.0f %%""" % calibration_dict, file=dest)
        if transmitter_list is not None:
            # Only print the calibrations for channels that we are
            # listening to.
for extraTemp in range(1, 8): for t_id in range(0, 8): t_type = transmitter_list[t_id]["transmitter_type"] if t_type in ['temp', 'temp_hum'] and \ extraTemp == transmitter_list[t_id]["temp"]: print(" Extra Temperature %d: %+.1f F" % (extraTemp, calibration_dict["extraTemp%d" % extraTemp]), file=dest) for extraHumid in range(1, 8): for t_id in range(0, 8): t_type = transmitter_list[t_id]["transmitter_type"] if t_type in ['hum', 'temp_hum'] and \ extraHumid == transmitter_list[t_id]["hum"]: print(" Extra Humidity %d: %+.1f F" % (extraHumid, calibration_dict["extraHumid%d" % extraHumid]), file=dest) for t_id in range(0, 8): t_type = transmitter_list[t_id]["transmitter_type"] if t_type in ['soil', 'leaf_soil']: for soil in range(1, 5): print(" Soil Temperature %d: %+.1f F" % (soil, calibration_dict["soilTemp%d" % soil]), file=dest) for t_id in range(0, 8): t_type = transmitter_list[t_id]["transmitter_type"] if t_type in ['leaf', 'leaf_soil']: for leaf in range(1, 5): print(" Leaf Temperature %d: %+.1f F" % (leaf, calibration_dict["leafTemp%d" % leaf]), file=dest) print("", file=dest) @staticmethod def current(station): """Print a single, current LOOP packet.""" print('Querying the station for current weather data...') for pack in station.genDavisLoopPackets(1): print(weeutil.weeutil.timestamp_to_string(pack['dateTime']), to_sorted_string(pack)) @staticmethod def set_interval(station, new_interval_minutes, noprompt): """Set the console archive interval.""" old_interval_minutes = station.archive_interval // 60 print("Old archive interval is %d minutes, new one will be %d minutes." % (station.archive_interval // 60, new_interval_minutes)) if old_interval_minutes == new_interval_minutes: print("Old and new archive intervals are the same. Nothing done.") else: ans = weeutil.weeutil.y_or_n("Proceeding will change the archive interval " "as well as erase all old archive records.\n" "Are you sure you want to proceed (y/n)? 
", noprompt) if ans == 'y': station.setArchiveInterval(new_interval_minutes * 60) print("Archive interval now set to %d seconds." % (station.archive_interval,)) # The Davis documentation implies that the log is # cleared after changing the archive interval, but that # doesn't seem to be the case. Clear it explicitly: station.clearLog() print("Archive records erased.") else: print("Nothing done.") @staticmethod def set_latitude(station, latitude_dg, noprompt): """Set the console station latitude""" ans = weeutil.weeutil.y_or_n("Proceeding will set the latitude value to %.1f degree.\n" "Are you sure you wish to proceed (y/n)? " % latitude_dg, noprompt) if ans == 'y': station.setLatitude(latitude_dg) print("Station latitude set to %.1f degree." % latitude_dg) else: print("Nothing done.") @staticmethod def set_longitude(station, longitude_dg, noprompt): """Set the console station longitude""" ans = weeutil.weeutil.y_or_n("Proceeding will set the longitude value to %.1f degree.\n" "Are you sure you wish to proceed (y/n)? " % longitude_dg, noprompt) if ans == 'y': station.setLongitude(longitude_dg) print("Station longitude set to %.1f degree." % longitude_dg) else: print("Nothing done.") @staticmethod def set_altitude(station, altitude_ft, noprompt): """Set the console station altitude""" ans = weeutil.weeutil.y_or_n("Proceeding will set the station altitude to %.0f feet.\n" "Are you sure you wish to proceed (y/n)? 
" % altitude_ft, noprompt) if ans == 'y': # Hit the console to get the current barometer calibration data and preserve it: _bardata = station.getBarData() _barcal = _bardata[6] # Set new altitude to station and clear previous _barcal value station.setBarData(0.0, altitude_ft) if _barcal != 0.0: # Hit the console again to get the new barometer data: _bardata = station.getBarData() # Set previous _barcal value station.setBarData(_bardata[0] + _barcal, altitude_ft) else: print("Nothing done.") @staticmethod def set_barometer(station, barometer_inHg, noprompt): """Set the barometer reading to a known correct value.""" # Hit the console to get the current barometer calibration data: _bardata = station.getBarData() if barometer_inHg: msg = "Proceeding will set the barometer value to %.3f and " \ "the station altitude to %.0f feet.\n" % (barometer_inHg, _bardata[1]) else: msg = "Proceeding will have the console pick a sensible barometer " \ "calibration and set the station altitude to %.0f feet.\n" % (_bardata[1],) ans = weeutil.weeutil.y_or_n(msg + "Are you sure you wish to proceed (y/n)? ", noprompt) if ans == 'y': station.setBarData(barometer_inHg, _bardata[1]) else: print("Nothing done.") @staticmethod def clear_memory(station, noprompt): """Clear the archive memory of a VantagePro""" ans = weeutil.weeutil.y_or_n("Proceeding will erase all archive records.\n" "Are you sure you wish to proceed (y/n)? ", noprompt) if ans == 'y': print("Erasing all archive records ...") station.clearLog() print("Archive records erased.") else: print("Nothing done.") @staticmethod def set_wind_cup(station, new_wind_cup_type, noprompt): """Set the wind cup type on the console.""" if station.hardware_type != 16: print("Unable to set new wind cup type.") print ("Reason: command only valid with Vantage Pro or Vantage Pro2 station.", file=sys.stderr) return print("Old rain wind cup type is %d (%s), new one is %d (%s)." 
% (station.wind_cup_type, station.wind_cup_size, new_wind_cup_type, Vantage.wind_cup_dict[new_wind_cup_type])) if station.wind_cup_type == new_wind_cup_type: print("Old and new wind cup types are the same. Nothing done.") else: ans = weeutil.weeutil.y_or_n("Proceeding will change the wind cup type.\n" "Are you sure you want to proceed (y/n)? ", noprompt) if ans == 'y': station.setWindCupType(new_wind_cup_type) print("Wind cup type set to %d (%s)." % (station.wind_cup_type, station.wind_cup_size)) else: print("Nothing done.") @staticmethod def set_bucket(station, new_bucket_type, noprompt): """Set the bucket type on the console.""" print("Old rain bucket type is %d (%s), new one is %d (%s)." % (station.rain_bucket_type, station.rain_bucket_size, new_bucket_type, Vantage.rain_bucket_dict[new_bucket_type])) if station.rain_bucket_type == new_bucket_type: print("Old and new bucket types are the same. Nothing done.") else: ans = weeutil.weeutil.y_or_n("Proceeding will change the rain bucket type.\n" "Are you sure you want to proceed (y/n)? ", noprompt) if ans == 'y': station.setBucketType(new_bucket_type) print("Bucket type now set to %d." % (station.rain_bucket_type,)) else: print("Nothing done.") @staticmethod def set_rain_year_start(station, rain_year_start, noprompt): print("Old rain season start is %d, new one is %d." % (station.rain_year_start, rain_year_start)) if station.rain_year_start == rain_year_start: print("Old and new rain season starts are the same. Nothing done.") else: ans = weeutil.weeutil.y_or_n("Proceeding will change the rain season start.\n" "Are you sure you want to proceed (y/n)? ", noprompt) if ans == 'y': station.setRainYearStart(rain_year_start) print("Rain year start now set to %d." 
% (station.rain_year_start,)) else: print("Nothing done.") @staticmethod def set_offset(station, offset_list, noprompt): """Set the on-board offset for a temperature, humidity or wind direction variable.""" (variable, offset_str) = offset_list.split(',') # These variables may be calibrated. temp_variables = ['inTemp', 'outTemp' ] + \ ['extraTemp%d' % i for i in range(1, 8)] + \ ['soilTemp%d' % i for i in range(1, 5)] + \ ['leafTemp%d' % i for i in range(1, 5)] humid_variables = ['inHumid', 'outHumid'] + \ ['extraHumid%d' % i for i in range(1, 8)] # Wind direction can also be calibrated. if variable == "windDir": offset = int(offset_str) if not -359 <= offset <= 359: print("Wind direction offset %d is out of range." % offset, file=sys.stderr) else: ans = weeutil.weeutil.y_or_n("Proceeding will set offset for wind direction to %+d.\n" % offset + "Are you sure you want to proceed (y/n)? ", noprompt) if ans == 'y': station.setCalibrationWindDir(offset) print("Wind direction offset now set to %+d." % offset) else: print("Nothing done.") elif variable in temp_variables: offset = float(offset_str) if not -12.8 <= offset <= 12.7: print("Temperature offset %+.1f is out of range." % (offset), file=sys.stderr) else: ans = weeutil.weeutil.y_or_n("Proceeding will set offset for " "temperature %s to %+.1f.\n" % (variable, offset) + "Are you sure you want to proceed (y/n)? ", noprompt) if ans == 'y': station.setCalibrationTemp(variable, offset) print("Temperature offset %s now set to %+.1f." % (variable, offset)) else: print("Nothing done.") elif variable in humid_variables: offset = int(offset_str) if not 0 <= offset <= 100: print("Humidity offset %+d is out of range." % (offset), file=sys.stderr) else: ans = weeutil.weeutil.y_or_n("Proceeding will set offset for " "humidity %s to %+d.\n" % (variable, offset) + "Are you sure you want to proceed (y/n)? ", noprompt) if ans == 'y': station.setCalibrationHumid(variable, offset) print("Humidity offset %s now set to %+d." 
% (variable, offset)) else: print("Nothing done.") else: print("Unknown variable %s" % variable, file=sys.stderr) @staticmethod def set_transmitter_type(station, transmitter_list, noprompt): """Set the transmitter type for one of the eight channels.""" transmitter_list = list(map((lambda x: int(x) if x.isdigit() else x if x != "" else None), transmitter_list.split(','))) channel = transmitter_list[0] if not 1 <= channel <= 8: print("Channel number must be between 1 and 8.") return # Check new channel against retransmit channel. # Warn and stop if new channel is used as retransmit channel. retransmit_channel = station._getEEPROM_value(0x18)[0] if retransmit_channel == channel: print("This channel is used as retransmit channel. " "Please turn off retransmit function or choose another channel.") return # Init repeater to 'no repeater' repeater = 0 # Check the last entry in transmitter_list to see if it is a repeater letter try: if transmitter_list[len(transmitter_list)-1].isalpha(): repeater_id = transmitter_list[len(transmitter_list)-1].upper() del transmitter_list[len(transmitter_list)-1] # Check with repeater_dict and get the ID number for key in list(station.repeater_dict.keys()): if station.repeater_dict[key] == repeater_id: repeater = key break if repeater == 0: print("Repeater ID must be between 'A' and 'H'.") return except AttributeError: # No repeater letter pass transmitter_type = transmitter_list[1] extra_temp = transmitter_list[2] if len(transmitter_list) > 2 else None extra_hum = transmitter_list[3] if len(transmitter_list) > 3 else None usetx = 1 if transmitter_type != 10 else 0 try: transmitter_type_name = station.transmitter_type_dict[transmitter_type] except KeyError: print("Unknown transmitter type (%s)" % transmitter_type) return if transmitter_type_name in ['temp', 'temp_hum'] and extra_temp not in list(range(1, 8)): print("Transmitter type %s requires extra_temp in range 1-7'" % transmitter_type_name) return if transmitter_type_name in ['hum', 
'temp_hum'] and extra_hum not in list(range(1, 8)): print("Transmitter type %s requires extra_hum in range 1-7'" % transmitter_type_name) return msg = "Proceeding will set channel %d to type %d (%s), repeater: %s, %s.\n" \ % (channel, transmitter_type, transmitter_type_name, station.repeater_dict[repeater], station.listen_dict[usetx]) ans = weeutil.weeutil.y_or_n(msg + "Are you sure you want to proceed (y/n)? ", noprompt) if ans == 'y': station.setTransmitterType(channel, transmitter_type, extra_temp, extra_hum, repeater) print("Transmitter type for channel %d set to %d (%s), repeater: %s, %s." % (channel, transmitter_type, transmitter_type_name, station.repeater_dict[repeater], station.listen_dict[usetx])) else: print("Nothing done.") @staticmethod def set_retransmit(station, channel_on_off, noprompt): """Set console retransmit channel.""" channel = 0 channel_on_off = channel_on_off.strip().upper() channel_on_off_list = channel_on_off.split(',') on_off = channel_on_off_list[0] if on_off != "OFF": if len(channel_on_off_list) > 1: channel = map((lambda x: int(x) if x != "" else None), channel_on_off_list[1])[0] if not 0 < channel < 9: print("Channel out of range 1..8. Nothing done.") return transmitter_list = station.getStnTransmitters() if channel: if transmitter_list[channel-1]["listen"] == "active": print("Channel %d in use. Please select another channel. Nothing done." % channel) return else: for i in range(0, 7): if transmitter_list[i]["listen"] == "inactive": channel = i+1 break if channel == 0: print("All Channels in use. Retransmit can't be enabled. Nothing done.") return old_channel = station._getEEPROM_value(0x18)[0] if old_channel == channel: print("Old and new retransmit settings are the same. Nothing done.") return if channel: msg = "Proceeding will set retransmit to 'ON' at channel: %d.\n" % channel else: msg = "Proceeding will set retransmit to 'OFF'\n." ans = weeutil.weeutil.y_or_n(msg + "Are you sure you want to proceed (y/n)? 
", noprompt) if ans == 'y': station.setRetransmit(channel) if channel: print("Retransmit set to 'ON' at channel: %d." % channel) else: print("Retransmit set to 'OFF'.") else: print("Nothing done.") @staticmethod def set_temp_logging(station, tempLogging, noprompt): """Set console temperature logging to 'LAST' or 'AVERAGE'.""" msg = "Proceeding will change the console temperature logging to '%s'.\n" % tempLogging.upper() ans = weeutil.weeutil.y_or_n(msg + "Are you sure you want to proceed (y/n)? ", noprompt) if ans == 'y': station.setTempLogging(tempLogging) print("Console temperature logging set to '%s'." % (tempLogging.upper())) else: print("Nothing done.") @staticmethod def set_time(station): print("Setting time on console...") station.setTime() newtime_ts = station.getTime() print("Current console time is %s" % weeutil.weeutil.timestamp_to_string(newtime_ts)) @staticmethod def set_dst(station, dst): station.setDST(dst) print("Set DST on console to '%s'" % dst) @staticmethod def set_tz_code(station, tz_code): print("Setting time zone code to %d..." 
% tz_code) station.setTZcode(tz_code) new_tz_code = station.getStnInfo()[5] print("Set time zone code to %s" % new_tz_code) @staticmethod def set_tz_offset(station, tz_offset): offset_int = int(tz_offset) h = abs(offset_int) // 100 m = abs(offset_int) % 100 if h > 12 or m >= 60: raise ValueError("Invalid time zone offset: %s" % tz_offset) offset = h * 100 + (100 * m // 60) if offset_int < 0: offset = -offset station.setTZoffset(offset) new_offset = station.getStnInfo()[6] print("Set time zone offset to %+.1f hours" % new_offset) @staticmethod def set_lamp(station, onoff): print("Setting lamp on console...") station.setLamp(onoff) @staticmethod def start_logger(station): print("Starting logger ...") station.startLogger() print("Logger started") @staticmethod def stop_logger(station): print("Stopping logger ...") station.stopLogger() print("Logger stopped") @staticmethod def dump_logger(station, config_dict, noprompt, batch_size=1): import weewx.manager ans = weeutil.weeutil.y_or_n("Proceeding will dump all data in the logger.\n" "Are you sure you want to proceed (y/n)? 
", noprompt) if ans == 'y': with weewx.manager.open_manager_with_config(config_dict, 'wx_binding', initialize=True) as archive: nrecs = 0 # Determine whether to use something to show our progress: progress_fn = print_page if batch_size == 0 else None # Wrap the Vantage generator function in a converter, which will convert the units # to the same units used by the database: converted_generator = weewx.units.GenWithConvert( station.genArchiveDump(progress_fn=progress_fn), archive.std_unit_system) # Wrap it again, to dump in the requested batch size converted_generator = weeutil.weeutil.GenByBatch(converted_generator, batch_size) print("Starting dump ...") for record in converted_generator: archive.addRecord(record) nrecs += 1 print("Records processed: %d; Timestamp: %s\r" % (nrecs, weeutil.weeutil.timestamp_to_string(record['dateTime'])), end=' ', file=sys.stdout) sys.stdout.flush() print("\nFinished dump. %d records added" % (nrecs,)) else: print("Nothing done.") @staticmethod def logger_summary(station, dest_path): with open(dest_path, mode="w") as dest: VantageConfigurator.show_info(station, dest) print("Starting download of logger summary...") nrecs = 0 for (page, index, y, mo, d, h, mn, time_ts) in station.genLoggerSummary(): if time_ts: print("%4d %4d %4d | %4d-%02d-%02d %02d:%02d | %s" % (nrecs, page, index, y + 2000, mo, d, h, mn, weeutil.weeutil.timestamp_to_string(time_ts)), file=dest) else: print("%4d %4d %4d [*** Unused index ***]" % (nrecs, page, index), file=dest) nrecs += 1 if nrecs % 10 == 0: print("Records processed: %d; Timestamp: %s\r" % (nrecs, weeutil.weeutil.timestamp_to_string(time_ts)), end=' ', file=sys.stdout) sys.stdout.flush() print("\nFinished download of logger summary to file '%s'. %d records processed." 
% (dest_path, nrecs)) # ============================================================================= # Class VantageConfEditor # ============================================================================= class VantageConfEditor(weewx.dr
codeparrot/github-code-clean
# -*- coding: utf-8 -*- # Copyright (C) 2006-2012 Søren Roug, European Environment Agency # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # Contributor(s): # from .namespaces import * import re import types pattern_color = re.compile(r'#[0-9a-fA-F]{6}') pattern_vector3D = re.compile( r'\([ ]*-?([0-9]+(\.[0-9]*)?|\.[0-9]+)([ ]+-?([0-9]+(\.[0-9]*)?|\.[0-9]+)){2}[ ]*\)') def make_NCName(arg): for c in (':', ' '): arg = arg.replace(c, "_%x_" % ord(c)) return arg def cnv_anyURI(attribute, arg, element): return unicode(arg) def cnv_boolean(attribute, arg, element): """ XML Schema Part 2: Datatypes Second Edition An instance of a datatype that is defined as boolean can have the following legal literals {true, false, 1, 0} """ if str(arg).lower() in ("0", "false", "no"): return "false" if str(arg).lower() in ("1", "true", "yes"): return "true" raise ValueError( "'%s' not allowed as Boolean value for %s" % (str(arg), attribute)) # Potentially accept color values def cnv_color(attribute, arg, element): """ A RGB color in conformance with §5.9.11 of [XSL], that is a RGB color in notation “#rrggbb”, where rr, gg and bb are 8-bit hexadecimal digits. 
""" return str(arg) def cnv_configtype(attribute, arg, element): if str(arg) not in ("boolean", "short", "int", "long", "double", "string", "datetime", "base64Binary"): raise ValueError("'%s' not allowed" % str(arg)) return str(arg) def cnv_data_source_has_labels(attribute, arg, element): if str(arg) not in ("none", "row", "column", "both"): raise ValueError("'%s' not allowed" % str(arg)) return str(arg) # Understand different date formats def cnv_date(attribute, arg, element): """ A dateOrDateTime value is either an [xmlschema-2] date value or an [xmlschema-2] dateTime value. """ return str(arg) def cnv_dateTime(attribute, arg, element): """ A dateOrDateTime value is either an [xmlschema-2] date value or an [xmlschema-2] dateTime value. """ return str(arg) def cnv_double(attribute, arg, element): return str(arg) def cnv_duration(attribute, arg, element): return str(arg) def cnv_family(attribute, arg, element): """ A style family """ if str(arg) not in ( "text", "paragraph", "section", "ruby", "table", "table-column", "table-row", "table-cell", "graphic", "presentation", "drawing-page", "chart"): raise ValueError("'%s' not allowed" % str(arg)) return str(arg) def __save_prefix(attribute, arg, element): prefix = arg.split(':', 1)[0] if prefix == arg: return unicode(arg) namespace = element.get_knownns(prefix) if namespace is None: #raise ValueError, "'%s' is an unknown prefix" % str(prefix) return unicode(arg) p = element.get_nsprefix(namespace) return unicode(arg) def cnv_formula(attribute, arg, element): """ A string containing a formula. Formulas do not have a predefined syntax, but the string should begin with a namespace prefix, followed by a “:” (COLON, U+003A) separator, followed by the text of the formula. The namespace bound to the prefix determines the syntax and semantics of the formula. 
""" return __save_prefix(attribute, arg, element) def cnv_ID(attribute, arg, element): return str(arg) def cnv_IDREF(attribute, arg, element): return str(arg) def cnv_integer(attribute, arg, element): return str(arg) def cnv_legend_position(attribute, arg, element): if str(arg) not in ( "start", "end", "top", "bottom", "top-start", "bottom-start", "top-end", "bottom-end"): raise ValueError("'%s' not allowed" % str(arg)) return str(arg) pattern_length = re.compile( r'-?([0-9]+(\.[0-9]*)?|\.[0-9]+)((cm)|(mm)|(in)|(pt)|(pc)|(px))') def cnv_length(attribute, arg, element): """ A (positive or negative) physical length, consisting of magnitude and unit, in conformance with the Units of Measure defined in §5.9.13 of [XSL]. """ global pattern_length if not pattern_length.match(arg): raise ValueError("'%s' is not a valid length" % arg) return arg def cnv_lengthorpercent(attribute, arg, element): failed = False try: return cnv_length(attribute, arg, element) except: failed = True try: return cnv_percent(attribute, arg, element) except: failed = True if failed: raise ValueError("'%s' is not a valid length or percent" % arg) return arg def cnv_metavaluetype(attribute, arg, element): if str(arg) not in ("float", "date", "time", "boolean", "string"): raise ValueError("'%s' not allowed" % str(arg)) return str(arg) def cnv_major_minor(attribute, arg, element): if arg not in ('major', 'minor'): raise ValueError("'%s' is not either 'minor' or 'major'" % arg) pattern_namespacedToken = re.compile(r'[0-9a-zA-Z_]+:[0-9a-zA-Z._\-]+') def cnv_namespacedToken(attribute, arg, element): global pattern_namespacedToken if not pattern_namespacedToken.match(arg): raise ValueError("'%s' is not a valid namespaced token" % arg) return __save_prefix(attribute, arg, element) def cnv_NCName(attribute, arg, element): """ NCName is defined in http://www.w3.org/TR/REC-xml-names/#NT-NCName Essentially an XML name minus ':' """ if type(arg) in types.StringTypes: return make_NCName(arg) else: return 
arg.getAttrNS(STYLENS, 'name') # This function takes either an instance of a style (preferred) # or a text string naming the style. If it is a text string, then it must # already have been converted to an NCName # The text-string argument is mainly for when we build a structure from XML def cnv_StyleNameRef(attribute, arg, element): try: return arg.getAttrNS(STYLENS, 'name') except: return arg # This function takes either an instance of a style (preferred) # or a text string naming the style. If it is a text string, then it must # already have been converted to an NCName # The text-string argument is mainly for when we build a structure from XML def cnv_DrawNameRef(attribute, arg, element): try: return arg.getAttrNS(DRAWNS, 'name') except: return arg # Must accept list of Style objects def cnv_NCNames(attribute, arg, element): return ' '.join(arg) def cnv_nonNegativeInteger(attribute, arg, element): return str(arg) pattern_percent = re.compile(r'-?([0-9]+(\.[0-9]*)?|\.[0-9]+)%') def cnv_percent(attribute, arg, element): global pattern_percent if not pattern_percent.match(arg): raise ValueError("'%s' is not a valid length" % arg) return arg # Real one doesn't allow floating point values pattern_points = re.compile(r'-?[0-9]+,-?[0-9]+([ ]+-?[0-9]+,-?[0-9]+)*') #pattern_points = re.compile(r'-?[0-9.]+,-?[0-9.]+([ ]+-?[0-9.]+,-?[0-9.]+)*') def cnv_points(attribute, arg, element): global pattern_points if type(arg) in types.StringTypes: if not pattern_points.match(arg): raise ValueError( "x,y are separated by a comma and the points are separated by white spaces") return arg else: try: strarg = ' '.join(["%d,%d" % p for p in arg]) except: raise ValueError( "Points must be string or [(0,0),(1,1)] - not %s" % arg) return strarg def cnv_positiveInteger(attribute, arg, element): return str(arg) def cnv_string(attribute, arg, element): return unicode(arg) def cnv_textnoteclass(attribute, arg, element): if str(arg) not in ("footnote", "endnote"): raise ValueError("'%s' not 
allowed" % str(arg)) return str(arg) # Understand different time formats def cnv_time(attribute, arg, element): return str(arg) def cnv_token(attribute, arg, element): return str(arg) pattern_viewbox = re.compile(r'-?[0-9]+([ ]+-?[0-9]+){3}$') def cnv_viewbox(attribute, arg, element): global pattern_viewbox if not pattern_viewbox.match(arg): raise ValueError( "viewBox must be four integers separated by whitespaces") return arg def cnv_xlinkshow(attribute, arg, element): if str(arg) not in ("new", "replace", "embed"): raise ValueError("'%s' not allowed" % str(arg)) return str(arg) attrconverters = { ((ANIMNS, u'audio-level'), None): cnv_double, ((ANIMNS, u'color-interpolation'), None): cnv_string, ((ANIMNS, u'color-interpolation-direction'), None): cnv_string, ((ANIMNS, u'command'), None): cnv_string, ((ANIMNS, u'formula'), None): cnv_string, ((ANIMNS, u'id'), None): cnv_ID, ((ANIMNS, u'iterate-interval'), None): cnv_duration, ((ANIMNS, u'iterate-type'), None): cnv_string, ((ANIMNS, u'name'), None): cnv_string, ((ANIMNS, u'sub-item'), None): cnv_string, ((ANIMNS, u'value'), None): cnv_string, # ((DBNS,u'type'), None): cnv_namespacedToken, ((CHARTNS, u'attached-axis'), None): cnv_string, ((CHARTNS, u'class'), (CHARTNS, u'grid')): cnv_major_minor, ((CHARTNS, u'class'), None): cnv_namespacedToken, ((CHARTNS, u'column-mapping'), None): cnv_string, ((CHARTNS, u'connect-bars'), None): cnv_boolean, ((CHARTNS, u'data-label-number'), None): cnv_string, ((CHARTNS, u'data-label-symbol'), None): cnv_boolean, ((CHARTNS, u'data-label-text'), None): cnv_boolean, ((CHARTNS, u'data-source-has-labels'), None): cnv_data_source_has_labels, ((CHARTNS, u'deep'), None): cnv_boolean, ((CHARTNS, u'dimension'), None): cnv_string, ((CHARTNS, u'display-label'), None): cnv_boolean, ((CHARTNS, u'error-category'), None): cnv_string, ((CHARTNS, u'error-lower-indicator'), None): cnv_boolean, ((CHARTNS, u'error-lower-limit'), None): cnv_string, ((CHARTNS, u'error-margin'), None): cnv_string, 
((CHARTNS, u'error-percentage'), None): cnv_string, ((CHARTNS, u'error-upper-indicator'), None): cnv_boolean, ((CHARTNS, u'error-upper-limit'), None): cnv_string, ((CHARTNS, u'gap-width'), None): cnv_string, ((CHARTNS, u'interpolation'), None): cnv_string, ((CHARTNS, u'interval-major'), None): cnv_string, ((CHARTNS, u'interval-minor-divisor'), None): cnv_string, ((CHARTNS, u'japanese-candle-stick'), None): cnv_boolean, ((CHARTNS, u'label-arrangement'), None): cnv_string, ((CHARTNS, u'label-cell-address'), None): cnv_string, ((CHARTNS, u'legend-align'), None): cnv_string, ((CHARTNS, u'legend-position'), None): cnv_legend_position, ((CHARTNS, u'lines'), None): cnv_boolean, ((CHARTNS, u'link-data-style-to-source'), None): cnv_boolean, ((CHARTNS, u'logarithmic'), None): cnv_boolean, ((CHARTNS, u'maximum'), None): cnv_string, ((CHARTNS, u'mean-value'), None): cnv_boolean, ((CHARTNS, u'minimum'), None): cnv_string, ((CHARTNS, u'name'), None): cnv_string, ((CHARTNS, u'origin'), None): cnv_string, ((CHARTNS, u'overlap'), None): cnv_string, ((CHARTNS, u'percentage'), None): cnv_boolean, ((CHARTNS, u'pie-offset'), None): cnv_string, ((CHARTNS, u'regression-type'), None): cnv_string, ((CHARTNS, u'repeated'), None): cnv_nonNegativeInteger, ((CHARTNS, u'row-mapping'), None): cnv_string, ((CHARTNS, u'scale-text'), None): cnv_boolean, ((CHARTNS, u'series-source'), None): cnv_string, ((CHARTNS, u'solid-type'), None): cnv_string, ((CHARTNS, u'spline-order'), None): cnv_string, ((CHARTNS, u'spline-resolution'), None): cnv_string, ((CHARTNS, u'stacked'), None): cnv_boolean, ((CHARTNS, u'style-name'), None): cnv_StyleNameRef, ((CHARTNS, u'symbol-height'), None): cnv_string, ((CHARTNS, u'symbol-name'), None): cnv_string, ((CHARTNS, u'symbol-type'), None): cnv_string, ((CHARTNS, u'symbol-width'), None): cnv_string, ((CHARTNS, u'text-overlap'), None): cnv_boolean, ((CHARTNS, u'three-dimensional'), None): cnv_boolean, ((CHARTNS, u'tick-marks-major-inner'), None): cnv_boolean, ((CHARTNS, 
u'tick-marks-major-outer'), None): cnv_boolean, ((CHARTNS, u'tick-marks-minor-inner'), None): cnv_boolean, ((CHARTNS, u'tick-marks-minor-outer'), None): cnv_boolean, ((CHARTNS, u'values-cell-range-address'), None): cnv_string, ((CHARTNS, u'vertical'), None): cnv_boolean, ((CHARTNS, u'visible'), None): cnv_boolean, ((CONFIGNS, u'name'), None): cnv_formula, ((CONFIGNS, u'type'), None): cnv_configtype, ((DR3DNS, u'ambient-color'), None): cnv_string, ((DR3DNS, u'back-scale'), None): cnv_string, ((DR3DNS, u'backface-culling'), None): cnv_string, ((DR3DNS, u'center'), None): cnv_string, ((DR3DNS, u'close-back'), None): cnv_boolean, ((DR3DNS, u'close-front'), None): cnv_boolean, ((DR3DNS, u'depth'), None): cnv_length, ((DR3DNS, u'diffuse-color'), None): cnv_string, ((DR3DNS, u'direction'), None): cnv_string, ((DR3DNS, u'distance'), None): cnv_length, ((DR3DNS, u'edge-rounding'), None): cnv_string, ((DR3DNS, u'edge-rounding-mode'), None): cnv_string, ((DR3DNS, u'emissive-color'), None): cnv_string, ((DR3DNS, u'enabled'), None): cnv_boolean, ((DR3DNS, u'end-angle'), None): cnv_string, ((DR3DNS, u'focal-length'), None): cnv_length, ((DR3DNS, u'horizontal-segments'), None): cnv_string, ((DR3DNS, u'lighting-mode'), None): cnv_boolean, ((DR3DNS, u'max-edge'), None): cnv_string, ((DR3DNS, u'min-edge'), None): cnv_string, ((DR3DNS, u'normals-direction'), None): cnv_string, ((DR3DNS, u'normals-kind'), None): cnv_string, ((DR3DNS, u'projection'), None): cnv_string, ((DR3DNS, u'shade-mode'), None): cnv_string, ((DR3DNS, u'shadow'), None): cnv_string, ((DR3DNS, u'shadow-slant'), None): cnv_nonNegativeInteger, ((DR3DNS, u'shininess'), None): cnv_string, ((DR3DNS, u'size'), None): cnv_string, ((DR3DNS, u'specular'), None): cnv_boolean, ((DR3DNS, u'specular-color'), None): cnv_string, ((DR3DNS, u'texture-filter'), None): cnv_string, ((DR3DNS, u'texture-generation-mode-x'), None): cnv_string, ((DR3DNS, u'texture-generation-mode-y'), None): cnv_string, ((DR3DNS, u'texture-kind'), None): 
cnv_string, ((DR3DNS, u'texture-mode'), None): cnv_string, ((DR3DNS, u'transform'), None): cnv_string, ((DR3DNS, u'vertical-segments'), None): cnv_string, ((DR3DNS, u'vpn'), None): cnv_string, ((DR3DNS, u'vrp'), None): cnv_string, ((DR3DNS, u'vup'), None): cnv_string, ((DRAWNS, u'align'), None): cnv_string, ((DRAWNS, u'angle'), None): cnv_integer, ((DRAWNS, u'archive'), None): cnv_string, ((DRAWNS, u'auto-grow-height'), None): cnv_boolean, ((DRAWNS, u'auto-grow-width'), None): cnv_boolean, ((DRAWNS, u'background-size'), None): cnv_string, ((DRAWNS, u'blue'), None): cnv_string, ((DRAWNS, u'border'), None): cnv_string, ((DRAWNS, u'caption-angle'), None): cnv_string, ((DRAWNS, u'caption-angle-type'), None): cnv_string, ((DRAWNS, u'caption-escape'), None): cnv_string, ((DRAWNS, u'caption-escape-direction'), None): cnv_string, ((DRAWNS, u'caption-fit-line-length'), None): cnv_boolean, ((DRAWNS, u'caption-gap'), None): cnv_string, ((DRAWNS, u'caption-line-length'), None): cnv_length, ((DRAWNS, u'caption-point-x'), None): cnv_string, ((DRAWNS, u'caption-point-y'), None): cnv_string, ((DRAWNS, u'caption-id'), None): cnv_IDREF, ((DRAWNS, u'caption-type'), None): cnv_string, ((DRAWNS, u'chain-next-name'), None): cnv_string, ((DRAWNS, u'class-id'), None): cnv_string, ((DRAWNS, u'class-names'), None): cnv_NCNames, ((DRAWNS, u'code'), None): cnv_string, ((DRAWNS, u'color'), None): cnv_string, ((DRAWNS, u'color-inversion'), None): cnv_boolean, ((DRAWNS, u'color-mode'), None): cnv_string, ((DRAWNS, u'concave'), None): cnv_string, ((DRAWNS, u'concentric-gradient-fill-allowed'), None): cnv_boolean, ((DRAWNS, u'contrast'), None): cnv_string, ((DRAWNS, u'control'), None): cnv_IDREF, ((DRAWNS, u'copy-of'), None): cnv_string, ((DRAWNS, u'corner-radius'), None): cnv_length, ((DRAWNS, u'corners'), None): cnv_positiveInteger, ((DRAWNS, u'cx'), None): cnv_string, ((DRAWNS, u'cy'), None): cnv_string, ((DRAWNS, u'data'), None): cnv_string, ((DRAWNS, u'decimal-places'), None): cnv_string, 
((DRAWNS, u'display'), None): cnv_string, ((DRAWNS, u'display-name'), None): cnv_string, ((DRAWNS, u'distance'), None): cnv_lengthorpercent, ((DRAWNS, u'dots1'), None): cnv_integer, ((DRAWNS, u'dots1-length'), None): cnv_lengthorpercent, ((DRAWNS, u'dots2'), None): cnv_integer, ((DRAWNS, u'dots2-length'), None): cnv_lengthorpercent, ((DRAWNS, u'end-angle'), None): cnv_double, ((DRAWNS, u'end'), None): cnv_string, ((DRAWNS, u'end-color'), None): cnv_string, ((DRAWNS, u'end-glue-point'), None): cnv_nonNegativeInteger, ((DRAWNS, u'end-guide'), None): cnv_length, ((DRAWNS, u'end-intensity'), None): cnv_string, ((DRAWNS, u'end-line-spacing-horizontal'), None): cnv_string, ((DRAWNS, u'end-line-spacing-vertical'), None): cnv_string, ((DRAWNS, u'end-shape'), None): cnv_IDREF, ((DRAWNS, u'engine'), None): cnv_namespacedToken, ((DRAWNS, u'enhanced-path'), None): cnv_string, ((DRAWNS, u'escape-direction'), None): cnv_string, ((DRAWNS, u'extrusion-allowed'), None): cnv_boolean, ((DRAWNS, u'extrusion-brightness'), None): cnv_string, ((DRAWNS, u'extrusion'), None): cnv_boolean, ((DRAWNS, u'extrusion-color'), None): cnv_boolean, ((DRAWNS, u'extrusion-depth'), None): cnv_double, ((DRAWNS, u'extrusion-diffusion'), None): cnv_string, ((DRAWNS, u'extrusion-first-light-direction'), None): cnv_string, ((DRAWNS, u'extrusion-first-light-harsh'), None): cnv_boolean, ((DRAWNS, u'extrusion-first-light-level'), None): cnv_string, ((DRAWNS, u'extrusion-light-face'), None): cnv_boolean, ((DRAWNS, u'extrusion-metal'), None): cnv_boolean, ((DRAWNS, u'extrusion-number-of-line-segments'), None): cnv_integer, ((DRAWNS, u'extrusion-origin'), None): cnv_double, ((DRAWNS, u'extrusion-rotation-angle'), None): cnv_double, ((DRAWNS, u'extrusion-rotation-center'), None): cnv_string, ((DRAWNS, u'extrusion-second-light-direction'), None): cnv_string, ((DRAWNS, u'extrusion-second-light-harsh'), None): cnv_boolean, ((DRAWNS, u'extrusion-second-light-level'), None): cnv_string, ((DRAWNS, 
u'extrusion-shininess'), None): cnv_string, ((DRAWNS, u'extrusion-skew'), None): cnv_double, ((DRAWNS, u'extrusion-specularity'), None): cnv_string, ((DRAWNS, u'extrusion-viewpoint'), None): cnv_string, ((DRAWNS, u'fill'), None): cnv_string, ((DRAWNS, u'fill-color'), None): cnv_string, ((DRAWNS, u'fill-gradient-name'), None): cnv_string, ((DRAWNS, u'fill-hatch-name'), None): cnv_string, ((DRAWNS, u'fill-hatch-solid'), None): cnv_boolean, ((DRAWNS, u'fill-image-height'), None): cnv_lengthorpercent, ((DRAWNS, u'fill-image-name'), None): cnv_DrawNameRef, ((DRAWNS, u'fill-image-ref-point'), None): cnv_string, ((DRAWNS, u'fill-image-ref-point-x'), None): cnv_string, ((DRAWNS, u'fill-image-ref-point-y'), None): cnv_string, ((DRAWNS, u'fill-image-width'), None): cnv_lengthorpercent, ((DRAWNS, u'filter-name'), None): cnv_string, ((DRAWNS, u'fit-to-contour'), None): cnv_boolean, ((DRAWNS, u'fit-to-size'), None): cnv_string, # ODF 1.2 says boolean ((DRAWNS, u'formula'), None): cnv_string, ((DRAWNS, u'frame-display-border'), None): cnv_boolean, ((DRAWNS, u'frame-display-scrollbar'), None): cnv_boolean, ((DRAWNS, u'frame-margin-horizontal'), None): cnv_string, ((DRAWNS, u'frame-margin-vertical'), None): cnv_string, ((DRAWNS, u'frame-name'), None): cnv_string, ((DRAWNS, u'gamma'), None): cnv_string, ((DRAWNS, u'glue-point-leaving-directions'), None): cnv_string, ((DRAWNS, u'glue-point-type'), None): cnv_string, ((DRAWNS, u'glue-points'), None): cnv_string, ((DRAWNS, u'gradient-step-count'), None): cnv_string, ((DRAWNS, u'green'), None): cnv_string, ((DRAWNS, u'guide-distance'), None): cnv_string, ((DRAWNS, u'guide-overhang'), None): cnv_length, ((DRAWNS, u'handle-mirror-horizontal'), None): cnv_boolean, ((DRAWNS, u'handle-mirror-vertical'), None): cnv_boolean, ((DRAWNS, u'handle-polar'), None): cnv_string, ((DRAWNS, u'handle-position'), None): cnv_string, ((DRAWNS, u'handle-radius-range-maximum'), None): cnv_string, ((DRAWNS, u'handle-radius-range-minimum'), None): cnv_string, 
((DRAWNS, u'handle-range-x-maximum'), None): cnv_string, ((DRAWNS, u'handle-range-x-minimum'), None): cnv_string, ((DRAWNS, u'handle-range-y-maximum'), None): cnv_string, ((DRAWNS, u'handle-range-y-minimum'), None): cnv_string, ((DRAWNS, u'handle-switched'), None): cnv_boolean, # ((DRAWNS,u'id'), None): cnv_ID, # ((DRAWNS,u'id'), None): cnv_nonNegativeInteger, # ?? line 6581 in RNG ((DRAWNS, u'id'), None): cnv_string, ((DRAWNS, u'image-opacity'), None): cnv_string, ((DRAWNS, u'kind'), None): cnv_string, ((DRAWNS, u'layer'), None): cnv_string, ((DRAWNS, u'line-distance'), None): cnv_string, ((DRAWNS, u'line-skew'), None): cnv_string, ((DRAWNS, u'luminance'), None): cnv_string, ((DRAWNS, u'marker-end-center'), None): cnv_boolean, ((DRAWNS, u'marker-end'), None): cnv_string, ((DRAWNS, u'marker-end-width'), None): cnv_length, ((DRAWNS, u'marker-start-center'), None): cnv_boolean, ((DRAWNS, u'marker-start'), None): cnv_string, ((DRAWNS, u'marker-start-width'), None): cnv_length, ((DRAWNS, u'master-page-name'), None): cnv_StyleNameRef, ((DRAWNS, u'may-script'), None): cnv_boolean, ((DRAWNS, u'measure-align'), None): cnv_string, ((DRAWNS, u'measure-vertical-align'), None): cnv_string, ((DRAWNS, u'mime-type'), None): cnv_string, ((DRAWNS, u'mirror-horizontal'), None): cnv_boolean, ((DRAWNS, u'mirror-vertical'), None): cnv_boolean, ((DRAWNS, u'modifiers'), None): cnv_string, ((DRAWNS, u'name'), None): cnv_NCName, # ((DRAWNS,u'name'), None): cnv_string, ((DRAWNS, u'nav-order'), None): cnv_IDREF, ((DRAWNS, u'nohref'), None): cnv_string, ((DRAWNS, u'notify-on-update-of-ranges'), None): cnv_string, ((DRAWNS, u'object'), None): cnv_string, ((DRAWNS, u'ole-draw-aspect'), None): cnv_string, ((DRAWNS, u'opacity'), None): cnv_string, ((DRAWNS, u'opacity-name'), None): cnv_string, ((DRAWNS, u'page-number'), None): cnv_positiveInteger, ((DRAWNS, u'parallel'), None): cnv_boolean, ((DRAWNS, u'path-stretchpoint-x'), None): cnv_double, ((DRAWNS, u'path-stretchpoint-y'), None): cnv_double, 
((DRAWNS, u'placing'), None): cnv_string, ((DRAWNS, u'points'), None): cnv_points, ((DRAWNS, u'protected'), None): cnv_boolean, ((DRAWNS, u'recreate-on-edit'), None): cnv_boolean, ((DRAWNS, u'red'), None): cnv_string, ((DRAWNS, u'rotation'), None): cnv_integer, ((DRAWNS, u'secondary-fill-color'), None): cnv_string, ((DRAWNS, u'shadow'), None): cnv_string, ((DRAWNS, u'shadow-color'), None): cnv_string, ((DRAWNS, u'shadow-offset-x'), None): cnv_length, ((DRAWNS, u'shadow-offset-y'), None): cnv_length, ((DRAWNS, u'shadow-opacity'), None): cnv_string, ((DRAWNS, u'shape-id'), None): cnv_IDREF, ((DRAWNS, u'sharpness'), None): cnv_string, ((DRAWNS, u'show-unit'), None): cnv_boolean, ((DRAWNS, u'start-angle'), None): cnv_double, ((DRAWNS, u'start'), None): cnv_string, ((DRAWNS, u'start-color'), None): cnv_string, ((DRAWNS, u'start-glue-point'), None): cnv_nonNegativeInteger, ((DRAWNS, u'start-guide'), None): cnv_length, ((DRAWNS, u'start-intensity'), None): cnv_string, ((DRAWNS, u'start-line-spacing-horizontal'), None): cnv_string, ((DRAWNS, u'start-line-spacing-vertical'), None): cnv_string, ((DRAWNS, u'start-shape'), None): cnv_IDREF, ((DRAWNS, u'stroke'), None): cnv_string, ((DRAWNS, u'stroke-dash'), None): cnv_string, ((DRAWNS, u'stroke-dash-names'), None): cnv_string, ((DRAWNS, u'stroke-linejoin'), None): cnv_string, ((DRAWNS, u'style'), None): cnv_string, ((DRAWNS, u'style-name'), None): cnv_StyleNameRef, ((DRAWNS, u'symbol-color'), None): cnv_string, ((DRAWNS, u'text-areas'), None): cnv_string, ((DRAWNS, u'text-path-allowed'), None): cnv_boolean, ((DRAWNS, u'text-path'), None): cnv_boolean, ((DRAWNS, u'text-path-mode'), None): cnv_string, ((DRAWNS, u'text-path-same-letter-heights'), None): cnv_boolean, ((DRAWNS, u'text-path-scale'), None): cnv_string, ((DRAWNS, u'text-rotate-angle'), None): cnv_double, ((DRAWNS, u'text-style-name'), None): cnv_StyleNameRef, ((DRAWNS, u'textarea-horizontal-align'), None): cnv_string, ((DRAWNS, u'textarea-vertical-align'), None): 
cnv_string, ((DRAWNS, u'tile-repeat-offset'), None): cnv_string, ((DRAWNS, u'transform'), None): cnv_string, ((DRAWNS, u'type'), None): cnv_string, ((DRAWNS, u'unit'), None): cnv_string, ((DRAWNS, u'value'), None): cnv_string, ((DRAWNS, u'visible-area-height'), None): cnv_string, ((DRAWNS, u'visible-area-left'), None): cnv_string, ((DRAWNS, u'visible-area-top'), None): cnv_string, ((DRAWNS, u'visible-area-width'), None): cnv_string, ((DRAWNS, u'wrap-influence-on-position'), None): cnv_string, ((DRAWNS, u'z-index'), None): cnv_nonNegativeInteger, ((FONS, u'background-color'), None): cnv_string, ((FONS, u'border-bottom'), None): cnv_string, ((FONS, u'border'), None): cnv_string, ((FONS, u'border-left'), None): cnv_string, ((FONS, u'border-right'), None): cnv_string, ((FONS, u'border-top'), None): cnv_string, ((FONS, u'break-after'), None): cnv_string, ((FONS, u'break-before'), None): cnv_string, ((FONS, u'clip'), None): cnv_string, ((FONS, u'color'), None): cnv_string, ((FONS, u'column-count'), None): cnv_positiveInteger, ((FONS, u'column-gap'), None): cnv_length, ((FONS, u'country'), None): cnv_token, ((FONS, u'end-indent'), None): cnv_length, ((FONS, u'font-family'), None): cnv_string, ((FONS, u'font-size'), None): cnv_string, ((FONS, u'font-style'), None): cnv_string, ((FONS, u'font-variant'), None): cnv_string, ((FONS, u'font-weight'), None): cnv_string, ((FONS, u'height'), None): cnv_string, ((FONS, u'hyphenate'), None): cnv_boolean, ((FONS, u'hyphenation-keep'), None): cnv_string, ((FONS, u'hyphenation-ladder-count'), None): cnv_string, ((FONS, u'hyphenation-push-char-count'), None): cnv_string, ((FONS, u'hyphenation-remain-char-count'), None): cnv_string, ((FONS, u'keep-together'), None): cnv_string, ((FONS, u'keep-with-next'), None): cnv_string, ((FONS, u'language'), None): cnv_token, ((FONS, u'letter-spacing'), None): cnv_string, ((FONS, u'line-height'), None): cnv_string, ((FONS, u'margin-bottom'), None): cnv_string, ((FONS, u'margin'), None): cnv_string, 
((FONS, u'margin-left'), None): cnv_string, ((FONS, u'margin-right'), None): cnv_string, ((FONS, u'margin-top'), None): cnv_string, ((FONS, u'max-height'), None): cnv_string, ((FONS, u'max-width'), None): cnv_string, ((FONS, u'min-height'), None): cnv_length, ((FONS, u'min-width'), None): cnv_string, ((FONS, u'orphans'), None): cnv_string, ((FONS, u'padding-bottom'), None): cnv_string, ((FONS, u'padding'), None): cnv_string, ((FONS, u'padding-left'), None): cnv_string, ((FONS, u'padding-right'), None): cnv_string, ((FONS, u'padding-top'), None): cnv_string, ((FONS, u'page-height'), None): cnv_length, ((FONS, u'page-width'), None): cnv_length, ((FONS, u'space-after'), None): cnv_length, ((FONS, u'space-before'), None): cnv_length, ((FONS, u'start-indent'), None): cnv_length, ((FONS, u'text-align'), None): cnv_string, ((FONS, u'text-align-last'), None): cnv_string, ((FONS, u'text-indent'), None): cnv_string, ((FONS, u'text-shadow'), None): cnv_string, ((FONS, u'text-transform'), None): cnv_string, ((FONS, u'widows'), None): cnv_string, ((FONS, u'width'), None): cnv_string, ((FONS, u'wrap-option'), None): cnv_string, ((FORMNS, u'allow-deletes'), None): cnv_boolean, ((FORMNS, u'allow-inserts'), None): cnv_boolean, ((FORMNS, u'allow-updates'), None): cnv_boolean, ((FORMNS, u'apply-design-mode'), None): cnv_boolean, ((FORMNS, u'apply-filter'), None): cnv_boolean, ((FORMNS, u'auto-complete'), None): cnv_boolean, ((FORMNS, u'automatic-focus'), None): cnv_boolean, ((FORMNS, u'bound-column'), None): cnv_string, ((FORMNS, u'button-type'), None): cnv_string, ((FORMNS, u'command'), None): cnv_string, ((FORMNS, u'command-type'), None): cnv_string, ((FORMNS, u'control-implementation'), None): cnv_namespacedToken, ((FORMNS, u'convert-empty-to-null'), None): cnv_boolean, ((FORMNS, u'current-selected'), None): cnv_boolean, ((FORMNS, u'current-state'), None): cnv_string, # ((FORMNS,u'current-value'), None): cnv_date, # ((FORMNS,u'current-value'), None): cnv_double, ((FORMNS, 
u'current-value'), None): cnv_string, # ((FORMNS,u'current-value'), None): cnv_time, ((FORMNS, u'data-field'), None): cnv_string, ((FORMNS, u'datasource'), None): cnv_string, ((FORMNS, u'default-button'), None): cnv_boolean, ((FORMNS, u'delay-for-repeat'), None): cnv_duration, ((FORMNS, u'detail-fields'), None): cnv_string, ((FORMNS, u'disabled'), None): cnv_boolean, ((FORMNS, u'dropdown'), None): cnv_boolean, ((FORMNS, u'echo-char'), None): cnv_string, ((FORMNS, u'enctype'), None): cnv_string, ((FORMNS, u'escape-processing'), None): cnv_boolean, ((FORMNS, u'filter'), None): cnv_string, ((FORMNS, u'focus-on-click'), None): cnv_boolean, ((FORMNS, u'for'), None): cnv_string, ((FORMNS, u'id'), None): cnv_ID, ((FORMNS, u'ignore-result'), None): cnv_boolean, ((FORMNS, u'image-align'), None): cnv_string, ((FORMNS, u'image-data'), None): cnv_anyURI, ((FORMNS, u'image-position'), None): cnv_string, ((FORMNS, u'is-tristate'), None): cnv_boolean, ((FORMNS, u'label'), None): cnv_string, ((FORMNS, u'list-source'), None): cnv_string, ((FORMNS, u'list-source-type'), None): cnv_string, ((FORMNS, u'master-fields'), None): cnv_string, ((FORMNS, u'max-length'), None): cnv_nonNegativeInteger, # ((FORMNS,u'max-value'), None): cnv_date, # ((FORMNS,u'max-value'), None): cnv_double, ((FORMNS, u'max-value'), None): cnv_string, # ((FORMNS,u'max-value'), None): cnv_time, ((FORMNS, u'method'), None): cnv_string, # ((FORMNS,u'min-value'), None): cnv_date, # ((FORMNS,u'min-value'), None): cnv_double, ((FORMNS, u'min-value'), None): cnv_string, # ((FORMNS,u'min-value'), None): cnv_time, ((FORMNS, u'multi-line'), None): cnv_boolean, ((FORMNS, u'multiple'), None): cnv_boolean, ((FORMNS, u'name'), None): cnv_string, ((FORMNS, u'navigation-mode'), None): cnv_string, ((FORMNS, u'order'), None): cnv_string, ((FORMNS, u'orientation'), None): cnv_string, ((FORMNS, u'page-step-size'), None): cnv_positiveInteger, ((FORMNS, u'printable'), None): cnv_boolean, ((FORMNS, u'property-name'), None): cnv_string, 
((FORMNS, u'readonly'), None): cnv_boolean, ((FORMNS, u'selected'), None): cnv_boolean, ((FORMNS, u'size'), None): cnv_nonNegativeInteger, ((FORMNS, u'state'), None): cnv_string, ((FORMNS, u'step-size'), None): cnv_positiveInteger, ((FORMNS, u'tab-cycle'), None): cnv_string, ((FORMNS, u'tab-index'), None): cnv_nonNegativeInteger, ((FORMNS, u'tab-stop'), None): cnv_boolean, ((FORMNS, u'text-style-name'), None): cnv_StyleNameRef, ((FORMNS, u'title'), None): cnv_string, ((FORMNS, u'toggle'), None): cnv_boolean, ((FORMNS, u'validation'), None): cnv_boolean, # ((FORMNS,u'value'), None): cnv_date, # ((FORMNS,u'value'), None): cnv_double, ((FORMNS, u'value'), None): cnv_string, # ((FORMNS,u'value'), None): cnv_time, ((FORMNS, u'visual-effect'), None): cnv_string, ((FORMNS, u'xforms-list-source'), None): cnv_string, ((FORMNS, u'xforms-submission'), None): cnv_string, ((MANIFESTNS, 'algorithm-name'), None): cnv_string, ((MANIFESTNS, 'checksum'), None): cnv_string, ((MANIFESTNS, 'checksum-type'), None): cnv_string, ((MANIFESTNS, 'full-path'), None): cnv_string, ((MANIFESTNS, 'initialisation-vector'), None): cnv_string, ((MANIFESTNS, 'iteration-count'), None): cnv_nonNegativeInteger, ((MANIFESTNS, 'key-derivation-name'), None): cnv_string, ((MANIFESTNS, 'media-type'), None): cnv_string, ((MANIFESTNS, 'salt'), None): cnv_string, ((MANIFESTNS, 'size'), None): cnv_nonNegativeInteger, ((METANS, u'cell-count'), None): cnv_nonNegativeInteger, ((METANS, u'character-count'), None): cnv_nonNegativeInteger, ((METANS, u'date'), None): cnv_dateTime, ((METANS, u'delay'), None): cnv_duration, ((METANS, u'draw-count'), None): cnv_nonNegativeInteger, ((METANS, u'frame-count'), None): cnv_nonNegativeInteger, ((METANS, u'image-count'), None): cnv_nonNegativeInteger, ((METANS, u'name'), None): cnv_string, ((METANS, u'non-whitespace-character-count'), None): cnv_nonNegativeInteger, ((METANS, u'object-count'), None): cnv_nonNegativeInteger, ((METANS, u'ole-object-count'), None): 
cnv_nonNegativeInteger, ((METANS, u'page-count'), None): cnv_nonNegativeInteger, ((METANS, u'paragraph-count'), None): cnv_nonNegativeInteger, ((METANS, u'row-count'), None): cnv_nonNegativeInteger, ((METANS, u'sentence-count'), None): cnv_nonNegativeInteger, ((METANS, u'syllable-count'), None): cnv_nonNegativeInteger, ((METANS, u'table-count'), None): cnv_nonNegativeInteger, ((METANS, u'value-type'), None): cnv_metavaluetype, ((METANS, u'word-count'), None): cnv_nonNegativeInteger, ((NUMBERNS, u'automatic-order'), None): cnv_boolean, ((NUMBERNS, u'calendar'), None): cnv_string, ((NUMBERNS, u'country'), None): cnv_token, ((NUMBERNS, u'decimal-places'), None): cnv_integer, ((NUMBERNS, u'decimal-replacement'), None): cnv_string, ((NUMBERNS, u'denominator-value'), None): cnv_integer, ((NUMBERNS, u'display-factor'), None): cnv_double, ((NUMBERNS, u'format-source'), None): cnv_string, ((NUMBERNS, u'grouping'), None): cnv_boolean, ((NUMBERNS, u'language'), None): cnv_token, ((NUMBERNS, u'min-denominator-digits'), None): cnv_integer, ((NUMBERNS, u'min-exponent-digits'), None): cnv_integer, ((NUMBERNS, u'min-integer-digits'), None): cnv_integer, ((NUMBERNS, u'min-numerator-digits'), None): cnv_integer, ((NUMBERNS, u'position'), None): cnv_integer, ((NUMBERNS, u'possessive-form'), None): cnv_boolean, ((NUMBERNS, u'style'), None): cnv_string, ((NUMBERNS, u'textual'), None): cnv_boolean, ((NUMBERNS, u'title'), None): cnv_string, ((NUMBERNS, u'transliteration-country'), None): cnv_token, ((NUMBERNS, u'transliteration-format'), None): cnv_string, ((NUMBERNS, u'transliteration-language'), None): cnv_token, ((NUMBERNS, u'transliteration-style'), None): cnv_string, ((NUMBERNS, u'truncate-on-overflow'), None): cnv_boolean, ((OFFICENS, u'automatic-update'), None): cnv_boolean, ((OFFICENS, u'boolean-value'), None): cnv_boolean, ((OFFICENS, u'conversion-mode'), None): cnv_string, ((OFFICENS, u'currency'), None): cnv_string, ((OFFICENS, u'date-value'), None): cnv_dateTime, ((OFFICENS, 
u'dde-application'), None): cnv_string, ((OFFICENS, u'dde-item'), None): cnv_string, ((OFFICENS, u'dde-topic'), None): cnv_string, ((OFFICENS, u'display'), None): cnv_boolean, ((OFFICENS, u'mimetype'), None): cnv_string, ((OFFICENS, u'name'), None): cnv_string, ((OFFICENS, u'process-content'), None): cnv_boolean, ((OFFICENS, u'server-map'), None): cnv_boolean, ((OFFICENS, u'string-value'), None): cnv_string, ((OFFICENS, u'target-frame'), None): cnv_string, ((OFFICENS, u'target-frame-name'), None): cnv_string, ((OFFICENS, u'time-value'), None): cnv_duration, ((OFFICENS, u'title'), None): cnv_string, ((OFFICENS, u'value'), None): cnv_double, ((OFFICENS, u'value-type'), None): cnv_string, ((OFFICENS, u'version'), None): cnv_string, ((PRESENTATIONNS, u'action'), None): cnv_string, ((PRESENTATIONNS, u'animations'), None): cnv_string, ((PRESENTATIONNS, u'background-objects-visible'), None): cnv_boolean, ((PRESENTATIONNS, u'background-visible'), None): cnv_boolean, ((PRESENTATIONNS, u'class'), None): cnv_string, ((PRESENTATIONNS, u'class-names'), None): cnv_NCNames, ((PRESENTATIONNS, u'delay'), None): cnv_duration, ((PRESENTATIONNS, u'direction'), None): cnv_string, ((PRESENTATIONNS, u'display-date-time'), None): cnv_boolean, ((PRESENTATIONNS, u'display-footer'), None): cnv_boolean, ((PRESENTATIONNS, u'display-header'), None): cnv_boolean, ((PRESENTATIONNS, u'display-page-number'), None): cnv_boolean, ((PRESENTATIONNS, u'duration'), None): cnv_string, ((PRESENTATIONNS, u'effect'), None): cnv_string, ((PRESENTATIONNS, u'endless'), None): cnv_boolean, ((PRESENTATIONNS, u'force-manual'), None): cnv_boolean, ((PRESENTATIONNS, u'full-screen'), None): cnv_boolean, ((PRESENTATIONNS, u'group-id'), None): cnv_string, ((PRESENTATIONNS, u'master-element'), None): cnv_IDREF, ((PRESENTATIONNS, u'mouse-as-pen'), None): cnv_boolean, ((PRESENTATIONNS, u'mouse-visible'), None): cnv_boolean, ((PRESENTATIONNS, u'name'), None): cnv_string, ((PRESENTATIONNS, u'node-type'), None): cnv_string, 
((PRESENTATIONNS, u'object'), None): cnv_string, ((PRESENTATIONNS, u'pages'), None): cnv_string, ((PRESENTATIONNS, u'path-id'), None): cnv_string, ((PRESENTATIONNS, u'pause'), None): cnv_duration, ((PRESENTATIONNS, u'placeholder'), None): cnv_boolean, ((PRESENTATIONNS, u'play-full'), None): cnv_boolean, ((PRESENTATIONNS, u'presentation-page-layout-name'), None): cnv_StyleNameRef, ((PRESENTATIONNS, u'preset-class'), None): cnv_string, ((PRESENTATIONNS, u'preset-id'), None): cnv_string, ((PRESENTATIONNS, u'preset-sub-type'), None): cnv_string, ((PRESENTATIONNS, u'show'), None): cnv_string, ((PRESENTATIONNS, u'show-end-of-presentation-slide'), None): cnv_boolean, ((PRESENTATIONNS, u'show-logo'), None): cnv_boolean, ((PRESENTATIONNS, u'source'), None): cnv_string, ((PRESENTATIONNS, u'speed'), None): cnv_string, ((PRESENTATIONNS, u'start-page'), None): cnv_string, ((PRESENTATIONNS, u'start-scale'), None): cnv_string, ((PRESENTATIONNS, u'start-with-navigator'), None): cnv_boolean, ((PRESENTATIONNS, u'stay-on-top'), None): cnv_boolean, ((PRESENTATIONNS, u'style-name'), None): cnv_StyleNameRef, ((PRESENTATIONNS, u'transition-on-click'), None): cnv_string, ((PRESENTATIONNS, u'transition-speed'), None): cnv_string, ((PRESENTATIONNS, u'transition-style'), None): cnv_string, ((PRESENTATIONNS, u'transition-type'), None): cnv_string, ((PRESENTATIONNS, u'use-date-time-name'), None): cnv_string, ((PRESENTATIONNS, u'use-footer-name'), None): cnv_string, ((PRESENTATIONNS, u'use-header-name'), None): cnv_string, ((PRESENTATIONNS, u'user-transformed'), None): cnv_boolean, ((PRESENTATIONNS, u'verb'), None): cnv_nonNegativeInteger, ((PRESENTATIONNS, u'visibility'), None): cnv_string, ((SCRIPTNS, u'event-name'), None): cnv_formula, ((SCRIPTNS, u'language'), None): cnv_formula, ((SCRIPTNS, u'macro-name'), None): cnv_string, ((SMILNS, u'accelerate'), None): cnv_double, ((SMILNS, u'accumulate'), None): cnv_string, ((SMILNS, u'additive'), None): cnv_string, ((SMILNS, u'attributeName'), 
None): cnv_string, ((SMILNS, u'autoReverse'), None): cnv_boolean, ((SMILNS, u'begin'), None): cnv_string, ((SMILNS, u'by'), None): cnv_string, ((SMILNS, u'calcMode'), None): cnv_string, ((SMILNS, u'decelerate'), None): cnv_double, ((SMILNS, u'direction'), None): cnv_string, ((SMILNS, u'dur'), None): cnv_string, ((SMILNS, u'end'), None): cnv_string, ((SMILNS, u'endsync'), None): cnv_string, ((SMILNS, u'fadeColor'), None): cnv_string, ((SMILNS, u'fill'), None): cnv_string, ((SMILNS, u'fillDefault'), None): cnv_string, ((SMILNS, u'from'), None): cnv_string, ((SMILNS, u'keySplines'), None): cnv_string, ((SMILNS, u'keyTimes'), None): cnv_string, ((SMILNS, u'mode'), None): cnv_string, ((SMILNS, u'repeatCount'), None): cnv_nonNegativeInteger, ((SMILNS, u'repeatDur'), None): cnv_string, ((SMILNS, u'restart'), None): cnv_string, ((SMILNS, u'restartDefault'), None): cnv_string, ((SMILNS, u'subtype'), None): cnv_string, ((SMILNS, u'targetElement'), None): cnv_IDREF, ((SMILNS, u'to'), None): cnv_string, ((SMILNS, u'type'), None): cnv_string, ((SMILNS, u'values'), None): cnv_string, ((STYLENS, u'adjustment'), None): cnv_string, ((STYLENS, u'apply-style-name'), None): cnv_StyleNameRef, ((STYLENS, u'auto-text-indent'), None): cnv_boolean, ((STYLENS, u'auto-update'), None): cnv_boolean, ((STYLENS, u'background-transparency'), None): cnv_string, ((STYLENS, u'base-cell-address'), None): cnv_string, ((STYLENS, u'border-line-width-bottom'), None): cnv_string, ((STYLENS, u'border-line-width'), None): cnv_string, ((STYLENS, u'border-line-width-left'), None): cnv_string, ((STYLENS, u'border-line-width-right'), None): cnv_string, ((STYLENS, u'border-line-width-top'), None): cnv_string, ((STYLENS, u'cell-protect'), None): cnv_string, ((STYLENS, u'char'), None): cnv_string, ((STYLENS, u'class'), None): cnv_string, ((STYLENS, u'color'), None): cnv_string, ((STYLENS, u'column-width'), None): cnv_string, ((STYLENS, u'condition'), None): cnv_string, ((STYLENS, u'country-asian'), None): 
cnv_string, ((STYLENS, u'country-complex'), None): cnv_string, ((STYLENS, u'data-style-name'), None): cnv_StyleNameRef, ((STYLENS, u'decimal-places'), None): cnv_string, ((STYLENS, u'default-outline-level'), None): cnv_positiveInteger, ((STYLENS, u'diagonal-bl-tr'), None): cnv_string, ((STYLENS, u'diagonal-bl-tr-widths'), None): cnv_string, ((STYLENS, u'diagonal-tl-br'), None): cnv_string, ((STYLENS, u'diagonal-tl-br-widths'), None): cnv_string, ((STYLENS, u'direction'), None): cnv_string, ((STYLENS, u'display'), None): cnv_boolean, ((STYLENS, u'display-name'), None): cnv_string, ((STYLENS, u'distance-after-sep'), None): cnv_length, ((STYLENS, u'distance-before-sep'), None): cnv_length, ((STYLENS, u'distance'), None): cnv_length, ((STYLENS, u'dynamic-spacing'), None): cnv_boolean, ((STYLENS, u'editable'), None): cnv_boolean, ((STYLENS, u'family'), None): cnv_family, ((STYLENS, u'filter-name'), None): cnv_string, ((STYLENS, u'first-page-number'), None): cnv_string, ((STYLENS, u'flow-with-text'), None): cnv_boolean, ((STYLENS, u'font-adornments'), None): cnv_string, ((STYLENS, u'font-charset'), None): cnv_string, ((STYLENS, u'font-charset-asian'), None): cnv_string, ((STYLENS, u'font-charset-complex'), None): cnv_string, ((STYLENS, u'font-family-asian'), None): cnv_string, ((STYLENS, u'font-family-complex'), None): cnv_string, ((STYLENS, u'font-family-generic-asian'), None): cnv_string, ((STYLENS, u'font-family-generic'), None): cnv_string, ((STYLENS, u'font-family-generic-complex'), None): cnv_string, ((STYLENS, u'font-independent-line-spacing'), None): cnv_boolean, ((STYLENS, u'font-name-asian'), None): cnv_string, ((STYLENS, u'font-name'), None): cnv_string, ((STYLENS, u'font-name-complex'), None): cnv_string, ((STYLENS, u'font-pitch-asian'), None): cnv_string, ((STYLENS, u'font-pitch'), None): cnv_string, ((STYLENS, u'font-pitch-complex'), None): cnv_string, ((STYLENS, u'font-relief'), None): cnv_string, ((STYLENS, u'font-size-asian'), None): cnv_string, 
((STYLENS, u'font-size-complex'), None): cnv_string, ((STYLENS, u'font-size-rel-asian'), None): cnv_length, ((STYLENS, u'font-size-rel'), None): cnv_length, ((STYLENS, u'font-size-rel-complex'), None): cnv_length, ((STYLENS, u'font-style-asian'), None): cnv_string, ((STYLENS, u'font-style-complex'), None): cnv_string, ((STYLENS, u'font-style-name-asian'), None): cnv_string, ((STYLENS, u'font-style-name'), None): cnv_string, ((STYLENS, u'font-style-name-complex'), None): cnv_string, ((STYLENS, u'font-weight-asian'), None): cnv_string, ((STYLENS, u'font-weight-complex'), None): cnv_string, ((STYLENS, u'footnote-max-height'), None): cnv_length, ((STYLENS, u'glyph-orientation-vertical'), None): cnv_string, ((STYLENS, u'height'), None): cnv_string, ((STYLENS, u'horizontal-pos'), None): cnv_string, ((STYLENS, u'horizontal-rel'), None): cnv_string, ((STYLENS, u'justify-single-word'), None): cnv_boolean, ((STYLENS, u'language-asian'), None): cnv_string, ((STYLENS, u'language-complex'), None): cnv_string, ((STYLENS, u'layout-grid-base-height'), None): cnv_length, ((STYLENS, u'layout-grid-color'), None): cnv_string, ((STYLENS, u'layout-grid-display'), None): cnv_boolean, ((STYLENS, u'layout-grid-lines'), None): cnv_string, ((STYLENS, u'layout-grid-mode'), None): cnv_string, ((STYLENS, u'layout-grid-print'), None): cnv_boolean, ((STYLENS, u'layout-grid-ruby-below'), None): cnv_boolean, ((STYLENS, u'layout-grid-ruby-height'), None): cnv_length, ((STYLENS, u'leader-char'), None): cnv_string, ((STYLENS, u'leader-color'), None): cnv_string, ((STYLENS, u'leader-style'), None): cnv_string, ((STYLENS, u'leader-text'), None): cnv_string, ((STYLENS, u'leader-text-style'), None): cnv_StyleNameRef, ((STYLENS, u'leader-type'), None): cnv_string, ((STYLENS, u'leader-width'), None): cnv_string, ((STYLENS, u'legend-expansion-aspect-ratio'), None): cnv_double, ((STYLENS, u'legend-expansion'), None): cnv_string, ((STYLENS, u'length'), None): cnv_positiveInteger, ((STYLENS, u'letter-kerning'), 
None): cnv_boolean, ((STYLENS, u'line-break'), None): cnv_string, ((STYLENS, u'line-height-at-least'), None): cnv_string, ((STYLENS, u'line-spacing'), None): cnv_length, ((STYLENS, u'line-style'), None): cnv_string, ((STYLENS, u'lines'), None): cnv_positiveInteger, ((STYLENS, u'list-style-name'), None): cnv_StyleNameRef, ((STYLENS, u'master-page-name'), None): cnv_StyleNameRef, ((STYLENS, u'may-break-between-rows'), None): cnv_boolean, ((STYLENS, u'min-row-height'), None): cnv_string, ((STYLENS, u'mirror'), None): cnv_string, ((STYLENS, u'name'), None): cnv_NCName, ((STYLENS, u'name'), (STYLENS, u'font-face')): cnv_string, ((STYLENS, u'next-style-name'), None): cnv_StyleNameRef, ((STYLENS, u'num-format'), None): cnv_string, ((STYLENS, u'num-letter-sync'), None): cnv_boolean, ((STYLENS, u'num-prefix'), None): cnv_string, ((STYLENS, u'num-suffix'), None): cnv_string, ((STYLENS, u'number-wrapped-paragraphs'), None): cnv_string, ((STYLENS, u'overflow-behavior'), None): cnv_string, ((STYLENS, u'page-layout-name'), None): cnv_StyleNameRef, ((STYLENS, u'page-number'), None): cnv_string, ((STYLENS, u'page-usage'), None): cnv_string, ((STYLENS, u'paper-tray-name'), None): cnv_string, ((STYLENS, u'parent-style-name'), None): cnv_StyleNameRef, ((STYLENS, u'position'), (STYLENS, u'tab-stop')): cnv_length, ((STYLENS, u'position'), None): cnv_string, ((STYLENS, u'print'), None): cnv_string, ((STYLENS, u'print-content'), None): cnv_boolean, ((STYLENS, u'print-orientation'), None): cnv_string, ((STYLENS, u'print-page-order'), None): cnv_string, ((STYLENS, u'protect'), (STYLENS, u'section-properties')): cnv_boolean, ((STYLENS, u'protect'), (STYLENS, u'graphic-properties')): cnv_string, # ((STYLENS,u'protect'), None): cnv_boolean, ((STYLENS, u'punctuation-wrap'), None): cnv_string, ((STYLENS, u'register-true'), None): cnv_boolean, ((STYLENS, u'register-truth-ref-style-name'), None): cnv_string, ((STYLENS, u'rel-column-width'), None): cnv_string, ((STYLENS, u'rel-height'), None): 
cnv_string, ((STYLENS, u'rel-width'), None): cnv_string, ((STYLENS, u'repeat'), None): cnv_string, ((STYLENS, u'repeat-content'), None): cnv_boolean, ((STYLENS, u'rotation-align'), None): cnv_string, ((STYLENS, u'rotation-angle'), None): cnv_string, ((STYLENS, u'row-height'), None): cnv_string, ((STYLENS, u'ruby-align'), None): cnv_string, ((STYLENS, u'ruby-position'), None): cnv_string, ((STYLENS, u'run-through'), None): cnv_string, ((STYLENS, u'scale-to'), None): cnv_string, ((STYLENS, u'scale-to-pages'), None): cnv_string, ((STYLENS, u'script-type'), None): cnv_string, ((STYLENS, u'shadow'), None): cnv_string, ((STYLENS, u'shrink-to-fit'), None): cnv_boolean, ((STYLENS, u'snap-to-layout-grid'), None): cnv_boolean, ((STYLENS, u'style'), None): cnv_string, ((STYLENS, u'style-name'), None): cnv_StyleNameRef, ((STYLENS, u'tab-stop-distance'), None): cnv_string, ((STYLENS, u'table-centering'), None): cnv_string, ((STYLENS, u'text-align-source'), None): cnv_string, ((STYLENS, u'text-autospace'), None): cnv_string, ((STYLENS, u'text-blinking'), None): cnv_boolean, ((STYLENS, u'text-combine'), None): cnv_string, ((STYLENS, u'text-combine-end-char'), None): cnv_string, ((STYLENS, u'text-combine-start-char'), None): cnv_string, ((STYLENS, u'text-emphasize'), None): cnv_string, ((STYLENS, u'text-line-through-color'), None): cnv_string, ((STYLENS, u'text-line-through-mode'), None): cnv_string, ((STYLENS, u'text-line-through-style'), None): cnv_string, ((STYLENS, u'text-line-through-text'), None): cnv_string, ((STYLENS, u'text-line-through-text-style'), None): cnv_string, ((STYLENS, u'text-line-through-type'), None): cnv_string, ((STYLENS, u'text-line-through-width'), None): cnv_string, ((STYLENS, u'text-outline'), None): cnv_boolean, ((STYLENS, u'text-position'), None): cnv_string, ((STYLENS, u'text-rotation-angle'), None): cnv_string, ((STYLENS, u'text-rotation-scale'), None): cnv_string, ((STYLENS, u'text-scale'), None): cnv_string, ((STYLENS, u'text-underline-color'), 
None): cnv_string, ((STYLENS, u'text-underline-mode'), None): cnv_string, ((STYLENS, u'text-underline-style'), None): cnv_string, ((STYLENS, u'text-underline-type'), None): cnv_string, ((STYLENS, u'text-underline-width'), None): cnv_string, ((STYLENS, u'type'), None): cnv_string, ((STYLENS, u'use-optimal-column-width'), None): cnv_boolean, ((STYLENS, u'use-optimal-row-height'), None): cnv_boolean, ((STYLENS, u'use-window-font-color'), None): cnv_boolean, ((STYLENS, u'vertical-align'), None): cnv_string, ((STYLENS, u'vertical-pos'), None): cnv_string, ((STYLENS, u'vertical-rel'), None): cnv_string, ((STYLENS, u'volatile'), None): cnv_boolean, ((STYLENS, u'width'), None): cnv_string, ((STYLENS, u'wrap'), None): cnv_string, ((STYLENS, u'wrap-contour'), None): cnv_boolean, ((STYLENS, u'wrap-contour-mode'), None): cnv_string, ((STYLENS, u'wrap-dynamic-threshold'), None): cnv_length, ((STYLENS, u'writing-mode-automatic'), None): cnv_boolean, ((STYLENS, u'writing-mode'), None): cnv_string, ((SVGNS, u'accent-height'), None): cnv_integer, ((SVGNS, u'alphabetic'), None): cnv_integer, ((SVGNS, u'ascent'), None): cnv_integer, ((SVGNS, u'bbox'), None): cnv_string, ((SVGNS, u'cap-height'), None): cnv_integer, ((SVGNS, u'cx'), None): cnv_string, ((SVGNS, u'cy'), None): cnv_string, ((SVGNS, u'd'), None): cnv_string, ((SVGNS, u'descent'), None): cnv_integer, ((SVGNS, u'fill-rule'), None): cnv_string, ((SVGNS, u'font-family'), None): cnv_string, ((SVGNS, u'font-size'), None): cnv_string, ((SVGNS, u'font-stretch'), None): cnv_string, ((SVGNS, u'font-style'), None): cnv_string, ((SVGNS, u'font-variant'), None): cnv_string, ((SVGNS, u'font-weight'), None): cnv_string, ((SVGNS, u'fx'), None): cnv_string, ((SVGNS, u'fy'), None): cnv_string, ((SVGNS, u'gradientTransform'), None): cnv_string, ((SVGNS, u'gradientUnits'), None): cnv_string, ((SVGNS, u'hanging'), None): cnv_integer, ((SVGNS, u'height'), None): cnv_length, ((SVGNS, u'ideographic'), None): cnv_integer, ((SVGNS, 
u'mathematical'), None): cnv_integer, ((SVGNS, u'name'), None): cnv_string, ((SVGNS, u'offset'), None): cnv_string, ((SVGNS, u'origin'), None): cnv_string, ((SVGNS, u'overline-position'), None): cnv_integer, ((SVGNS, u'overline-thickness'), None): cnv_integer, ((SVGNS, u'panose-1'), None): cnv_string, ((SVGNS, u'path'), None): cnv_string, ((SVGNS, u'r'), None): cnv_length, ((SVGNS, u'rx'), None): cnv_length, ((SVGNS, u'ry'), None): cnv_length, ((SVGNS, u'slope'), None): cnv_integer, ((SVGNS, u'spreadMethod'), None): cnv_string, ((SVGNS, u'stemh'), None): cnv_integer, ((SVGNS, u'stemv'), None): cnv_integer, ((SVGNS, u'stop-color'), None): cnv_string, ((SVGNS, u'stop-opacity'), None): cnv_double, ((SVGNS, u'strikethrough-position'), None): cnv_integer, ((SVGNS, u'strikethrough-thickness'), None): cnv_integer, ((SVGNS, u'string'), None): cnv_string, ((SVGNS, u'stroke-color'), None): cnv_string, ((SVGNS, u'stroke-opacity'), None): cnv_string, ((SVGNS, u'stroke-width'), None): cnv_length, ((SVGNS, u'type'), None): cnv_string, ((SVGNS, u'underline-position'), None): cnv_integer, ((SVGNS, u'underline-thickness'), None): cnv_integer, ((SVGNS, u'unicode-range'), None): cnv_string, ((SVGNS, u'units-per-em'), None): cnv_integer, ((SVGNS, u'v-alphabetic'), None): cnv_integer, ((SVGNS, u'v-hanging'), None): cnv_integer, ((SVGNS, u'v-ideographic'), None): cnv_integer, ((SVGNS, u'v-mathematical'), None): cnv_integer, ((SVGNS, u'viewBox'), None): cnv_viewbox, ((SVGNS, u'width'), None): cnv_length, ((SVGNS, u'widths'), None): cnv_string, ((SVGNS, u'x'), None): cnv_length, ((SVGNS, u'x-height'), None): cnv_integer, ((SVGNS, u'x1'), None): cnv_lengthorpercent, ((SVGNS, u'x2'), None): cnv_lengthorpercent, ((SVGNS, u'y'), None): cnv_length, ((SVGNS, u'y1'), None): cnv_lengthorpercent, ((SVGNS, u'y2'), None): cnv_lengthorpercent, ((TABLENS, u'acceptance-state'), None): cnv_string, ((TABLENS, u'add-empty-lines'), None): cnv_boolean, ((TABLENS, u'algorithm'), None): cnv_formula, 
((TABLENS, u'align'), None): cnv_string, ((TABLENS, u'allow-empty-cell'), None): cnv_boolean, ((TABLENS, u'application-data'), None): cnv_string, ((TABLENS, u'automatic-find-labels'), None): cnv_boolean, ((TABLENS, u'base-cell-address'), None): cnv_string, ((TABLENS, u'bind-styles-to-content'), None): cnv_boolean, ((TABLENS, u'border-color'), None): cnv_string, ((TABLENS, u'border-model'), None): cnv_string, ((TABLENS, u'buttons'), None): cnv_string, ((TABLENS, u'buttons'), None): cnv_string, ((TABLENS, u'case-sensitive'), None): cnv_boolean, ((TABLENS, u'case-sensitive'), None): cnv_string, ((TABLENS, u'cell-address'), None): cnv_string, ((TABLENS, u'cell-range-address'), None): cnv_string, ((TABLENS, u'cell-range-address'), None): cnv_string, ((TABLENS, u'cell-range'), None): cnv_string, ((TABLENS, u'column'), None): cnv_integer, ((TABLENS, u'comment'), None): cnv_string, ((TABLENS, u'condition'), None): cnv_formula, ((TABLENS, u'condition-source'), None): cnv_string, ((TABLENS, u'condition-source-range-address'), None): cnv_string, ((TABLENS, u'contains-error'), None): cnv_boolean, ((TABLENS, u'contains-header'), None): cnv_boolean, ((TABLENS, u'content-validation-name'), None): cnv_string, ((TABLENS, u'copy-back'), None): cnv_boolean, ((TABLENS, u'copy-formulas'), None): cnv_boolean, ((TABLENS, u'copy-styles'), None): cnv_boolean, ((TABLENS, u'count'), None): cnv_positiveInteger, ((TABLENS, u'country'), None): cnv_token, ((TABLENS, u'data-cell-range-address'), None): cnv_string, ((TABLENS, u'data-field'), None): cnv_string, ((TABLENS, u'data-type'), None): cnv_string, ((TABLENS, u'database-name'), None): cnv_string, ((TABLENS, u'database-table-name'), None): cnv_string, ((TABLENS, u'date-end'), None): cnv_string, ((TABLENS, u'date-start'), None): cnv_string, ((TABLENS, u'date-value'), None): cnv_date, ((TABLENS, u'default-cell-style-name'), None): cnv_StyleNameRef, ((TABLENS, u'direction'), None): cnv_string, ((TABLENS, u'display-border'), None): cnv_boolean, 
((TABLENS, u'display'), None): cnv_boolean, ((TABLENS, u'display-duplicates'), None): cnv_boolean, ((TABLENS, u'display-filter-buttons'), None): cnv_boolean, ((TABLENS, u'display-list'), None): cnv_string, ((TABLENS, u'display-member-mode'), None): cnv_string, ((TABLENS, u'drill-down-on-double-click'), None): cnv_boolean, ((TABLENS, u'enabled'), None): cnv_boolean, ((TABLENS, u'end-cell-address'), None): cnv_string, ((TABLENS, u'end'), None): cnv_string, ((TABLENS, u'end-column'), None): cnv_integer, ((TABLENS, u'end-position'), None): cnv_integer, ((TABLENS, u'end-row'), None): cnv_integer, ((TABLENS, u'end-table'), None): cnv_integer, ((TABLENS, u'end-x'), None): cnv_length, ((TABLENS, u'end-y'), None): cnv_length, ((TABLENS, u'execute'), None): cnv_boolean, ((TABLENS, u'expression'), None): cnv_formula, ((TABLENS, u'field-name'), None): cnv_string, ((TABLENS, u'field-number'), None): cnv_nonNegativeInteger, ((TABLENS, u'field-number'), None): cnv_string, ((TABLENS, u'filter-name'), None): cnv_string, ((TABLENS, u'filter-options'), None): cnv_string, ((TABLENS, u'formula'), None): cnv_formula, ((TABLENS, u'function'), None): cnv_string, ((TABLENS, u'function'), None): cnv_string, ((TABLENS, u'grand-total'), None): cnv_string, ((TABLENS, u'group-by-field-number'), None): cnv_nonNegativeInteger, ((TABLENS, u'grouped-by'), None): cnv_string, ((TABLENS, u'has-persistent-data'), None): cnv_boolean, ((TABLENS, u'id'), None): cnv_string, ((TABLENS, u'identify-categories'), None): cnv_boolean, ((TABLENS, u'ignore-empty-rows'), None): cnv_boolean, ((TABLENS, u'index'), None): cnv_nonNegativeInteger, ((TABLENS, u'is-active'), None): cnv_boolean, ((TABLENS, u'is-data-layout-field'), None): cnv_string, ((TABLENS, u'is-selection'), None): cnv_boolean, ((TABLENS, u'is-sub-table'), None): cnv_boolean, ((TABLENS, u'label-cell-range-address'), None): cnv_string, ((TABLENS, u'language'), None): cnv_token, ((TABLENS, u'language'), None): cnv_token, ((TABLENS, 
u'last-column-spanned'), None): cnv_positiveInteger, ((TABLENS, u'last-row-spanned'), None): cnv_positiveInteger, ((TABLENS, u'layout-mode'), None): cnv_string, ((TABLENS, u'link-to-source-data'), None): cnv_boolean, ((TABLENS, u'marked-invalid'), None): cnv_boolean, ((TABLENS, u'matrix-covered'), None): cnv_boolean, ((TABLENS, u'maximum-difference'), None): cnv_double, ((TABLENS, u'member-count'), None): cnv_nonNegativeInteger, ((TABLENS, u'member-name'), None): cnv_string, ((TABLENS, u'member-type'), None): cnv_string, ((TABLENS, u'message-type'), None): cnv_string, ((TABLENS, u'mode'), None): cnv_string, ((TABLENS, u'multi-deletion-spanned'), None): cnv_integer, ((TABLENS, u'name'), None): cnv_string, ((TABLENS, u'name'), None): cnv_string, ((TABLENS, u'null-year'), None): cnv_positiveInteger, ((TABLENS, u'number-columns-repeated'), None): cnv_positiveInteger, ((TABLENS, u'number-columns-spanned'), None): cnv_positiveInteger, ((TABLENS, u'number-matrix-columns-spanned'), None): cnv_positiveInteger, ((TABLENS, u'number-matrix-rows-spanned'), None): cnv_positiveInteger, ((TABLENS, u'number-rows-repeated'), None): cnv_positiveInteger, ((TABLENS, u'number-rows-spanned'), None): cnv_positiveInteger, ((TABLENS, u'object-name'), None): cnv_string, ((TABLENS, u'on-update-keep-size'), None): cnv_boolean, ((TABLENS, u'on-update-keep-styles'), None): cnv_boolean, ((TABLENS, u'operator'), None): cnv_string, ((TABLENS, u'operator'), None): cnv_string, ((TABLENS, u'order'), None): cnv_string, ((TABLENS, u'orientation'), None): cnv_string, ((TABLENS, u'orientation'), None): cnv_string, ((TABLENS, u'page-breaks-on-group-change'), None): cnv_boolean, ((TABLENS, u'parse-sql-statement'), None): cnv_boolean, ((TABLENS, u'password'), None): cnv_string, ((TABLENS, u'position'), None): cnv_integer, ((TABLENS, u'precision-as-shown'), None): cnv_boolean, ((TABLENS, u'print'), None): cnv_boolean, ((TABLENS, u'print-ranges'), None): cnv_string, ((TABLENS, u'protect'), None): cnv_boolean, 
((TABLENS, u'protected'), None): cnv_boolean, ((TABLENS, u'protection-key'), None): cnv_string, ((TABLENS, u'query-name'), None): cnv_string, ((TABLENS, u'range-usable-as'), None): cnv_string, ((TABLENS, u'refresh-delay'), None): cnv_boolean, ((TABLENS, u'refresh-delay'), None): cnv_duration, ((TABLENS, u'rejecting-change-id'), None): cnv_string, ((TABLENS, u'row'), None): cnv_integer, ((TABLENS, u'scenario-ranges'), None): cnv_string, ((TABLENS, u'search-criteria-must-apply-to-whole-cell'), None): cnv_boolean, ((TABLENS, u'selected-page'), None): cnv_string, ((TABLENS, u'show-details'), None): cnv_boolean, ((TABLENS, u'show-empty'), None): cnv_boolean, ((TABLENS, u'show-empty'), None): cnv_string, ((TABLENS, u'show-filter-button'), None): cnv_boolean, ((TABLENS, u'sort-mode'), None): cnv_string, ((TABLENS, u'source-cell-range-addresses'), None): cnv_string, ((TABLENS, u'source-cell-range-addresses'), None): cnv_string, ((TABLENS, u'source-field-name'), None): cnv_string, ((TABLENS, u'source-field-name'), None): cnv_string, ((TABLENS, u'source-name'), None): cnv_string, ((TABLENS, u'sql-statement'), None): cnv_string, ((TABLENS, u'start'), None): cnv_string, ((TABLENS, u'start-column'), None): cnv_integer, ((TABLENS, u'start-position'), None): cnv_integer, ((TABLENS, u'start-row'), None): cnv_integer, ((TABLENS, u'start-table'), None): cnv_integer, ((TABLENS, u'status'), None): cnv_string, ((TABLENS, u'step'), None): cnv_double, ((TABLENS, u'steps'), None): cnv_positiveInteger, ((TABLENS, u'structure-protected'), None): cnv_boolean, ((TABLENS, u'style-name'), None): cnv_StyleNameRef, ((TABLENS, u'table-background'), None): cnv_boolean, ((TABLENS, u'table'), None): cnv_integer, ((TABLENS, u'table-name'), None): cnv_string, ((TABLENS, u'target-cell-address'), None): cnv_string, ((TABLENS, u'target-cell-address'), None): cnv_string, ((TABLENS, u'target-range-address'), None): cnv_string, ((TABLENS, u'target-range-address'), None): cnv_string, ((TABLENS, u'title'), 
None): cnv_string, ((TABLENS, u'track-changes'), None): cnv_boolean, ((TABLENS, u'type'), None): cnv_string, ((TABLENS, u'use-labels'), None): cnv_string, ((TABLENS, u'use-regular-expressions'), None): cnv_boolean, ((TABLENS, u'used-hierarchy'), None): cnv_integer, ((TABLENS, u'user-name'), None): cnv_string, ((TABLENS, u'value'), None): cnv_string, ((TABLENS, u'value'), None): cnv_string, ((TABLENS, u'value-type'), None): cnv_string, ((TABLENS, u'visibility'), None): cnv_string, ((TEXTNS, u'active'), None): cnv_boolean, ((TEXTNS, u'address'), None): cnv_string, ((TEXTNS, u'alphabetical-separators'), None): cnv_boolean, ((TEXTNS, u'anchor-page-number'), None): cnv_positiveInteger, ((TEXTNS, u'anchor-type'), None): cnv_string, ((TEXTNS, u'animation'), None): cnv_string, ((TEXTNS, u'animation-delay'), None): cnv_string, ((TEXTNS, u'animation-direction'), None): cnv_string, ((TEXTNS, u'animation-repeat'), None): cnv_string, ((TEXTNS, u'animation-start-inside'), None): cnv_boolean, ((TEXTNS, u'animation-steps'), None): cnv_length, ((TEXTNS, u'animation-stop-inside'), None): cnv_boolean, ((TEXTNS, u'annote'), None): cnv_string, ((TEXTNS, u'author'), None): cnv_string, ((TEXTNS, u'bibliography-data-field'), None): cnv_string, ((TEXTNS, u'bibliography-type'), None): cnv_string, ((TEXTNS, u'booktitle'), None): cnv_string, ((TEXTNS, u'bullet-char'), None): cnv_string, ((TEXTNS, u'bullet-relative-size'), None): cnv_string, ((TEXTNS, u'c'), None): cnv_nonNegativeInteger, ((TEXTNS, u'capitalize-entries'), None): cnv_boolean, ((TEXTNS, u'caption-sequence-format'), None): cnv_string, ((TEXTNS, u'caption-sequence-name'), None): cnv_string, ((TEXTNS, u'change-id'), None): cnv_IDREF, ((TEXTNS, u'chapter'), None): cnv_string, ((TEXTNS, u'citation-body-style-name'), None): cnv_StyleNameRef, ((TEXTNS, u'citation-style-name'), None): cnv_StyleNameRef, ((TEXTNS, u'class-names'), None): cnv_NCNames, ((TEXTNS, u'column-name'), None): cnv_string, ((TEXTNS, u'combine-entries'), None): 
cnv_boolean, ((TEXTNS, u'combine-entries-with-dash'), None): cnv_boolean, ((TEXTNS, u'combine-entries-with-pp'), None): cnv_boolean, ((TEXTNS, u'comma-separated'), None): cnv_boolean, ((TEXTNS, u'cond-style-name'), None): cnv_StyleNameRef, ((TEXTNS, u'condition'), None): cnv_formula, ((TEXTNS, u'connection-name'), None): cnv_string, ((TEXTNS, u'consecutive-numbering'), None): cnv_boolean, ((TEXTNS, u'continue-numbering'), None): cnv_boolean, ((TEXTNS, u'copy-outline-levels'), None): cnv_boolean, ((TEXTNS, u'count-empty-lines'), None): cnv_boolean, ((TEXTNS, u'count-in-text-boxes'), None): cnv_boolean, ((TEXTNS, u'current-value'), None): cnv_boolean, ((TEXTNS, u'custom1'), None): cnv_string, ((TEXTNS, u'custom2'), None): cnv_string, ((TEXTNS, u'custom3'), None): cnv_string, ((TEXTNS, u'custom4'), None): cnv_string, ((TEXTNS, u'custom5'), None): cnv_string, ((TEXTNS, u'database-name'), None): cnv_string, ((TEXTNS, u'date-adjust'), None): cnv_duration, ((TEXTNS, u'date-value'), None): cnv_date, # ((TEXTNS,u'date-value'), None): cnv_dateTime, ((TEXTNS, u'default-style-name'), None): cnv_StyleNameRef, ((TEXTNS, u'description'), None): cnv_string, ((TEXTNS, u'display'), None): cnv_string, ((TEXTNS, u'display-levels'), None): cnv_positiveInteger, ((TEXTNS, u'display-outline-level'), None): cnv_nonNegativeInteger, ((TEXTNS, u'dont-balance-text-columns'), None): cnv_boolean, ((TEXTNS, u'duration'), None): cnv_duration, ((TEXTNS, u'edition'), None): cnv_string, ((TEXTNS, u'editor'), None): cnv_string, ((TEXTNS, u'filter-name'), None): cnv_string, ((TEXTNS, u'first-row-end-column'), None): cnv_string, ((TEXTNS, u'first-row-start-column'), None): cnv_string, ((TEXTNS, u'fixed'), None): cnv_boolean, ((TEXTNS, u'footnotes-position'), None): cnv_string, ((TEXTNS, u'formula'), None): cnv_formula, ((TEXTNS, u'global'), None): cnv_boolean, ((TEXTNS, u'howpublished'), None): cnv_string, ((TEXTNS, u'id'), None): cnv_ID, # ((TEXTNS,u'id'), None): cnv_string, ((TEXTNS, u'identifier'), 
None): cnv_string, ((TEXTNS, u'ignore-case'), None): cnv_boolean, ((TEXTNS, u'increment'), None): cnv_nonNegativeInteger, ((TEXTNS, u'index-name'), None): cnv_string, ((TEXTNS, u'index-scope'), None): cnv_string, ((TEXTNS, u'institution'), None): cnv_string, ((TEXTNS, u'is-hidden'), None): cnv_boolean, ((TEXTNS, u'is-list-header'), None): cnv_boolean, ((TEXTNS, u'isbn'), None): cnv_string, ((TEXTNS, u'issn'), None): cnv_string, ((TEXTNS, u'issn'), None): cnv_string, ((TEXTNS, u'journal'), None): cnv_string, ((TEXTNS, u'key'), None): cnv_string, ((TEXTNS, u'key1'), None): cnv_string, ((TEXTNS, u'key1-phonetic'), None): cnv_string, ((TEXTNS, u'key2'), None): cnv_string, ((TEXTNS, u'key2-phonetic'), None): cnv_string, ((TEXTNS, u'kind'), None): cnv_string, ((TEXTNS, u'label'), None): cnv_string, ((TEXTNS, u'last-row-end-column'), None): cnv_string, ((TEXTNS, u'last-row-start-column'), None): cnv_string, ((TEXTNS, u'level'), None): cnv_positiveInteger, ((TEXTNS, u'line-break'), None): cnv_boolean, ((TEXTNS, u'line-number'), None): cnv_string, ((TEXTNS, u'main-entry'), None): cnv_boolean, ((TEXTNS, u'main-entry-style-name'), None): cnv_StyleNameRef, ((TEXTNS, u'master-page-name'), None): cnv_StyleNameRef, ((TEXTNS, u'min-label-distance'), None): cnv_string, ((TEXTNS, u'min-label-width'), None): cnv_string, ((TEXTNS, u'month'), None): cnv_string, ((TEXTNS, u'name'), None): cnv_string, ((TEXTNS, u'note-class'), None): cnv_textnoteclass, ((TEXTNS, u'note'), None): cnv_string, ((TEXTNS, u'number'), None): cnv_string, ((TEXTNS, u'number-lines'), None): cnv_boolean, ((TEXTNS, u'number-position'), None): cnv_string, ((TEXTNS, u'numbered-entries'), None): cnv_boolean, ((TEXTNS, u'offset'), None): cnv_string, ((TEXTNS, u'organizations'), None): cnv_string, ((TEXTNS, u'outline-level'), None): cnv_string, ((TEXTNS, u'page-adjust'), None): cnv_integer, ((TEXTNS, u'pages'), None): cnv_string, ((TEXTNS, u'paragraph-style-name'), None): cnv_StyleNameRef, ((TEXTNS, 
u'placeholder-type'), None): cnv_string, ((TEXTNS, u'prefix'), None): cnv_string, ((TEXTNS, u'protected'), None): cnv_boolean, ((TEXTNS, u'protection-key'), None): cnv_string, ((TEXTNS, u'publisher'), None): cnv_string, ((TEXTNS, u'ref-name'), None): cnv_string, ((TEXTNS, u'reference-format'), None): cnv_string, ((TEXTNS, u'relative-tab-stop-position'), None): cnv_boolean, ((TEXTNS, u'report-type'), None): cnv_string, ((TEXTNS, u'restart-numbering'), None): cnv_boolean, ((TEXTNS, u'restart-on-page'), None): cnv_boolean, ((TEXTNS, u'row-number'), None): cnv_nonNegativeInteger, ((TEXTNS, u'school'), None): cnv_string, ((TEXTNS, u'section-name'), None): cnv_string, ((TEXTNS, u'select-page'), None): cnv_string, ((TEXTNS, u'separation-character'), None): cnv_string, ((TEXTNS, u'series'), None): cnv_string, ((TEXTNS, u'sort-algorithm'), None): cnv_string, ((TEXTNS, u'sort-ascending'), None): cnv_boolean, ((TEXTNS, u'sort-by-position'), None): cnv_boolean, ((TEXTNS, u'space-before'), None): cnv_string, ((TEXTNS, u'start-numbering-at'), None): cnv_string, ((TEXTNS, u'start-value'), None): cnv_nonNegativeInteger, ((TEXTNS, u'start-value'), None): cnv_positiveInteger, ((TEXTNS, u'string-value'), None): cnv_string, ((TEXTNS, u'string-value-if-false'), None): cnv_string, ((TEXTNS, u'string-value-if-true'), None): cnv_string, ((TEXTNS, u'string-value-phonetic'), None): cnv_string, ((TEXTNS, u'style-name'), None): cnv_StyleNameRef, ((TEXTNS, u'suffix'), None): cnv_string, ((TEXTNS, u'tab-ref'), None): cnv_nonNegativeInteger, ((TEXTNS, u'table-name'), None): cnv_string, ((TEXTNS, u'table-type'), None): cnv_string, ((TEXTNS, u'time-adjust'), None): cnv_duration, ((TEXTNS, u'time-value'), None): cnv_dateTime, ((TEXTNS, u'time-value'), None): cnv_time, ((TEXTNS, u'title'), None): cnv_string, ((TEXTNS, u'track-changes'), None): cnv_boolean, ((TEXTNS, u'url'), None): cnv_string, ((TEXTNS, u'use-caption'), None): cnv_boolean, ((TEXTNS, u'use-chart-objects'), None): cnv_boolean, 
((TEXTNS, u'use-draw-objects'), None): cnv_boolean, ((TEXTNS, u'use-floating-frames'), None): cnv_boolean, ((TEXTNS, u'use-graphics'), None): cnv_boolean, ((TEXTNS, u'use-index-marks'), None): cnv_boolean, ((TEXTNS, u'use-index-source-styles'), None): cnv_boolean, ((TEXTNS, u'use-keys-as-entries'), None): cnv_boolean, ((TEXTNS, u'use-math-objects'), None): cnv_boolean, ((TEXTNS, u'use-objects'), None): cnv_boolean, ((TEXTNS, u'use-other-objects'), None): cnv_boolean, ((TEXTNS, u'use-outline-level'), None): cnv_boolean, ((TEXTNS, u'use-soft-page-breaks'), None): cnv_boolean, ((TEXTNS, u'use-spreadsheet-objects'), None): cnv_boolean, ((TEXTNS, u'use-tables'), None): cnv_boolean, ((TEXTNS, u'value'), None): cnv_nonNegativeInteger, ((TEXTNS, u'visited-style-name'), None): cnv_StyleNameRef, ((TEXTNS, u'volume'), None): cnv_string, ((TEXTNS, u'year'), None): cnv_string, ((XFORMSNS, u'bind'), None): cnv_string, ((XLINKNS, u'actuate'), None): cnv_string, ((XLINKNS, u'href'), None): cnv_anyURI, ((XLINKNS, u'show'), None): cnv_xlinkshow, ((XLINKNS, u'title'), None): cnv_string, ((XLINKNS, u'type'), None): cnv_string, } class AttrConverters: def convert(self, attribute, value, element): """ Based on the element, figures out how to check/convert the attribute value All values are converted to string """ conversion = attrconverters.get((attribute, element.qname), None) if conversion is not None: return conversion(attribute, value, element) else: conversion = attrconverters.get((attribute, None), None) if conversion is not None: return conversion(attribute, value, element) return unicode(value)
codeparrot/github-code-clean
# Orca
#
# Copyright 2004-2009 Sun Microsystems Inc.
# Copyright 2010 Joanmarie Diggs
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.

"""The default Script for presenting information to the user using
both speech and Braille.  This is based primarily on the de-facto
standard implementation of the AT-SPI, which is the GAIL support for
GTK."""

__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2004-2009 Sun Microsystems Inc." \
                "Copyright (c) 2010 Joanmarie Diggs"
__license__ = "LGPL"

import time

from gi.repository import Gtk, Gdk

import pyatspi
import orca.braille as braille
import orca.chnames as chnames
import orca.cmdnames as cmdnames
import orca.debug as debug
import orca.eventsynthesizer as eventsynthesizer
import orca.find as find
import orca.flat_review as flat_review
import orca.guilabels as guilabels
import orca.input_event as input_event
import orca.keybindings as keybindings
import orca.messages as messages
import orca.orca as orca
import orca.orca_gui_commandlist as commandlist
import orca.orca_state as orca_state
import orca.phonnames as phonnames
import orca.sound_utils as sound_utils
import orca.script as script
import orca.settings as settings
import orca.settings_manager as settings_manager
import orca.speech as speech
import orca.speechserver as speechserver
import orca.mouse_review as mouse_review
import orca.notification_messages as notification_messages

_settingsManager = settings_manager.getManager()

########################################################################
#                                                                      #
# The Default script class.                                            #
#                                                                      #
########################################################################

class Script(script.Script):
    """The default script: presents information via speech and braille
    for any application that has no more specific script."""

    EMBEDDED_OBJECT_CHARACTER = '\ufffc'
    NO_BREAK_SPACE_CHARACTER = '\u00a0'

    # generatorCache
    #
    DISPLAYED_LABEL = 'displayedLabel'
    DISPLAYED_TEXT = 'displayedText'
    KEY_BINDING = 'keyBinding'
    NESTING_LEVEL = 'nestingLevel'
    NODE_LEVEL = 'nodeLevel'
    REAL_ACTIVE_DESCENDANT = 'realActiveDescendant'

    def __init__(self, app):
        """Creates a new script for the given application.

        Arguments:
        - app: the application to create a script for.
        """
        script.Script.__init__(self, app)

        self.flatReviewContext = None
        self.windowActivateTime = None
        self.targetCursorCell = None

        self.sound = sound_utils.SoundUtils()
        # NOTE(review): "createSimpePipeline" looks misspelled, but it is
        # the actual method name exposed by sound_utils — confirm before
        # renaming anything.
        self.sound.createSimpePipeline()

        self.justEnteredFlatReviewMode = False

        self.digits = '0123456789'
        self.whitespace = ' \t\n\r\v\f'

        # Unicode currency symbols (populated by the
        # getUnicodeCurrencySymbols() routine).
        #
        self._unicodeCurrencySymbols = []

        # Used to determine whether progress bar value changes presented.
        self.lastProgressBarTime = {}
        self.lastProgressBarValue = {}

        self.lastSelectedMenu = None

        # A dictionary of non-standardly-named text attributes and their
        # Atk equivalents.
        #
        self.attributeNamesDict = {}

        # Keep track of the last time we issued a mouse routing command
        # so that we can guess if a change resulted from our moving the
        # pointer.
        #
        self.lastMouseRoutingTime = None

        # The last location of the mouse, which we might want if routing
        # the pointer elsewhere.
        #
        self.oldMouseCoordinates = [0, 0]

        # Used to copy/append the current flat review contents to the
        # clipboard.
        #
        self.currentReviewContents = ""

        self._lastWord = ""
        self._lastWordCheckedForSpelling = ""

        self._inSayAll = False
        self._sayAllIsInterrupted = False
        self._sayAllContexts = []

        if app:
            app.setCacheMask(
                pyatspi.cache.DEFAULT ^ pyatspi.cache.CHILDREN ^ pyatspi.cache.NAME)

    def setupInputEventHandlers(self):
        """Defines InputEventHandler fields for this script that can be
        called by the key and braille bindings."""

        # Pointer-routing / synthesized click commands.
        self.inputEventHandlers["routePointerToItemHandler"] = \
            input_event.InputEventHandler(
                Script.routePointerToItem,
                cmdnames.ROUTE_POINTER_TO_ITEM)

        self.inputEventHandlers["leftClickReviewItemHandler"] = \
            input_event.InputEventHandler(
                Script.leftClickReviewItem,
                cmdnames.LEFT_CLICK_REVIEW_ITEM)

        self.inputEventHandlers["rightClickReviewItemHandler"] = \
            input_event.InputEventHandler(
                Script.rightClickReviewItem,
                cmdnames.RIGHT_CLICK_REVIEW_ITEM)

        self.inputEventHandlers["sayAllHandler"] = \
            input_event.InputEventHandler(
                Script.sayAll,
                cmdnames.SAY_ALL)

        self.inputEventHandlers["whereAmIBasicHandler"] = \
            input_event.InputEventHandler(
                Script.whereAmIBasic,
                cmdnames.WHERE_AM_I_BASIC)

        self.inputEventHandlers["whereAmIDetailedHandler"] = \
            input_event.InputEventHandler(
                Script.whereAmIDetailed,
                cmdnames.WHERE_AM_I_DETAILED)

        self.inputEventHandlers["getTitleHandler"] = \
            input_event.InputEventHandler(
                Script.presentTitle,
                cmdnames.PRESENT_TITLE)

        self.inputEventHandlers["getStatusBarHandler"] = \
            input_event.InputEventHandler(
                Script.presentStatusBar,
                cmdnames.PRESENT_STATUS_BAR)

        # Find-dialog commands.
        self.inputEventHandlers["findHandler"] = \
            input_event.InputEventHandler(
                orca.showFindGUI,
                cmdnames.SHOW_FIND_GUI)

        self.inputEventHandlers["findNextHandler"] = \
            input_event.InputEventHandler(
                Script.findNext,
                cmdnames.FIND_NEXT)

        self.inputEventHandlers["findPreviousHandler"] = \
            input_event.InputEventHandler(
                Script.findPrevious,
                cmdnames.FIND_PREVIOUS)

        # Flat review navigation commands.
        self.inputEventHandlers["toggleFlatReviewModeHandler"] = \
            input_event.InputEventHandler(
                Script.toggleFlatReviewMode,
                cmdnames.TOGGLE_FLAT_REVIEW)

        self.inputEventHandlers["reviewPreviousLineHandler"] = \
            input_event.InputEventHandler(
                Script.reviewPreviousLine,
                cmdnames.REVIEW_PREVIOUS_LINE)

        self.inputEventHandlers["reviewHomeHandler"] = \
            input_event.InputEventHandler(
                Script.reviewHome,
                cmdnames.REVIEW_HOME)

        self.inputEventHandlers["reviewCurrentLineHandler"] = \
            input_event.InputEventHandler(
                Script.reviewCurrentLine,
                cmdnames.REVIEW_CURRENT_LINE)

        self.inputEventHandlers["reviewSpellCurrentLineHandler"] = \
            input_event.InputEventHandler(
                Script.reviewSpellCurrentLine,
                cmdnames.REVIEW_SPELL_CURRENT_LINE)

        self.inputEventHandlers["reviewPhoneticCurrentLineHandler"] = \
            input_event.InputEventHandler(
                Script.reviewPhoneticCurrentLine,
                cmdnames.REVIEW_PHONETIC_CURRENT_LINE)

        self.inputEventHandlers["reviewNextLineHandler"] = \
            input_event.InputEventHandler(
                Script.reviewNextLine,
                cmdnames.REVIEW_NEXT_LINE)

        self.inputEventHandlers["reviewEndHandler"] = \
            input_event.InputEventHandler(
                Script.reviewEnd,
                cmdnames.REVIEW_END)

        self.inputEventHandlers["reviewPreviousItemHandler"] = \
            input_event.InputEventHandler(
                Script.reviewPreviousItem,
                cmdnames.REVIEW_PREVIOUS_ITEM)

        self.inputEventHandlers["reviewAboveHandler"] = \
            input_event.InputEventHandler(
                Script.reviewAbove,
                cmdnames.REVIEW_ABOVE)

        self.inputEventHandlers["reviewCurrentItemHandler"] = \
            input_event.InputEventHandler(
                Script.reviewCurrentItem,
                cmdnames.REVIEW_CURRENT_ITEM)

        self.inputEventHandlers["reviewSpellCurrentItemHandler"] = \
            input_event.InputEventHandler(
                Script.reviewSpellCurrentItem,
                cmdnames.REVIEW_SPELL_CURRENT_ITEM)

        self.inputEventHandlers["reviewPhoneticCurrentItemHandler"] = \
            input_event.InputEventHandler(
                Script.reviewPhoneticCurrentItem,
                cmdnames.REVIEW_PHONETIC_CURRENT_ITEM)

        self.inputEventHandlers["reviewNextItemHandler"] = \
            input_event.InputEventHandler(
                Script.reviewNextItem,
                cmdnames.REVIEW_NEXT_ITEM)

        self.inputEventHandlers["reviewCurrentAccessibleHandler"] = \
            input_event.InputEventHandler(
                Script.reviewCurrentAccessible,
                cmdnames.REVIEW_CURRENT_ACCESSIBLE)

        self.inputEventHandlers["reviewBelowHandler"] = \
            input_event.InputEventHandler(
                Script.reviewBelow,
                cmdnames.REVIEW_BELOW)

        self.inputEventHandlers["reviewPreviousCharacterHandler"] = \
            input_event.InputEventHandler(
                Script.reviewPreviousCharacter,
                cmdnames.REVIEW_PREVIOUS_CHARACTER)

        self.inputEventHandlers["reviewEndOfLineHandler"] = \
            input_event.InputEventHandler(
                Script.reviewEndOfLine,
                cmdnames.REVIEW_END_OF_LINE)

        self.inputEventHandlers["reviewBottomLeftHandler"] = \
            input_event.InputEventHandler(
                Script.reviewBottomLeft,
                cmdnames.REVIEW_BOTTOM_LEFT)

        self.inputEventHandlers["reviewCurrentCharacterHandler"] = \
            input_event.InputEventHandler(
                Script.reviewCurrentCharacter,
                cmdnames.REVIEW_CURRENT_CHARACTER)

        self.inputEventHandlers["reviewSpellCurrentCharacterHandler"] = \
            input_event.InputEventHandler(
                Script.reviewSpellCurrentCharacter,
                cmdnames.REVIEW_SPELL_CURRENT_CHARACTER)

        self.inputEventHandlers["reviewUnicodeCurrentCharacterHandler"] = \
            input_event.InputEventHandler(
                Script.reviewUnicodeCurrentCharacter,
                cmdnames.REVIEW_UNICODE_CURRENT_CHARACTER)

        self.inputEventHandlers["reviewNextCharacterHandler"] = \
            input_event.InputEventHandler(
                Script.reviewNextCharacter,
                cmdnames.REVIEW_NEXT_CHARACTER)

        self.inputEventHandlers["flatReviewCopyHandler"] = \
            input_event.InputEventHandler(
                Script.flatReviewCopy,
                cmdnames.FLAT_REVIEW_COPY)

        self.inputEventHandlers["flatReviewAppendHandler"] = \
            input_event.InputEventHandler(
                Script.flatReviewAppend,
                cmdnames.FLAT_REVIEW_APPEND)

        self.inputEventHandlers["toggleTableCellReadModeHandler"] = \
            input_event.InputEventHandler(
                Script.toggleTableCellReadMode,
                cmdnames.TOGGLE_TABLE_CELL_READ_MODE)

        self.inputEventHandlers["readCharAttributesHandler"] = \
            input_event.InputEventHandler(
                Script.readCharAttributes,
                cmdnames.READ_CHAR_ATTRIBUTES)

        # Braille display commands.
        self.inputEventHandlers["panBrailleLeftHandler"] = \
            input_event.InputEventHandler(
                Script.panBrailleLeft,
                cmdnames.PAN_BRAILLE_LEFT,
                False) # Do not enable learn mode for this action

        self.inputEventHandlers["panBrailleRightHandler"] = \
            input_event.InputEventHandler(
                Script.panBrailleRight,
                cmdnames.PAN_BRAILLE_RIGHT,
                False) # Do not enable learn mode for this action

        self.inputEventHandlers["goBrailleHomeHandler"] = \
            input_event.InputEventHandler(
                Script.goBrailleHome,
                cmdnames.GO_BRAILLE_HOME)

        self.inputEventHandlers["contractedBrailleHandler"] = \
            input_event.InputEventHandler(
                Script.setContractedBraille,
                cmdnames.SET_CONTRACTED_BRAILLE)

        self.inputEventHandlers["processRoutingKeyHandler"] = \
            input_event.InputEventHandler(
                Script.processRoutingKey,
                cmdnames.PROCESS_ROUTING_KEY)

        self.inputEventHandlers["processBrailleCutBeginHandler"] = \
            input_event.InputEventHandler(
                Script.processBrailleCutBegin,
                cmdnames.PROCESS_BRAILLE_CUT_BEGIN)

        self.inputEventHandlers["processBrailleCutLineHandler"] = \
            input_event.InputEventHandler(
                Script.processBrailleCutLine,
                cmdnames.PROCESS_BRAILLE_CUT_LINE)

        self.inputEventHandlers["enterLearnModeHandler"] = \
            input_event.InputEventHandler(
                Script.enterLearnMode,
                cmdnames.ENTER_LEARN_MODE)

        # Speech parameter commands.
        self.inputEventHandlers["decreaseSpeechRateHandler"] = \
            input_event.InputEventHandler(
                speech.decreaseSpeechRate,
                cmdnames.DECREASE_SPEECH_RATE)

        self.inputEventHandlers["increaseSpeechRateHandler"] = \
            input_event.InputEventHandler(
                speech.increaseSpeechRate,
                cmdnames.INCREASE_SPEECH_RATE)

        self.inputEventHandlers["decreaseSpeechPitchHandler"] = \
            input_event.InputEventHandler(
                speech.decreaseSpeechPitch,
                cmdnames.DECREASE_SPEECH_PITCH)

        self.inputEventHandlers["increaseSpeechPitchHandler"] = \
            input_event.InputEventHandler(
                speech.increaseSpeechPitch,
                cmdnames.INCREASE_SPEECH_PITCH)

        self.inputEventHandlers["decreaseSpeechVolumeHandler"] = \
            input_event.InputEventHandler(
                speech.decreaseSpeechVolume,
                cmdnames.DECREASE_SPEECH_VOLUME)

        self.inputEventHandlers["increaseSpeechVolumeHandler"] = \
            input_event.InputEventHandler(
                speech.increaseSpeechVolume,
                cmdnames.INCREASE_SPEECH_VOLUME)

        self.inputEventHandlers["shutdownHandler"] = \
            input_event.InputEventHandler(
                orca.quitOrca,
                cmdnames.QUIT_ORCA)

        self.inputEventHandlers["preferencesSettingsHandler"] = \
            input_event.InputEventHandler(
                orca.showPreferencesGUI,
                cmdnames.SHOW_PREFERENCES_GUI)

        self.inputEventHandlers["appPreferencesSettingsHandler"] = \
            input_event.InputEventHandler(
                orca.showAppPreferencesGUI,
                cmdnames.SHOW_APP_PREFERENCES_GUI)

        self.inputEventHandlers["toggleSilenceSpeechHandler"] = \
            input_event.InputEventHandler(
                Script.toggleSilenceSpeech,
                cmdnames.TOGGLE_SPEECH)

        self.inputEventHandlers["toggleSpeechVerbosityHandler"] = \
            input_event.InputEventHandler(
                Script.toggleSpeechVerbosity,
                cmdnames.TOGGLE_SPEECH_VERBOSITY)

        self.inputEventHandlers[ \
          "toggleSpeakingIndentationJustificationHandler"] = \
            input_event.InputEventHandler(
                Script.toggleSpeakingIndentationJustification,
                cmdnames.TOGGLE_SPOKEN_INDENTATION_AND_JUSTIFICATION)

        self.inputEventHandlers["cycleSpeakingPunctuationLevelHandler"] = \
            input_event.InputEventHandler(
                Script.cycleSpeakingPunctuationLevel,
                cmdnames.CYCLE_PUNCTUATION_LEVEL)

        self.inputEventHandlers["cycleSettingsProfileHandler"] = \
            input_event.InputEventHandler(
                Script.cycleSettingsProfile,
                cmdnames.CYCLE_SETTINGS_PROFILE)

        self.inputEventHandlers["cycleCapitalizationStyleHandler"] = \
            input_event.InputEventHandler(
                Script.cycleCapitalizationStyle,
                cmdnames.CYCLE_CAPITALIZATION_STYLE)

        self.inputEventHandlers["cycleKeyEchoHandler"] = \
            input_event.InputEventHandler(
                Script.cycleKeyEcho,
                cmdnames.CYCLE_KEY_ECHO)

        self.inputEventHandlers["cycleDebugLevelHandler"] = \
            input_event.InputEventHandler(
                Script.cycleDebugLevel,
                cmdnames.CYCLE_DEBUG_LEVEL)

        # Bookmark commands.
        self.inputEventHandlers["goToPrevBookmark"] = \
            input_event.InputEventHandler(
                Script.goToPrevBookmark,
                cmdnames.BOOKMARK_GO_TO_PREVIOUS)

        self.inputEventHandlers["goToBookmark"] = \
            input_event.InputEventHandler(
                Script.goToBookmark,
                cmdnames.BOOKMARK_GO_TO)

        self.inputEventHandlers["goToNextBookmark"] = \
            input_event.InputEventHandler(
                Script.goToNextBookmark,
                cmdnames.BOOKMARK_GO_TO_NEXT)

        self.inputEventHandlers["addBookmark"] = \
            input_event.InputEventHandler(
                Script.addBookmark,
                cmdnames.BOOKMARK_ADD)

        self.inputEventHandlers["saveBookmarks"] = \
            input_event.InputEventHandler(
                Script.saveBookmarks,
                cmdnames.BOOKMARK_SAVE)

        self.inputEventHandlers["toggleMouseReviewHandler"] = \
            input_event.InputEventHandler(
                mouse_review.toggle,
                cmdnames.MOUSE_REVIEW_TOGGLE)

        self.inputEventHandlers["presentTimeHandler"] = \
            input_event.InputEventHandler(
                Script.presentTime,
                cmdnames.PRESENT_CURRENT_TIME)

        self.inputEventHandlers["presentDateHandler"] = \
            input_event.InputEventHandler(
                Script.presentDate,
                cmdnames.PRESENT_CURRENT_DATE)

        self.inputEventHandlers["bypassNextCommandHandler"] = \
            input_event.InputEventHandler(
                Script.bypassNextCommand,
                cmdnames.BYPASS_NEXT_COMMAND)

        self.inputEventHandlers.update(notification_messages.inputEventHandlers)

    def getInputEventHandlerKey(self, inputEventHandler):
        """Returns the name of the key that contains an inputEventHandler
        passed as argument
        """

        for keyName, handler in list(self.inputEventHandlers.items()):
            if handler == inputEventHandler:
                return keyName

        return None

    def getListeners(self):
        """Sets up the AT-SPI event listeners for this script.
        """

        listeners = script.Script.getListeners(self)

        listeners["focus:"] = \
            self.onFocus
        #listeners["keyboard:modifiers"] = \
        #    self.noOp
        listeners["document:reload"] = \
            self.onDocumentReload
        listeners["document:load-complete"] = \
            self.onDocumentLoadComplete
        listeners["document:load-stopped"] = \
            self.onDocumentLoadStopped
        listeners["mouse:button"] = \
            self.onMouseButton
        listeners["object:property-change:accessible-name"] = \
            self.onNameChanged
        listeners["object:text-caret-moved"] = \
            self.onCaretMoved
        listeners["object:text-changed:delete"] = \
            self.onTextDeleted
        listeners["object:text-changed:insert"] = \
            self.onTextInserted
        listeners["object:active-descendant-changed"] = \
            self.onActiveDescendantChanged
        listeners["object:children-changed"] = \
            self.onChildrenChanged
        listeners["object:state-changed:active"] = \
            self.onActiveChanged
        listeners["object:state-changed:busy"] = \
            self.onBusyChanged
        listeners["object:state-changed:focused"] = \
            self.onFocusedChanged
        listeners["object:state-changed:showing"] = \
            self.onShowingChanged
        listeners["object:state-changed:checked"] = \
            self.onCheckedChanged
        listeners["object:state-changed:pressed"] = \
            self.onPressedChanged
        listeners["object:state-changed:indeterminate"] = \
            self.onIndeterminateChanged
        listeners["object:state-changed:expanded"] = \
            self.onExpandedChanged
        listeners["object:state-changed:selected"] = \
            self.onSelectedChanged
        listeners["object:state-changed:sensitive"] = \
            self.onSensitiveChanged
        listeners["object:text-attributes-changed"] = \
            self.onTextAttributesChanged
        listeners["object:text-selection-changed"] = \
            self.onTextSelectionChanged
        listeners["object:selection-changed"] = \
            self.onSelectionChanged
        # Both the property-change and the value-changed notifications are
        # routed to the same callback.
        listeners["object:property-change:accessible-value"] = \
            self.onValueChanged
        listeners["object:value-changed"] = \
            self.onValueChanged
        listeners["object:column-reordered"] = \
            self.onColumnReordered
        listeners["object:row-reordered"] = \
            self.onRowReordered
        listeners["window:activate"] = \
            self.onWindowActivated
        listeners["window:deactivate"] = \
            self.onWindowDeactivated
        listeners["window:create"] = \
            self.onWindowCreated

        return listeners

    def __getDesktopBindings(self):
        """Returns an instance of keybindings.KeyBindings that use the
        numeric keypad for focus tracking and flat review.
        """

        import orca.desktop_keyboardmap as desktop_keyboardmap
        keyBindings = keybindings.KeyBindings()
        keyBindings.load(desktop_keyboardmap.keymap, self.inputEventHandlers)
        return keyBindings

    def __getLaptopBindings(self):
        """Returns an instance of keybindings.KeyBindings that use the
        the main keyboard keys for focus tracking and flat review.
        """

        import orca.laptop_keyboardmap as laptop_keyboardmap
        keyBindings = keybindings.KeyBindings()
        keyBindings.load(laptop_keyboardmap.keymap, self.inputEventHandlers)
        return keyBindings

    def getKeyBindings(self):
        """Defines the key bindings for this script.

        Returns an instance of keybindings.KeyBindings.
        """

        keyBindings = script.Script.getKeyBindings(self)

        bindings = self.getDefaultKeyBindings()
        for keyBinding in bindings.keyBindings:
            keyBindings.add(keyBinding)

        bindings = self.getToolkitKeyBindings()
        for keyBinding in bindings.keyBindings:
            keyBindings.add(keyBinding)

        bindings = self.getAppKeyBindings()
        for keyBinding in bindings.keyBindings:
            keyBindings.add(keyBinding)

        # User-level overrides come last so they win over the defaults.
        try:
            keyBindings = _settingsManager.overrideKeyBindings(self, keyBindings)
        except:
            debug.println(debug.LEVEL_WARNING,
                          "WARNING: problem overriding keybindings:")
            debug.printException(debug.LEVEL_WARNING)

        return keyBindings

    def getDefaultKeyBindings(self):
        """Returns the default script's keybindings, i.e. without any of
        the toolkit or application specific commands added."""

        keyBindings = keybindings.KeyBindings()

        layout = _settingsManager.getSetting('keyboardLayout')
        if layout == settings.GENERAL_KEYBOARD_LAYOUT_DESKTOP:
            for keyBinding in self.__getDesktopBindings().keyBindings:
                keyBindings.add(keyBinding)
        else:
            for keyBinding in self.__getLaptopBindings().keyBindings:
                keyBindings.add(keyBinding)

        import orca.common_keyboardmap as common_keyboardmap
        keyBindings.load(common_keyboardmap.keymap, self.inputEventHandlers)

        return keyBindings

    def getBrailleBindings(self):
        """Defines the braille bindings for this script.

        Returns a dictionary where the keys are BrlTTY commands and the
        values are InputEventHandler instances.
        """

        brailleBindings = script.Script.getBrailleBindings(self)
        # brlapi (and therefore braille.brlapi) may be unavailable; the
        # AttributeError branch below covers that case.
        try:
            brailleBindings[braille.brlapi.KEY_CMD_FWINLT] = \
                self.inputEventHandlers["panBrailleLeftHandler"]
            brailleBindings[braille.brlapi.KEY_CMD_FWINRT] = \
                self.inputEventHandlers["panBrailleRightHandler"]
            brailleBindings[braille.brlapi.KEY_CMD_LNUP] = \
                self.inputEventHandlers["reviewAboveHandler"]
            brailleBindings[braille.brlapi.KEY_CMD_LNDN] = \
                self.inputEventHandlers["reviewBelowHandler"]
            brailleBindings[braille.brlapi.KEY_CMD_FREEZE] = \
                self.inputEventHandlers["toggleFlatReviewModeHandler"]
            brailleBindings[braille.brlapi.KEY_CMD_TOP_LEFT] = \
                self.inputEventHandlers["reviewHomeHandler"]
            brailleBindings[braille.brlapi.KEY_CMD_BOT_LEFT] = \
                self.inputEventHandlers["reviewBottomLeftHandler"]
            brailleBindings[braille.brlapi.KEY_CMD_HOME] = \
                self.inputEventHandlers["goBrailleHomeHandler"]
            brailleBindings[braille.brlapi.KEY_CMD_SIXDOTS] = \
                self.inputEventHandlers["contractedBrailleHandler"]
            brailleBindings[braille.brlapi.KEY_CMD_ROUTE] = \
                self.inputEventHandlers["processRoutingKeyHandler"]
            brailleBindings[braille.brlapi.KEY_CMD_CUTBEGIN] = \
                self.inputEventHandlers["processBrailleCutBeginHandler"]
            brailleBindings[braille.brlapi.KEY_CMD_CUTLINE] = \
                self.inputEventHandlers["processBrailleCutLineHandler"]
        except AttributeError:
            debug.println(debug.LEVEL_CONFIGURATION,
                          "WARNING: braille bindings unavailable:")
        except:
            debug.println(debug.LEVEL_CONFIGURATION,
                          "WARNING: braille bindings unavailable:")
            debug.printException(debug.LEVEL_CONFIGURATION)

        return brailleBindings

    def deactivate(self):
        """Called when this script is deactivated."""

        self._inSayAll = False
        self._sayAllIsInterrupted = False
        self.pointOfReference = {}

    def processKeyboardEvent(self, keyboardEvent):
        """Processes the given keyboard event. It uses the super
        class equivalent to do most of the work. The only thing done here
        is to detect when the user is trying to get out of learn mode.

        Arguments:
        - keyboardEvent: an instance of input_event.KeyboardEvent
        """

        return script.Script.processKeyboardEvent(self, keyboardEvent)

    def _saveFocusedObjectInfo(self, obj):
        """Saves some basic information about obj. Note that this method is
        intended to be called primarily (if not only) by locusOfFocusChanged().
        It is expected that accessible event callbacks will update the point
        of reference data specific to that event. The goal here is to weed
        out duplicate events."""

        if not obj:
            return

        try:
            role = obj.getRole()
            state = obj.getState()
            name = obj.name
        except:
            # The accessible may have become defunct; nothing to save.
            return

        # We want to save the name because some apps and toolkits emit name
        # changes after the focus or selection has changed, even though the
        # name has not.
        names = self.pointOfReference.get('names', {})
        names[hash(obj)] = name
        self.pointOfReference['names'] = names

        # We want to save the offset for text objects because some apps and
        # toolkits emit caret-moved events immediately after a text object
        # gains focus, even though the caret has not actually moved.
try: text = obj.queryText() except: pass else: self._saveLastCursorPosition(obj, max(0, text.caretOffset)) textSelections = self.pointOfReference.get('textSelections', {}) textSelections[hash(obj)] = text.getSelection(0) self.pointOfReference['textSelections'] = textSelections # We want to save the current row and column of a newly focused # or selected table cell so that on subsequent cell focus/selection # we only present the changed location. if role == pyatspi.ROLE_TABLE_CELL: try: table = obj.parent.queryTable() except: pass else: index = self.utilities.cellIndex(obj) column = table.getColumnAtIndex(index) row = table.getRowAtIndex(index) self.pointOfReference['lastColumn'] = column self.pointOfReference['lastRow'] = row else: self.pointOfReference['lastColumn'] = -1 self.pointOfReference['lastRow'] = -1 self.pointOfReference['checkedChange'] = \ hash(obj), state.contains(pyatspi.STATE_CHECKED) def locusOfFocusChanged(self, event, oldLocusOfFocus, newLocusOfFocus): """Called when the visual object with focus changes. Arguments: - event: if not None, the Event that caused the change - oldLocusOfFocus: Accessible that is the old locus of focus - newLocusOfFocus: Accessible that is the new locus of focus """ if not newLocusOfFocus: orca_state.noFocusTimeStamp = time.time() return if newLocusOfFocus.getState().contains(pyatspi.STATE_DEFUNCT): return if self.utilities.isSameObject(oldLocusOfFocus, newLocusOfFocus): return try: if self.findCommandRun: # Then the Orca Find dialog has just given up focus # to the original window. We don't want to speak # the window title, current line, etc. 
return except: pass if self.flatReviewContext: self.toggleFlatReviewMode() self.updateBraille(newLocusOfFocus) shouldNotInterrupt = \ self.windowActivateTime and time.time() - self.windowActivateTime < 1 # [[[TODO: WDW - this should move to the generator.]]] if newLocusOfFocus.getRole() == pyatspi.ROLE_LINK: voice = self.voices[settings.HYPERLINK_VOICE] else: voice = self.voices[settings.DEFAULT_VOICE] utterances = self.speechGenerator.generateSpeech( newLocusOfFocus, priorObj=oldLocusOfFocus) speech.speak(utterances, voice, not shouldNotInterrupt) self._saveFocusedObjectInfo(newLocusOfFocus) def activate(self): """Called when this script is activated.""" _settingsManager.loadAppSettings(self) braille.setupKeyRanges(list(self.brailleBindings.keys())) speech.updatePunctuationLevel() def updateBraille(self, obj, extraRegion=None): """Updates the braille display to show the give object. Arguments: - obj: the Accessible - extra: extra Region to add to the end """ if not _settingsManager.getSetting('enableBraille') \ and not _settingsManager.getSetting('enableBrailleMonitor'): debug.println(debug.LEVEL_INFO, "BRAILLE: update disabled") return if not obj: return self.clearBraille() line = self.getNewBrailleLine() braille.addLine(line) result = self.brailleGenerator.generateBraille(obj) self.addBrailleRegionsToLine(result[0], line) if extraRegion: self.addBrailleRegionToLine(extraRegion, line) if extraRegion: self.setBrailleFocus(extraRegion) else: self.setBrailleFocus(result[1]) self.refreshBraille(True) ######################################################################## # # # INPUT EVENT HANDLERS (AKA ORCA COMMANDS) # # # ######################################################################## def bypassNextCommand(self, inputEvent=None): """Causes the next keyboard command to be ignored by Orca and passed along to the current application. Returns True to indicate the input event has been consumed. 
""" self.presentMessage(messages.BYPASS_MODE_ENABLED) orca_state.bypassNextCommand = True return True def enterLearnMode(self, inputEvent=None): """Turns learn mode on. The user must press the escape key to exit learn mode. Returns True to indicate the input event has been consumed. """ if orca_state.learnModeEnabled: return True self.presentMessage(messages.VERSION) self.speakMessage(messages.LEARN_MODE_START_SPEECH) self.displayBrailleMessage(messages.LEARN_MODE_START_BRAILLE) orca_state.learnModeEnabled = True return True def exitLearnMode(self, inputEvent=None): """Turns learn mode off. Returns True to indicate the input event has been consumed. """ if not orca_state.learnModeEnabled: return False if isinstance(inputEvent, input_event.KeyboardEvent) \ and not inputEvent.event_string == 'Escape': return False self.presentMessage(messages.LEARN_MODE_STOP) orca_state.learnModeEnabled = False return True def listOrcaShortcuts(self, inputEvent=None): """Shows a simple gui listing Orca's bound commands.""" if not inputEvent or inputEvent.event_string == "F2": bound = self.getDefaultKeyBindings().getBoundBindings() title = messages.shortcutsFoundOrca(len(bound)) else: try: appName = self.app.name except AttributeError: appName = messages.APPLICATION_NO_NAME bound = self.getAppKeyBindings().getBoundBindings() bound.extend(self.getToolkitKeyBindings().getBoundBindings()) title = messages.shortcutsFoundApp(len(bound), appName) if not bound: self.presentMessage(title) return True self.exitLearnMode() rows = [(kb.handler.function, kb.handler.description, kb.asString()) for kb in bound] sorted(rows, key=lambda cmd: cmd[2]) header1 = guilabels.KB_HEADER_FUNCTION header2 = guilabels.KB_HEADER_KEY_BINDING commandlist.showUI(title, ("", header1, header2), rows, False) return True def findNext(self, inputEvent): """Searches forward for the next instance of the string searched for via the Orca Find dialog. 
Other than direction and the starting point, the search options initially specified (case sensitivity, window wrap, and full/partial match) are preserved. """ lastQuery = find.getLastQuery() if lastQuery: lastQuery.searchBackwards = False lastQuery.startAtTop = False self.find(lastQuery) else: orca.showFindGUI() def findPrevious(self, inputEvent): """Searches backwards for the next instance of the string searched for via the Orca Find dialog. Other than direction and the starting point, the search options initially specified (case sensitivity, window wrap, and full/or partial match) are preserved. """ lastQuery = find.getLastQuery() if lastQuery: lastQuery.searchBackwards = True lastQuery.startAtTop = False self.find(lastQuery) else: orca.showFindGUI() def addBookmark(self, inputEvent): """ Add an in-page accessible object bookmark for this key. Delegates to Bookmark.addBookmark """ bookmarks = self.getBookmarks() bookmarks.addBookmark(inputEvent) def goToBookmark(self, inputEvent): """ Go to the bookmark indexed by inputEvent.hw_code. Delegates to Bookmark.goToBookmark """ bookmarks = self.getBookmarks() bookmarks.goToBookmark(inputEvent) def goToNextBookmark(self, inputEvent): """ Go to the next bookmark location. If no bookmark has yet to be selected, the first bookmark will be used. Delegates to Bookmark.goToNextBookmark """ bookmarks = self.getBookmarks() bookmarks.goToNextBookmark(inputEvent) def goToPrevBookmark(self, inputEvent): """ Go to the previous bookmark location. If no bookmark has yet to be selected, the first bookmark will be used. Delegates to Bookmark.goToPrevBookmark """ bookmarks = self.getBookmarks() bookmarks.goToPrevBookmark(inputEvent) def saveBookmarks(self, inputEvent): """ Save the bookmarks for this script. Delegates to Bookmark.saveBookmarks """ bookmarks = self.getBookmarks() bookmarks.saveBookmarks(inputEvent) def panBrailleLeft(self, inputEvent=None, panAmount=0): """Pans the braille display to the left. 
If panAmount is non-zero, the display is panned by that many cells. If it is 0, the display is panned one full display width. In flat review mode, panning beyond the beginning will take you to the end of the previous line. In focus tracking mode, the cursor stays at its logical position. In flat review mode, the review cursor moves to character associated with cell 0.""" if self.flatReviewContext: if self.isBrailleBeginningShowing(): self.flatReviewContext.goBegin(flat_review.Context.LINE) self.reviewPreviousCharacter(inputEvent) else: self.panBrailleInDirection(panAmount, panToLeft=True) # This will update our target cursor cell # self._setFlatReviewContextToBeginningOfBrailleDisplay() [charString, x, y, width, height] = \ self.flatReviewContext.getCurrent(flat_review.Context.CHAR) self.targetCursorCell = 1 self.updateBrailleReview(self.targetCursorCell) elif self.isBrailleBeginningShowing() and orca_state.locusOfFocus \ and self.utilities.isTextArea(orca_state.locusOfFocus): # If we're at the beginning of a line of a multiline text # area, then force it's caret to the end of the previous # line. The assumption here is that we're currently # viewing the line that has the caret -- which is a pretty # good assumption for focus tacking mode. When we set the # caret position, we will get a caret event, which will # then update the braille. # text = orca_state.locusOfFocus.queryText() [lineString, startOffset, endOffset] = text.getTextAtOffset( text.caretOffset, pyatspi.TEXT_BOUNDARY_LINE_START) movedCaret = False if startOffset > 0: movedCaret = text.setCaretOffset(startOffset - 1) # If we didn't move the caret and we're in a terminal, we # jump into flat review to review the text. See # http://bugzilla.gnome.org/show_bug.cgi?id=482294. 
# if (not movedCaret) \ and (orca_state.locusOfFocus.getRole() \ == pyatspi.ROLE_TERMINAL): context = self.getFlatReviewContext() context.goBegin(flat_review.Context.LINE) self.reviewPreviousCharacter(inputEvent) else: self.panBrailleInDirection(panAmount, panToLeft=True) # We might be panning through a flashed message. # braille.resetFlashTimer() self.refreshBraille(False, stopFlash=False) return True def panBrailleLeftOneChar(self, inputEvent=None): """Nudges the braille display one character to the left. In focus tracking mode, the cursor stays at its logical position. In flat review mode, the review cursor moves to character associated with cell 0.""" self.panBrailleLeft(inputEvent, 1) def panBrailleRight(self, inputEvent=None, panAmount=0): """Pans the braille display to the right. If panAmount is non-zero, the display is panned by that many cells. If it is 0, the display is panned one full display width. In flat review mode, panning beyond the end will take you to the begininng of the next line. In focus tracking mode, the cursor stays at its logical position. In flat review mode, the review cursor moves to character associated with cell 0.""" if self.flatReviewContext: if self.isBrailleEndShowing(): self.flatReviewContext.goEnd(flat_review.Context.LINE) self.reviewNextCharacter(inputEvent) else: self.panBrailleInDirection(panAmount, panToLeft=False) # This will update our target cursor cell # self._setFlatReviewContextToBeginningOfBrailleDisplay() [charString, x, y, width, height] = \ self.flatReviewContext.getCurrent(flat_review.Context.CHAR) self.targetCursorCell = 1 self.updateBrailleReview(self.targetCursorCell) elif self.isBrailleEndShowing() and orca_state.locusOfFocus \ and self.utilities.isTextArea(orca_state.locusOfFocus): # If we're at the end of a line of a multiline text area, then # force it's caret to the beginning of the next line. 
The # assumption here is that we're currently viewing the line that # has the caret -- which is a pretty good assumption for focus # tacking mode. When we set the caret position, we will get a # caret event, which will then update the braille. # text = orca_state.locusOfFocus.queryText() [lineString, startOffset, endOffset] = text.getTextAtOffset( text.caretOffset, pyatspi.TEXT_BOUNDARY_LINE_START) if endOffset < text.characterCount: text.setCaretOffset(endOffset) else: self.panBrailleInDirection(panAmount, panToLeft=False) # We might be panning through a flashed message. # braille.resetFlashTimer() self.refreshBraille(False, stopFlash=False) return True def panBrailleRightOneChar(self, inputEvent=None): """Nudges the braille display one character to the right. In focus tracking mode, the cursor stays at its logical position. In flat review mode, the review cursor moves to character associated with cell 0.""" self.panBrailleRight(inputEvent, 1) def goBrailleHome(self, inputEvent=None): """Returns to the component with focus.""" if self.flatReviewContext: return self.toggleFlatReviewMode(inputEvent) else: return braille.returnToRegionWithFocus(inputEvent) def setContractedBraille(self, inputEvent=None): """Toggles contracted braille.""" self._setContractedBraille(inputEvent) return True def processRoutingKey(self, inputEvent=None): """Processes a cursor routing key.""" braille.processRoutingKey(inputEvent) return True def processBrailleCutBegin(self, inputEvent=None): """Clears the selection and moves the caret offset in the currently active text area. 
""" obj, caretOffset = self.getBrailleCaretContext(inputEvent) if caretOffset >= 0: self.utilities.clearTextSelection(obj) self.utilities.setCaretOffset(obj, caretOffset) return True def processBrailleCutLine(self, inputEvent=None): """Extends the text selection in the currently active text area and also copies the selected text to the system clipboard.""" obj, caretOffset = self.getBrailleCaretContext(inputEvent) if caretOffset >= 0: self.utilities.adjustTextSelection(obj, caretOffset) texti = obj.queryText() startOffset, endOffset = texti.getSelection(0) string = texti.getText(startOffset, endOffset) clipboard = Gtk.Clipboard.get(Gdk.Atom.intern("CLIPBOARD", False)) clipboard.set_text(string, len(string)) return True def routePointerToItem(self, inputEvent=None): """Moves the mouse pointer to the current item.""" # Store the original location for scripts which want to restore # it later. # self.oldMouseCoordinates = self.utilities.absoluteMouseCoordinates() self.lastMouseRoutingTime = time.time() if self.flatReviewContext: self.flatReviewContext.routeToCurrent() else: try: eventsynthesizer.routeToCharacter(orca_state.locusOfFocus) except: try: eventsynthesizer.routeToObject(orca_state.locusOfFocus) except: full = messages.LOCATION_NOT_FOUND_FULL brief = messages.LOCATION_NOT_FOUND_BRIEF self.presentMessage(full, brief) return True def presentStatusBar(self, inputEvent): """Speaks and brailles the contents of the status bar and/or default button of the window with focus. """ obj = orca_state.locusOfFocus self.updateBraille(obj) voice = self.voices[settings.DEFAULT_VOICE] frame, dialog = self.utilities.frameAndDialog(obj) if frame: # In windows with lots of objects (Thunderbird, Firefox, etc.) # If we wait until we've checked for both the status bar and # a default button, there may be a noticable delay. Therefore, # speak the status bar info immediately and then go looking # for a default button. 
# msg = self.speechGenerator.generateStatusBar(frame) if msg: self.presentMessage(msg, voice=voice) window = dialog or frame if window: msg = self.speechGenerator.generateDefaultButton(window) if msg: self.presentMessage(msg, voice=voice) def presentTitle(self, inputEvent): """Speaks and brailles the title of the window with focus.""" title = self.speechGenerator.generateTitle(orca_state.locusOfFocus) for (string, voice) in title: self.presentMessage(string, voice=voice) def readCharAttributes(self, inputEvent=None): """Reads the attributes associated with the current text character. Calls outCharAttributes to speak a list of attributes. By default, a certain set of attributes will be spoken. If this is not desired, then individual application scripts should override this method to only speak the subset required. """ attrs, start, end = self.utilities.textAttributes(orca_state.locusOfFocus, None, True) # Get a dictionary of text attributes that the user cares about. [userAttrList, userAttrDict] = self.utilities.stringToKeysAndDict( _settingsManager.getSetting('enabledSpokenTextAttributes')) # Because some implementors make up their own attribute names, # we need to convert. 
userAttrList = list(map(self.utilities.getAppNameForAttribute, userAttrList)) nullValues = ['0', '0mm', 'none', 'false'] for key in userAttrList: value = attrs.get(key) ignoreIfValue = userAttrDict.get(key) if value in nullValues and ignoreIfValue in nullValues: continue if value and value != ignoreIfValue: self.speakMessage(self.utilities.localizeTextAttribute(key, value)) return True def leftClickReviewItem(self, inputEvent=None): """Performs a left mouse button click on the current item.""" if self.flatReviewContext: self.flatReviewContext.clickCurrent(1) else: try: eventsynthesizer.clickCharacter(orca_state.locusOfFocus, 1) except: try: eventsynthesizer.clickObject(orca_state.locusOfFocus, 1) except: self.speakMessage(messages.LOCATION_NOT_FOUND_FULL) return True def rightClickReviewItem(self, inputEvent=None): """Performs a right mouse button click on the current item.""" if self.flatReviewContext: self.flatReviewContext.clickCurrent(3) else: try: eventsynthesizer.clickCharacter(orca_state.locusOfFocus, 3) except: try: eventsynthesizer.clickObject(orca_state.locusOfFocus, 3) except: full = messages.LOCATION_NOT_FOUND_FULL brief = messages.LOCATION_NOT_FOUND_BRIEF self.presentMessage(full, brief) return True def spellCurrentItem(self, itemString): """Spell the current flat review word or line. Arguments: - itemString: the string to spell. """ for character in itemString: self.speakCharacter(character) def _reviewCurrentItem(self, inputEvent, targetCursorCell=0, speechType=1): """Presents the current item to the user. Arguments: - inputEvent - the current input event. - targetCursorCell - if non-zero, the target braille cursor cell. - speechType - the desired presentation: speak (1), spell (2), or phonetic (3). """ context = self.getFlatReviewContext() [wordString, x, y, width, height] = \ context.getCurrent(flat_review.Context.WORD) # Don't announce anything from speech if the user used # the Braille display as an input device. 
# if not isinstance(inputEvent, input_event.BrailleEvent): if (not wordString) \ or (not len(wordString)) \ or (wordString == "\n"): speech.speak(messages.BLANK) else: [lineString, x, y, width, height] = \ context.getCurrent(flat_review.Context.LINE) if lineString == "\n": speech.speak(messages.BLANK) elif wordString.isspace(): speech.speak(messages.WHITE_SPACE) elif wordString.isupper() and speechType == 1: speech.speak(wordString, self.voices[settings.UPPERCASE_VOICE]) elif speechType == 2: self.spellCurrentItem(wordString) elif speechType == 3: self.phoneticSpellCurrentItem(wordString) elif speechType == 1: wordString = self.utilities.adjustForRepeats(wordString) speech.speak(wordString) self.updateBrailleReview(targetCursorCell) self.currentReviewContents = wordString return True def reviewCurrentAccessible(self, inputEvent): context = self.getFlatReviewContext() [zoneString, x, y, width, height] = \ context.getCurrent(flat_review.Context.ZONE) # Don't announce anything from speech if the user used # the Braille display as an input device. # if not isinstance(inputEvent, input_event.BrailleEvent): utterances = self.speechGenerator.generateSpeech( context.getCurrentAccessible()) utterances.extend(self.tutorialGenerator.getTutorial( context.getCurrentAccessible(), False)) speech.speak(utterances) return True def reviewPreviousItem(self, inputEvent): """Moves the flat review context to the previous item. Places the flat review cursor at the beginning of the item.""" context = self.getFlatReviewContext() moved = context.goPrevious(flat_review.Context.WORD, flat_review.Context.WRAP_LINE) if moved: self._reviewCurrentItem(inputEvent) self.targetCursorCell = self.getBrailleCursorCell() return True def reviewNextItem(self, inputEvent): """Moves the flat review context to the next item. 
Places the flat review cursor at the beginning of the item.""" context = self.getFlatReviewContext() moved = context.goNext(flat_review.Context.WORD, flat_review.Context.WRAP_LINE) if moved: self._reviewCurrentItem(inputEvent) self.targetCursorCell = self.getBrailleCursorCell() return True def reviewCurrentCharacter(self, inputEvent): """Brailles and speaks the current flat review character.""" self._reviewCurrentCharacter(inputEvent, 1) return True def reviewSpellCurrentCharacter(self, inputEvent): """Brailles and 'spells' (phonetically) the current flat review character. """ self._reviewCurrentCharacter(inputEvent, 2) return True def reviewUnicodeCurrentCharacter(self, inputEvent): """Brailles and speaks unicode information about the current flat review character. """ self._reviewCurrentCharacter(inputEvent, 3) return True def _reviewCurrentCharacter(self, inputEvent, speechType=1): """Presents the current flat review character via braille and speech. Arguments: - inputEvent - the current input event. - speechType - the desired presentation: speak (1), phonetic (2) unicode value information (3) """ context = self.getFlatReviewContext() [charString, x, y, width, height] = \ context.getCurrent(flat_review.Context.CHAR) # Don't announce anything from speech if the user used # the Braille display as an input device. # if not isinstance(inputEvent, input_event.BrailleEvent): if (not charString) or (not len(charString)): speech.speak(messages.BLANK) else: [lineString, x, y, width, height] = \ context.getCurrent(flat_review.Context.LINE) if lineString == "\n" and speechType != 3: speech.speak(messages.BLANK) elif speechType == 3: self.speakUnicodeCharacter(charString) elif speechType == 2: self.phoneticSpellCurrentItem(charString) else: self.speakCharacter(charString) self.updateBrailleReview() self.currentReviewContents = charString return True def reviewPreviousCharacter(self, inputEvent): """Moves the flat review context to the previous character. 
Places the flat review cursor at character.""" context = self.getFlatReviewContext() moved = context.goPrevious(flat_review.Context.CHAR, flat_review.Context.WRAP_LINE) if moved: self._reviewCurrentCharacter(inputEvent) self.targetCursorCell = self.getBrailleCursorCell() return True def reviewEndOfLine(self, inputEvent): """Moves the flat review context to the end of the line. Places the flat review cursor at the end of the line.""" context = self.getFlatReviewContext() context.goEnd(flat_review.Context.LINE) self.reviewCurrentCharacter(inputEvent) self.targetCursorCell = self.getBrailleCursorCell() return True def reviewNextCharacter(self, inputEvent): """Moves the flat review context to the next character. Places the flat review cursor at character.""" context = self.getFlatReviewContext() moved = context.goNext(flat_review.Context.CHAR, flat_review.Context.WRAP_LINE) if moved: self._reviewCurrentCharacter(inputEvent) self.targetCursorCell = self.getBrailleCursorCell() return True def reviewAbove(self, inputEvent): """Moves the flat review context to the character most directly above the current flat review cursor. Places the flat review cursor at character.""" context = self.getFlatReviewContext() moved = context.goAbove(flat_review.Context.CHAR, flat_review.Context.WRAP_LINE) if moved: self._reviewCurrentItem(inputEvent, self.targetCursorCell) return True def reviewBelow(self, inputEvent): """Moves the flat review context to the character most directly below the current flat review cursor. 
Places the flat review cursor at character.""" context = self.getFlatReviewContext() moved = context.goBelow(flat_review.Context.CHAR, flat_review.Context.WRAP_LINE) if moved: self._reviewCurrentItem(inputEvent, self.targetCursorCell) return True def reviewCurrentLine(self, inputEvent): """Brailles and speaks the current flat review line.""" self._reviewCurrentLine(inputEvent, 1) return True def reviewSpellCurrentLine(self, inputEvent): """Brailles and spells the current flat review line.""" self._reviewCurrentLine(inputEvent, 2) return True def reviewPhoneticCurrentLine(self, inputEvent): """Brailles and phonetically spells the current flat review line.""" self._reviewCurrentLine(inputEvent, 3) return True def _reviewCurrentLine(self, inputEvent, speechType=1): """Presents the current flat review line via braille and speech. Arguments: - inputEvent - the current input event. - speechType - the desired presentation: speak (1), spell (2), or phonetic (3) """ context = self.getFlatReviewContext() [lineString, x, y, width, height] = \ context.getCurrent(flat_review.Context.LINE) # Don't announce anything from speech if the user used # the Braille display as an input device. 
# if not isinstance(inputEvent, input_event.BrailleEvent): if (not lineString) \ or (not len(lineString)) \ or (lineString == "\n"): speech.speak(messages.BLANK) elif lineString.isspace(): speech.speak(messages.WHITE_SPACE) elif lineString.isupper() \ and (speechType < 2 or speechType > 3): speech.speak(lineString, self.voices[settings.UPPERCASE_VOICE]) elif speechType == 2: self.spellCurrentItem(lineString) elif speechType == 3: self.phoneticSpellCurrentItem(lineString) else: lineString = self.utilities.adjustForRepeats(lineString) speech.speak(lineString) self.updateBrailleReview() self.currentReviewContents = lineString return True def reviewPreviousLine(self, inputEvent): """Moves the flat review context to the beginning of the previous line.""" context = self.getFlatReviewContext() moved = context.goPrevious(flat_review.Context.LINE, flat_review.Context.WRAP_LINE) if moved: self._reviewCurrentLine(inputEvent) self.targetCursorCell = self.getBrailleCursorCell() return True def reviewHome(self, inputEvent): """Moves the flat review context to the top left of the current window.""" context = self.getFlatReviewContext() context.goBegin() self._reviewCurrentLine(inputEvent) self.targetCursorCell = self.getBrailleCursorCell() return True def reviewNextLine(self, inputEvent): """Moves the flat review context to the beginning of the next line. Places the flat review cursor at the beginning of the line.""" context = self.getFlatReviewContext() moved = context.goNext(flat_review.Context.LINE, flat_review.Context.WRAP_LINE) if moved: self._reviewCurrentLine(inputEvent) self.targetCursorCell = self.getBrailleCursorCell() return True def reviewBottomLeft(self, inputEvent): """Moves the flat review context to the beginning of the last line in the window. 
Places the flat review cursor at the beginning of the line.""" context = self.getFlatReviewContext() context.goEnd(flat_review.Context.WINDOW) context.goBegin(flat_review.Context.LINE) self._reviewCurrentLine(inputEvent) self.targetCursorCell = self.getBrailleCursorCell() return True def reviewEnd(self, inputEvent): """Moves the flat review context to the end of the last line in the window. Places the flat review cursor at the end of the line.""" context = self.getFlatReviewContext() context.goEnd() self._reviewCurrentLine(inputEvent) self.targetCursorCell = self.getBrailleCursorCell() return True def reviewCurrentItem(self, inputEvent, targetCursorCell=0): """Brailles and speaks the current item to the user.""" self._reviewCurrentItem(inputEvent, targetCursorCell, 1) return True def reviewSpellCurrentItem(self, inputEvent, targetCursorCell=0): """Brailles and spells the current item to the user.""" self._reviewCurrentItem(inputEvent, targetCursorCell, 2) return True def reviewPhoneticCurrentItem(self, inputEvent, targetCursorCell=0): """Brailles and phonetically spells the current item to the user.""" self._reviewCurrentItem(inputEvent, targetCursorCell, 3) return True def flatReviewCopy(self, inputEvent): """Copies the contents of the item under flat review to and places them in the clipboard.""" if self.flatReviewContext: clipboard = Gtk.Clipboard.get(Gdk.Atom.intern("CLIPBOARD", False)) clipboard.set_text( self.currentReviewContents, len(self.currentReviewContents)) self.presentMessage(messages.FLAT_REVIEW_COPIED) else: self.presentMessage(messages.FLAT_REVIEW_NOT_IN) return True def _appendToClipboard(self, clipboard, text, newText): """Appends newText to text and places the results in the clipboard.""" text = text.rstrip("\n") text = "%s\n%s" % (text, newText) if clipboard: clipboard.set_text(text, len(text)) return True def flatReviewAppend(self, inputEvent): """Appends the contents of the item under flat review to the clipboard.""" if 
self.flatReviewContext: clipboard = Gtk.Clipboard.get(Gdk.Atom.intern("CLIPBOARD", False)) clipboard.request_text( self._appendToClipboard, self.currentReviewContents) self.presentMessage(messages.FLAT_REVIEW_APPENDED) else: self.presentMessage(messages.FLAT_REVIEW_NOT_IN) return True def sayAll(self, inputEvent, obj=None, offset=None): try: clickCount = inputEvent.getClickCount() except: clickCount = 1 doubleClick = clickCount == 2 if doubleClick: # Try to "say all" for the current dialog/window by flat # reviewing everything. See bug #354462 for more details. # context = self.getFlatReviewContext() utterances = [] context.goBegin() while True: [wordString, x, y, width, height] = \ context.getCurrent(flat_review.Context.ZONE) utterances.append(wordString) moved = context.goNext(flat_review.Context.ZONE, flat_review.Context.WRAP_LINE) if not moved: break speech.speak(utterances) return obj = obj or orca_state.locusOfFocus try: text = obj.queryText() except NotImplementedError: utterances = self.speechGenerator.generateSpeech(obj) utterances.extend(self.tutorialGenerator.getTutorial(obj, False)) speech.speak(utterances) except AttributeError: pass else: if offset == None: offset = text.caretOffset speech.sayAll(self.textLines(obj, offset), self.__sayAllProgressCallback) return True def toggleFlatReviewMode(self, inputEvent=None): """Toggles between flat review mode and focus tracking mode.""" verbosity = _settingsManager.getSetting('speechVerbosityLevel') if self.flatReviewContext: if inputEvent and verbosity != settings.VERBOSITY_LEVEL_BRIEF: self.presentMessage(messages.FLAT_REVIEW_STOP) self.flatReviewContext = None self.updateBraille(orca_state.locusOfFocus) else: if inputEvent and verbosity != settings.VERBOSITY_LEVEL_BRIEF: self.presentMessage(messages.FLAT_REVIEW_START) context = self.getFlatReviewContext() [wordString, x, y, width, height] = \ context.getCurrent(flat_review.Context.WORD) self._reviewCurrentItem(inputEvent, self.targetCursorCell) return True 
def toggleSilenceSpeech(self, inputEvent=None): """Toggle the silencing of speech. Returns True to indicate the input event has been consumed. """ speech.stop() if _settingsManager.getSetting('silenceSpeech'): _settingsManager.setSetting('silenceSpeech', False) self.presentMessage(messages.SPEECH_ENABLED) elif not _settingsManager.getSetting('enableSpeech'): _settingsManager.setSetting('enableSpeech', True) speech.init() self.presentMessage(messages.SPEECH_ENABLED) else: self.presentMessage(messages.SPEECH_DISABLED) _settingsManager.setSetting('silenceSpeech', True) return True def toggleSpeechVerbosity(self, inputEvent=None): """Toggles speech verbosity level between verbose and brief.""" value = _settingsManager.getSetting('speechVerbosityLevel') if value == settings.VERBOSITY_LEVEL_BRIEF: self.presentMessage(messages.SPEECH_VERBOSITY_VERBOSE) _settingsManager.setSetting( 'speechVerbosityLevel', settings.VERBOSITY_LEVEL_VERBOSE) else: self.presentMessage(messages.SPEECH_VERBOSITY_BRIEF) _settingsManager.setSetting( 'speechVerbosityLevel', settings.VERBOSITY_LEVEL_BRIEF) return True def toggleSpeakingIndentationJustification(self, inputEvent=None): """Toggles the speaking of indentation and justification.""" value = _settingsManager.getSetting('enableSpeechIndentation') _settingsManager.setSetting('enableSpeechIndentation', not value) if _settingsManager.getSetting('enableSpeechIndentation'): full = messages.INDENTATION_JUSTIFICATION_ON_FULL brief = messages.INDENTATION_JUSTIFICATION_ON_BRIEF else: full = messages.INDENTATION_JUSTIFICATION_OFF_FULL brief = messages.INDENTATION_JUSTIFICATION_OFF_BRIEF self.presentMessage(full, brief) return True def cycleSpeakingPunctuationLevel(self, inputEvent=None): """ Cycle through the punctuation levels for speech. 
""" currentLevel = _settingsManager.getSetting('verbalizePunctuationStyle') if currentLevel == settings.PUNCTUATION_STYLE_NONE: newLevel = settings.PUNCTUATION_STYLE_SOME full = messages.PUNCTUATION_SOME_FULL brief = messages.PUNCTUATION_SOME_BRIEF elif currentLevel == settings.PUNCTUATION_STYLE_SOME: newLevel = settings.PUNCTUATION_STYLE_MOST full = messages.PUNCTUATION_MOST_FULL brief = messages.PUNCTUATION_MOST_BRIEF elif currentLevel == settings.PUNCTUATION_STYLE_MOST: newLevel = settings.PUNCTUATION_STYLE_ALL full = messages.PUNCTUATION_ALL_FULL brief = messages.PUNCTUATION_ALL_BRIEF else: newLevel = settings.PUNCTUATION_STYLE_NONE full = messages.PUNCTUATION_NONE_FULL brief = messages.PUNCTUATION_NONE_BRIEF _settingsManager.setSetting('verbalizePunctuationStyle', newLevel) self.presentMessage(full, brief) speech.updatePunctuationLevel() return True def cycleSettingsProfile(self, inputEvent=None): """Cycle through the user's existing settings profiles.""" profiles = _settingsManager.availableProfiles() if not (profiles and profiles[0]): self.presentMessage(messages.PROFILE_NOT_FOUND) return True isMatch = lambda x: x[1] == _settingsManager.getProfile() current = list(filter(isMatch, profiles))[0] try: name, profileID = profiles[profiles.index(current) + 1] except IndexError: name, profileID = profiles[0] _settingsManager.setProfile(profileID, updateLocale=True) # TODO: The right fix is to go find each and every case where we use # self.voices directly and instead get the voices from the Settings # Manager. But that's too big a change too close to code freeze. So # for now we'll hack. self.voices = _settingsManager.getSetting('voices') # TODO: This is another "too close to code freeze" hack to cause the # command names to be presented in the correct language. 
        self.setupInputEventHandlers()
        self.presentMessage(messages.PROFILE_CHANGED % name, name)
        return True

    def cycleCapitalizationStyle(self, inputEvent=None):
        """Cycle through the speech-dispatcher capitalization styles
        (none -> spell -> icon -> none), announce the new style, and
        push it to the speech system.

        Returns True to indicate the command was consumed.
        """

        currentStyle = _settingsManager.getSetting('capitalizationStyle')
        if currentStyle == settings.CAPITALIZATION_STYLE_NONE:
            newStyle = settings.CAPITALIZATION_STYLE_SPELL
            full = messages.CAPITALIZATION_SPELL_FULL
            brief = messages.CAPITALIZATION_SPELL_BRIEF
        elif currentStyle == settings.CAPITALIZATION_STYLE_SPELL:
            newStyle = settings.CAPITALIZATION_STYLE_ICON
            full = messages.CAPITALIZATION_ICON_FULL
            brief = messages.CAPITALIZATION_ICON_BRIEF
        else:
            newStyle = settings.CAPITALIZATION_STYLE_NONE
            full = messages.CAPITALIZATION_NONE_FULL
            brief = messages.CAPITALIZATION_NONE_BRIEF

        _settingsManager.setSetting('capitalizationStyle', newStyle)
        self.presentMessage(full, brief)
        speech.updateCapitalizationStyle()
        return True

    def cycleKeyEcho(self, inputEvent=None):
        """Cycle through the supported key echo combinations:
        key -> word -> sentence -> key+word -> word+sentence -> none,
        announcing the newly-selected combination.

        Returns True to indicate the command was consumed.
        """

        (newKey, newWord, newSentence) = (False, False, False)
        key = _settingsManager.getSetting('enableKeyEcho')
        word = _settingsManager.getSetting('enableEchoByWord')
        sentence = _settingsManager.getSetting('enableEchoBySentence')

        if (key, word, sentence) == (False, False, False):
            (newKey, newWord, newSentence) = (True, False, False)
            full = messages.KEY_ECHO_KEY_FULL
            brief = messages.KEY_ECHO_KEY_BRIEF
        elif (key, word, sentence) == (True, False, False):
            (newKey, newWord, newSentence) = (False, True, False)
            full = messages.KEY_ECHO_WORD_FULL
            brief = messages.KEY_ECHO_WORD_BRIEF
        elif (key, word, sentence) == (False, True, False):
            (newKey, newWord, newSentence) = (False, False, True)
            full = messages.KEY_ECHO_SENTENCE_FULL
            brief = messages.KEY_ECHO_SENTENCE_BRIEF
        elif (key, word, sentence) == (False, False, True):
            (newKey, newWord, newSentence) = (True, True, False)
            full = messages.KEY_ECHO_KEY_AND_WORD_FULL
            brief = messages.KEY_ECHO_KEY_AND_WORD_BRIEF
        elif (key, word, sentence) == (True, True, False):
            (newKey, newWord, newSentence) = (False, True, True)
            full = messages.KEY_ECHO_WORD_AND_SENTENCE_FULL
            brief = messages.KEY_ECHO_WORD_AND_SENTENCE_BRIEF
        else:
            # Any other combination (e.g. all three enabled) wraps
            # back around to no echo at all.
            (newKey, newWord, newSentence) = (False, False, False)
            full = messages.KEY_ECHO_NONE_FULL
            brief = messages.KEY_ECHO_NONE_BRIEF

        _settingsManager.setSetting('enableKeyEcho', newKey)
        _settingsManager.setSetting('enableEchoByWord', newWord)
        _settingsManager.setSetting('enableEchoBySentence', newSentence)
        self.presentMessage(full, brief)
        return True

    def toggleTableCellReadMode(self, inputEvent=None):
        """Toggles an indicator for whether we should just read the
        current table cell or read the whole row.

        Returns True to indicate the command was consumed.
        """

        speakRow = _settingsManager.getSetting('readTableCellRow')
        _settingsManager.setSetting('readTableCellRow', not speakRow)
        if not speakRow:
            line = messages.TABLE_MODE_ROW
        else:
            line = messages.TABLE_MODE_CELL
        self.presentMessage(line)
        return True

    def doWhereAmI(self, inputEvent, basicOnly):
        """Performs the whereAmI operation for the current locus of focus.

        Arguments:
        - inputEvent: The original inputEvent
        - basicOnly: if True, present only basic information
        """

        obj = orca_state.locusOfFocus
        self.updateBraille(obj)
        return self.whereAmI.whereAmI(obj, basicOnly)

    def whereAmIBasic(self, inputEvent):
        """Speaks basic information about the current object of interest."""

        self.doWhereAmI(inputEvent, True)

    def whereAmIDetailed(self, inputEvent):
        """Speaks detailed/custom information about the current object of
        interest."""

        self.doWhereAmI(inputEvent, False)

    def cycleDebugLevel(self, inputEvent=None):
        """Advance the debug level to the next one in the cycle and
        announce it. The levels list alternates (level constant, name)
        pairs, so stepping by 2 moves to the next level.

        Returns True to indicate the command was consumed.
        """

        levels = [debug.LEVEL_ALL, "all",
                  debug.LEVEL_FINEST, "finest",
                  debug.LEVEL_FINER, "finer",
                  debug.LEVEL_FINE, "fine",
                  debug.LEVEL_CONFIGURATION, "configuration",
                  debug.LEVEL_INFO, "info",
                  debug.LEVEL_WARNING, "warning",
                  debug.LEVEL_SEVERE, "severe",
                  debug.LEVEL_OFF, "off"]

        try:
            # index() raises if debug.debugLevel is not one of the
            # constants above; fall back to the start of the cycle.
            levelIndex = levels.index(debug.debugLevel) + 2
        except:
            levelIndex = 0
        else:
            if levelIndex >= len(levels):
                levelIndex = 0

        debug.debugLevel = levels[levelIndex]
        briefMessage = levels[levelIndex + 1]
        fullMessage = "Debug level %s." % briefMessage
        self.presentMessage(fullMessage, briefMessage)

        return True

    ########################################################################
    #                                                                      #
    # AT-SPI OBJECT EVENT HANDLERS                                         #
    #                                                                      #
    ########################################################################

    def noOp(self, event):
        """Just here to capture events.

        Arguments:
        - event: the Event
        """
        pass

    def onActiveChanged(self, event):
        """Callback for object:state-changed:active accessibility events."""

        if self.findCommandRun:
            self.findCommandRun = False
            self.find()

    def onActiveDescendantChanged(self, event):
        """Callback for object:active-descendant-changed accessibility
        events."""

        if not event.any_data:
            return

        # Only present the new descendant if either the container or the
        # descendant itself is focused.
        if not event.source.getState().contains(pyatspi.STATE_FOCUSED) \
           and not event.any_data.getState().contains(pyatspi.STATE_FOCUSED):
            return

        if self.stopSpeechOnActiveDescendantChanged(event):
            speech.stop()
        orca.setLocusOfFocus(event, event.any_data)

    def onBusyChanged(self, event):
        """Callback for object:state-changed:busy accessibility events."""
        pass

    def onCheckedChanged(self, event):
        """Callback for object:state-changed:checked accessibility events."""

        obj = event.source
        if not self.utilities.isSameObject(obj, orca_state.locusOfFocus):
            return

        # Expandable objects emit checked-changes we present elsewhere.
        state = obj.getState()
        if state.contains(pyatspi.STATE_EXPANDABLE):
            return

        # Radio buttons normally change their state when you arrow to them,
        # so we handle the announcement of their state changes in the focus
        # handling code.  However, we do need to handle radio buttons where
        # the user needs to press the space key to select them.
        if obj.getRole() == pyatspi.ROLE_RADIO_BUTTON:
            eventString, mods = self.utilities.lastKeyAndModifiers()
            if not eventString in [" ", "space"]:
                return

        # Skip duplicate announcements for the same object/state pair.
        oldObj, oldState = self.pointOfReference.get('checkedChange', (None, 0))
        if hash(oldObj) == hash(obj) and oldState == event.detail1:
            return

        self.updateBraille(obj)
        speech.speak(self.speechGenerator.generateSpeech(obj, alreadyFocused=True))
        self.pointOfReference['checkedChange'] = hash(obj), event.detail1

    def onChildrenChanged(self, event):
        """Called when a child node has changed.

        Arguments:
        - event: the Event
        """
        pass

    def onCaretMoved(self, event):
        """Called whenever the caret moves.

        Arguments:
        - event: the Event
        """

        if not orca_state.locusOfFocus:
            return

        # Ignore the event if the caret has not actually moved since the
        # last position we saved.
        obj, offset = self.pointOfReference.get("lastCursorPosition", (None, -1))
        if offset == event.detail1 \
           and self.utilities.isSameObject(obj, event.source):
            return

        # Should the event source be the locusOfFocus?
        #
        try:
            role = orca_state.locusOfFocus.getRole()
        except (LookupError, RuntimeError):
            role = None
        if role in [pyatspi.ROLE_FRAME, pyatspi.ROLE_DIALOG]:
            frameApp = orca_state.locusOfFocus.getApplication()
            eventApp = event.source.getApplication()
            if frameApp == eventApp \
               and event.source.getState().contains(pyatspi.STATE_FOCUSED):
                orca.setLocusOfFocus(event, event.source, False)

        # Ignore caret movements from non-focused objects, unless the
        # currently focused object is the parent of the object which
        # has the caret.
        #
        if (event.source != orca_state.locusOfFocus) \
            and (event.source.parent != orca_state.locusOfFocus):
            return

        # We always automatically go back to focus tracking mode when
        # the caret moves in the focused object.
        #
        if self.flatReviewContext:
            self.toggleFlatReviewMode()

        text = event.source.queryText()
        self._saveLastCursorPosition(event.source, text.caretOffset)
        # A selection change is presented via onTextSelectionChanged.
        if text.getNSelections():
            return

        self._presentTextAtNewCaretPosition(event)

    def onDocumentReload(self, event):
        """Callback for document:reload accessibility events."""
        pass

    def onDocumentLoadComplete(self, event):
        """Callback for document:load-complete accessibility events."""
        pass

    def onDocumentLoadStopped(self, event):
        """Callback for document:load-stopped accessibility events."""
        pass

    def onExpandedChanged(self, event):
        """Callback for object:state-changed:expanded accessibility events."""

        obj = event.source
        role = obj.getRole()
        # Table rows and combo boxes can expand/collapse without being
        # the locus of focus; everything else must be focused.
        if not self.utilities.isSameObject(obj, orca_state.locusOfFocus) \
           and not role in [pyatspi.ROLE_TABLE_ROW, pyatspi.ROLE_COMBO_BOX]:
            return

        # Skip duplicate announcements for the same object/state pair.
        oldObj, oldState = self.pointOfReference.get('expandedChange', (None, 0))
        if hash(oldObj) == hash(obj) and oldState == event.detail1:
            return

        self.updateBraille(obj)
        speech.speak(self.speechGenerator.generateSpeech(obj, alreadyFocused=True))
        self.pointOfReference['expandedChange'] = hash(obj), event.detail1

    def onIndeterminateChanged(self, event):
        """Callback for object:state-changed:indeterminate accessibility
        events."""

        # If this state is cleared, the new state will become checked or
        # unchecked and we should get object:state-changed:checked events
        # for those cases. Therefore, if the state is not now
        # indeterminate/partially checked, ignore this event.
        if not event.detail1:
            return

        obj = event.source
        if not self.utilities.isSameObject(obj, orca_state.locusOfFocus):
            return

        # Skip duplicate announcements for the same object/state pair.
        oldObj, oldState = self.pointOfReference.get('indeterminateChange', (None, 0))
        if hash(oldObj) == hash(obj) and oldState == event.detail1:
            return

        self.updateBraille(obj)
        speech.speak(self.speechGenerator.generateSpeech(obj, alreadyFocused=True))
        self.pointOfReference['indeterminateChange'] = hash(obj), event.detail1

    def onMouseButton(self, event):
        """Called whenever the user presses or releases a mouse button.

        Arguments:
        - event: the Event
        """

        mouseEvent = input_event.MouseButtonEvent(event)
        orca_state.lastInputEvent = mouseEvent

        if mouseEvent.pressed:
            speech.stop()
            return

        # If we've received a mouse button released event, then check if
        # there are any text selections for the locus of focus and speak
        # them.
        #
        obj = orca_state.locusOfFocus
        try:
            text = obj.queryText()
        except:
            return

        self.updateBraille(orca_state.locusOfFocus)
        textContents = self.utilities.allSelectedText(obj)[0]
        if not textContents:
            return

        utterances = []
        utterances.append(textContents)
        utterances.append(messages.TEXT_SELECTED)
        speech.speak(utterances)

    def onNameChanged(self, event):
        """Callback for object:property-change:accessible-name events."""

        obj = event.source
        # Skip if the name has not actually changed since we last saw it.
        names = self.pointOfReference.get('names', {})
        oldName = names.get(hash(obj))
        if oldName == event.any_data:
            return

        # We are ignoring name changes in comboboxes that have focus
        # see bgo#617204
        role = obj.getRole()
        if role == pyatspi.ROLE_COMBO_BOX:
            return

        # Table cell accessibles in trees are often reused. When this occurs,
        # we get name-changed events when the selection changes.
        if role == pyatspi.ROLE_TABLE_CELL:
            return

        # Normally, we only care about name changes in the current object.
        # But with the new GtkHeaderBar, we are seeing instances where the
        # real frame remains the same, but the functional frame changes
        # e.g. g-c-c going from all settings to a specific panel.
        if not self.utilities.isSameObject(obj, orca_state.locusOfFocus):
            if role != pyatspi.ROLE_FRAME \
               or not obj.getState().contains(pyatspi.STATE_ACTIVE):
                return

        names[hash(obj)] = event.any_data
        self.pointOfReference['names'] = names
        self.updateBraille(obj)
        speech.speak(self.speechGenerator.generateSpeech(obj, alreadyFocused=True))

    def onPressedChanged(self, event):
        """Callback for object:state-changed:pressed accessibility events."""

        obj = event.source
        if not self.utilities.isSameObject(obj, orca_state.locusOfFocus):
            return

        # Skip duplicate announcements for the same object/state pair.
        oldObj, oldState = self.pointOfReference.get('pressedChange', (None, 0))
        if hash(oldObj) == hash(obj) and oldState == event.detail1:
            return

        self.updateBraille(obj)
        speech.speak(self.speechGenerator.generateSpeech(obj, alreadyFocused=True))
        self.pointOfReference['pressedChange'] = hash(obj), event.detail1

    def onSelectedChanged(self, event):
        """Callback for object:state-changed:selected accessibility events."""

        obj = event.source
        state = obj.getState()
        if not state.contains(pyatspi.STATE_FOCUSED):
            return

        if not self.utilities.isSameObject(orca_state.locusOfFocus, obj):
            return

        if _settingsManager.getSetting('onlySpeakDisplayedText'):
            return

        isSelected = state.contains(pyatspi.STATE_SELECTED)

        # Only announce the change when it was user-initiated: Space
        # toggles selection; Up/Down can select table cells.
        announceState = False
        keyString, mods = self.utilities.lastKeyAndModifiers()
        if keyString == "space":
            announceState = True
        elif keyString in ["Down", "Up"] \
             and isSelected and obj.getRole() == pyatspi.ROLE_TABLE_CELL:
            announceState = True

        if not announceState:
            return

        # TODO - JD: Unlike the other state-changed callbacks, it seems unwise
        # to call generateSpeech() here because that also will present the
        # expandable state if appropriate for the object type. The generators
        # need to gain some smarts w.r.t. state changes.
        voice = self.voices.get(settings.SYSTEM_VOICE)
        if event.detail1:
            speech.speak(messages.TEXT_SELECTED, voice, False)
        else:
            speech.speak(messages.TEXT_UNSELECTED, voice, False)

    def onSelectionChanged(self, event):
        """Callback for object:selection-changed accessibility events."""

        obj = event.source
        state = obj.getState()
        # Containers managing descendants announce selection via
        # active-descendant-changed instead.
        if state.contains(pyatspi.STATE_MANAGES_DESCENDANTS):
            return

        # TODO - JD: We need to give more thought to where we look to this
        # event and where we prefer object:state-changed:selected.

        # If the current item's selection is toggled, we'll present that
        # via the state-changed event.
        keyString, mods = self.utilities.lastKeyAndModifiers()
        if keyString == "space":
            return

        # Save the event source, if it is a menu or combo box. It will be
        # useful for optimizing componentAtDesktopCoords in the case that
        # the pointer is hovering over a menu item. The alternative is to
        # traverse the application's tree looking for potential moused-over
        # menu items.
        if obj.getRole() in (pyatspi.ROLE_COMBO_BOX, pyatspi.ROLE_MENU):
            self.lastSelectedMenu = obj

        # Move the locus of focus to the first selected, non-layout child.
        selectedChildren = self.utilities.selectedChildren(obj)
        for child in selectedChildren:
            if not self.utilities.isLayoutOnly(child):
                orca.setLocusOfFocus(event, child)
                break

    def onSensitiveChanged(self, event):
        """Callback for object:state-changed:sensitive accessibility events."""
        pass

    def onFocus(self, event):
        """Callback for focus: accessibility events."""
        pass

    def onFocusedChanged(self, event):
        """Callback for object:state-changed:focused accessibility events."""

        if not event.detail1:
            return

        obj = event.source
        state = obj.getState()
        if not state.contains(pyatspi.STATE_FOCUSED):
            return

        # Ignore focus claims from iconified (minimized) windows.
        window = self.utilities.topLevelObject(obj)
        if window:
            try:
                iconified = window.getState().contains(pyatspi.STATE_ICONIFIED)
            except:
                return
            if iconified:
                return

        # Prefer the selected child over its focused container, except
        # for combo boxes whose selected child is the popped-up item.
        if obj and obj.childCount and obj.getRole() != pyatspi.ROLE_COMBO_BOX:
            selectedChildren = self.utilities.selectedChildren(obj)
            if selectedChildren:
                obj = selectedChildren[0]

        orca.setLocusOfFocus(event, obj)

    def onShowingChanged(self, event):
        """Callback for object:state-changed:showing accessibility events."""

        obj = event.source
        role = obj.getRole()
        if role == pyatspi.ROLE_NOTIFICATION:
            # Speak the notification, flash it in braille, and save it
            # for later review via the notification messages list.
            speech.speak(self.speechGenerator.generateSpeech(obj))
            visibleOnly = not self.utilities.isStatusBarNotification(obj)
            labels = self.utilities.unrelatedLabels(obj, visibleOnly)
            msg = ''.join(map(self.utilities.displayedText, labels))
            self.displayBrailleMessage(msg, flashTime=settings.brailleFlashTime)
            notification_messages.saveMessage(msg)
            return

        if role == pyatspi.ROLE_TOOL_TIP:
            # Tooltips are presented only when enabled, or on demand via F1.
            keyString, mods = self.utilities.lastKeyAndModifiers()
            if keyString != "F1" \
               and not _settingsManager.getSetting('presentToolTips'):
                return
            if event.detail1:
                self.presentToolTip(obj)
                return

            # The tooltip was dismissed via F1: re-present the focused object.
            if orca_state.locusOfFocus and keyString == "F1":
                obj = orca_state.locusOfFocus
                self.updateBraille(obj)
                speech.speak(self.speechGenerator.generateSpeech(obj))
                return

    def onTextAttributesChanged(self, event):
        """Called when an object's text attributes change. Right now this
        method is only to handle the presentation of spelling errors on
        the fly. Also note that right now, the Gecko toolkit is the only
        one to present this information to us.

        Arguments:
        - event: the Event
        """

        verbosity = _settingsManager.getSetting('speechVerbosityLevel')
        if verbosity == settings.VERBOSITY_LEVEL_VERBOSE \
           and self.utilities.isSameObject(
                event.source, orca_state.locusOfFocus):
            try:
                text = event.source.queryText()
            except:
                return

            # If the misspelled word indicator has just appeared, it's
            # because the user typed a word boundary or navigated out
            # of the word. We don't want to have to store a full set of
            # each object's text attributes to compare, therefore, we'll
            # check the previous word (most likely case) and the next
            # word with respect to the current position.
            #
            offset = text.caretOffset
            # Step back if the caret sits on a non-alphanumeric character
            # (e.g. the delimiter the user just typed).
            if not text.getText(offset, offset+1).isalnum():
                offset -= 1
            if self.utilities.isWordMisspelled(event.source, offset-1) \
               or self.utilities.isWordMisspelled(event.source, offset+1):
                self.speakMessage(messages.MISSPELLED)

    def onTextDeleted(self, event):
        """Called whenever text is deleted from an object.

        Arguments:
        - event: the Event
        """

        # Ignore text deletions from non-focused objects, unless the
        # currently focused object is the parent of the object from which
        # text was deleted
        #
        if (event.source != orca_state.locusOfFocus) \
            and (event.source.parent != orca_state.locusOfFocus):
            return

        # We'll also ignore sliders because we get their output via
        # their values changing.
        #
        if event.source.getRole() == pyatspi.ROLE_SLIDER:
            return

        # [[[NOTE: WDW - if we handle events synchronously, we'll
        # be looking at the text object *before* the text was
        # actually removed from the object.  If we handle events
        # asynchronously, we'll be looking at the text object
        # *after* the text was removed.  The importance of knowing
        # this is that the output will differ depending upon how
        # orca.settings.asyncMode has been set.  For example, the
        # regression tests run in synchronous mode, so the output
        # they see will not be the same as what the user normally
        # experiences.]]]

        self.updateBraille(event.source)

        # The any_data member of the event object has the deleted text in
        # it - If the last key pressed was a backspace or delete key,
        # speak the deleted text.  [[[TODO: WDW - again, need to think
        # about the ramifications of this when it comes to editors such
        # as vi or emacs.
        #
        keyString, mods = self.utilities.lastKeyAndModifiers()
        if not keyString:
            return

        text = event.source.queryText()
        if keyString == "BackSpace":
            # Speak the character that has just been deleted.
            #
            character = event.any_data
        elif keyString == "Delete" \
             or (keyString == "D" and mods & keybindings.CTRL_MODIFIER_MASK):
            # Speak the character to the right of the caret after
            # the current right character has been deleted.
            #
            offset = text.caretOffset
            [character, startOffset, endOffset] = \
                text.getTextAtOffset(offset, pyatspi.TEXT_BOUNDARY_CHAR)
        else:
            return

        if len(character) == 1:
            self.speakCharacter(character)
            return

        if self.utilities.linkIndex(event.source, text.caretOffset) >= 0:
            voice = self.voices[settings.HYPERLINK_VOICE]
        elif character.isupper():
            voice = self.voices[settings.UPPERCASE_VOICE]
        else:
            voice = self.voices[settings.DEFAULT_VOICE]

        # We won't interrupt what else might be being spoken
        # right now because it is typically something else
        # related to this event.
        #
        speech.speak(character, voice, False)

    def onTextInserted(self, event):
        """Called whenever text is inserted into an object.

        Arguments:
        - event: the Event
        """

        # Ignore text insertions from non-focused objects, unless the
        # currently focused object is the parent of the object from which
        # text was inserted.
        #
        if (event.source != orca_state.locusOfFocus) \
            and (event.source.parent != orca_state.locusOfFocus):
            return

        # These roles present their changes via other events (e.g. value
        # or selection changes), so skip their text insertions.
        ignoreRoles = [pyatspi.ROLE_LABEL,
                       pyatspi.ROLE_MENU,
                       pyatspi.ROLE_MENU_ITEM,
                       pyatspi.ROLE_SLIDER,
                       pyatspi.ROLE_SPIN_BUTTON]
        role = event.source.getRole()
        if role in ignoreRoles:
            return

        state = event.source.getState()

        if role == pyatspi.ROLE_TABLE_CELL \
           and not state.contains(pyatspi.STATE_FOCUSED) \
           and not state.contains(pyatspi.STATE_SELECTED):
            return

        self.updateBraille(event.source)

        # If the last input event was a keyboard event, check to see if
        # the text for this event matches what the user typed. If it does,
        # then don't speak it.
        #
        # Note that the text widgets sometimes compress their events,
        # thus we might get a longer string from a single text inserted
        # event, while we also get individual keyboard events for the
        # characters used to type the string.  This is ugly.  We attempt
        # to handle it here by only echoing text if we think it was the
        # result of a command (e.g., a paste operation).
        #
        # Note that we have to special case the space character as it
        # comes across as "space" in the keyboard event and " " in the
        # text event.
        #
        string = event.any_data
        speakThis = False
        wasCommand = False
        wasAutoComplete = False
        if isinstance(orca_state.lastInputEvent, input_event.MouseButtonEvent):
            # Middle-button paste: echo the inserted text.
            speakThis = orca_state.lastInputEvent.button == "2"
        else:
            keyString, mods = self.utilities.lastKeyAndModifiers()
            wasCommand = mods & keybindings.COMMAND_MODIFIER_MASK
            # In a terminal, Return/Tab/space with non-empty output is
            # treated as a command so its result gets echoed.
            if not wasCommand and keyString in ["Return", "Tab", "space"] \
               and role == pyatspi.ROLE_TERMINAL \
               and event.any_data.strip():
                wasCommand = True
            try:
                selections = event.source.queryText().getNSelections()
            except:
                selections = 0
            if selections:
                wasAutoComplete = role in [pyatspi.ROLE_TEXT, pyatspi.ROLE_ENTRY]

            if (string == " " and keyString == "space") or string == keyString:
                # The insertion matches the key the user typed; key echo
                # handles this case.
                pass
            elif wasCommand or wasAutoComplete:
                speakThis = True
            elif role == pyatspi.ROLE_PASSWORD_TEXT \
                 and _settingsManager.getSetting('enableKeyEcho') \
                 and _settingsManager.getSetting('enablePrintableKeys'):
                # Echoing "star" is preferable to echoing the descriptive
                # name of the bullet that has appeared (e.g. "black circle")
                #
                string = "*"
                speakThis = True

        # Auto-completed, auto-corrected, auto-inserted, etc.
        #
        speakThis = speakThis or self.utilities.isAutoTextEvent(event)

        # We might need to echo this if it is a single character.
        #
        speakThis = speakThis \
            or (_settingsManager.getSetting('enableEchoByCharacter') \
                and string \
                and role != pyatspi.ROLE_PASSWORD_TEXT \
                and len(string.strip()) == 1)

        if speakThis:
            if string.isupper():
                speech.speak(string, self.voices[settings.UPPERCASE_VOICE])
            elif not string.isalnum():
                self.speakCharacter(string)
            else:
                speech.speak(string)

        if wasCommand:
            return

        if wasAutoComplete:
            self.pointOfReference['lastAutoComplete'] = hash(event.source)

        try:
            text = event.source.queryText()
        except NotImplementedError:
            return

        # Check whether the just-typed character completed a word or a
        # sentence and echo it if the corresponding setting is enabled.
        offset = text.caretOffset - 1
        previousOffset = offset - 1
        if (offset < 0 or previousOffset < 0):
            return

        [currentChar, startOffset, endOffset] = \
            text.getTextAtOffset(offset, pyatspi.TEXT_BOUNDARY_CHAR)
        [previousChar, startOffset, endOffset] = \
            text.getTextAtOffset(previousOffset, pyatspi.TEXT_BOUNDARY_CHAR)

        if _settingsManager.getSetting('enableEchoBySentence') \
           and self.utilities.isSentenceDelimiter(currentChar, previousChar):
            self.echoPreviousSentence(event.source)
        elif _settingsManager.getSetting('enableEchoByWord') \
             and self.utilities.isWordDelimiter(currentChar):
            self.echoPreviousWord(event.source)

    def onTextSelectionChanged(self, event):
        """Callback for object:text-selection-changed accessibility events."""

        obj = event.source
        self.updateBraille(obj)

        # Note: This guesswork to figure out what actually changed with respect
        # to text selection will get eliminated once the new text-selection API
        # is added to ATK and implemented by the toolkits. (BGO 638378)
        textSelections = self.pointOfReference.get('textSelections', {})
        oldStart, oldEnd = textSelections.get(hash(obj), (0, 0))

        # TODO: JD - this doesn't yet handle the case of multiple non-contiguous
        # selections in a single accessible object.
        text = obj.queryText()
        newStart, newEnd = text.getSelection(0)
        textSelections[hash(obj)] = newStart, newEnd
        self.pointOfReference['textSelections'] = textSelections

        # Selection changes caused by auto-completion are not presented.
        if self.pointOfReference.get('lastAutoComplete') == hash(obj):
            return

        nSelections = text.getNSelections()
        handled = self._speakTextSelectionState(nSelections)
        if handled:
            return

        # Compare the old and new selection ranges as sets of character
        # offsets to work out exactly what was selected or unselected.
        changes = []
        oldChars = set(range(oldStart, oldEnd))
        newChars = set(range(newStart, newEnd))
        if not oldChars.union(newChars):
            return

        if oldChars and newChars and not oldChars.intersection(newChars):
            # A simultaneous unselection and selection centered at one offset.
            changes.append([oldStart, oldEnd, messages.TEXT_UNSELECTED])
            changes.append([newStart, newEnd, messages.TEXT_SELECTED])
        else:
            change = sorted(oldChars.symmetric_difference(newChars))
            if not change:
                return

            changeStart, changeEnd = change[0], change[-1] + 1
            if oldChars < newChars:
                changes.append([changeStart, changeEnd, messages.TEXT_SELECTED])
            else:
                changes.append([changeStart, changeEnd, messages.TEXT_UNSELECTED])

        speakMessage = not _settingsManager.getSetting('onlySpeakDisplayedText')
        for start, end, message in changes:
            self.sayPhrase(obj, start, end)
            if speakMessage:
                self.speakMessage(message, interrupt=False)

    def onColumnReordered(self, event):
        """Called whenever the columns in a table are reordered.

        Arguments:
        - event: the Event
        """

        # Only announce the reorder for the table containing the focus.
        parentTable = self.utilities.ancestorWithRole(
            orca_state.locusOfFocus, [pyatspi.ROLE_TABLE], [pyatspi.ROLE_FRAME])
        if event.source != parentTable:
            return

        self.presentMessage(messages.TABLE_REORDERED_COLUMNS)

    def onRowReordered(self, event):
        """Called whenever the rows in a table are reordered.

        Arguments:
        - event: the Event
        """

        # Only announce the reorder for the table containing the focus.
        parentTable = self.utilities.ancestorWithRole(
            orca_state.locusOfFocus, [pyatspi.ROLE_TABLE], [pyatspi.ROLE_FRAME])
        if event.source != parentTable:
            return

        self.presentMessage(messages.TABLE_REORDERED_ROWS)

    def onValueChanged(self, event):
        """Called whenever an object's value changes.  Currently, the
        value changes for non-focused objects are ignored.

        Arguments:
        - event: the Event
        """

        obj = event.source
        role = obj.getRole()
        value = obj.queryValue()
        # Skip if the value has not actually changed since last time.
        if "oldValue" in self.pointOfReference \
           and (value.currentValue == self.pointOfReference["oldValue"]):
            return

        if role == pyatspi.ROLE_PROGRESS_BAR:
            self.handleProgressBarUpdate(event, obj)
            return

        if not self.utilities.isSameObject(obj, orca_state.locusOfFocus):
            return

        self.pointOfReference["oldValue"] = value.currentValue
        self.updateBraille(obj)
        speech.speak(self.speechGenerator.generateSpeech(obj, alreadyFocused=True))

    def onWindowActivated(self, event):
        """Called whenever a toplevel window is activated.

        Arguments:
        - event: the Event
        """

        # A new window invalidates all cached points of reference.
        self.pointOfReference = {}

        self.windowActivateTime = time.time()
        orca.setLocusOfFocus(event, event.source)

        # We keep track of the active window to handle situations where
        # we get window activated and window deactivated events out of
        # order (see onWindowDeactivated).
        #
        # For example, events can be:
        #
        #    window:activate   (w1)
        #    window:activate   (w2)
        #    window:deactivate (w1)
        #
        # as well as:
        #
        #    window:activate   (w1)
        #    window:deactivate (w1)
        #    window:activate   (w2)
        #
        orca_state.activeWindow = event.source

    def onWindowCreated(self, event):
        """Callback for window:create accessibility events."""
        pass

    def onWindowDeactivated(self, event):
        """Called whenever a toplevel window is deactivated.

        Arguments:
        - event: the Event
        """

        self.pointOfReference = {}

        menuRoles = [pyatspi.ROLE_MENU,
                     pyatspi.ROLE_MENU_ITEM,
                     pyatspi.ROLE_CHECK_MENU_ITEM,
                     pyatspi.ROLE_RADIO_MENU_ITEM]

        # If we get into a popup menu, the parent application will likely
        # emit a window-deactivate event. But functionally we're still in
        # the same window. In this case, we do not want to update anything.
        try:
            role = orca_state.locusOfFocus.getRole()
        except:
            pass
        else:
            if role in menuRoles:
                return

        # If we receive a "window:deactivate" event for the object that
        # currently has focus, then stop the current speech output.
        # This is very useful for terminating long speech output from
        # commands running in gnome-terminal.
        #
        if orca_state.locusOfFocus and \
           (orca_state.locusOfFocus.getApplication() == \
            event.source.getApplication()):
            speech.stop()

        # Clear the braille display just in case we are about to give
        # focus to an inaccessible application. See bug #519901 for
        # more details.
        #
        self.clearBraille()

        # Hide the flat review window and reset it so that it will be
        # recreated.
        #
        if self.flatReviewContext:
            self.flatReviewContext = None
            self.updateBraille(orca_state.locusOfFocus)

        # Because window activated and deactivated events may be
        # received in any order when switching from one application to
        # another, locusOfFocus and activeWindow, we really only change
        # the locusOfFocus and activeWindow when we are dealing with
        # an event from the current activeWindow.
        #
        if event.source == orca_state.activeWindow:
            orca.setLocusOfFocus(event, None)
            orca_state.activeWindow = None

        # disable list notification messages mode
        notification_messages.listNotificationMessagesModeEnabled = False

        # disable learn mode
        orca_state.learnModeEnabled = False

    ########################################################################
    #                                                                      #
    # Methods for presenting content                                       #
    #                                                                      #
    ########################################################################

    def _presentTextAtNewCaretPosition(self, event, otherObj=None):
        """Updates braille and outputs speech for the event.source or the
        otherObj."""

        obj = otherObj or event.source
        text = obj.queryText()
        self.updateBrailleForNewCaretPosition(obj)
        # Say-all manages its own presentation of caret movement.
        if self._inSayAll:
            return
        if not orca_state.lastInputEvent:
            return
        if isinstance(orca_state.lastInputEvent, input_event.MouseButtonEvent):
            if not orca_state.lastInputEvent.pressed:
                self.sayLine(obj)
            return

        # Guess why the caret moved and say something appropriate.
        # [[[TODO: WDW - this motion assumes traditional GUI
        # navigation gestures.  In an editor such as vi, line up and
        # down is done via other actions such as "i" or "j".
        # We may
        # need to think about this a little harder.]]]
        #
        keyString, mods = self.utilities.lastKeyAndModifiers()
        if not keyString:
            return
        isControlKey = mods & keybindings.CTRL_MODIFIER_MASK

        if keyString in ["Up", "Down"]:
            self.sayLine(obj)
        elif keyString in ["Left", "Right"]:
            if isControlKey:
                self.sayWord(obj)
            else:
                self.sayCharacter(obj)
        elif keyString == "Page_Up":
            # TODO - JD: Why is Control special here?
            # If the user has typed Control-Page_Up, then we
            # speak the character to the right of the current text cursor
            # position otherwise we speak the current line.
            #
            if isControlKey:
                self.sayCharacter(obj)
            else:
                self.sayLine(obj)
        elif keyString == "Page_Down":
            self.sayLine(obj)
        elif keyString in ["Home", "End"]:
            if isControlKey:
                self.sayLine(obj)
            else:
                self.sayCharacter(obj)

    def _rewindSayAll(self, context, minCharCount=10):
        """Back up say-all to an earlier context (at least minCharCount
        characters long), reposition the caret there, and restart say-all.
        Returns True if the rewind was performed."""

        if not _settingsManager.getSetting('rewindAndFastForwardInSayAll'):
            return False

        # Drop the current context and pop back to a substantial one.
        index = self._sayAllContexts.index(context)
        self._sayAllContexts = self._sayAllContexts[0:index]
        while self._sayAllContexts:
            context = self._sayAllContexts.pop()
            if context.endOffset - context.startOffset > minCharCount:
                break

        try:
            text = context.obj.queryText()
        except:
            pass
        else:
            orca.setLocusOfFocus(None, context.obj, notifyScript=False)
            text.setCaretOffset(context.startOffset)

        self.sayAll(None, context.obj, context.startOffset)
        return True

    def _fastForwardSayAll(self, context):
        """Skip ahead in say-all to the end of the current context and
        restart say-all from there. Returns True if performed."""

        if not _settingsManager.getSetting('rewindAndFastForwardInSayAll'):
            return False

        try:
            text = context.obj.queryText()
        except:
            pass
        else:
            orca.setLocusOfFocus(None, context.obj, notifyScript=False)
            text.setCaretOffset(context.endOffset)

        self.sayAll(None, context.obj, context.endOffset)
        return True

    def __sayAllProgressCallback(self, context, progressType):
        # [[[TODO: WDW - this needs work.  Need to be able to manage
        # the monitoring of progress and couple that with both updating
        # the visual progress of what is being spoken as well as
        # positioning the cursor when speech has stopped.]]]
        #
        try:
            text = context.obj.queryText()
            char = text.getText(context.currentOffset, context.currentOffset+1)
        except:
            return

        # Setting the caret at the offset of an embedded object results in
        # focus changes.
        if char == self.EMBEDDED_OBJECT_CHARACTER:
            return

        if progressType == speechserver.SayAllContext.PROGRESS:
            return
        elif progressType == speechserver.SayAllContext.INTERRUPTED:
            if isinstance(orca_state.lastInputEvent, input_event.KeyboardEvent):
                self._sayAllIsInterrupted = True
                # Down/Up during say-all fast-forward/rewind instead of
                # stopping outright.
                lastKey = orca_state.lastInputEvent.event_string
                if lastKey == "Down" and self._fastForwardSayAll(context):
                    return
                elif lastKey == "Up" and self._rewindSayAll(context):
                    return

            self._inSayAll = False
            self._sayAllContexts = []
            text.setCaretOffset(context.currentOffset)
        elif progressType == speechserver.SayAllContext.COMPLETED:
            orca.setLocusOfFocus(None, context.obj, notifyScript=False)
            text.setCaretOffset(context.currentOffset)

        # If there is a selection, clear it. See bug #489504 for more details.
        #
        if text.getNSelections():
            text.setSelection(0, context.currentOffset, context.currentOffset)

    def inSayAll(self):
        """Returns True if say-all is in progress or was just interrupted."""
        return self._inSayAll or self._sayAllIsInterrupted

    def echoPreviousSentence(self, obj):
        """Speaks the sentence prior to the caret, as long as there is
        a sentence prior to the caret and there is no intervening sentence
        delimiter between the caret and the end of the sentence.

        The entry condition for this method is that the character
        prior to the current caret position is a sentence delimiter,
        and it's what caused this method to be called in the first
        place.

        Arguments:
        - obj: an Accessible object that implements the AccessibleText
          interface.
        """

        try:
            text = obj.queryText()
        except NotImplementedError:
            return

        offset = text.caretOffset - 1
        previousOffset = text.caretOffset - 2
        if (offset < 0 or previousOffset < 0):
            return

        [currentChar, startOffset, endOffset] = \
            text.getTextAtOffset(offset, pyatspi.TEXT_BOUNDARY_CHAR)
        [previousChar, startOffset, endOffset] = \
            text.getTextAtOffset(previousOffset, pyatspi.TEXT_BOUNDARY_CHAR)

        if not self.utilities.isSentenceDelimiter(currentChar, previousChar):
            return

        # OK - we seem to be cool so far.  So...starting with what
        # should be the last character in the sentence (caretOffset - 2),
        # work our way to the beginning of the sentence, stopping when
        # we hit another sentence delimiter.
        #
        sentenceEndOffset = text.caretOffset - 2
        sentenceStartOffset = sentenceEndOffset

        while sentenceStartOffset >= 0:
            [currentChar, startOffset, endOffset] = \
                text.getTextAtOffset(sentenceStartOffset,
                                     pyatspi.TEXT_BOUNDARY_CHAR)
            [previousChar, startOffset, endOffset] = \
                text.getTextAtOffset(sentenceStartOffset-1,
                                     pyatspi.TEXT_BOUNDARY_CHAR)
            if self.utilities.isSentenceDelimiter(currentChar, previousChar):
                break
            else:
                sentenceStartOffset -= 1

        # If we came across a sentence delimiter before hitting any
        # text, we really don't have a previous sentence.
        #
        # Otherwise, get the sentence.  Remember we stopped when we
        # hit a sentence delimiter, so the sentence really starts at
        # sentenceStartOffset + 1.  getText also does not include
        # the character at sentenceEndOffset, so we need to adjust
        # for that, too.
        #
        if sentenceStartOffset == sentenceEndOffset:
            return
        else:
            sentence = self.utilities.substring(obj, sentenceStartOffset + 1,
                                                sentenceEndOffset + 1)

        if self.utilities.linkIndex(obj, sentenceStartOffset + 1) >= 0:
            voice = self.voices[settings.HYPERLINK_VOICE]
        elif sentence.isupper():
            voice = self.voices[settings.UPPERCASE_VOICE]
        else:
            voice = self.voices[settings.DEFAULT_VOICE]

        sentence = self.utilities.adjustForRepeats(sentence)
        speech.speak(sentence, voice)

    def echoPreviousWord(self, obj, offset=None):
        """Speaks the word prior to the caret, as long as there is
        a word prior to the caret and there is no intervening word
        delimiter between the caret and the end of the word.

        The entry condition for this method is that the character
        prior to the current caret position is a word delimiter,
        and it's what caused this method to be called in the first
        place.

        Arguments:
        - obj: an Accessible object that implements the AccessibleText
          interface.
        - offset: if not None, the offset within the text to use as the
          end of the word.
        """

        try:
            text = obj.queryText()
        except NotImplementedError:
            return

        if not offset:
            if text.caretOffset == -1:
                offset = text.characterCount
            else:
                offset = text.caretOffset - 1

        if (offset < 0):
            return

        [char, startOffset, endOffset] = \
            text.getTextAtOffset( \
                offset,
                pyatspi.TEXT_BOUNDARY_CHAR)
        if not self.utilities.isWordDelimiter(char):
            return

        # OK - we seem to be cool so far.  So...starting with what
        # should be the last character in the word (caretOffset - 2),
        # work our way to the beginning of the word, stopping when
        # we hit another word delimiter.
        #
        wordEndOffset = offset - 1
        wordStartOffset = wordEndOffset

        while wordStartOffset >= 0:
            [char, startOffset, endOffset] = \
                text.getTextAtOffset( \
                    wordStartOffset,
                    pyatspi.TEXT_BOUNDARY_CHAR)
            if self.utilities.isWordDelimiter(char):
                break
            else:
                wordStartOffset -= 1

        # If we came across a word delimiter before hitting any
        # text, we really don't have a previous word.
        #
        # Otherwise, get the word.  Remember we stopped when we
        # hit a word delimiter, so the word really starts at
        # wordStartOffset + 1.  getText also does not include
        # the character at wordEndOffset, so we need to adjust
        # for that, too.
        #
        if wordStartOffset == wordEndOffset:
            return
        else:
            word = self.utilities.\
                substring(obj, wordStartOffset + 1, wordEndOffset + 1)

        if self.utilities.linkIndex(obj, wordStartOffset + 1) >= 0:
            voice = self.voices[settings.HYPERLINK_VOICE]
        elif word.isupper():
            voice = self.voices[settings.UPPERCASE_VOICE]
        else:
            voice = self.voices[settings.DEFAULT_VOICE]

        word = self.utilities.adjustForRepeats(word)
        speech.speak(word, voice)

    def handleProgressBarUpdate(self, event, obj):
        """Determine whether this progress bar event should be spoken or not.
        It should be spoken if:
        1/ settings.enableProgressBarUpdates is True.
        2/ settings.progressBarVerbosity matches the current location of the
           progress bar.
        3/ The time of this event exceeds the
           settings.progressBarUpdateInterval value.  This value
           indicates the time (in seconds) between potential spoken
           progress bar updates.
        4/ The new value of the progress bar (converted to an integer),
           is different from the last one or equals 100 (i.e complete).

        Arguments:
        - event: if not None, the Event that caused this to happen
        - obj:  the Accessible progress bar object.
        """

        if _settingsManager.getSetting('enableProgressBarUpdates'):
            makeAnnouncement = False
            verbosity = _settingsManager.getSetting('progressBarVerbosity')
            if verbosity == settings.PROGRESS_BAR_ALL:
                makeAnnouncement = True
            elif verbosity == settings.PROGRESS_BAR_WINDOW:
                makeAnnouncement = self.utilities.isSameObject(
                    self.utilities.topLevelObject(obj),
                    self.utilities.activeWindow())
            elif orca_state.locusOfFocus:
                makeAnnouncement = self.utilities.isSameObject( \
                    obj.getApplication(),
                    orca_state.locusOfFocus.getApplication())

            if makeAnnouncement:
                currentTime = time.time()

                # Check for defunct progress bars. Get rid of them if they
                # are all defunct.  Also find out which progress bar was
                # the most recently updated.
                #
                defunctBars = 0
                mostRecentUpdate = [obj, 0]
                for key, value in list(self.lastProgressBarTime.items()):
                    if value > mostRecentUpdate[1]:
                        mostRecentUpdate = [key, value]
                    try:
                        isDefunct = \
                            key.getState().contains(pyatspi.STATE_DEFUNCT)
                    except:
                        isDefunct = True
                    if isDefunct:
                        defunctBars += 1

                if defunctBars == len(self.lastProgressBarTime):
                    self.lastProgressBarTime = {}
                    self.lastProgressBarValue = {}

                # If this progress bar is not already known, create initial
                # values for it.
                #
                if obj not in self.lastProgressBarTime:
                    self.lastProgressBarTime[obj] = 0.0
                if obj not in self.lastProgressBarValue:
                    self.lastProgressBarValue[obj] = None

                lastProgressBarTime = self.lastProgressBarTime[obj]
                lastProgressBarValue = self.lastProgressBarValue[obj]
                value = obj.queryValue()

                try:
                    if value.maximumValue == value.minimumValue:
                        # This is a busy indicator and not a real progress bar.
                        return
                except:
                    return

                percentValue = int((value.currentValue / \
                    (value.maximumValue - value.minimumValue)) * 100.0)

                if _settingsManager.getSetting('progressBarBeep'):
                    # NOTE(review): this compares the lastProgressBarValue
                    # *dict* (an attribute) against an int, which is always
                    # unequal -- it looks like the local lastProgressBarValue
                    # was intended; confirm before changing.
                    if self.lastProgressBarValue != percentValue:
                        if percentValue < 7:
                            self.sound.source_set_property('freq', int((98 + percentValue * 4 * 1.35)))
                            self.sound._threadSound (0.075)
                        else:
                            self.sound.source_set_property('freq', int(19 * percentValue * 1.15))
                            self.sound.source_set_property('volume', 1 - (percentValue / 130))
                            if percentValue >= 99:
                                self.sound._threadSound (1)
                            else:
                                self.sound._threadSound (0.075)

                if _settingsManager.getSetting('progressBarSpeak'):
                    # Throttle spoken updates to the configured interval,
                    # but always announce completion (100%).
                    if (currentTime - lastProgressBarTime) > \
                       _settingsManager.getSetting('progressBarUpdateInterval') \
                       or (percentValue == 100):
                        if lastProgressBarValue != percentValue:
                            utterances = []

                            # There may be cases when more than one progress
                            # bar is updating at the same time in a window.
                            # If this is the case, then speak the index of this
                            # progress bar in the dictionary of known progress
                            # bars, as well as the value.
But only speak the # index if this progress bar was not the most # recently updated to prevent chattiness. if len(self.lastProgressBarTime) > 1: index = 0 for key in list(self.lastProgressBarTime.keys()): if key == obj and key != mostRecentUpdate[0]: label = messages.PROGRESS_BAR_NUMBER % (index + 1) utterances.append(label) else: index += 1 utterances.extend(self.speechGenerator.generateSpeech( obj, alreadyFocused=True)) speech.speak(utterances) self.lastProgressBarTime[obj] = currentTime if lastProgressBarValue != percentValue: self.lastProgressBarValue[obj] = percentValue def presentToolTip(self, obj): """ Speaks the tooltip for the current object of interest. """ # The tooltip is generally the accessible description. If # the description is not set, present the text that is # spoken when the object receives keyboard focus. # speechResult = brailleResult = None text = "" if obj.description: speechResult = brailleResult = obj.description else: speechResult = self.whereAmI.getWhereAmI(obj, True) if speechResult: brailleResult = speechResult[0] debug.println(debug.LEVEL_FINEST, "presentToolTip: text='%s'" % speechResult) if speechResult: speech.speak(speechResult) if brailleResult: self.displayBrailleMessage(brailleResult) def sayCharacter(self, obj): """Speak the character at the caret. Arguments: - obj: an Accessible object that implements the AccessibleText interface """ text = obj.queryText() offset = text.caretOffset # If we have selected text and the last event was a move to the # right, then speak the character to the left of where the text # caret is (i.e. the selected character). 
# eventString, mods = self.utilities.lastKeyAndModifiers() if (mods & keybindings.SHIFT_MODIFIER_MASK) \ and eventString in ["Right", "Down"]: offset -= 1 character, startOffset, endOffset = \ text.getTextAtOffset(offset, pyatspi.TEXT_BOUNDARY_CHAR) if not character or character == '\r': character = "\n" if self.utilities.linkIndex(obj, offset) >= 0: voice = self.voices[settings.HYPERLINK_VOICE] elif character.isupper(): voice = self.voices[settings.UPPERCASE_VOICE] else: voice = self.voices[settings.DEFAULT_VOICE] speakBlankLines = _settingsManager.getSetting('speakBlankLines') debug.println(debug.LEVEL_FINEST, \ "sayCharacter: char=<%s>, startOffset=%d, " % \ (character, startOffset)) debug.println(debug.LEVEL_FINEST, \ "caretOffset=%d, endOffset=%d, speakBlankLines=%s" % \ (offset, endOffset, speakBlankLines)) if character == "\n": line = text.getTextAtOffset(max(0, offset), pyatspi.TEXT_BOUNDARY_LINE_START) if not line[0] or line[0] == "\n": # This is a blank line. Announce it if the user requested # that blank lines be spoken. if speakBlankLines: self.speakMessage(messages.BLANK, interrupt=False) return if character in ["\n", "\r\n"]: # This is a blank line. Announce it if the user requested # that blank lines be spoken. if speakBlankLines: self.speakMessage(messages.BLANK, interrupt=False) return else: self.speakMisspelledIndicator(obj, offset) self.speakCharacter(character) def sayLine(self, obj): """Speaks the line of an AccessibleText object that contains the caret, unless the line is empty in which case it's ignored. 
Arguments: - obj: an Accessible object that implements the AccessibleText interface """ # Get the AccessibleText interface of the provided object # [line, caretOffset, startOffset] = self.getTextLineAtCaret(obj) debug.println(debug.LEVEL_FINEST, \ "sayLine: line=<%s>, len=%d, start=%d, " % \ (line, len(line), startOffset)) debug.println(debug.LEVEL_FINEST, \ "caret=%d, speakBlankLines=%s" % \ (caretOffset, _settingsManager.getSetting('speakBlankLines'))) if len(line) and line != "\n": if line.isupper(): voice = self.voices[settings.UPPERCASE_VOICE] else: voice = self.voices[settings.DEFAULT_VOICE] result = \ self.speechGenerator.generateTextIndentation(obj, line=line) if result: self.speakMessage(result[0]) line = self.utilities.adjustForLinks(obj, line, startOffset) line = self.utilities.adjustForRepeats(line) speech.speak(line, voice) else: # Speak blank line if appropriate. # self.sayCharacter(obj) def sayPhrase(self, obj, startOffset, endOffset): """Speaks the text of an Accessible object between the start and end offsets, unless the phrase is empty in which case it's ignored. Arguments: - obj: an Accessible object that implements the AccessibleText interface - startOffset: the start text offset. - endOffset: the end text offset. """ phrase = self.utilities.expandEOCs(obj, startOffset, endOffset) if not phrase: return if len(phrase) > 1 or phrase.isalnum(): if phrase.isupper(): voice = self.voices[settings.UPPERCASE_VOICE] else: voice = self.voices[settings.DEFAULT_VOICE] phrase = self.utilities.adjustForRepeats(phrase) speech.speak(phrase, voice) else: self.sayCharacter(obj) def sayWord(self, obj): """Speaks the word at the caret. 
Arguments: - obj: an Accessible object that implements the AccessibleText interface """ text = obj.queryText() offset = text.caretOffset lastKey, mods = self.utilities.lastKeyAndModifiers() lastWord = self._lastWord [word, startOffset, endOffset] = \ text.getTextAtOffset(offset, pyatspi.TEXT_BOUNDARY_WORD_START) if not word: self.sayCharacter(obj) return # Speak a newline if a control-right-arrow or control-left-arrow # was used to cross a line boundary. Handling is different for # the two keys since control-right-arrow places the cursor after # the last character in a word, but control-left-arrow places # the cursor at the beginning of a word. # if lastKey == "Right" and len(lastWord) > 0: lastChar = lastWord[len(lastWord) - 1] if lastChar == "\n" and lastWord != word: self.speakCharacter("\n") if lastKey == "Left" and len(word) > 0: lastChar = word[len(word) - 1] if lastChar == "\n" and lastWord != word: self.speakCharacter("\n") if self.utilities.linkIndex(obj, offset) >= 0: voice = self.voices[settings.HYPERLINK_VOICE] elif word.isupper(): voice = self.voices[settings.UPPERCASE_VOICE] else: voice = self.voices[settings.DEFAULT_VOICE] self.speakMisspelledIndicator(obj, startOffset) word = self.utilities.adjustForRepeats(word) self._lastWord = word speech.speak(word, voice) def presentObject(self, obj, offset=0): self.updateBraille(obj) utterances = self.speechGenerator.generateSpeech(obj) speech.speak(utterances, voice) def stopSpeechOnActiveDescendantChanged(self, event): """Whether or not speech should be stopped prior to setting the locusOfFocus in onActiveDescendantChanged. Arguments: - event: the Event Returns True if speech should be stopped; False otherwise. """ if not event.any_data: return True # In an object which manages its descendants, the # 'descendants' may really be a single object which changes # its name. 
If the name-change occurs followed by the active # descendant changing (to the same object) we won't present # the locusOfFocus because it hasn't changed. Thus we need to # be sure not to cut of the presentation of the name-change # event. if orca_state.locusOfFocus == event.any_data: names = self.pointOfReference.get('names', {}) oldName = names.get(hash(orca_state.locusOfFocus), '') if not oldName or event.any_data.name == oldName: return False if event.source == orca_state.locusOfFocus == event.any_data.parent: return False return True def getFlatReviewContext(self): """Returns the flat review context, creating one if necessary.""" if not self.flatReviewContext: self.flatReviewContext = self.flatReviewContextClass(self) self.justEnteredFlatReviewMode = True # Remember where the cursor currently was # when the user was in focus tracking mode. We'll try to # keep the position the same as we move to characters above # and below us. # self.targetCursorCell = self.getBrailleCursorCell() return self.flatReviewContext def updateBrailleReview(self, targetCursorCell=0): """Obtains the braille regions for the current flat review line and displays them on the braille display. If the targetCursorCell is non-0, then an attempt will be made to postion the review cursor at that cell. 
Otherwise, we will pan in display-sized increments to show the review cursor.""" if not _settingsManager.getSetting('enableBraille') \ and not _settingsManager.getSetting('enableBrailleMonitor'): debug.println(debug.LEVEL_INFO, "BRAILLE: update review disabled") return context = self.getFlatReviewContext() [regions, regionWithFocus] = context.getCurrentBrailleRegions() if not regions: regions = [] regionWithFocus = None line = self.getNewBrailleLine() self.addBrailleRegionsToLine(regions, line) braille.setLines([line]) self.setBrailleFocus(regionWithFocus, False) if regionWithFocus: self.panBrailleToOffset(regionWithFocus.brailleOffset \ + regionWithFocus.cursorOffset) if self.justEnteredFlatReviewMode: self.refreshBraille(True, self.targetCursorCell) self.justEnteredFlatReviewMode = False else: self.refreshBraille(True, targetCursorCell) def _setFlatReviewContextToBeginningOfBrailleDisplay(self): """Sets the character of interest to be the first character showing at the beginning of the braille display.""" context = self.getFlatReviewContext() [regions, regionWithFocus] = context.getCurrentBrailleRegions() for region in regions: if ((region.brailleOffset + len(region.string)) \ > braille.viewport[0]) \ and (isinstance(region, braille.ReviewText) \ or isinstance(region, braille.ReviewComponent)): position = max(region.brailleOffset, braille.viewport[0]) offset = position - region.brailleOffset self.targetCursorCell = region.brailleOffset \ - braille.viewport[0] [word, charOffset] = region.zone.getWordAtOffset(offset) if word: self.flatReviewContext.setCurrent( word.zone.line.index, word.zone.index, word.index, charOffset) else: self.flatReviewContext.setCurrent( region.zone.line.index, region.zone.index, 0, # word index 0) # character index break def find(self, query=None): """Searches for the specified query. If no query is specified, it searches for the query specified in the Orca Find dialog. Arguments: - query: The search query to find. 
""" if not query: query = find.getLastQuery() if query: context = self.getFlatReviewContext() location = query.findQuery(context, self.justEnteredFlatReviewMode) if not location: self.presentMessage(messages.STRING_NOT_FOUND) else: context.setCurrent(location.lineIndex, location.zoneIndex, \ location.wordIndex, location.charIndex) self.reviewCurrentItem(None) self.targetCursorCell = self.getBrailleCursorCell() def getUnicodeCurrencySymbols(self): """Return a list of the unicode currency symbols, populating the list if this is the first time that this routine has been called. Returns a list of unicode currency symbols. """ if not self._unicodeCurrencySymbols: self._unicodeCurrencySymbols = [ \ '\u0024', # dollar sign '\u00A2', # cent sign '\u00A3', # pound sign '\u00A4', # currency sign '\u00A5', # yen sign '\u0192', # latin small letter f with hook '\u060B', # afghani sign '\u09F2', # bengali rupee mark '\u09F3', # bengali rupee sign '\u0AF1', # gujarati rupee sign '\u0BF9', # tamil rupee sign '\u0E3F', # thai currency symbol baht '\u17DB', # khmer currency symbol riel '\u2133', # script capital m '\u5143', # cjk unified ideograph-5143 '\u5186', # cjk unified ideograph-5186 '\u5706', # cjk unified ideograph-5706 '\u5713', # cjk unified ideograph-5713 '\uFDFC', # rial sign ] # Add 20A0 (EURO-CURRENCY SIGN) to 20B5 (CEDI SIGN) # for ordChar in range(ord('\u20A0'), ord('\u20B5') + 1): self._unicodeCurrencySymbols.append(chr(ordChar)) return self._unicodeCurrencySymbols def speakMisspeltWord(self, allTokens, badWord): """Called by various spell checking routine to speak the misspelt word, plus the context that it is being used in. Arguments: - allTokens: a list of all the words. - badWord: the misspelt word. """ # Create an utterance to speak consisting of the misspelt # word plus the context where it is used (upto five words # to either side of it). 
# for i in range(0, len(allTokens)): if allTokens[i].startswith(badWord): minIndex = i - 5 if minIndex < 0: minIndex = 0 maxIndex = i + 5 if maxIndex > (len(allTokens) - 1): maxIndex = len(allTokens) - 1 utterances = [messages.MISSPELLED_WORD % badWord] contextPhrase = " ".join(allTokens[minIndex:maxIndex+1]) utterances.append(messages.MISSPELLED_WORD_CONTEXT % contextPhrase) # Turn the list of utterances into a string. text = " ".join(utterances) speech.speak(text) def textLines(self, obj, offset=None): """Creates a generator that can be used to iterate over each line of a text object, starting at the caret offset. Arguments: - obj: an Accessible that has a text specialization Returns an iterator that produces elements of the form: [SayAllContext, acss], where SayAllContext has the text to be spoken and acss is an ACSS instance for speaking the text. """ self._sayAllIsInterrupted = False try: text = obj.queryText() except: self._inSayAll = False self._sayAllContexts = [] return self._inSayAll = True length = text.characterCount if offset == None: offset = text.caretOffset # Determine the correct "say all by" mode to use. # sayAllStyle = _settingsManager.getSetting('sayAllStyle') if sayAllStyle == settings.SAYALL_STYLE_SENTENCE: mode = pyatspi.TEXT_BOUNDARY_SENTENCE_START elif sayAllStyle == settings.SAYALL_STYLE_LINE: mode = pyatspi.TEXT_BOUNDARY_LINE_START else: mode = pyatspi.TEXT_BOUNDARY_LINE_START # Get the next line of text to read # done = False while not done: lastEndOffset = -1 while offset < length: [lineString, startOffset, endOffset] = text.getTextAtOffset( offset, mode) # Some applications that don't support sentence boundaries # will provide the line boundary results instead; others # will return nothing. 
# if not lineString: mode = pyatspi.TEXT_BOUNDARY_LINE_START [lineString, startOffset, endOffset] = \ text.getTextAtOffset(offset, mode) # [[[WDW - HACK: well...gnome-terminal sometimes wants to # give us outrageous values back from getTextAtOffset # (see http://bugzilla.gnome.org/show_bug.cgi?id=343133), # so we try to handle it.]]] # if startOffset < 0: break # [[[WDW - HACK: this is here because getTextAtOffset # tends not to be implemented consistently across toolkits. # Sometimes it behaves properly (i.e., giving us an endOffset # that is the beginning of the next line), sometimes it # doesn't (e.g., giving us an endOffset that is the end of # the current line). So...we hack. The whole 'max' deal # is to account for lines that might be a brazillion lines # long.]]] # if endOffset == lastEndOffset: offset = max(offset + 1, lastEndOffset + 1) lastEndOffset = endOffset continue lastEndOffset = endOffset offset = endOffset lineString = \ self.utilities.adjustForLinks(obj, lineString, startOffset) lineString = self.utilities.adjustForRepeats(lineString) if lineString.isupper(): voice = settings.voices[settings.UPPERCASE_VOICE] else: voice = settings.voices[settings.DEFAULT_VOICE] context = speechserver.SayAllContext( obj, lineString, startOffset, endOffset) self._sayAllContexts.append(context) yield [context, voice] moreLines = False relations = obj.getRelationSet() for relation in relations: if relation.getRelationType() \ == pyatspi.RELATION_FLOWS_TO: obj = relation.getTarget(0) try: text = obj.queryText() except NotImplementedError: return length = text.characterCount offset = 0 moreLines = True break if not moreLines: done = True self._inSayAll = False self._sayAllContexts = [] def getTextLineAtCaret(self, obj, offset=None, startOffset=None, endOffset=None): """To-be-removed. Returns the string, caretOffset, startOffset.""" try: text = obj.queryText() except NotImplementedError: return ["", 0, 0] # The caret might be positioned at the very end of the text area. 
# In these cases, calling text.getTextAtOffset on an offset that's # not positioned to a character can yield unexpected results. In # particular, we'll see the Gecko toolkit return a start and end # offset of (0, 0), and we'll see other implementations, such as # gedit, return reasonable results (i.e., gedit will give us the # last line). # # In order to accommodate the differing behavior of different # AT-SPI implementations, we'll make sure we give getTextAtOffset # the offset of an actual character. Then, we'll do a little check # to see if that character is a newline - if it is, we'll treat it # as the line. # if text.caretOffset == text.characterCount: caretOffset = max(0, text.caretOffset - 1) character = text.getText(caretOffset, caretOffset + 1) else: caretOffset = text.caretOffset character = None if (text.caretOffset == text.characterCount) \ and (character == "\n"): lineString = "" startOffset = caretOffset else: # Get the line containing the caret. [[[TODO: HACK WDW - If # there's only 1 character in the string, well, we get it. We # do this because Gecko's implementation of getTextAtOffset # is broken if there is just one character in the string.]]] # if (text.characterCount == 1): lineString = text.getText(caretOffset, caretOffset + 1) startOffset = caretOffset else: if caretOffset == -1: caretOffset = text.characterCount try: [lineString, startOffset, endOffset] = text.getTextAtOffset( caretOffset, pyatspi.TEXT_BOUNDARY_LINE_START) except: return ["", 0, 0] # Sometimes we get the trailing line-feed-- remove it # It is important that these are in order. # In some circumstances we might get: # word word\r\n # so remove \n, and then remove \r. # See bgo#619332. # lineString = lineString.rstrip('\n') lineString = lineString.rstrip('\r') return [lineString, text.caretOffset, startOffset] def phoneticSpellCurrentItem(self, itemString): """Phonetically spell the current flat review word or line. Arguments: - itemString: the string to phonetically spell. 
""" for (charIndex, character) in enumerate(itemString): if character.isupper(): voice = settings.voices[settings.UPPERCASE_VOICE] character = character.lower() else: voice = settings.voices[settings.DEFAULT_VOICE] phoneticString = phonnames.getPhoneticName(character) speech.speak(phoneticString, voice) def _saveLastCursorPosition(self, obj, caretOffset): """Save away the current text cursor position for next time. Arguments: - obj: the current accessible - caretOffset: the cursor position within this object """ self.pointOfReference["lastCursorPosition"] = [obj, caretOffset] def _getCtrlShiftSelectionsStrings(self): return [messages.PARAGRAPH_SELECTED_DOWN, messages.PARAGRAPH_UNSELECTED_DOWN, messages.PARAGRAPH_SELECTED_UP, messages.PARAGRAPH_UNSELECTED_UP] def _speakTextSelectionState(self, nSelections): """Hacky method to speak special cases without any valid sanity checking. It is not long for this world. Do not call it.""" if _settingsManager.getSetting('onlySpeakDisplayedText'): return False eventStr, mods = self.utilities.lastKeyAndModifiers() isControlKey = mods & keybindings.CTRL_MODIFIER_MASK isShiftKey = mods & keybindings.SHIFT_MODIFIER_MASK selectedText = nSelections > 0 line = None if (eventStr == "Page_Down") and isShiftKey and isControlKey: line = messages.LINE_SELECTED_RIGHT elif (eventStr == "Page_Up") and isShiftKey and isControlKey: line = messages.LINE_SELECTED_LEFT elif (eventStr == "Page_Down") and isShiftKey and not isControlKey: if selectedText: line = messages.PAGE_SELECTED_DOWN else: line = messages.PAGE_UNSELECTED_DOWN elif (eventStr == "Page_Up") and isShiftKey and not isControlKey: if selectedText: line = messages.PAGE_SELECTED_UP else: line = messages.PAGE_UNSELECTED_UP elif (eventStr == "Down") and isShiftKey and isControlKey: strings = self._getCtrlShiftSelectionsStrings() if selectedText: line = strings[0] else: line = strings[1] elif (eventStr == "Up") and isShiftKey and isControlKey: strings = 
self._getCtrlShiftSelectionsStrings() if selectedText: line = strings[2] else: line = strings[3] elif (eventStr == "Home") and isShiftKey and isControlKey: if selectedText: line = messages.DOCUMENT_SELECTED_UP else: line = messages.DOCUMENT_UNSELECTED_UP elif (eventStr == "End") and isShiftKey and isControlKey: if selectedText: line = messages.DOCUMENT_SELECTED_DOWN else: line = messages.DOCUMENT_SELECTED_UP elif (eventStr == "A") and isControlKey and selectedText: line = messages.DOCUMENT_SELECTED_ALL if line: speech.speak(line, None, False) return True return False def systemBeep(self): """Rings the system bell. This is really a hack. Ideally, we want a method that will present an earcon (any sound designated for the purpose of representing an error, event etc) """ print("\a") def speakWordUnderMouse(self, acc): """Determine if the speak-word-under-mouse capability applies to the given accessible. Arguments: - acc: Accessible to test. Returns True if this accessible should provide the single word. """ return acc and acc.getState().contains(pyatspi.STATE_EDITABLE) def speakMisspelledIndicator(self, obj, offset): """Speaks an announcement indicating that a given word is misspelled. Arguments: - obj: An accessible which implements the accessible text interface. - offset: Offset in the accessible's text for which to retrieve the attributes. """ verbosity = _settingsManager.getSetting('speechVerbosityLevel') if verbosity == settings.VERBOSITY_LEVEL_VERBOSE: try: text = obj.queryText() except: return # If we're on whitespace, we cannot be on a misspelled word. 
# charAndOffsets = \ text.getTextAtOffset(offset, pyatspi.TEXT_BOUNDARY_CHAR) if not charAndOffsets[0].strip() \ or self.utilities.isWordDelimiter(charAndOffsets[0]): self._lastWordCheckedForSpelling = charAndOffsets[0] return wordAndOffsets = \ text.getTextAtOffset(offset, pyatspi.TEXT_BOUNDARY_WORD_START) if self.utilities.isWordMisspelled(obj, offset) \ and wordAndOffsets[0] != self._lastWordCheckedForSpelling: self.speakMessage(messages.MISSPELLED) # Store this word so that we do not continue to present the # presence of the red squiggly as the user arrows amongst # the characters. # self._lastWordCheckedForSpelling = wordAndOffsets[0] ############################################################################ # # # Presentation methods # # (scripts should not call methods in braille.py or speech.py directly) # # # ############################################################################ def presentationInterrupt(self): """Convenience method to interrupt presentation of whatever is being presented at the moment.""" speech.stop() braille.killFlash() def presentKeyboardEvent(self, event): """Convenience method to present the KeyboardEvent event. Returns True if we fully present the event; False otherwise.""" if not event.isPressedKey(): self._sayAllIsInterrupted = False if not orca_state.learnModeEnabled: if event.shouldEcho == False or event.isOrcaModified(): return False try: role = orca_state.locusOfFocus.getRole() except: return False if role == pyatspi.ROLE_PASSWORD_TEXT: return False # Worst. Hack. EVER. We have no reliable way of knowing a password is # being entered into a terminal -- other than the fact that the text # typed ain't there. As a result, we have to do special things when # not in special modes. :( See bgo 668025. 
if role == pyatspi.ROLE_TERMINAL: if not event.isPressedKey(): try: text = orca_state.locusOfFocus.queryText() o = text.caretOffset string = text.getText(o-1, o) except: pass else: if not event.event_string in [string, 'space']: return False elif not (orca_state.learnModeEnabled or event.isLockingKey()): return False elif not event.isPressedKey(): return False
# codeparrot/github-code-clean — dataset separator: the text above (GNOME Orca
# scripts/default.py) and the text below (OpenStack Nova nova/api/ec2/cloud.py)
# are two unrelated source files concatenated by a code corpus.
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Cloud Controller: Implementation of EC2 REST API calls, which are dispatched to other nodes via AMQP RPC. State is via distributed datastore. """ import base64 import time from oslo.config import cfg from nova.api.ec2 import ec2utils from nova.api.ec2 import inst_state from nova.api.metadata import password from nova.api import validator from nova import availability_zones from nova import block_device from nova.cloudpipe import pipelib from nova import compute from nova.compute import api as compute_api from nova.compute import flavors from nova.compute import vm_states from nova import db from nova import exception from nova.image import s3 from nova import network from nova.network.security_group import quantum_driver from nova.objects import instance as instance_obj from nova.openstack.common import log as logging from nova.openstack.common import timeutils from nova import quota from nova import servicegroup from nova import utils from nova import volume ec2_opts = [ cfg.StrOpt('ec2_host', default='$my_ip', help='the ip of the ec2 api server'), cfg.StrOpt('ec2_dmz_host', default='$my_ip', help='the internal ip of the ec2 api server'), cfg.IntOpt('ec2_port', default=8773, help='the port of the ec2 api server'), 
cfg.StrOpt('ec2_scheme', default='http', help='the protocol to use when connecting to the ec2 api ' 'server (http, https)'), cfg.StrOpt('ec2_path', default='/services/Cloud', help='the path prefix used to call the ec2 api server'), cfg.ListOpt('region_list', default=[], help='list of region=fqdn pairs separated by commas'), ] CONF = cfg.CONF CONF.register_opts(ec2_opts) CONF.import_opt('my_ip', 'nova.netconf') CONF.import_opt('vpn_key_suffix', 'nova.cloudpipe.pipelib') CONF.import_opt('internal_service_availability_zone', 'nova.availability_zones') LOG = logging.getLogger(__name__) QUOTAS = quota.QUOTAS def validate_ec2_id(val): if not validator.validate_str()(val): raise exception.InvalidInstanceIDMalformed(val=val) try: ec2utils.ec2_id_to_id(val) except exception.InvalidEc2Id: raise exception.InvalidInstanceIDMalformed(val=val) # EC2 API can return the following values as documented in the EC2 API # http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ # ApiReference-ItemType-InstanceStateType.html # pending 0 | running 16 | shutting-down 32 | terminated 48 | stopping 64 | # stopped 80 _STATE_DESCRIPTION_MAP = { None: inst_state.PENDING, vm_states.ACTIVE: inst_state.RUNNING, vm_states.BUILDING: inst_state.PENDING, vm_states.DELETED: inst_state.TERMINATED, vm_states.SOFT_DELETED: inst_state.TERMINATED, vm_states.STOPPED: inst_state.STOPPED, vm_states.PAUSED: inst_state.PAUSE, vm_states.SUSPENDED: inst_state.SUSPEND, vm_states.RESCUED: inst_state.RESCUE, vm_states.RESIZED: inst_state.RESIZE, } def _state_description(vm_state, _shutdown_terminate): """Map the vm state to the server status string.""" # Note(maoy): We do not provide EC2 compatibility # in shutdown_terminate flag behavior. So we ignore # it here. 
    # Unmapped vm_states fall through as their own name.
    name = _STATE_DESCRIPTION_MAP.get(vm_state, vm_state)

    return {'code': inst_state.name_to_code(name),
            'name': name}


def _parse_block_device_mapping(bdm):
    """Parse BlockDeviceMappingItemType into flat hash
    BlockDevicedMapping.<N>.DeviceName
    BlockDevicedMapping.<N>.Ebs.SnapshotId
    BlockDevicedMapping.<N>.Ebs.VolumeSize
    BlockDevicedMapping.<N>.Ebs.DeleteOnTermination
    BlockDevicedMapping.<N>.Ebs.NoDevice
    BlockDevicedMapping.<N>.VirtualName
    => remove .Ebs and allow volume id in SnapshotId
    """
    ebs = bdm.pop('ebs', None)
    if ebs:
        ec2_id = ebs.pop('snapshot_id', None)
        if ec2_id:
            # SnapshotId may carry either a snapshot or a volume EC2 id.
            if ec2_id.startswith('snap-'):
                bdm['snapshot_id'] = ec2utils.ec2_snap_id_to_uuid(ec2_id)
            elif ec2_id.startswith('vol-'):
                bdm['volume_id'] = ec2utils.ec2_vol_id_to_uuid(ec2_id)
            # EC2's default is to delete the backing volume on terminate.
            ebs.setdefault('delete_on_termination', True)
        # Flatten the remaining Ebs.* keys into the top-level mapping.
        bdm.update(ebs)
    return bdm


def _properties_get_mappings(properties):
    # Prefixes bare device names ('vda') with '/dev/' for consistency.
    return block_device.mappings_prepend_dev(properties.get('mappings', []))


def _format_block_device_mapping(bdm):
    """Construct BlockDeviceMappingItemType
    {'device_name': '...', 'snapshot_id': , ...}
    => BlockDeviceMappingItemType
    """
    keys = (('deviceName', 'device_name'),
            ('virtualName', 'virtual_name'))
    item = {}
    for name, k in keys:
        if k in bdm:
            item[name] = bdm[k]
    if bdm.get('no_device'):
        item['noDevice'] = True
    if ('snapshot_id' in bdm) or ('volume_id' in bdm):
        ebs_keys = (('snapshotId', 'snapshot_id'),
                    ('snapshotId', 'volume_id'),        # snapshotId is abused
                    ('volumeSize', 'volume_size'),
                    ('deleteOnTermination', 'delete_on_termination'))
        ebs = {}
        for name, k in ebs_keys:
            if k in bdm:
                # Translate internal uuids back to EC2-style ids.
                if k == 'snapshot_id':
                    ebs[name] = ec2utils.id_to_ec2_snap_id(bdm[k])
                elif k == 'volume_id':
                    ebs[name] = ec2utils.id_to_ec2_vol_id(bdm[k])
                else:
                    ebs[name] = bdm[k]
        assert 'snapshotId' in ebs
        item['ebs'] = ebs
    return item


def _format_mappings(properties, result):
    """Format multiple BlockDeviceMappingItemType."""
    mappings = [{'virtualName': m['virtual'], 'deviceName': m['device']}
                for m in _properties_get_mappings(properties)
                if block_device.is_swap_or_ephemeral(m['virtual'])]

    block_device_mapping = [_format_block_device_mapping(bdm) for bdm in
                            properties.get('block_device_mapping', [])]

    # NOTE(yamahata): overwrite mappings with block_device_mapping
    for bdm in block_device_mapping:
        for i in range(len(mappings)):
            if bdm['deviceName'] == mappings[i]['deviceName']:
                del mappings[i]
                break
        mappings.append(bdm)

    # NOTE(yamahata): trim ebs.no_device == true. Is this necessary?
    mappings = [bdm for bdm in mappings if not (bdm.get('noDevice', False))]

    if mappings:
        result['blockDeviceMapping'] = mappings


def db_to_inst_obj(context, db_instance):
    # NOTE(danms): This is a temporary helper method for converting
    # Instance DB objects to NovaObjects without needing to re-query.
    inst_obj = instance_obj.Instance._from_db_object(
        instance_obj.Instance(), db_instance,
        expected_attrs=['system_metadata', 'metadata'])
    inst_obj._context = context
    return inst_obj


class CloudController(object):
    """CloudController provides the critical dispatch between
 inbound API calls through the endpoint and messages
 sent to the other nodes.
"""
    def __init__(self):
        self.image_service = s3.S3ImageService()
        self.network_api = network.API()
        self.volume_api = volume.API()
        self.security_group_api = get_cloud_security_group_api()
        # compute.API is wired with the same network/volume/secgroup
        # helpers so all sub-APIs share one view of the deployment.
        self.compute_api = compute.API(network_api=self.network_api,
                                   volume_api=self.volume_api,
                                   security_group_api=self.security_group_api)
        self.keypair_api = compute_api.KeypairAPI()
        self.servicegroup_api = servicegroup.API()

    def __str__(self):
        return 'CloudController'

    def _enforce_valid_instance_ids(self, context, instance_ids):
        # NOTE(mikal): Amazon's implementation of the EC2 API requires that
        # _all_ instance ids passed in be valid.
        # Returns {ec2_id: instance}; compute_api.get raises if any id
        # does not resolve, which enforces the all-ids-valid rule above.
        instances = {}
        if instance_ids:
            for ec2_id in instance_ids:
                instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
                instance = self.compute_api.get(context, instance_uuid)
                instances[ec2_id] = instance
        return instances

    def _get_image_state(self, image):
        # NOTE(vish): fallback status if image_state isn't set
        state = image.get('status')
        if state == 'active':
            state = 'available'
        return image['properties'].get('image_state', state)

    def describe_availability_zones(self, context, **kwargs):
        # Admins asking for zone_name=['verbose'] get the per-host view.
        if ('zone_name' in kwargs and
            'verbose' in kwargs['zone_name'] and
            context.is_admin):
            return self._describe_availability_zones_verbose(context,
                                                             **kwargs)
        else:
            return self._describe_availability_zones(context, **kwargs)

    def _describe_availability_zones(self, context, **kwargs):
        ctxt = context.elevated()
        available_zones, not_available_zones = \
            availability_zones.get_availability_zones(ctxt)

        result = []
        for zone in available_zones:
            # Hide internal_service_availability_zone
            if zone == CONF.internal_service_availability_zone:
                continue
            result.append({'zoneName': zone,
                           'zoneState': "available"})
        for zone in not_available_zones:
            result.append({'zoneName': zone,
                           'zoneState': "not available"})

        return {'availabilityZoneInfo': result}

    def _describe_availability_zones_verbose(self, context, **kwargs):
        """Admin-only zone listing, annotated with hosts and services."""
        ctxt = context.elevated()
        available_zones, not_available_zones = \
            availability_zones.get_availability_zones(ctxt)

        # Available services
        enabled_services = db.service_get_all(context, False)
        enabled_services = availability_zones.set_availability_zones(context,
                enabled_services)
        # zone -> [hosts]; zone+host -> [services] lookup tables.
        zone_hosts = {}
        host_services = {}
        for service in enabled_services:
            zone_hosts.setdefault(service['availability_zone'], [])
            if service['host'] not in zone_hosts[service['availability_zone']]:
                zone_hosts[service['availability_zone']].append(
                    service['host'])

            host_services.setdefault(service['availability_zone'] +
                    service['host'], [])
            host_services[service['availability_zone'] + service['host']].\
                    append(service)

        result = []
        for zone in available_zones:
            result.append({'zoneName': zone,
                           'zoneState': "available"})
            for host in zone_hosts[zone]:
                result.append({'zoneName': '|- %s' % host,
                               'zoneState': ''})

                for service in host_services[zone + host]:
                    # Liveness is rendered as ":-)" / "XXX" in the output.
                    alive = self.servicegroup_api.service_is_up(service)
                    art = (alive and ":-)") or "XXX"
                    active = 'enabled'
                    if service['disabled']:
                        active = 'disabled'
                    result.append({'zoneName': '| |- %s' % service['binary'],
                                   'zoneState': ('%s %s %s'
                                                 % (active, art,
                                                    service['updated_at']))})

        for zone in not_available_zones:
            result.append({'zoneName': zone,
                           'zoneState': "not available"})

        return {'availabilityZoneInfo': result}

    def describe_regions(self, context, region_name=None, **kwargs):
        # region_list entries are "name=fqdn"; with no configured regions
        # a single 'nova' region pointing at this API endpoint is returned.
        if CONF.region_list:
            regions = []
            for region in CONF.region_list:
                name, _sep, host = region.partition('=')
                endpoint = '%s://%s:%s%s' % (CONF.ec2_scheme, host,
                                             CONF.ec2_port, CONF.ec2_path)
                regions.append({'regionName': name,
                                'regionEndpoint': endpoint})
        else:
            regions = [{'regionName': 'nova',
                        'regionEndpoint': '%s://%s:%s%s' % (CONF.ec2_scheme,
                                                            CONF.ec2_host,
                                                            CONF.ec2_port,
                                                            CONF.ec2_path)}]
        return {'regionInfo': regions}

    def describe_snapshots(self, context, snapshot_id=None, owner=None,
                           restorable_by=None, **kwargs):
        # NOTE(review): owner/restorable_by are accepted for EC2 API
        # compatibility but not used for filtering here.
        if snapshot_id:
            snapshots = []
            for ec2_id in snapshot_id:
                internal_id = ec2utils.ec2_snap_id_to_uuid(ec2_id)
                snapshot = self.volume_api.get_snapshot(
                    context,
                    snapshot_id=internal_id)
                snapshots.append(snapshot)
        else:
            snapshots = self.volume_api.get_all_snapshots(context)

        # _format_snapshot returns None for unpresentable states; drop them.
        formatted_snapshots = []
        for s in snapshots:
            formatted = self._format_snapshot(context, s)
            if formatted:
                formatted_snapshots.append(formatted)
        return {'snapshotSet': formatted_snapshots}

    def _format_snapshot(self, context, snapshot):
        # NOTE(mikal): this is just a set of strings in cinder. If they
        # implement an enum, then we should move this code to use it. The
        # valid ec2 statuses are "pending", "completed", and "error".
status_map = {'new': 'pending', 'creating': 'pending', 'available': 'completed', 'active': 'completed', 'deleting': 'pending', 'deleted': None, 'error': 'error'} mapped_status = status_map.get(snapshot['status'], snapshot['status']) if not mapped_status: return None s = {} s['snapshotId'] = ec2utils.id_to_ec2_snap_id(snapshot['id']) s['volumeId'] = ec2utils.id_to_ec2_vol_id(snapshot['volume_id']) s['status'] = mapped_status s['startTime'] = snapshot['created_at'] s['progress'] = snapshot['progress'] s['ownerId'] = snapshot['project_id'] s['volumeSize'] = snapshot['volume_size'] s['description'] = snapshot['display_description'] return s def create_snapshot(self, context, volume_id, **kwargs): validate_ec2_id(volume_id) LOG.audit(_("Create snapshot of volume %s"), volume_id, context=context) volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id) args = (context, volume_id, kwargs.get('name'), kwargs.get('description')) if kwargs.get('force', False): snapshot = self.volume_api.create_snapshot_force(*args) else: snapshot = self.volume_api.create_snapshot(*args) db.ec2_snapshot_create(context, snapshot['id']) return self._format_snapshot(context, snapshot) def delete_snapshot(self, context, snapshot_id, **kwargs): snapshot_id = ec2utils.ec2_snap_id_to_uuid(snapshot_id) self.volume_api.delete_snapshot(context, snapshot_id) return True def describe_key_pairs(self, context, key_name=None, **kwargs): key_pairs = self.keypair_api.get_key_pairs(context, context.user_id) if key_name is not None: key_pairs = [x for x in key_pairs if x['name'] in key_name] #If looking for non existent key pair if key_name is not None and not key_pairs: msg = _('Could not find key pair(s): %s') % ','.join(key_name) raise exception.KeypairNotFound(msg, code="InvalidKeyPair.Duplicate") result = [] for key_pair in key_pairs: # filter out the vpn keys suffix = CONF.vpn_key_suffix if context.is_admin or not key_pair['name'].endswith(suffix): result.append({ 'keyName': key_pair['name'], 'keyFingerprint': 
key_pair['fingerprint'], }) return {'keySet': result} def create_key_pair(self, context, key_name, **kwargs): LOG.audit(_("Create key pair %s"), key_name, context=context) try: keypair = self.keypair_api.create_key_pair(context, context.user_id, key_name) except exception.KeypairLimitExceeded: msg = _("Quota exceeded, too many key pairs.") raise exception.EC2APIError(msg, code='ResourceLimitExceeded') return {'keyName': key_name, 'keyFingerprint': keypair['fingerprint'], 'keyMaterial': keypair['private_key']} # TODO(vish): when context is no longer an object, pass it here def import_key_pair(self, context, key_name, public_key_material, **kwargs): LOG.audit(_("Import key %s"), key_name, context=context) public_key = base64.b64decode(public_key_material) try: keypair = self.keypair_api.import_key_pair(context, context.user_id, key_name, public_key) except exception.KeypairLimitExceeded: msg = _("Quota exceeded, too many key pairs.") raise exception.EC2APIError(msg) except exception.InvalidKeypair: msg = _("Keypair data is invalid") raise exception.EC2APIError(msg) return {'keyName': key_name, 'keyFingerprint': keypair['fingerprint']} def delete_key_pair(self, context, key_name, **kwargs): LOG.audit(_("Delete key pair %s"), key_name, context=context) try: self.keypair_api.delete_key_pair(context, context.user_id, key_name) except exception.NotFound: # aws returns true even if the key doesn't exist pass return True def describe_security_groups(self, context, group_name=None, group_id=None, **kwargs): search_opts = ec2utils.search_opts_from_filters(kwargs.get('filter')) raw_groups = self.security_group_api.list(context, group_name, group_id, context.project_id, search_opts=search_opts) groups = [self._format_security_group(context, g) for g in raw_groups] return {'securityGroupInfo': list(sorted(groups, key=lambda k: (k['ownerId'], k['groupName'])))} def _format_security_group(self, context, group): g = {} g['groupDescription'] = group['description'] g['groupName'] = 
group['name'] g['ownerId'] = group['project_id'] g['ipPermissions'] = [] for rule in group['rules']: r = {} r['groups'] = [] r['ipRanges'] = [] if rule['group_id']: if rule.get('grantee_group'): source_group = rule['grantee_group'] r['groups'] += [{'groupName': source_group['name'], 'userId': source_group['project_id']}] else: # rule is not always joined with grantee_group # for example when using quantum driver. source_group = self.security_group_api.get( context, id=rule['group_id']) r['groups'] += [{'groupName': source_group.get('name'), 'userId': source_group.get('project_id')}] if rule['protocol']: r['ipProtocol'] = rule['protocol'].lower() r['fromPort'] = rule['from_port'] r['toPort'] = rule['to_port'] g['ipPermissions'] += [dict(r)] else: for protocol, min_port, max_port in (('icmp', -1, -1), ('tcp', 1, 65535), ('udp', 1, 65535)): r['ipProtocol'] = protocol r['fromPort'] = min_port r['toPort'] = max_port g['ipPermissions'] += [dict(r)] else: r['ipProtocol'] = rule['protocol'] r['fromPort'] = rule['from_port'] r['toPort'] = rule['to_port'] r['ipRanges'] += [{'cidrIp': rule['cidr']}] g['ipPermissions'] += [r] return g def _rule_args_to_dict(self, context, kwargs): rules = [] if 'groups' not in kwargs and 'ip_ranges' not in kwargs: rule = self._rule_dict_last_step(context, **kwargs) if rule: rules.append(rule) return rules if 'ip_ranges' in kwargs: rules = self._cidr_args_split(kwargs) else: rules = [kwargs] finalset = [] for rule in rules: if 'groups' in rule: groups_values = self._groups_args_split(rule) for groups_value in groups_values: final = self._rule_dict_last_step(context, **groups_value) finalset.append(final) else: final = self._rule_dict_last_step(context, **rule) finalset.append(final) return finalset def _cidr_args_split(self, kwargs): cidr_args_split = [] cidrs = kwargs['ip_ranges'] for key, cidr in cidrs.iteritems(): mykwargs = kwargs.copy() del mykwargs['ip_ranges'] mykwargs['cidr_ip'] = cidr['cidr_ip'] cidr_args_split.append(mykwargs) return 
cidr_args_split def _groups_args_split(self, kwargs): groups_args_split = [] groups = kwargs['groups'] for key, group in groups.iteritems(): mykwargs = kwargs.copy() del mykwargs['groups'] if 'group_name' in group: mykwargs['source_security_group_name'] = group['group_name'] if 'user_id' in group: mykwargs['source_security_group_owner_id'] = group['user_id'] if 'group_id' in group: mykwargs['source_security_group_id'] = group['group_id'] groups_args_split.append(mykwargs) return groups_args_split def _rule_dict_last_step(self, context, to_port=None, from_port=None, ip_protocol=None, cidr_ip=None, user_id=None, source_security_group_name=None, source_security_group_owner_id=None): if source_security_group_name: source_project_id = self._get_source_project_id(context, source_security_group_owner_id) source_security_group = db.security_group_get_by_name( context.elevated(), source_project_id, source_security_group_name) notfound = exception.SecurityGroupNotFound if not source_security_group: raise notfound(security_group_id=source_security_group_name) group_id = source_security_group['id'] return self.security_group_api.new_group_ingress_rule( group_id, ip_protocol, from_port, to_port) else: cidr = self.security_group_api.parse_cidr(cidr_ip) return self.security_group_api.new_cidr_ingress_rule( cidr, ip_protocol, from_port, to_port) def _validate_group_identifier(self, group_name, group_id): if not group_name and not group_id: err = _("Not enough parameters, need group_name or group_id") raise exception.EC2APIError(err) def _validate_rulevalues(self, rulesvalues): if not rulesvalues: err = _("%s Not enough parameters to build a valid rule") raise exception.EC2APIError(err % rulesvalues) def _validate_security_group_protocol(self, values): validprotocols = ['tcp', 'udp', 'icmp', '6', '17', '1'] if 'ip_protocol' in values and \ values['ip_protocol'] not in validprotocols: err = _('Invalid IP protocol %s.') % values['ip_protocol'] raise exception.EC2APIError(message=err, 
code="400") def revoke_security_group_ingress(self, context, group_name=None, group_id=None, **kwargs): self._validate_group_identifier(group_name, group_id) security_group = self.security_group_api.get(context, group_name, group_id) prevalues = kwargs.get('ip_permissions', [kwargs]) rule_ids = [] for values in prevalues: rulesvalues = self._rule_args_to_dict(context, values) self._validate_rulevalues(rulesvalues) for values_for_rule in rulesvalues: values_for_rule['parent_group_id'] = security_group['id'] rule_ids.append(self.security_group_api.rule_exists( security_group, values_for_rule)) rule_ids = [id for id in rule_ids if id] if rule_ids: self.security_group_api.remove_rules(context, security_group, rule_ids) return True raise exception.EC2APIError(_("No rule for the specified parameters.")) # TODO(soren): This has only been tested with Boto as the client. # Unfortunately, it seems Boto is using an old API # for these operations, so support for newer API versions # is sketchy. def authorize_security_group_ingress(self, context, group_name=None, group_id=None, **kwargs): self._validate_group_identifier(group_name, group_id) security_group = self.security_group_api.get(context, group_name, group_id) prevalues = kwargs.get('ip_permissions', [kwargs]) postvalues = [] for values in prevalues: self._validate_security_group_protocol(values) rulesvalues = self._rule_args_to_dict(context, values) self._validate_rulevalues(rulesvalues) for values_for_rule in rulesvalues: values_for_rule['parent_group_id'] = security_group['id'] if self.security_group_api.rule_exists(security_group, values_for_rule): err = _('%s - This rule already exists in group') raise exception.EC2APIError(err % values_for_rule) postvalues.append(values_for_rule) if postvalues: self.security_group_api.add_rules(context, security_group['id'], security_group['name'], postvalues) return True raise exception.EC2APIError(_("No rule for the specified parameters.")) def _get_source_project_id(self, 
context, source_security_group_owner_id): if source_security_group_owner_id: # Parse user:project for source group. source_parts = source_security_group_owner_id.split(':') # If no project name specified, assume it's same as user name. # Since we're looking up by project name, the user name is not # used here. It's only read for EC2 API compatibility. if len(source_parts) == 2: source_project_id = source_parts[1] else: source_project_id = source_parts[0] else: source_project_id = context.project_id return source_project_id def create_security_group(self, context, group_name, group_description): if isinstance(group_name, unicode): group_name = group_name.encode('utf-8') if CONF.ec2_strict_validation: # EC2 specification gives constraints for name and description: # Accepts alphanumeric characters, spaces, dashes, and underscores allowed = '^[a-zA-Z0-9_\- ]+$' self.security_group_api.validate_property(group_name, 'name', allowed) self.security_group_api.validate_property(group_description, 'description', allowed) else: # Amazon accepts more symbols. # So, allow POSIX [:print:] characters. 
allowed = r'^[\x20-\x7E]+$' self.security_group_api.validate_property(group_name, 'name', allowed) group_ref = self.security_group_api.create_security_group( context, group_name, group_description) return {'securityGroupSet': [self._format_security_group(context, group_ref)]} def delete_security_group(self, context, group_name=None, group_id=None, **kwargs): if not group_name and not group_id: err = _("Not enough parameters, need group_name or group_id") raise exception.EC2APIError(err) security_group = self.security_group_api.get(context, group_name, group_id) self.security_group_api.destroy(context, security_group) return True def get_password_data(self, context, instance_id, **kwargs): # instance_id may be passed in as a list of instances if isinstance(instance_id, list): ec2_id = instance_id[0] else: ec2_id = instance_id validate_ec2_id(ec2_id) instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id) instance = self.compute_api.get(context, instance_uuid) output = password.extract_password(instance) # NOTE(vish): this should be timestamp from the metadata fields # but it isn't important enough to implement properly now = timeutils.utcnow() return {"InstanceId": ec2_id, "Timestamp": now, "passwordData": output} def get_console_output(self, context, instance_id, **kwargs): LOG.audit(_("Get console output for instance %s"), instance_id, context=context) # instance_id may be passed in as a list of instances if isinstance(instance_id, list): ec2_id = instance_id[0] else: ec2_id = instance_id validate_ec2_id(ec2_id) instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id) instance = self.compute_api.get(context, instance_uuid) output = self.compute_api.get_console_output(context, instance) now = timeutils.utcnow() return {"InstanceId": ec2_id, "Timestamp": now, "output": base64.b64encode(output)} def describe_volumes(self, context, volume_id=None, **kwargs): if volume_id: volumes = [] for ec2_id in volume_id: validate_ec2_id(ec2_id) internal_id = 
ec2utils.ec2_vol_id_to_uuid(ec2_id) volume = self.volume_api.get(context, internal_id) volumes.append(volume) else: volumes = self.volume_api.get_all(context) volumes = [self._format_volume(context, v) for v in volumes] return {'volumeSet': volumes} def _format_volume(self, context, volume): valid_ec2_api_volume_status_map = { 'attaching': 'in-use', 'detaching': 'in-use'} instance_ec2_id = None if volume.get('instance_uuid', None): instance_uuid = volume['instance_uuid'] instance = db.instance_get_by_uuid(context.elevated(), instance_uuid) instance_ec2_id = ec2utils.id_to_ec2_inst_id(instance_uuid) v = {} v['volumeId'] = ec2utils.id_to_ec2_vol_id(volume['id']) v['status'] = valid_ec2_api_volume_status_map.get(volume['status'], volume['status']) v['size'] = volume['size'] v['availabilityZone'] = volume['availability_zone'] v['createTime'] = volume['created_at'] if volume['attach_status'] == 'attached': v['attachmentSet'] = [{'attachTime': volume['attach_time'], 'deleteOnTermination': False, 'device': volume['mountpoint'], 'instanceId': instance_ec2_id, 'status': 'attached', 'volumeId': v['volumeId']}] else: v['attachmentSet'] = [{}] if volume.get('snapshot_id') is not None: v['snapshotId'] = ec2utils.id_to_ec2_snap_id(volume['snapshot_id']) else: v['snapshotId'] = None return v def create_volume(self, context, **kwargs): snapshot_ec2id = kwargs.get('snapshot_id', None) if snapshot_ec2id is not None: snapshot_id = ec2utils.ec2_snap_id_to_uuid(kwargs['snapshot_id']) snapshot = self.volume_api.get_snapshot(context, snapshot_id) LOG.audit(_("Create volume from snapshot %s"), snapshot_ec2id, context=context) else: snapshot = None LOG.audit(_("Create volume of %s GB"), kwargs.get('size'), context=context) create_kwargs = dict(snapshot=snapshot, volume_type=kwargs.get('volume_type'), metadata=kwargs.get('metadata'), availability_zone=kwargs.get('availability_zone')) volume = self.volume_api.create(context, kwargs.get('size'), kwargs.get('name'), kwargs.get('description'), 
**create_kwargs) db.ec2_volume_create(context, volume['id']) # TODO(vish): Instance should be None at db layer instead of # trying to lazy load, but for now we turn it into # a dict to avoid an error. return self._format_volume(context, dict(volume)) def delete_volume(self, context, volume_id, **kwargs): validate_ec2_id(volume_id) volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id) try: self.volume_api.delete(context, volume_id) except exception.InvalidVolume: raise exception.EC2APIError(_('Delete Failed')) return True def attach_volume(self, context, volume_id, instance_id, device, **kwargs): validate_ec2_id(instance_id) validate_ec2_id(volume_id) volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id) instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, instance_id) instance = self.compute_api.get(context, instance_uuid) LOG.audit(_('Attach volume %(volume_id)s to instance %(instance_id)s ' 'at %(device)s'), {'volume_id': volume_id, 'instance_id': instance_id, 'device': device}, context=context) try: self.compute_api.attach_volume(context, instance, volume_id, device) except exception.InvalidVolume: raise exception.EC2APIError(_('Attach Failed.')) volume = self.volume_api.get(context, volume_id) return {'attachTime': volume['attach_time'], 'device': volume['mountpoint'], 'instanceId': ec2utils.id_to_ec2_inst_id(instance_uuid), 'requestId': context.request_id, 'status': volume['attach_status'], 'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)} def _get_instance_from_volume(self, context, volume): if volume['instance_uuid']: try: return db.instance_get_by_uuid(context, volume['instance_uuid']) except exception.InstanceNotFound: pass raise exception.VolumeUnattached(volume_id=volume['id']) def detach_volume(self, context, volume_id, **kwargs): validate_ec2_id(volume_id) volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id) LOG.audit(_("Detach volume %s"), volume_id, context=context) volume = self.volume_api.get(context, volume_id) instance = 
self._get_instance_from_volume(context, volume) try: self.compute_api.detach_volume(context, instance, volume) except exception.InvalidVolume: raise exception.EC2APIError(_('Detach Volume Failed.')) return {'attachTime': volume['attach_time'], 'device': volume['mountpoint'], 'instanceId': ec2utils.id_to_ec2_inst_id( volume['instance_uuid']), 'requestId': context.request_id, 'status': volume['attach_status'], 'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)} def _format_kernel_id(self, context, instance_ref, result, key): kernel_uuid = instance_ref['kernel_id'] if kernel_uuid is None or kernel_uuid == '': return result[key] = ec2utils.glance_id_to_ec2_id(context, kernel_uuid, 'aki') def _format_ramdisk_id(self, context, instance_ref, result, key): ramdisk_uuid = instance_ref['ramdisk_id'] if ramdisk_uuid is None or ramdisk_uuid == '': return result[key] = ec2utils.glance_id_to_ec2_id(context, ramdisk_uuid, 'ari') def describe_instance_attribute(self, context, instance_id, attribute, **kwargs): def _unsupported_attribute(instance, result): raise exception.EC2APIError(_('attribute not supported: %s') % attribute) def _format_attr_block_device_mapping(instance, result): tmp = {} self._format_instance_root_device_name(instance, tmp) self._format_instance_bdm(context, instance['uuid'], tmp['rootDeviceName'], result) def _format_attr_disable_api_termination(instance, result): result['disableApiTermination'] = instance['disable_terminate'] def _format_attr_group_set(instance, result): CloudController._format_group_set(instance, result) def _format_attr_instance_initiated_shutdown_behavior(instance, result): if instance['shutdown_terminate']: result['instanceInitiatedShutdownBehavior'] = 'terminate' else: result['instanceInitiatedShutdownBehavior'] = 'stop' def _format_attr_instance_type(instance, result): self._format_instance_type(instance, result) def _format_attr_kernel(instance, result): self._format_kernel_id(context, instance, result, 'kernel') def 
_format_attr_ramdisk(instance, result): self._format_ramdisk_id(context, instance, result, 'ramdisk') def _format_attr_root_device_name(instance, result): self._format_instance_root_device_name(instance, result) def _format_attr_source_dest_check(instance, result): _unsupported_attribute(instance, result) def _format_attr_user_data(instance, result): result['userData'] = base64.b64decode(instance['user_data']) attribute_formatter = { 'blockDeviceMapping': _format_attr_block_device_mapping, 'disableApiTermination': _format_attr_disable_api_termination, 'groupSet': _format_attr_group_set, 'instanceInitiatedShutdownBehavior': _format_attr_instance_initiated_shutdown_behavior, 'instanceType': _format_attr_instance_type, 'kernel': _format_attr_kernel, 'ramdisk': _format_attr_ramdisk, 'rootDeviceName': _format_attr_root_device_name, 'sourceDestCheck': _format_attr_source_dest_check, 'userData': _format_attr_user_data, } fn = attribute_formatter.get(attribute) if fn is None: raise exception.EC2APIError( _('attribute not supported: %s') % attribute) validate_ec2_id(instance_id) instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, instance_id) instance = self.compute_api.get(context, instance_uuid) result = {'instance_id': instance_id} fn(instance, result) return result def describe_instances(self, context, **kwargs): # Optional DescribeInstances argument instance_id = kwargs.get('instance_id', None) filters = kwargs.get('filter', None) instances = self._enforce_valid_instance_ids(context, instance_id) return self._format_describe_instances(context, instance_id=instance_id, instance_cache=instances, filter=filters) def describe_instances_v6(self, context, **kwargs): # Optional DescribeInstancesV6 argument instance_id = kwargs.get('instance_id', None) filters = kwargs.get('filter', None) instances = self._enforce_valid_instance_ids(context, instance_id) return self._format_describe_instances(context, instance_id=instance_id, instance_cache=instances, filter=filters, 
use_v6=True) def _format_describe_instances(self, context, **kwargs): return {'reservationSet': self._format_instances(context, **kwargs)} def _format_run_instances(self, context, reservation_id): i = self._format_instances(context, reservation_id=reservation_id) assert len(i) == 1 return i[0] def _format_terminate_instances(self, context, instance_id, previous_states): instances_set = [] for (ec2_id, previous_state) in zip(instance_id, previous_states): i = {} i['instanceId'] = ec2_id i['previousState'] = _state_description(previous_state['vm_state'], previous_state['shutdown_terminate']) try: instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id) instance = self.compute_api.get(context, instance_uuid) i['currentState'] = _state_description(instance['vm_state'], instance['shutdown_terminate']) except exception.NotFound: i['currentState'] = _state_description(vm_states.DELETED, True) instances_set.append(i) return {'instancesSet': instances_set} def _format_instance_bdm(self, context, instance_uuid, root_device_name, result): """Format InstanceBlockDeviceMappingResponseItemType.""" root_device_type = 'instance-store' mapping = [] for bdm in block_device.legacy_mapping( db.block_device_mapping_get_all_by_instance(context, instance_uuid)): volume_id = bdm['volume_id'] if (volume_id is None or bdm['no_device']): continue if (bdm['device_name'] == root_device_name and (bdm['snapshot_id'] or bdm['volume_id'])): assert not bdm['virtual_name'] root_device_type = 'ebs' vol = self.volume_api.get(context, volume_id) LOG.debug(_("vol = %s\n"), vol) # TODO(yamahata): volume attach time ebs = {'volumeId': ec2utils.id_to_ec2_vol_id(volume_id), 'deleteOnTermination': bdm['delete_on_termination'], 'attachTime': vol['attach_time'] or '', 'status': vol['attach_status'], } res = {'deviceName': bdm['device_name'], 'ebs': ebs, } mapping.append(res) if mapping: result['blockDeviceMapping'] = mapping result['rootDeviceType'] = root_device_type @staticmethod def 
_format_instance_root_device_name(instance, result): result['rootDeviceName'] = (instance.get('root_device_name') or block_device.DEFAULT_ROOT_DEV_NAME) @staticmethod def _format_instance_type(instance, result): instance_type = flavors.extract_flavor(instance) result['instanceType'] = instance_type['name'] @staticmethod def _format_group_set(instance, result): security_group_names = [] if instance.get('security_groups'): for security_group in instance['security_groups']: security_group_names.append(security_group['name']) result['groupSet'] = utils.convert_to_list_dict( security_group_names, 'groupId') def _format_instances(self, context, instance_id=None, use_v6=False, instances_cache=None, **search_opts): # TODO(termie): this method is poorly named as its name does not imply # that it will be making a variety of database calls # rather than simply formatting a bunch of instances that # were handed to it reservations = {} if not instances_cache: instances_cache = {} # NOTE(vish): instance_id is an optional list of ids to filter by if instance_id: instances = [] for ec2_id in instance_id: if ec2_id in instances_cache: instances.append(instances_cache[ec2_id]) else: try: instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id) instance = self.compute_api.get(context, instance_uuid) except exception.NotFound: continue instances.append(instance) else: try: # always filter out deleted instances search_opts['deleted'] = False instances = self.compute_api.get_all(context, search_opts=search_opts, sort_dir='asc') except exception.NotFound: instances = [] for instance in instances: if not context.is_admin: if pipelib.is_vpn_image(instance['image_ref']): continue i = {} instance_uuid = instance['uuid'] ec2_id = ec2utils.id_to_ec2_inst_id(instance_uuid) i['instanceId'] = ec2_id image_uuid = instance['image_ref'] i['imageId'] = ec2utils.glance_id_to_ec2_id(context, image_uuid) self._format_kernel_id(context, instance, i, 'kernelId') self._format_ramdisk_id(context, 
instance, i, 'ramdiskId') i['instanceState'] = _state_description( instance['vm_state'], instance['shutdown_terminate']) fixed_ip = None floating_ip = None ip_info = ec2utils.get_ip_info_for_instance(context, instance) if ip_info['fixed_ips']: fixed_ip = ip_info['fixed_ips'][0] if ip_info['floating_ips']: floating_ip = ip_info['floating_ips'][0] if ip_info['fixed_ip6s']: i['dnsNameV6'] = ip_info['fixed_ip6s'][0] if CONF.ec2_private_dns_show_ip: i['privateDnsName'] = fixed_ip else: i['privateDnsName'] = instance['hostname'] i['privateIpAddress'] = fixed_ip i['publicDnsName'] = floating_ip i['ipAddress'] = floating_ip or fixed_ip i['dnsName'] = i['publicDnsName'] or i['privateDnsName'] i['keyName'] = instance['key_name'] i['tagSet'] = [] for k, v in self.compute_api.get_instance_metadata( context, instance).iteritems(): i['tagSet'].append({'key': k, 'value': v}) if context.is_admin: i['keyName'] = '%s (%s, %s)' % (i['keyName'], instance['project_id'], instance['host']) i['productCodesSet'] = utils.convert_to_list_dict([], 'product_codes') self._format_instance_type(instance, i) i['launchTime'] = instance['created_at'] i['amiLaunchIndex'] = instance['launch_index'] self._format_instance_root_device_name(instance, i) self._format_instance_bdm(context, instance['uuid'], i['rootDeviceName'], i) host = instance['host'] zone = ec2utils.get_availability_zone_by_host(host) i['placement'] = {'availabilityZone': zone} if instance['reservation_id'] not in reservations: r = {} r['reservationId'] = instance['reservation_id'] r['ownerId'] = instance['project_id'] self._format_group_set(instance, r) r['instancesSet'] = [] reservations[instance['reservation_id']] = r reservations[instance['reservation_id']]['instancesSet'].append(i) return list(reservations.values()) def describe_addresses(self, context, public_ip=None, **kwargs): if public_ip: floatings = [] for address in public_ip: floating = self.network_api.get_floating_ip_by_address(context, address) floatings.append(floating) 
else: floatings = self.network_api.get_floating_ips_by_project(context) addresses = [self._format_address(context, f) for f in floatings] return {'addressesSet': addresses} def _format_address(self, context, floating_ip): ec2_id = None if floating_ip['fixed_ip_id']: fixed_id = floating_ip['fixed_ip_id'] fixed = self.network_api.get_fixed_ip(context, fixed_id) if fixed['instance_uuid'] is not None: ec2_id = ec2utils.id_to_ec2_inst_id(fixed['instance_uuid']) address = {'public_ip': floating_ip['address'], 'instance_id': ec2_id} if context.is_admin: details = "%s (%s)" % (address['instance_id'], floating_ip['project_id']) address['instance_id'] = details return address def allocate_address(self, context, **kwargs): LOG.audit(_("Allocate address"), context=context) try: public_ip = self.network_api.allocate_floating_ip(context) except exception.FloatingIpLimitExceeded: raise exception.EC2APIError(_('No more floating IPs available')) return {'publicIp': public_ip} def release_address(self, context, public_ip, **kwargs): LOG.audit(_('Release address %s'), public_ip, context=context) try: self.network_api.release_floating_ip(context, address=public_ip) return {'return': "true"} except exception.FloatingIpNotFound: raise exception.EC2APIError(_('Unable to release IP Address.')) def associate_address(self, context, instance_id, public_ip, **kwargs): LOG.audit(_("Associate address %(public_ip)s to instance " "%(instance_id)s"), {'public_ip': public_ip, 'instance_id': instance_id}, context=context) instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, instance_id) instance = self.compute_api.get(context, instance_uuid) cached_ipinfo = ec2utils.get_ip_info_for_instance(context, instance) fixed_ips = cached_ipinfo['fixed_ips'] + cached_ipinfo['fixed_ip6s'] if not fixed_ips: msg = _('Unable to associate IP Address, no fixed_ips.') raise exception.EC2APIError(msg) # TODO(tr3buchet): this will associate the floating IP with the # first fixed_ip an instance has. 
This should be # changed to support specifying a particular fixed_ip if # multiple exist but this may not apply to ec2.. if len(fixed_ips) > 1: msg = _('multiple fixed_ips exist, using the first: %s') LOG.warning(msg, fixed_ips[0]) try: self.network_api.associate_floating_ip(context, instance, floating_address=public_ip, fixed_address=fixed_ips[0]) return {'return': 'true'} except exception.FloatingIpAssociated: msg = _('Floating ip is already associated.') raise exception.EC2APIError(msg) except exception.NoFloatingIpInterface: msg = _('l3driver call to add floating ip failed.') raise exception.EC2APIError(msg) except Exception: msg = _('Error, unable to associate floating ip.') LOG.exception(msg) raise exception.EC2APIError(msg) def disassociate_address(self, context, public_ip, **kwargs): instance_id = self.network_api.get_instance_id_by_floating_address( context, public_ip) instance = self.compute_api.get(context, instance_id) LOG.audit(_("Disassociate address %s"), public_ip, context=context) try: self.network_api.disassociate_floating_ip(context, instance, address=public_ip) except exception.FloatingIpNotAssociated: msg = _('Floating ip is not associated.') raise exception.EC2APIError(msg) except exception.CannotDisassociateAutoAssignedFloatingIP: msg = _('Cannot disassociate auto assigned floating ip') raise exception.EC2APIError(msg) return {'return': "true"} def run_instances(self, context, **kwargs): min_count = int(kwargs.get('min_count', 1)) if kwargs.get('kernel_id'): kernel = self._get_image(context, kwargs['kernel_id']) kwargs['kernel_id'] = ec2utils.id_to_glance_id(context, kernel['id']) if kwargs.get('ramdisk_id'): ramdisk = self._get_image(context, kwargs['ramdisk_id']) kwargs['ramdisk_id'] = ec2utils.id_to_glance_id(context, ramdisk['id']) for bdm in kwargs.get('block_device_mapping', []): _parse_block_device_mapping(bdm) image = self._get_image(context, kwargs['image_id']) image_uuid = ec2utils.id_to_glance_id(context, image['id']) if image: 
image_state = self._get_image_state(image) else: raise exception.ImageNotFoundEC2(image_id=kwargs['image_id']) if image_state != 'available': raise exception.EC2APIError(_('Image must be available')) (instances, resv_id) = self.compute_api.create(context, instance_type=flavors.get_flavor_by_name( kwargs.get('instance_type', None)), image_href=image_uuid, max_count=int(kwargs.get('max_count', min_count)), min_count=min_count, kernel_id=kwargs.get('kernel_id'), ramdisk_id=kwargs.get('ramdisk_id'), key_name=kwargs.get('key_name'), user_data=kwargs.get('user_data'), security_group=kwargs.get('security_group'), availability_zone=kwargs.get('placement', {}).get( 'availability_zone'), block_device_mapping=kwargs.get('block_device_mapping', {})) return self._format_run_instances(context, resv_id) def _ec2_ids_to_instances(self, context, instance_id, objects=False): """Get all instances first, to prevent partial executions.""" instances = [] extra = ['system_metadata', 'metadata'] for ec2_id in instance_id: validate_ec2_id(ec2_id) instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id) if objects: instance = instance_obj.Instance.get_by_uuid( context, instance_uuid, expected_attrs=extra) else: instance = self.compute_api.get(context, instance_uuid) instances.append(instance) return instances def terminate_instances(self, context, instance_id, **kwargs): """Terminate each instance in instance_id, which is a list of ec2 ids. instance_id is a kwarg so its name cannot be modified. 
""" previous_states = self._ec2_ids_to_instances(context, instance_id) LOG.debug(_("Going to start terminating instances")) for instance in previous_states: self.compute_api.delete(context, instance) return self._format_terminate_instances(context, instance_id, previous_states) def reboot_instances(self, context, instance_id, **kwargs): """instance_id is a list of instance ids.""" instances = self._ec2_ids_to_instances(context, instance_id) LOG.audit(_("Reboot instance %r"), instance_id, context=context) for instance in instances: self.compute_api.reboot(context, instance, 'HARD') return True def stop_instances(self, context, instance_id, **kwargs): """Stop each instances in instance_id. Here instance_id is a list of instance ids """ instances = self._ec2_ids_to_instances(context, instance_id, True) LOG.debug(_("Going to stop instances")) for instance in instances: self.compute_api.stop(context, instance) return True def start_instances(self, context, instance_id, **kwargs): """Start each instances in instance_id. 
Here instance_id is a list of instance ids """ instances = self._ec2_ids_to_instances(context, instance_id, True) LOG.debug(_("Going to start instances")) for instance in instances: self.compute_api.start(context, instance) return True def _get_image(self, context, ec2_id): try: internal_id = ec2utils.ec2_id_to_id(ec2_id) image = self.image_service.show(context, internal_id) except (exception.InvalidEc2Id, exception.ImageNotFound): filters = {'name': ec2_id} images = self.image_service.detail(context, filters=filters) try: return images[0] except IndexError: raise exception.ImageNotFound(image_id=ec2_id) image_type = ec2_id.split('-')[0] if ec2utils.image_type(image.get('container_format')) != image_type: raise exception.ImageNotFound(image_id=ec2_id) return image def _format_image(self, image): """Convert from format defined by GlanceImageService to S3 format.""" i = {} image_type = ec2utils.image_type(image.get('container_format')) ec2_id = ec2utils.image_ec2_id(image.get('id'), image_type) name = image.get('name') i['imageId'] = ec2_id kernel_id = image['properties'].get('kernel_id') if kernel_id: i['kernelId'] = ec2utils.image_ec2_id(kernel_id, 'aki') ramdisk_id = image['properties'].get('ramdisk_id') if ramdisk_id: i['ramdiskId'] = ec2utils.image_ec2_id(ramdisk_id, 'ari') i['imageOwnerId'] = image.get('owner') img_loc = image['properties'].get('image_location') if img_loc: i['imageLocation'] = img_loc else: i['imageLocation'] = "%s (%s)" % (img_loc, name) i['name'] = name if not name and img_loc: # This should only occur for images registered with ec2 api # prior to that api populating the glance name i['name'] = img_loc i['imageState'] = self._get_image_state(image) i['description'] = image.get('description') display_mapping = {'aki': 'kernel', 'ari': 'ramdisk', 'ami': 'machine'} i['imageType'] = display_mapping.get(image_type) i['isPublic'] = not not image.get('is_public') i['architecture'] = image['properties'].get('architecture') properties = 
image['properties'] root_device_name = block_device.properties_root_device_name(properties) root_device_type = 'instance-store' for bdm in properties.get('block_device_mapping', []): if (block_device.strip_dev(bdm.get('device_name')) == block_device.strip_dev(root_device_name) and ('snapshot_id' in bdm or 'volume_id' in bdm) and not bdm.get('no_device')): root_device_type = 'ebs' i['rootDeviceName'] = (root_device_name or block_device.DEFAULT_ROOT_DEV_NAME) i['rootDeviceType'] = root_device_type _format_mappings(properties, i) return i def describe_images(self, context, image_id=None, **kwargs): # NOTE: image_id is a list! if image_id: images = [] for ec2_id in image_id: try: image = self._get_image(context, ec2_id) except exception.NotFound: raise exception.ImageNotFound(image_id=ec2_id) images.append(image) else: images = self.image_service.detail(context) images = [self._format_image(i) for i in images] return {'imagesSet': images} def deregister_image(self, context, image_id, **kwargs): LOG.audit(_("De-registering image %s"), image_id, context=context) image = self._get_image(context, image_id) internal_id = image['id'] self.image_service.delete(context, internal_id) return True def _register_image(self, context, metadata): image = self.image_service.create(context, metadata) image_type = ec2utils.image_type(image.get('container_format')) image_id = ec2utils.image_ec2_id(image['id'], image_type) return image_id def register_image(self, context, image_location=None, **kwargs): if image_location is None and kwargs.get('name'): image_location = kwargs['name'] if image_location is None: raise exception.EC2APIError(_('imageLocation is required')) metadata = {'properties': {'image_location': image_location}} if kwargs.get('name'): metadata['name'] = kwargs['name'] else: metadata['name'] = image_location if 'root_device_name' in kwargs: metadata['properties']['root_device_name'] = kwargs.get( 'root_device_name') mappings = [_parse_block_device_mapping(bdm) for bdm in 
kwargs.get('block_device_mapping', [])] if mappings: metadata['properties']['block_device_mapping'] = mappings image_id = self._register_image(context, metadata) LOG.audit(_('Registered image %(image_location)s with id ' '%(image_id)s'), {'image_location': image_location, 'image_id': image_id}, context=context) return {'imageId': image_id} def describe_image_attribute(self, context, image_id, attribute, **kwargs): def _block_device_mapping_attribute(image, result): _format_mappings(image['properties'], result) def _launch_permission_attribute(image, result): result['launchPermission'] = [] if image['is_public']: result['launchPermission'].append({'group': 'all'}) def _root_device_name_attribute(image, result): _prop_root_dev_name = block_device.properties_root_device_name result['rootDeviceName'] = _prop_root_dev_name(image['properties']) if result['rootDeviceName'] is None: result['rootDeviceName'] = block_device.DEFAULT_ROOT_DEV_NAME def _kernel_attribute(image, result): kernel_id = image['properties'].get('kernel_id') if kernel_id: result['kernel'] = { 'value': ec2utils.image_ec2_id(kernel_id, 'aki') } def _ramdisk_attribute(image, result): ramdisk_id = image['properties'].get('ramdisk_id') if ramdisk_id: result['ramdisk'] = { 'value': ec2utils.image_ec2_id(ramdisk_id, 'ari') } supported_attributes = { 'blockDeviceMapping': _block_device_mapping_attribute, 'launchPermission': _launch_permission_attribute, 'rootDeviceName': _root_device_name_attribute, 'kernel': _kernel_attribute, 'ramdisk': _ramdisk_attribute, } fn = supported_attributes.get(attribute) if fn is None: raise exception.EC2APIError(_('attribute not supported: %s') % attribute) try: image = self._get_image(context, image_id) except exception.NotFound: raise exception.ImageNotFound(image_id=image_id) result = {'imageId': image_id} fn(image, result) return result def modify_image_attribute(self, context, image_id, attribute, operation_type, **kwargs): # TODO(devcamcar): Support users and groups other 
than 'all'. if attribute != 'launchPermission': raise exception.EC2APIError(_('attribute not supported: %s') % attribute) if 'user_group' not in kwargs: raise exception.EC2APIError(_('user or group not specified')) if len(kwargs['user_group']) != 1 and kwargs['user_group'][0] != 'all': raise exception.EC2APIError(_('only group "all" is supported')) if operation_type not in ['add', 'remove']: msg = _('operation_type must be add or remove') raise exception.EC2APIError(msg) LOG.audit(_("Updating image %s publicity"), image_id, context=context) try: image = self._get_image(context, image_id) except exception.NotFound: raise exception.ImageNotFound(image_id=image_id) internal_id = image['id'] del(image['id']) image['is_public'] = (operation_type == 'add') try: return self.image_service.update(context, internal_id, image) except exception.ImageNotAuthorized: msg = _('Not allowed to modify attributes for image %s') raise exception.EC2APIError(msg % image_id) def update_image(self, context, image_id, **kwargs): internal_id = ec2utils.ec2_id_to_id(image_id) result = self.image_service.update(context, internal_id, dict(kwargs)) return result # TODO(yamahata): race condition # At the moment there is no way to prevent others from # manipulating instances/volumes/snapshots. # As other code doesn't take it into consideration, here we don't # care of it for now. 
Ostrich algorithm def create_image(self, context, instance_id, **kwargs): # NOTE(yamahata): name/description are ignored by register_image(), # do so here no_reboot = kwargs.get('no_reboot', False) name = kwargs.get('name') validate_ec2_id(instance_id) ec2_instance_id = instance_id instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_instance_id) instance = self.compute_api.get(context, instance_uuid) bdms = self.compute_api.get_instance_bdms(context, instance) # CreateImage only supported for the analogue of EBS-backed instances if not self.compute_api.is_volume_backed_instance(context, instance, bdms): msg = _("Invalid value '%(ec2_instance_id)s' for instanceId. " "Instance does not have a volume attached at root " "(%(root)s)") % {'root': instance['root_device_name'], 'ec2_instance_id': ec2_instance_id} raise exception.InvalidParameterValue(err=msg) # stop the instance if necessary restart_instance = False if not no_reboot: vm_state = instance['vm_state'] # if the instance is in subtle state, refuse to proceed. if vm_state not in (vm_states.ACTIVE, vm_states.STOPPED): raise exception.InstanceNotRunning(instance_id=ec2_instance_id) if vm_state == vm_states.ACTIVE: restart_instance = True inst_obj = db_to_inst_obj(context, instance) self.compute_api.stop(context, inst_obj) # wait instance for really stopped start_time = time.time() while vm_state != vm_states.STOPPED: time.sleep(1) instance = self.compute_api.get(context, instance_uuid) vm_state = instance['vm_state'] # NOTE(yamahata): timeout and error. 1 hour for now for safety. # Is it too short/long? # Or is there any better way? 
timeout = 1 * 60 * 60 if time.time() > start_time + timeout: raise exception.EC2APIError( _('Couldn\'t stop instance with in %d sec') % timeout) glance_uuid = instance['image_ref'] ec2_image_id = ec2utils.glance_id_to_ec2_id(context, glance_uuid) src_image = self._get_image(context, ec2_image_id) image_meta = dict(src_image) def _unmap_id_property(properties, name): if properties[name]: properties[name] = ec2utils.id_to_glance_id(context, properties[name]) # ensure the ID properties are unmapped back to the glance UUID _unmap_id_property(image_meta['properties'], 'kernel_id') _unmap_id_property(image_meta['properties'], 'ramdisk_id') # meaningful image name name_map = dict(instance=instance['uuid'], now=timeutils.isotime()) name = name or _('image of %(instance)s at %(now)s') % name_map new_image = self.compute_api.snapshot_volume_backed(context, instance, image_meta, name) ec2_id = ec2utils.glance_id_to_ec2_id(context, new_image['id']) if restart_instance: inst_obj = db_to_inst_obj(context, instance) self.compute_api.start(context, inst_obj) return {'imageId': ec2_id} def create_tags(self, context, **kwargs): """Add tags to a resource Returns True on success, error on failure. 
:param context: context under which the method is called """ resources = kwargs.get('resource_id', None) tags = kwargs.get('tag', None) if resources is None or tags is None: raise exception.EC2APIError(_('resource_id and tag are required')) if not isinstance(resources, (tuple, list, set)): raise exception.EC2APIError(_('Expecting a list of resources')) for r in resources: if ec2utils.resource_type_from_id(context, r) != 'instance': raise exception.EC2APIError(_('Only instances implemented')) if not isinstance(tags, (tuple, list, set)): raise exception.EC2APIError(_('Expecting a list of tagSets')) metadata = {} for tag in tags: if not isinstance(tag, dict): raise exception.EC2APIError(_ ('Expecting tagSet to be key/value pairs')) key = tag.get('key', None) val = tag.get('value', None) if key is None or val is None: raise exception.EC2APIError(_ ('Expecting both key and value to be set')) metadata[key] = val for ec2_id in resources: instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id) instance = self.compute_api.get(context, instance_uuid) self.compute_api.update_instance_metadata(context, instance, metadata) return True def delete_tags(self, context, **kwargs): """Delete tags Returns True on success, error on failure. 
:param context: context under which the method is called """ resources = kwargs.get('resource_id', None) tags = kwargs.get('tag', None) if resources is None or tags is None: raise exception.EC2APIError(_('resource_id and tag are required')) if not isinstance(resources, (tuple, list, set)): raise exception.EC2APIError(_('Expecting a list of resources')) for r in resources: if ec2utils.resource_type_from_id(context, r) != 'instance': raise exception.EC2APIError(_('Only instances implemented')) if not isinstance(tags, (tuple, list, set)): raise exception.EC2APIError(_('Expecting a list of tagSets')) for ec2_id in resources: instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id) instance = self.compute_api.get(context, instance_uuid) for tag in tags: if not isinstance(tag, dict): raise exception.EC2APIError(_ ('Expecting tagSet to be key/value pairs')) key = tag.get('key', None) if key is None: raise exception.EC2APIError(_('Expecting key to be set')) self.compute_api.delete_instance_metadata(context, instance, key) return True def describe_tags(self, context, **kwargs): """List tags Returns a dict with a single key 'tagSet' on success, error on failure. 
:param context: context under which the method is called """ filters = kwargs.get('filter', None) search_filts = [] if filters: for filter_block in filters: key_name = filter_block.get('name', None) val = filter_block.get('value', None) if val: if isinstance(val, dict): val = val.values() if not isinstance(val, (tuple, list, set)): val = (val,) if key_name: search_block = {} if key_name == 'resource_id': search_block['resource_id'] = [] for res_id in val: search_block['resource_id'].append( ec2utils.ec2_inst_id_to_uuid(context, res_id)) elif key_name in ['key', 'value']: search_block[key_name] = val elif key_name == 'resource_type': for res_type in val: if res_type != 'instance': raise exception.EC2APIError(_ ('Only instances implemented')) search_block[key_name] = 'instance' if len(search_block.keys()) > 0: search_filts.append(search_block) ts = [] for tag in self.compute_api.get_all_instance_metadata(context, search_filts): ts.append({ 'resource_id': ec2utils.id_to_ec2_inst_id(tag['instance_id']), 'resource_type': 'instance', 'key': tag['key'], 'value': tag['value'] }) return {"tagSet": ts} class EC2SecurityGroupExceptions(object): @staticmethod def raise_invalid_property(msg): raise exception.InvalidParameterValue(err=msg) @staticmethod def raise_group_already_exists(msg): raise exception.EC2APIError(message=msg) @staticmethod def raise_invalid_group(msg): raise exception.InvalidGroup(reason=msg) @staticmethod def raise_invalid_cidr(cidr, decoding_exception=None): if decoding_exception: raise decoding_exception else: raise exception.EC2APIError(_("Invalid CIDR")) @staticmethod def raise_over_quota(msg): raise exception.EC2APIError(message=msg) @staticmethod def raise_not_found(msg): pass class CloudSecurityGroupNovaAPI(EC2SecurityGroupExceptions, compute_api.SecurityGroupAPI): pass class CloudSecurityGroupQuantumAPI(EC2SecurityGroupExceptions, quantum_driver.SecurityGroupAPI): pass def get_cloud_security_group_api(): if cfg.CONF.security_group_api.lower() == 
'nova': return CloudSecurityGroupNovaAPI() elif cfg.CONF.security_group_api.lower() == 'quantum': return CloudSecurityGroupQuantumAPI() else: raise NotImplementedError()
codeparrot/github-code-clean
# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Scheduler Host Filters. """ import httplib from oslo.config import cfg import stubout from nova import context from nova import db from nova.openstack.common import jsonutils from nova.openstack.common import timeutils from nova.pci import pci_stats from nova.scheduler import filters from nova.scheduler.filters import extra_specs_ops from nova.scheduler.filters import trusted_filter from nova import servicegroup from nova import test from nova.tests.scheduler import fakes from nova import utils CONF = cfg.CONF CONF.import_opt('my_ip', 'nova.netconf') class TestFilter(filters.BaseHostFilter): pass class TestBogusFilter(object): """Class that doesn't inherit from BaseHostFilter.""" pass class ExtraSpecsOpsTestCase(test.NoDBTestCase): def _do_extra_specs_ops_test(self, value, req, matches): assertion = self.assertTrue if matches else self.assertFalse assertion(extra_specs_ops.match(value, req)) def test_extra_specs_matches_simple(self): self._do_extra_specs_ops_test( value='1', req='1', matches=True) def test_extra_specs_fails_simple(self): self._do_extra_specs_ops_test( value='', req='1', matches=False) def test_extra_specs_fails_simple2(self): self._do_extra_specs_ops_test( value='3', req='1', matches=False) def test_extra_specs_fails_simple3(self): self._do_extra_specs_ops_test( value='222', req='2', matches=False) def 
test_extra_specs_fails_with_bogus_ops(self): self._do_extra_specs_ops_test( value='4', req='> 2', matches=False) def test_extra_specs_matches_with_op_eq(self): self._do_extra_specs_ops_test( value='123', req='= 123', matches=True) def test_extra_specs_matches_with_op_eq2(self): self._do_extra_specs_ops_test( value='124', req='= 123', matches=True) def test_extra_specs_fails_with_op_eq(self): self._do_extra_specs_ops_test( value='34', req='= 234', matches=False) def test_extra_specs_fails_with_op_eq3(self): self._do_extra_specs_ops_test( value='34', req='=', matches=False) def test_extra_specs_matches_with_op_seq(self): self._do_extra_specs_ops_test( value='123', req='s== 123', matches=True) def test_extra_specs_fails_with_op_seq(self): self._do_extra_specs_ops_test( value='1234', req='s== 123', matches=False) def test_extra_specs_matches_with_op_sneq(self): self._do_extra_specs_ops_test( value='1234', req='s!= 123', matches=True) def test_extra_specs_fails_with_op_sneq(self): self._do_extra_specs_ops_test( value='123', req='s!= 123', matches=False) def test_extra_specs_fails_with_op_sge(self): self._do_extra_specs_ops_test( value='1000', req='s>= 234', matches=False) def test_extra_specs_fails_with_op_sle(self): self._do_extra_specs_ops_test( value='1234', req='s<= 1000', matches=False) def test_extra_specs_fails_with_op_sl(self): self._do_extra_specs_ops_test( value='2', req='s< 12', matches=False) def test_extra_specs_fails_with_op_sg(self): self._do_extra_specs_ops_test( value='12', req='s> 2', matches=False) def test_extra_specs_matches_with_op_in(self): self._do_extra_specs_ops_test( value='12311321', req='<in> 11', matches=True) def test_extra_specs_matches_with_op_in2(self): self._do_extra_specs_ops_test( value='12311321', req='<in> 12311321', matches=True) def test_extra_specs_matches_with_op_in3(self): self._do_extra_specs_ops_test( value='12311321', req='<in> 12311321 <in>', matches=True) def test_extra_specs_fails_with_op_in(self): 
self._do_extra_specs_ops_test( value='12310321', req='<in> 11', matches=False) def test_extra_specs_fails_with_op_in2(self): self._do_extra_specs_ops_test( value='12310321', req='<in> 11 <in>', matches=False) def test_extra_specs_matches_with_op_or(self): self._do_extra_specs_ops_test( value='12', req='<or> 11 <or> 12', matches=True) def test_extra_specs_matches_with_op_or2(self): self._do_extra_specs_ops_test( value='12', req='<or> 11 <or> 12 <or>', matches=True) def test_extra_specs_fails_with_op_or(self): self._do_extra_specs_ops_test( value='13', req='<or> 11 <or> 12', matches=False) def test_extra_specs_fails_with_op_or2(self): self._do_extra_specs_ops_test( value='13', req='<or> 11 <or> 12 <or>', matches=False) def test_extra_specs_matches_with_op_le(self): self._do_extra_specs_ops_test( value='2', req='<= 10', matches=True) def test_extra_specs_fails_with_op_le(self): self._do_extra_specs_ops_test( value='3', req='<= 2', matches=False) def test_extra_specs_matches_with_op_ge(self): self._do_extra_specs_ops_test( value='3', req='>= 1', matches=True) def test_extra_specs_fails_with_op_ge(self): self._do_extra_specs_ops_test( value='2', req='>= 3', matches=False) class HostFiltersTestCase(test.NoDBTestCase): """Test case for host filters.""" # FIXME(sirp): These tests still require DB access until we can separate # the testing of the DB API code from the host-filter code. 
USES_DB = True def fake_oat_request(self, *args, **kwargs): """Stubs out the response from OAT service.""" self.oat_attested = True return httplib.OK, self.oat_data def setUp(self): super(HostFiltersTestCase, self).setUp() self.oat_data = '' self.oat_attested = False self.stubs = stubout.StubOutForTesting() self.stubs.Set(trusted_filter.AttestationService, '_request', self.fake_oat_request) self.context = context.RequestContext('fake', 'fake') self.json_query = jsonutils.dumps( ['and', ['>=', '$free_ram_mb', 1024], ['>=', '$free_disk_mb', 200 * 1024]]) filter_handler = filters.HostFilterHandler() classes = filter_handler.get_matching_classes( ['nova.scheduler.filters.all_filters']) self.class_map = {} for cls in classes: self.class_map[cls.__name__] = cls def test_all_filters(self): # Double check at least a couple of known filters exist self.assertIn('AllHostsFilter', self.class_map) self.assertIn('ComputeFilter', self.class_map) def test_all_host_filter(self): filt_cls = self.class_map['AllHostsFilter']() host = fakes.FakeHostState('host1', 'node1', {}) self.assertTrue(filt_cls.host_passes(host, {})) def _stub_service_is_up(self, ret_value): def fake_service_is_up(self, service): return ret_value self.stubs.Set(servicegroup.API, 'service_is_up', fake_service_is_up) def test_affinity_different_filter_passes(self): filt_cls = self.class_map['DifferentHostFilter']() host = fakes.FakeHostState('host1', 'node1', {}) instance = fakes.FakeInstance(context=self.context, params={'host': 'host2'}) instance_uuid = instance.uuid filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'different_host': [instance_uuid], }} self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_affinity_different_filter_no_list_passes(self): filt_cls = self.class_map['DifferentHostFilter']() host = fakes.FakeHostState('host1', 'node1', {}) instance = fakes.FakeInstance(context=self.context, params={'host': 'host2'}) instance_uuid = instance.uuid 
filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'different_host': instance_uuid}} self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_affinity_different_filter_fails(self): filt_cls = self.class_map['DifferentHostFilter']() host = fakes.FakeHostState('host1', 'node1', {}) instance = fakes.FakeInstance(context=self.context, params={'host': 'host1'}) instance_uuid = instance.uuid filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'different_host': [instance_uuid], }} self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_affinity_different_filter_handles_none(self): filt_cls = self.class_map['DifferentHostFilter']() host = fakes.FakeHostState('host1', 'node1', {}) filter_properties = {'context': self.context.elevated(), 'scheduler_hints': None} self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_affinity_different_filter_handles_deleted_instance(self): filt_cls = self.class_map['DifferentHostFilter']() host = fakes.FakeHostState('host1', 'node1', {}) instance = fakes.FakeInstance(context=self.context, params={'host': 'host1'}) instance_uuid = instance.uuid db.instance_destroy(self.context, instance_uuid) filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'different_host': [instance_uuid], }} self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_affinity_same_filter_no_list_passes(self): filt_cls = self.class_map['SameHostFilter']() host = fakes.FakeHostState('host1', 'node1', {}) instance = fakes.FakeInstance(context=self.context, params={'host': 'host1'}) instance_uuid = instance.uuid filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'same_host': instance_uuid}} self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_affinity_same_filter_passes(self): filt_cls = self.class_map['SameHostFilter']() host = fakes.FakeHostState('host1', 'node1', {}) instance = 
fakes.FakeInstance(context=self.context, params={'host': 'host1'}) instance_uuid = instance.uuid filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'same_host': [instance_uuid], }} self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_affinity_same_filter_fails(self): filt_cls = self.class_map['SameHostFilter']() host = fakes.FakeHostState('host1', 'node1', {}) instance = fakes.FakeInstance(context=self.context, params={'host': 'host2'}) instance_uuid = instance.uuid filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'same_host': [instance_uuid], }} self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_affinity_same_filter_handles_none(self): filt_cls = self.class_map['SameHostFilter']() host = fakes.FakeHostState('host1', 'node1', {}) filter_properties = {'context': self.context.elevated(), 'scheduler_hints': None} self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_affinity_same_filter_handles_deleted_instance(self): filt_cls = self.class_map['SameHostFilter']() host = fakes.FakeHostState('host1', 'node1', {}) instance = fakes.FakeInstance(context=self.context, params={'host': 'host1'}) instance_uuid = instance.uuid db.instance_destroy(self.context, instance_uuid) filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'same_host': [instance_uuid], }} self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_affinity_simple_cidr_filter_passes(self): filt_cls = self.class_map['SimpleCIDRAffinityFilter']() host = fakes.FakeHostState('host1', 'node1', {}) host.host_ip = '10.8.1.1' affinity_ip = "10.8.1.100" filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'cidr': '/24', 'build_near_host_ip': affinity_ip}} self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_affinity_simple_cidr_filter_fails(self): filt_cls = self.class_map['SimpleCIDRAffinityFilter']() host = 
# NOTE(review): continuation of test_affinity_simple_cidr_filter_fails begun
# on the previous flattened line; tokens unchanged, only re-indented/commented.
# A /32 mask makes the affinity range exactly 10.8.1.100, so 10.8.1.1 fails.
        fakes.FakeHostState('host1', 'node1', {})
        host.host_ip = '10.8.1.1'
        affinity_ip = "10.8.1.100"
        filter_properties = {'context': self.context.elevated(),
                             'scheduler_hints': {
                                 'cidr': '/32',
                                 'build_near_host_ip': affinity_ip}}
        self.assertFalse(filt_cls.host_passes(host, filter_properties))

    def test_affinity_simple_cidr_filter_handles_none(self):
        # No hints at all: the filter must pass the host through.
        filt_cls = self.class_map['SimpleCIDRAffinityFilter']()
        host = fakes.FakeHostState('host1', 'node1', {})
        affinity_ip = CONF.my_ip.split('.')[0:3]
        affinity_ip.append('100')
        affinity_ip = str.join('.', affinity_ip)
        filter_properties = {'context': self.context.elevated(),
                             'scheduler_hints': None}
        self.assertTrue(filt_cls.host_passes(host, filter_properties))

    def test_compute_filter_passes(self):
        # Service up and enabled -> ComputeFilter passes.
        self._stub_service_is_up(True)
        filt_cls = self.class_map['ComputeFilter']()
        filter_properties = {'instance_type': {'memory_mb': 1024}}
        service = {'disabled': False}
        host = fakes.FakeHostState('host1', 'node1',
                {'free_ram_mb': 1024, 'service': service})
        self.assertTrue(filt_cls.host_passes(host, filter_properties))

    def test_type_filter(self):
        # TypeAffinityFilter: a host may only run one instance-type id.
        self._stub_service_is_up(True)
        filt_cls = self.class_map['TypeAffinityFilter']()
        filter_properties = {'context': self.context,
                             'instance_type': {'id': 1}}
        filter2_properties = {'context': self.context,
                              'instance_type': {'id': 2}}
        service = {'disabled': False}
        host = fakes.FakeHostState('fake_host', 'fake_node',
                {'service': service})
        # True since empty
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
        fakes.FakeInstance(context=self.context,
                           params={'host': 'fake_host',
                                   'instance_type_id': 1})
        # True since same type
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
        # False since different type
        self.assertFalse(filt_cls.host_passes(host, filter2_properties))
        # False since node not homogeneous
        fakes.FakeInstance(context=self.context,
                           params={'host': 'fake_host',
                                   'instance_type_id': 2})
        self.assertFalse(filt_cls.host_passes(host, filter_properties))

    def test_aggregate_type_filter(self):
        # AggregateTypeAffinityFilter keys on aggregate 'instance_type' metadata.
        self._stub_service_is_up(True)
        filt_cls = self.class_map['AggregateTypeAffinityFilter']()
        filter_properties = {'context': self.context,
                             'instance_type': {'name': 'fake1'}}
        filter2_properties = {'context': self.context,
                              'instance_type': {'name': 'fake2'}}
        service = {'disabled': False}
        host = fakes.FakeHostState('fake_host', 'fake_node',
                {'service': service})
        # True since no aggregates
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
        # True since type matches aggregate, metadata
        self._create_aggregate_with_host(name='fake_aggregate',
                hosts=['fake_host'], metadata={'instance_type': 'fake1'})
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
        # False since type matches aggregate, metadata
        self.assertFalse(filt_cls.host_passes(host, filter2_properties))

    def test_ram_filter_fails_on_memory(self):
        # 1023 MB free < 1024 MB requested at ratio 1.0 -> fails.
        self._stub_service_is_up(True)
        filt_cls = self.class_map['RamFilter']()
        self.flags(ram_allocation_ratio=1.0)
        filter_properties = {'instance_type': {'memory_mb': 1024}}
        service = {'disabled': False}
        host = fakes.FakeHostState('host1', 'node1',
                {'free_ram_mb': 1023, 'total_usable_ram_mb': 1024,
                 'service': service})
        self.assertFalse(filt_cls.host_passes(host, filter_properties))

    def test_ram_filter_passes(self):
        self._stub_service_is_up(True)
        filt_cls = self.class_map['RamFilter']()
        self.flags(ram_allocation_ratio=1.0)
        filter_properties = {'instance_type': {'memory_mb': 1024}}
        service = {'disabled': False}
        host = fakes.FakeHostState('host1', 'node1',
                {'free_ram_mb': 1024, 'total_usable_ram_mb': 1024,
                 'service': service})
        self.assertTrue(filt_cls.host_passes(host, filter_properties))

    def test_ram_filter_oversubscribe(self):
        # Ratio 2.0 lets negative free ram still pass; statement continues on
        # the next flattened line.
        self._stub_service_is_up(True)
        filt_cls = self.class_map['RamFilter']()
        self.flags(ram_allocation_ratio=2.0)
        filter_properties = {'instance_type': {'memory_mb': 1024}}
        service = {'disabled': False}
        host = fakes.FakeHostState('host1', 'node1',
                {'free_ram_mb': -1024, 'total_usable_ram_mb': 2048,
                 'service':
# NOTE(review): continuation of test_ram_filter_oversubscribe begun on the
# previous flattened line; tokens unchanged, only re-indented and commented.
# Oversubscription must also publish the computed limit on the host state.
                 service})
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
        self.assertEqual(2048 * 2.0, host.limits['memory_mb'])

    def test_aggregate_ram_filter_value_error(self):
        # A non-numeric aggregate ratio ('XXX') must fall back to the flag.
        self._stub_service_is_up(True)
        filt_cls = self.class_map['AggregateRamFilter']()
        self.flags(ram_allocation_ratio=1.0)
        filter_properties = {'context': self.context,
                             'instance_type': {'memory_mb': 1024}}
        service = {'disabled': False}
        host = fakes.FakeHostState('host1', 'node1',
                {'free_ram_mb': 1024, 'total_usable_ram_mb': 1024,
                 'service': service})
        self._create_aggregate_with_host(name='fake_aggregate',
                hosts=['host1'],
                metadata={'ram_allocation_ratio': 'XXX'})
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
        self.assertEqual(1024 * 1.0, host.limits['memory_mb'])

    def test_aggregate_ram_filter_default_value(self):
        self._stub_service_is_up(True)
        filt_cls = self.class_map['AggregateRamFilter']()
        self.flags(ram_allocation_ratio=1.0)
        filter_properties = {'context': self.context,
                             'instance_type': {'memory_mb': 1024}}
        service = {'disabled': False}
        host = fakes.FakeHostState('host1', 'node1',
                {'free_ram_mb': 1023, 'total_usable_ram_mb': 1024,
                 'service': service})
        # False: fallback to default flag w/o aggregates
        self.assertFalse(filt_cls.host_passes(host, filter_properties))
        self._create_aggregate_with_host(name='fake_aggregate',
                hosts=['host1'],
                metadata={'ram_allocation_ratio': '2.0'})
        # True: use ratio from aggregates
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
        self.assertEqual(1024 * 2.0, host.limits['memory_mb'])

    def test_aggregate_ram_filter_conflict_values(self):
        # Two aggregates with different ratios: the smaller one wins.
        self._stub_service_is_up(True)
        filt_cls = self.class_map['AggregateRamFilter']()
        self.flags(ram_allocation_ratio=1.0)
        filter_properties = {'context': self.context,
                             'instance_type': {'memory_mb': 1024}}
        service = {'disabled': False}
        host = fakes.FakeHostState('host1', 'node1',
                {'free_ram_mb': 1023, 'total_usable_ram_mb': 1024,
                 'service': service})
        self._create_aggregate_with_host(name='fake_aggregate1',
                hosts=['host1'],
                metadata={'ram_allocation_ratio': '1.5'})
        self._create_aggregate_with_host(name='fake_aggregate2',
                hosts=['host1'],
                metadata={'ram_allocation_ratio': '2.0'})
        # use the minimum ratio from aggregates
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
        self.assertEqual(1024 * 1.5, host.limits['memory_mb'])

    def test_disk_filter_passes(self):
        # Requested 1+1 GB + 512 MB swap fits in 11 GB free at ratio 1.0.
        self._stub_service_is_up(True)
        filt_cls = self.class_map['DiskFilter']()
        self.flags(disk_allocation_ratio=1.0)
        filter_properties = {'instance_type': {'root_gb': 1,
            'ephemeral_gb': 1, 'swap': 512}}
        service = {'disabled': False}
        host = fakes.FakeHostState('host1', 'node1',
                {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13,
                 'service': service})
        self.assertTrue(filt_cls.host_passes(host, filter_properties))

    def test_disk_filter_fails(self):
        # 10+1 GB + 1 GB swap exceeds the 11 GB free -> fails.
        self._stub_service_is_up(True)
        filt_cls = self.class_map['DiskFilter']()
        self.flags(disk_allocation_ratio=1.0)
        filter_properties = {'instance_type': {'root_gb': 10,
            'ephemeral_gb': 1, 'swap': 1024}}
        service = {'disabled': False}
        host = fakes.FakeHostState('host1', 'node1',
                {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13,
                 'service': service})
        self.assertFalse(filt_cls.host_passes(host, filter_properties))

    def test_disk_filter_oversubscribe(self):
        # Ratio 10.0 over 12 GB usable; statement continues on the next
        # flattened line.
        self._stub_service_is_up(True)
        filt_cls = self.class_map['DiskFilter']()
        self.flags(disk_allocation_ratio=10.0)
        filter_properties = {'instance_type': {'root_gb': 100,
            'ephemeral_gb': 18, 'swap': 1024}}
        service = {'disabled': False}
        # 1GB used... so 119GB allowed...
# NOTE(review): continuation of test_disk_filter_oversubscribe begun on the
# previous flattened line; tokens unchanged, only re-indented and commented.
        host = fakes.FakeHostState('host1', 'node1',
                {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12,
                 'service': service})
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
        self.assertEqual(12 * 10.0, host.limits['disk_gb'])

    def test_disk_filter_oversubscribe_fail(self):
        # One more ephemeral GB than the oversubscribed limit allows -> fails.
        self._stub_service_is_up(True)
        filt_cls = self.class_map['DiskFilter']()
        self.flags(disk_allocation_ratio=10.0)
        filter_properties = {'instance_type': {'root_gb': 100,
            'ephemeral_gb': 19, 'swap': 1024}}
        service = {'disabled': False}
        # 1GB used... so 119GB allowed...
        host = fakes.FakeHostState('host1', 'node1',
                {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12,
                 'service': service})
        self.assertFalse(filt_cls.host_passes(host, filter_properties))

    def _test_compute_filter_fails_on_service_disabled(self, reason=None):
        # Shared driver: a disabled service (with or without a recorded
        # reason) must fail ComputeFilter.
        self._stub_service_is_up(True)
        filt_cls = self.class_map['ComputeFilter']()
        filter_properties = {'instance_type': {'memory_mb': 1024}}
        service = {'disabled': True}
        if reason:
            service['disabled_reason'] = reason
        host = fakes.FakeHostState('host1', 'node1',
                {'free_ram_mb': 1024, 'service': service})
        self.assertFalse(filt_cls.host_passes(host, filter_properties))

    def test_compute_filter_fails_on_service_disabled_no_reason(self):
        self._test_compute_filter_fails_on_service_disabled()

    def test_compute_filter_fails_on_service_disabled(self):
        self._test_compute_filter_fails_on_service_disabled(reason='Test')

    def test_compute_filter_fails_on_service_down(self):
        # Service enabled but not reporting up -> fails.
        self._stub_service_is_up(False)
        filt_cls = self.class_map['ComputeFilter']()
        filter_properties = {'instance_type': {'memory_mb': 1024}}
        service = {'disabled': False}
        host = fakes.FakeHostState('host1', 'node1',
                {'free_ram_mb': 1024, 'service': service})
        self.assertFalse(filt_cls.host_passes(host, filter_properties))

    def test_image_properties_filter_passes_same_inst_props_and_version(self):
        self._stub_service_is_up(True)
        filt_cls = self.class_map['ImagePropertiesFilter']()
        # NOTE(review): '_architecture' (leading underscore) looks like a typo
        # for 'architecture' -- as written the filter would not match on this
        # key; confirm intent against the filter implementation.
        img_props = {'properties': {'_architecture': 'x86_64',
                                    'hypervisor_type': 'kvm',
                                    'vm_mode': 'hvm',
                                    'hypervisor_version_requires': '>=6.0,<6.2'
                                    }}
        filter_properties = {'request_spec': {'image': img_props}}
        hypervisor_version = utils.convert_version_to_int('6.0.0')
        capabilities = {'supported_instances':
                        [('x86_64', 'kvm', 'hvm')],
                        'hypervisor_version': hypervisor_version}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertTrue(filt_cls.host_passes(host, filter_properties))

    def test_image_properties_filter_fails_different_inst_props(self):
        # Image asks for arm/qemu; host only supports x86_64/kvm -> fails.
        self._stub_service_is_up(True)
        filt_cls = self.class_map['ImagePropertiesFilter']()
        img_props = {'properties': {'architecture': 'arm',
                                    'hypervisor_type': 'qemu',
                                    'vm_mode': 'hvm'}}
        filter_properties = {'request_spec': {'image': img_props}}
        hypervisor_version = utils.convert_version_to_int('6.0.0')
        capabilities = {'supported_instances':
                        [('x86_64', 'kvm', 'hvm')],
                        'hypervisor_version': hypervisor_version}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertFalse(filt_cls.host_passes(host, filter_properties))

    def test_image_properties_filter_fails_different_hyper_version(self):
        # Host hypervisor is 6.0.0 but the image requires >=6.2 -> fails.
        self._stub_service_is_up(True)
        filt_cls = self.class_map['ImagePropertiesFilter']()
        img_props = {'properties': {'architecture': 'x86_64',
                                    'hypervisor_type': 'kvm',
                                    'vm_mode': 'hvm',
                                    'hypervisor_version_requires': '>=6.2'}}
        filter_properties = {'request_spec': {'image': img_props}}
        hypervisor_version = utils.convert_version_to_int('6.0.0')
        capabilities = {'enabled': True,
                        'supported_instances':
                        [('x86_64', 'kvm', 'hvm')],
                        'hypervisor_version': hypervisor_version}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertFalse(filt_cls.host_passes(host, filter_properties))

    def test_image_properties_filter_passes_partial_inst_props(self):
        # Image specifies only a subset of properties; they all match.
        self._stub_service_is_up(True)
        filt_cls = self.class_map['ImagePropertiesFilter']()
        img_props = {'properties': {'architecture': 'x86_64',
                                    'vm_mode': 'hvm'}}
        filter_properties = {'request_spec': {'image': img_props}}
        # NOTE(review): statement continues on the next flattened line.
        hypervisor_version
# NOTE(review): continuation of
# test_image_properties_filter_passes_partial_inst_props begun on the previous
# flattened line; tokens unchanged, only re-indented and commented.
        = utils.convert_version_to_int('6.0.0')
        capabilities = {'supported_instances':
                        [('x86_64', 'kvm', 'hvm')],
                        'hypervisor_version': hypervisor_version}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertTrue(filt_cls.host_passes(host, filter_properties))

    def test_image_properties_filter_fails_partial_inst_props(self):
        # Partial image props that conflict with host support -> fails.
        self._stub_service_is_up(True)
        filt_cls = self.class_map['ImagePropertiesFilter']()
        img_props = {'properties': {'architecture': 'x86_64',
                                    'vm_mode': 'hvm'}}
        filter_properties = {'request_spec': {'image': img_props}}
        hypervisor_version = utils.convert_version_to_int('6.0.0')
        capabilities = {'supported_instances':
                        [('x86_64', 'xen', 'xen')],
                        'hypervisor_version': hypervisor_version}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertFalse(filt_cls.host_passes(host, filter_properties))

    def test_image_properties_filter_passes_without_inst_props(self):
        # No image in the request spec at all -> filter passes.
        self._stub_service_is_up(True)
        filt_cls = self.class_map['ImagePropertiesFilter']()
        filter_properties = {'request_spec': {}}
        hypervisor_version = utils.convert_version_to_int('6.0.0')
        capabilities = {'supported_instances':
                        [('x86_64', 'kvm', 'hvm')],
                        'hypervisor_version': hypervisor_version}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertTrue(filt_cls.host_passes(host, filter_properties))

    def test_image_properties_filter_fails_without_host_props(self):
        # Host advertises no supported_instances -> cannot satisfy the image.
        self._stub_service_is_up(True)
        filt_cls = self.class_map['ImagePropertiesFilter']()
        img_props = {'properties': {'architecture': 'x86_64',
                                    'hypervisor_type': 'kvm',
                                    'vm_mode': 'hvm'}}
        filter_properties = {'request_spec': {'image': img_props}}
        hypervisor_version = utils.convert_version_to_int('6.0.0')
        capabilities = {'enabled': True,
                        'hypervisor_version': hypervisor_version}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertFalse(filt_cls.host_passes(host, filter_properties))

    def test_image_properties_filter_passes_without_hyper_version(self):
        # Host reports no hypervisor_version; version requirement is skipped.
        self._stub_service_is_up(True)
        filt_cls = self.class_map['ImagePropertiesFilter']()
        img_props = {'properties': {'architecture': 'x86_64',
                                    'hypervisor_type': 'kvm',
                                    'vm_mode': 'hvm',
                                    'hypervisor_version_requires': '>=6.0'}}
        filter_properties = {'request_spec': {'image': img_props}}
        capabilities = {'enabled': True,
                        'supported_instances':
                        [('x86_64', 'kvm', 'hvm')]}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertTrue(filt_cls.host_passes(host, filter_properties))

    def test_image_properties_filter_fails_with_unsupported_hyper_ver(self):
        # Raw version 5000 is below the >=6.0 requirement -> fails.
        self._stub_service_is_up(True)
        filt_cls = self.class_map['ImagePropertiesFilter']()
        img_props = {'properties': {'architecture': 'x86_64',
                                    'hypervisor_type': 'kvm',
                                    'vm_mode': 'hvm',
                                    'hypervisor_version_requires': '>=6.0'}}
        filter_properties = {'request_spec': {'image': img_props}}
        capabilities = {'enabled': True,
                        'supported_instances':
                        [('x86_64', 'kvm', 'hvm')],
                        'hypervisor_version': 5000}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertFalse(filt_cls.host_passes(host, filter_properties))

    def _do_test_compute_filter_extra_specs(self, ecaps, especs, passes):
        """Drive ComputeCapabilitiesFilter with given caps and extra specs.

        In a real OpenStack runtime environment, compute capability values
        may be numbers, so numbers are used here deliberately.
        """
        self._stub_service_is_up(True)
        filt_cls = self.class_map['ComputeCapabilitiesFilter']()
        capabilities = {}
        capabilities.update(ecaps)
        service = {'disabled': False}
        filter_properties = {'instance_type': {'memory_mb': 1024,
                                               'extra_specs': especs}}
        host_state = {'free_ram_mb': 1024, 'service': service}
        host_state.update(capabilities)
        host = fakes.FakeHostState('host1', 'node1', host_state)
        assertion = self.assertTrue if passes else self.assertFalse
        assertion(filt_cls.host_passes(host, filter_properties))

    def test_compute_filter_passes_extra_specs_simple(self):
        self._do_test_compute_filter_extra_specs(
            ecaps={'stats': {'opt1': 1, 'opt2': 2}},
            especs={'opt1': '1', 'opt2': '2',
                    'trust:trusted_host': 'true'},
            passes=True)

    def test_compute_filter_fails_extra_specs_simple(self):
        self._do_test_compute_filter_extra_specs(
            ecaps={'stats': {'opt1': 1, 'opt2': 2}},
            especs={'opt1': '1', 'opt2': '222',
                    'trust:trusted_host': 'true'},
            passes=False)

    def test_compute_filter_pass_extra_specs_simple_with_scope(self):
        self._do_test_compute_filter_extra_specs(
            ecaps={'stats': {'opt1': 1, 'opt2': 2}},
            especs={'capabilities:opt1': '1',
                    'trust:trusted_host': 'true'},
            passes=True)

    def test_compute_filter_pass_extra_specs_same_as_scope(self):
        # Make sure this still works even if the key is the same as the scope
        self._do_test_compute_filter_extra_specs(
            ecaps={'capabilities': 1},
            especs={'capabilities': '1'},
            passes=True)

    def test_compute_filter_extra_specs_simple_with_wrong_scope(self):
        # Specs in a foreign scope are ignored by this filter.
        self._do_test_compute_filter_extra_specs(
            ecaps={'opt1': 1, 'opt2': 2},
            especs={'wrong_scope:opt1': '1',
                    'trust:trusted_host': 'true'},
            passes=True)

    def test_compute_filter_extra_specs_pass_multi_level_with_scope(self):
        # Nested capability paths ('opt1:b:aa') resolve through dicts.
        self._do_test_compute_filter_extra_specs(
            ecaps={'stats': {'opt1': {'a': 1, 'b': {'aa': 2}}, 'opt2': 2}},
            especs={'opt1:a': '1', 'capabilities:opt1:b:aa': '2',
                    'trust:trusted_host': 'true'},
            passes=True)

    def test_aggregate_filter_passes_no_extra_specs(self):
        # NOTE(review): method continues on the next flattened line.
        self._stub_service_is_up(True)
# NOTE(review): continuation of test_aggregate_filter_passes_no_extra_specs
# begun on the previous flattened line; tokens unchanged, only re-indented
# and commented.
        filt_cls = self.class_map['AggregateInstanceExtraSpecsFilter']()
        capabilities = {'opt1': 1, 'opt2': 2}
        filter_properties = {'context': self.context,
                             'instance_type': {'memory_mb': 1024}}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertTrue(filt_cls.host_passes(host, filter_properties))

    def _create_aggregate_with_host(self, name='fake_aggregate',
                                    metadata=None, hosts=['host1']):
        # Create an aggregate in the DB and attach the given hosts to it.
        # NOTE(review): 'hosts' is a mutable default argument and 'metadata'
        # is mutated in place; harmless as used here (only iterated /
        # test-local dicts) but worth cleaning up.
        values = {'name': name}
        if metadata:
            metadata['availability_zone'] = 'fake_avail_zone'
        else:
            metadata = {'availability_zone': 'fake_avail_zone'}
        result = db.aggregate_create(self.context.elevated(), values,
                metadata)
        for host in hosts:
            db.aggregate_host_add(self.context.elevated(), result['id'],
                    host)
        return result

    def _do_test_aggregate_filter_extra_specs(self, emeta, especs, passes):
        # Shared driver for AggregateInstanceExtraSpecsFilter scenarios.
        self._stub_service_is_up(True)
        filt_cls = self.class_map['AggregateInstanceExtraSpecsFilter']()
        self._create_aggregate_with_host(name='fake2', metadata=emeta)
        filter_properties = {'context': self.context,
                             'instance_type': {'memory_mb': 1024,
                                               'extra_specs': especs}}
        host = fakes.FakeHostState('host1', 'node1',
                                   {'free_ram_mb': 1024})
        assertion = self.assertTrue if passes else self.assertFalse
        assertion(filt_cls.host_passes(host, filter_properties))

    def test_aggregate_filter_fails_extra_specs_deleted_host(self):
        # Removing the host from the aggregate that carried 'opt2' leaves
        # the spec unsatisfied -> fails.
        self._stub_service_is_up(True)
        filt_cls = self.class_map['AggregateInstanceExtraSpecsFilter']()
        extra_specs = {'opt1': 's== 1', 'opt2': 's== 2',
                       'trust:trusted_host': 'true'}
        self._create_aggregate_with_host(metadata={'opt1': '1'})
        agg2 = self._create_aggregate_with_host(name='fake2',
                metadata={'opt2': '2'})
        filter_properties = {'context': self.context,
                             'instance_type': {'memory_mb': 1024,
                                               'extra_specs': extra_specs}}
        host = fakes.FakeHostState('host1', 'node1',
                                   {'free_ram_mb': 1024})
        db.aggregate_host_delete(self.context.elevated(), agg2['id'],
                'host1')
        self.assertFalse(filt_cls.host_passes(host, filter_properties))

    def test_aggregate_filter_passes_extra_specs_simple(self):
        especs = {
            # Un-scoped extra spec
            'opt1': '1',
            # Scoped extra spec that applies to this filter
            'aggregate_instance_extra_specs:opt2': '2',
            # Scoped extra spec that does not apply to this filter
            'trust:trusted_host': 'true',
        }
        self._do_test_aggregate_filter_extra_specs(
            emeta={'opt1': '1', 'opt2': '2'},
            especs=especs,
            passes=True)

    def test_aggregate_filter_passes_with_key_same_as_scope(self):
        especs = {
            # Un-scoped extra spec, make sure we don't blow up if it
            # happens to match our scope.
            'aggregate_instance_extra_specs': '1',
        }
        self._do_test_aggregate_filter_extra_specs(
            emeta={'aggregate_instance_extra_specs': '1'},
            especs=especs,
            passes=True)

    def test_aggregate_filter_fails_extra_specs_simple(self):
        self._do_test_aggregate_filter_extra_specs(
            emeta={'opt1': '1', 'opt2': '2'},
            especs={'opt1': '1', 'opt2': '222',
                    'trust:trusted_host': 'true'},
            passes=False)

    def _do_test_isolated_hosts(self, host_in_list, image_in_list,
                                set_flags=True,
                                restrict_isolated_hosts_to_isolated_images=True):
        # Shared driver: returns whether IsolatedHostsFilter passes a host
        # that is (or is not) isolated for an image that is (or is not)
        # isolated, under the given flag configuration.
        if set_flags:
            self.flags(isolated_images=['isolated_image'],
                       isolated_hosts=['isolated_host'],
                       restrict_isolated_hosts_to_isolated_images=
                       restrict_isolated_hosts_to_isolated_images)
        host_name = 'isolated_host' if host_in_list else 'free_host'
        image_ref = 'isolated_image' if image_in_list else 'free_image'
        filter_properties = {
            'request_spec': {
                'instance_properties': {'image_ref': image_ref}
            }
        }
        filt_cls = self.class_map['IsolatedHostsFilter']()
        host = fakes.FakeHostState(host_name, 'node', {})
        return filt_cls.host_passes(host, filter_properties)

    def test_isolated_hosts_fails_isolated_on_non_isolated(self):
        self.assertFalse(self._do_test_isolated_hosts(False, True))

    def test_isolated_hosts_fails_non_isolated_on_isolated(self):
        self.assertFalse(self._do_test_isolated_hosts(True, False))

    def test_isolated_hosts_passes_isolated_on_isolated(self):
        self.assertTrue(self._do_test_isolated_hosts(True, True))

    def test_isolated_hosts_passes_non_isolated_on_non_isolated(self):
        self.assertTrue(self._do_test_isolated_hosts(False, False))

    def test_isolated_hosts_no_config(self):
        # If there are no hosts nor isolated images in the config, it should
        # not filter at all. This is the default config.
        self.assertTrue(self._do_test_isolated_hosts(False, True, False))
        self.assertTrue(self._do_test_isolated_hosts(True, False, False))
        self.assertTrue(self._do_test_isolated_hosts(True, True, False))
        self.assertTrue(self._do_test_isolated_hosts(False, False, False))

    def test_isolated_hosts_no_hosts_config(self):
        self.flags(isolated_images=['isolated_image'])
        # If there are no hosts in the config, it should only filter out
        # images that are listed
        self.assertFalse(self._do_test_isolated_hosts(False, True, False))
        self.assertTrue(self._do_test_isolated_hosts(True, False, False))
        self.assertFalse(self._do_test_isolated_hosts(True, True, False))
        self.assertTrue(self._do_test_isolated_hosts(False, False, False))

    def test_isolated_hosts_no_images_config(self):
        self.flags(isolated_hosts=['isolated_host'])
        # If there are no images in the config, it should only filter out
        # isolated_hosts
        self.assertTrue(self._do_test_isolated_hosts(False, True, False))
        self.assertFalse(self._do_test_isolated_hosts(True, False, False))
        self.assertFalse(self._do_test_isolated_hosts(True, True, False))
        self.assertTrue(self._do_test_isolated_hosts(False, False, False))

    def test_isolated_hosts_less_restrictive(self):
        # If there are isolated hosts and non isolated images
        self.assertTrue(self._do_test_isolated_hosts(True, False, True,
                                                     False))
        # If there are isolated hosts and isolated images
        self.assertTrue(self._do_test_isolated_hosts(True, True, True,
                                                     False))
        # If there are non isolated hosts and non isolated images
        self.assertTrue(self._do_test_isolated_hosts(False, False, True,
                                                     False))
        # If there are non isolated hosts and isolated images
        self.assertFalse(self._do_test_isolated_hosts(False, True, True,
                                                      False))

    def test_json_filter_passes(self):
        # NOTE(review): statement continues on the next flattened line.
        filt_cls =
# NOTE(review): continuation of test_json_filter_passes begun on the previous
# flattened line; tokens unchanged, only re-indented and commented.
# 'self.json_query' is a fixture built outside this view -- presumably
# requiring >= 1024 MB ram and >= 200 GB disk; confirm against setUp.
        self.class_map['JsonFilter']()
        filter_properties = {'instance_type': {'memory_mb': 1024,
                                               'root_gb': 200,
                                               'ephemeral_gb': 0},
                             'scheduler_hints': {'query': self.json_query}}
        host = fakes.FakeHostState('host1', 'node1',
                {'free_ram_mb': 1024,
                 'free_disk_mb': 200 * 1024})
        self.assertTrue(filt_cls.host_passes(host, filter_properties))

    def test_json_filter_passes_with_no_query(self):
        # No query hint at all: JsonFilter passes any host.
        filt_cls = self.class_map['JsonFilter']()
        filter_properties = {'instance_type': {'memory_mb': 1024,
                                               'root_gb': 200,
                                               'ephemeral_gb': 0}}
        host = fakes.FakeHostState('host1', 'node1',
                {'free_ram_mb': 0,
                 'free_disk_mb': 0})
        self.assertTrue(filt_cls.host_passes(host, filter_properties))

    def test_json_filter_fails_on_memory(self):
        filt_cls = self.class_map['JsonFilter']()
        filter_properties = {'instance_type': {'memory_mb': 1024,
                                               'root_gb': 200,
                                               'ephemeral_gb': 0},
                             'scheduler_hints': {'query': self.json_query}}
        host = fakes.FakeHostState('host1', 'node1',
                {'free_ram_mb': 1023,
                 'free_disk_mb': 200 * 1024})
        self.assertFalse(filt_cls.host_passes(host, filter_properties))

    def test_json_filter_fails_on_disk(self):
        filt_cls = self.class_map['JsonFilter']()
        filter_properties = {'instance_type': {'memory_mb': 1024,
                                               'root_gb': 200,
                                               'ephemeral_gb': 0},
                             'scheduler_hints': {'query': self.json_query}}
        host = fakes.FakeHostState('host1', 'node1',
                {'free_ram_mb': 1024,
                 'free_disk_mb': (200 * 1024) - 1})
        self.assertFalse(filt_cls.host_passes(host, filter_properties))

    def test_json_filter_fails_on_service_disabled(self):
        # Query explicitly requires 'not $service.disabled'.
        filt_cls = self.class_map['JsonFilter']()
        json_query = jsonutils.dumps(
                ['and', ['>=', '$free_ram_mb', 1024],
                        ['>=', '$free_disk_mb', 200 * 1024],
                        ['not', '$service.disabled']])
        filter_properties = {'instance_type': {'memory_mb': 1024,
                                               'local_gb': 200},
                             'scheduler_hints': {'query': json_query}}
        host = fakes.FakeHostState('host1', 'node1',
                {'free_ram_mb': 1024,
                 'free_disk_mb': 200 * 1024})
        self.assertFalse(filt_cls.host_passes(host, filter_properties))

    def test_json_filter_happy_day(self):
        # Test json filter more thoroughly.
        filt_cls = self.class_map['JsonFilter']()
        raw = ['and',
               '$capabilities.enabled',
               ['=', '$capabilities.opt1', 'match'],
               ['or',
                ['and',
                 ['<', '$free_ram_mb', 30],
                 ['<', '$free_disk_mb', 300]],
                ['and',
                 ['>', '$free_ram_mb', 30],
                 ['>', '$free_disk_mb', 300]]]]
        filter_properties = {
            'scheduler_hints': {
                'query': jsonutils.dumps(raw),
            },
        }

        # Passes
        capabilities = {'opt1': 'match'}
        service = {'disabled': False}
        host = fakes.FakeHostState('host1', 'node1',
                {'free_ram_mb': 10,
                 'free_disk_mb': 200,
                 'capabilities': capabilities,
                 'service': service})
        self.assertTrue(filt_cls.host_passes(host, filter_properties))

        # Passes
        capabilities = {'opt1': 'match'}
        service = {'disabled': False}
        host = fakes.FakeHostState('host1', 'node1',
                {'free_ram_mb': 40,
                 'free_disk_mb': 400,
                 'capabilities': capabilities,
                 'service': service})
        self.assertTrue(filt_cls.host_passes(host, filter_properties))

        # Fails due to capabilities being disabled
        capabilities = {'enabled': False, 'opt1': 'match'}
        service = {'disabled': False}
        host = fakes.FakeHostState('host1', 'node1',
                {'free_ram_mb': 40,
                 'free_disk_mb': 400,
                 'capabilities': capabilities,
                 'service': service})
        self.assertFalse(filt_cls.host_passes(host, filter_properties))

        # Fails due to being exact memory/disk we don't want
        capabilities = {'enabled': True, 'opt1': 'match'}
        service = {'disabled': False}
        host = fakes.FakeHostState('host1', 'node1',
                {'free_ram_mb': 30,
                 'free_disk_mb': 300,
                 'capabilities': capabilities,
                 'service': service})
        self.assertFalse(filt_cls.host_passes(host, filter_properties))

        # Fails due to memory lower but disk higher
        capabilities = {'enabled': True, 'opt1': 'match'}
        service = {'disabled': False}
        host = fakes.FakeHostState('host1', 'node1',
                {'free_ram_mb': 20,
                 'free_disk_mb': 400,
                 'capabilities': capabilities,
                 'service': service})
        self.assertFalse(filt_cls.host_passes(host, filter_properties))

        # Fails due to capabilities 'opt1' not equal
        capabilities = {'enabled': True, 'opt1': 'no-match'}
        # NOTE(review): sibling cases build service as {'disabled': False};
        # this one uses {'enabled': True}. The query never references the
        # service, so the assertion is unaffected, but it looks inconsistent.
        service = {'enabled': True}
        host = fakes.FakeHostState('host1', 'node1',
                {'free_ram_mb': 20,
                 'free_disk_mb': 400,
                 'capabilities': capabilities,
                 'service': service})
        self.assertFalse(filt_cls.host_passes(host, filter_properties))

    def test_json_filter_basic_operators(self):
        filt_cls = self.class_map['JsonFilter']()
        host = fakes.FakeHostState('host1', 'node1', {})
        # (operator, arguments, expected_result)
        ops_to_test = [
                    ['=', [1, 1], True],
                    ['=', [1, 2], False],
                    ['<', [1, 2], True],
                    ['<', [1, 1], False],
                    ['<', [2, 1], False],
                    ['>', [2, 1], True],
                    ['>', [2, 2], False],
                    ['>', [2, 3], False],
                    ['<=', [1, 2], True],
                    ['<=', [1, 1], True],
                    ['<=', [2, 1], False],
                    ['>=', [2, 1], True],
                    ['>=', [2, 2], True],
                    ['>=', [2, 3], False],
                    ['in', [1, 1], True],
                    ['in', [1, 1, 2, 3], True],
                    ['in', [4, 1, 2, 3], False],
                    ['not', [True], False],
                    ['not', [False], True],
                    ['or', [True, False], True],
                    ['or', [False, False], False],
                    ['and', [True, True], True],
                    ['and', [False, False], False],
                    ['and', [True, False], False],
                    # Nested ((True or False) and (2 > 1)) == Passes
                    ['and', [['or', True, False], ['>', 2, 1]], True]]
        for (op, args, expected) in ops_to_test:
            raw = [op] + args
            filter_properties = {
                'scheduler_hints': {
                    'query': jsonutils.dumps(raw),
                },
            }
            self.assertEqual(expected,
                    filt_cls.host_passes(host, filter_properties))

        # This results in [False, True, False, True] and if any are True
        # then it passes...
# NOTE(review): continuation of test_json_filter_basic_operators begun on the
# previous flattened line; tokens unchanged, only re-indented and commented.
        raw = ['not', True, False, True, False]
        filter_properties = {
            'scheduler_hints': {
                'query': jsonutils.dumps(raw),
            },
        }
        self.assertTrue(filt_cls.host_passes(host, filter_properties))

        # This results in [False, False, False] and if any are True
        # then it passes...which this doesn't
        raw = ['not', True, True, True]
        filter_properties = {
            'scheduler_hints': {
                'query': jsonutils.dumps(raw),
            },
        }
        self.assertFalse(filt_cls.host_passes(host, filter_properties))

    def test_json_filter_unknown_operator_raises(self):
        # '!=' is not an implemented operator -> KeyError.
        filt_cls = self.class_map['JsonFilter']()
        raw = ['!=', 1, 2]
        filter_properties = {
            'scheduler_hints': {
                'query': jsonutils.dumps(raw),
            },
        }
        host = fakes.FakeHostState('host1', 'node1', {})
        self.assertRaises(KeyError,
                filt_cls.host_passes, host, filter_properties)

    def test_json_filter_empty_filters_pass(self):
        # An empty query (list or dict) imposes no constraint.
        filt_cls = self.class_map['JsonFilter']()
        host = fakes.FakeHostState('host1', 'node1', {})

        raw = []
        filter_properties = {
            'scheduler_hints': {
                'query': jsonutils.dumps(raw),
            },
        }
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
        raw = {}
        filter_properties = {
            'scheduler_hints': {
                'query': jsonutils.dumps(raw),
            },
        }
        self.assertTrue(filt_cls.host_passes(host, filter_properties))

    def test_json_filter_invalid_num_arguments_fails(self):
        # Operators with too few arguments must simply not pass.
        filt_cls = self.class_map['JsonFilter']()
        host = fakes.FakeHostState('host1', 'node1', {})

        raw = ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]]
        filter_properties = {
            'scheduler_hints': {
                'query': jsonutils.dumps(raw),
            },
        }
        self.assertFalse(filt_cls.host_passes(host, filter_properties))

        raw = ['>', 1]
        filter_properties = {
            'scheduler_hints': {
                'query': jsonutils.dumps(raw),
            },
        }
        self.assertFalse(filt_cls.host_passes(host, filter_properties))

    def test_json_filter_unknown_variable_ignored(self):
        # Unresolvable '$...' variables are ignored rather than fatal.
        filt_cls = self.class_map['JsonFilter']()
        host = fakes.FakeHostState('host1', 'node1', {})

        raw = ['=', '$........', 1, 1]
        filter_properties = {
            'scheduler_hints': {
                'query': jsonutils.dumps(raw),
            },
        }
        self.assertTrue(filt_cls.host_passes(host, filter_properties))

        raw = ['=', '$foo', 2, 2]
        filter_properties = {
            'scheduler_hints': {
                'query': jsonutils.dumps(raw),
            },
        }
        self.assertTrue(filt_cls.host_passes(host, filter_properties))

    def test_trusted_filter_default_passes(self):
        # No trust extra spec -> TrustedFilter does not constrain.
        self._stub_service_is_up(True)
        filt_cls = self.class_map['TrustedFilter']()
        filter_properties = {'context': self.context.elevated(),
                             'instance_type': {'memory_mb': 1024}}
        host = fakes.FakeHostState('host1', 'node1', {})
        self.assertTrue(filt_cls.host_passes(host, filter_properties))

    def test_trusted_filter_trusted_and_trusted_passes(self):
        # Attestation server reports 'trusted' and spec wants 'trusted'.
        self.oat_data = {"hosts": [{"host_name": "host1",
                                    "trust_lvl": "trusted",
                                    "vtime": timeutils.isotime()}]}
        self._stub_service_is_up(True)
        filt_cls = self.class_map['TrustedFilter']()
        extra_specs = {'trust:trusted_host': 'trusted'}
        filter_properties = {'context': self.context.elevated(),
                             'instance_type': {'memory_mb': 1024,
                                               'extra_specs': extra_specs}}
        host = fakes.FakeHostState('host1', 'node1', {})
        self.assertTrue(filt_cls.host_passes(host, filter_properties))

    def test_trusted_filter_trusted_and_untrusted_fails(self):
        self.oat_data = {"hosts": [{"host_name": "host1",
                                    "trust_lvl": "untrusted",
                                    "vtime": timeutils.isotime()}]}
        self._stub_service_is_up(True)
        filt_cls = self.class_map['TrustedFilter']()
        extra_specs = {'trust:trusted_host': 'trusted'}
        filter_properties = {'context': self.context.elevated(),
                             'instance_type': {'memory_mb': 1024,
                                               'extra_specs': extra_specs}}
        host = fakes.FakeHostState('host1', 'node1', {})
        self.assertFalse(filt_cls.host_passes(host, filter_properties))

    def test_trusted_filter_untrusted_and_trusted_fails(self):
        # Spec wants 'untrusted'; host attests 'trusted'; method continues on
        # the next flattened line.
        self.oat_data = {"hosts": [{"host_name": "host1",
                                    "trust_lvl": "trusted",
                                    "vtime": timeutils.isotime()}]}
        self._stub_service_is_up(True)
        filt_cls = self.class_map['TrustedFilter']()
        extra_specs = {'trust:trusted_host': 'untrusted'}
        filter_properties = {'context': self.context.elevated(),
                             'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}} host = fakes.FakeHostState('host1', 'node1', {}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_trusted_filter_untrusted_and_untrusted_passes(self): self.oat_data = {"hosts": [{"host_name": "host1", "trust_lvl": "untrusted", "vtime": timeutils.isotime()}]} self._stub_service_is_up(True) filt_cls = self.class_map['TrustedFilter']() extra_specs = {'trust:trusted_host': 'untrusted'} filter_properties = {'context': self.context.elevated(), 'instance_type': {'memory_mb': 1024, 'extra_specs': extra_specs}} host = fakes.FakeHostState('host1', 'node1', {}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_trusted_filter_update_cache(self): self.oat_data = {"hosts": [{"host_name": "host1", "trust_lvl": "untrusted", "vtime": timeutils.isotime()}]} filt_cls = self.class_map['TrustedFilter']() extra_specs = {'trust:trusted_host': 'untrusted'} filter_properties = {'context': self.context.elevated(), 'instance_type': {'memory_mb': 1024, 'extra_specs': extra_specs}} host = fakes.FakeHostState('host1', 'node1', {}) filt_cls.host_passes(host, filter_properties) # Fill the caches self.oat_attested = False filt_cls.host_passes(host, filter_properties) self.assertFalse(self.oat_attested) self.oat_attested = False timeutils.set_time_override(timeutils.utcnow()) timeutils.advance_time_seconds( CONF.trusted_computing.attestation_auth_timeout + 80) filt_cls.host_passes(host, filter_properties) self.assertTrue(self.oat_attested) timeutils.clear_time_override() def test_trusted_filter_update_cache_timezone(self): self.oat_data = {"hosts": [{"host_name": "host1", "trust_lvl": "untrusted", "vtime": "2012-09-09T05:10:40-04:00"}]} filt_cls = self.class_map['TrustedFilter']() extra_specs = {'trust:trusted_host': 'untrusted'} filter_properties = {'context': self.context.elevated(), 'instance_type': {'memory_mb': 1024, 'extra_specs': extra_specs}} host = fakes.FakeHostState('host1', 'node1', {}) 
timeutils.set_time_override( timeutils.normalize_time( timeutils.parse_isotime("2012-09-09T09:10:40Z"))) filt_cls.host_passes(host, filter_properties) # Fill the caches self.oat_attested = False filt_cls.host_passes(host, filter_properties) self.assertFalse(self.oat_attested) self.oat_attested = False timeutils.advance_time_seconds( CONF.trusted_computing.attestation_auth_timeout - 10) filt_cls.host_passes(host, filter_properties) self.assertFalse(self.oat_attested) timeutils.clear_time_override() def test_core_filter_passes(self): filt_cls = self.class_map['CoreFilter']() filter_properties = {'instance_type': {'vcpus': 1}} self.flags(cpu_allocation_ratio=2) host = fakes.FakeHostState('host1', 'node1', {'vcpus_total': 4, 'vcpus_used': 7}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_core_filter_fails_safe(self): filt_cls = self.class_map['CoreFilter']() filter_properties = {'instance_type': {'vcpus': 1}} host = fakes.FakeHostState('host1', 'node1', {}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_core_filter_fails(self): filt_cls = self.class_map['CoreFilter']() filter_properties = {'instance_type': {'vcpus': 1}} self.flags(cpu_allocation_ratio=2) host = fakes.FakeHostState('host1', 'node1', {'vcpus_total': 4, 'vcpus_used': 8}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_aggregate_core_filter_value_error(self): filt_cls = self.class_map['AggregateCoreFilter']() filter_properties = {'context': self.context, 'instance_type': {'vcpus': 1}} self.flags(cpu_allocation_ratio=2) host = fakes.FakeHostState('host1', 'node1', {'vcpus_total': 4, 'vcpus_used': 7}) self._create_aggregate_with_host(name='fake_aggregate', hosts=['host1'], metadata={'cpu_allocation_ratio': 'XXX'}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) self.assertEqual(4 * 2, host.limits['vcpu']) def test_aggregate_core_filter_default_value(self): filt_cls = self.class_map['AggregateCoreFilter']() 
filter_properties = {'context': self.context, 'instance_type': {'vcpus': 1}} self.flags(cpu_allocation_ratio=2) host = fakes.FakeHostState('host1', 'node1', {'vcpus_total': 4, 'vcpus_used': 8}) # False: fallback to default flag w/o aggregates self.assertFalse(filt_cls.host_passes(host, filter_properties)) self._create_aggregate_with_host(name='fake_aggregate', hosts=['host1'], metadata={'cpu_allocation_ratio': '3'}) # True: use ratio from aggregates self.assertTrue(filt_cls.host_passes(host, filter_properties)) self.assertEqual(4 * 3, host.limits['vcpu']) def test_aggregate_core_filter_conflict_values(self): filt_cls = self.class_map['AggregateCoreFilter']() filter_properties = {'context': self.context, 'instance_type': {'vcpus': 1}} self.flags(cpu_allocation_ratio=1) host = fakes.FakeHostState('host1', 'node1', {'vcpus_total': 4, 'vcpus_used': 8}) self._create_aggregate_with_host(name='fake_aggregate1', hosts=['host1'], metadata={'cpu_allocation_ratio': '2'}) self._create_aggregate_with_host(name='fake_aggregate2', hosts=['host1'], metadata={'cpu_allocation_ratio': '3'}) # use the minimum ratio from aggregates self.assertFalse(filt_cls.host_passes(host, filter_properties)) self.assertEqual(4 * 2, host.limits['vcpu']) @staticmethod def _make_zone_request(zone, is_admin=False): ctxt = context.RequestContext('fake', 'fake', is_admin=is_admin) return { 'context': ctxt, 'request_spec': { 'instance_properties': { 'availability_zone': zone } } } def test_availability_zone_filter_same(self): filt_cls = self.class_map['AvailabilityZoneFilter']() service = {'availability_zone': 'nova'} request = self._make_zone_request('nova') host = fakes.FakeHostState('host1', 'node1', {'service': service}) self.assertTrue(filt_cls.host_passes(host, request)) def test_availability_zone_filter_different(self): filt_cls = self.class_map['AvailabilityZoneFilter']() service = {'availability_zone': 'nova'} request = self._make_zone_request('bad') host = fakes.FakeHostState('host1', 'node1', 
{'service': service}) self.assertFalse(filt_cls.host_passes(host, request)) def test_retry_filter_disabled(self): # Test case where retry/re-scheduling is disabled. filt_cls = self.class_map['RetryFilter']() host = fakes.FakeHostState('host1', 'node1', {}) filter_properties = {} self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_retry_filter_pass(self): # Node not previously tried. filt_cls = self.class_map['RetryFilter']() host = fakes.FakeHostState('host1', 'nodeX', {}) retry = dict(num_attempts=2, hosts=[['host1', 'node1'], # same host, different node ['host2', 'node2'], # different host and node ]) filter_properties = dict(retry=retry) self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_retry_filter_fail(self): # Node was already tried. filt_cls = self.class_map['RetryFilter']() host = fakes.FakeHostState('host1', 'node1', {}) retry = dict(num_attempts=1, hosts=[['host1', 'node1']]) filter_properties = dict(retry=retry) self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_filter_num_iops_passes(self): self.flags(max_io_ops_per_host=8) filt_cls = self.class_map['IoOpsFilter']() host = fakes.FakeHostState('host1', 'node1', {'num_io_ops': 7}) filter_properties = {} self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_filter_num_iops_fails(self): self.flags(max_io_ops_per_host=8) filt_cls = self.class_map['IoOpsFilter']() host = fakes.FakeHostState('host1', 'node1', {'num_io_ops': 8}) filter_properties = {} self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_filter_num_instances_passes(self): self.flags(max_instances_per_host=5) filt_cls = self.class_map['NumInstancesFilter']() host = fakes.FakeHostState('host1', 'node1', {'num_instances': 4}) filter_properties = {} self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_filter_num_instances_fails(self): self.flags(max_instances_per_host=5) filt_cls = self.class_map['NumInstancesFilter']() host = 
fakes.FakeHostState('host1', 'node1', {'num_instances': 5}) filter_properties = {} self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_group_anti_affinity_filter_passes(self): filt_cls = self.class_map['GroupAntiAffinityFilter']() host = fakes.FakeHostState('host1', 'node1', {}) filter_properties = {'group_hosts': []} self.assertTrue(filt_cls.host_passes(host, filter_properties)) filter_properties = {'group_hosts': ['host2']} self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_group_anti_affinity_filter_fails(self): filt_cls = self.class_map['GroupAntiAffinityFilter']() host = fakes.FakeHostState('host1', 'node1', {}) filter_properties = {'group_hosts': ['host1']} self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_group_affinity_filter_passes(self): filt_cls = self.class_map['GroupAffinityFilter']() host = fakes.FakeHostState('host1', 'node1', {}) filter_properties = {'group_hosts': ['host1']} self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_group_affinity_filter_fails(self): filt_cls = self.class_map['GroupAffinityFilter']() host = fakes.FakeHostState('host1', 'node1', {}) filter_properties = {'group_hosts': ['host2']} self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_aggregate_multi_tenancy_isolation_with_meta_passes(self): self._stub_service_is_up(True) filt_cls = self.class_map['AggregateMultiTenancyIsolation']() aggr_meta = {'filter_tenant_id': 'my_tenantid'} self._create_aggregate_with_host(name='fake1', metadata=aggr_meta, hosts=['host1']) filter_properties = {'context': self.context, 'request_spec': { 'instance_properties': { 'project_id': 'my_tenantid'}}} host = fakes.FakeHostState('host1', 'compute', {}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_aggregate_multi_tenancy_isolation_fails(self): self._stub_service_is_up(True) filt_cls = self.class_map['AggregateMultiTenancyIsolation']() aggr_meta = {'filter_tenant_id': 
'other_tenantid'} self._create_aggregate_with_host(name='fake1', metadata=aggr_meta, hosts=['host1']) filter_properties = {'context': self.context, 'request_spec': { 'instance_properties': { 'project_id': 'my_tenantid'}}} host = fakes.FakeHostState('host1', 'compute', {}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_aggregate_multi_tenancy_isolation_no_meta_passes(self): self._stub_service_is_up(True) filt_cls = self.class_map['AggregateMultiTenancyIsolation']() aggr_meta = {} self._create_aggregate_with_host(name='fake1', metadata=aggr_meta, hosts=['host1']) filter_properties = {'context': self.context, 'request_spec': { 'instance_properties': { 'project_id': 'my_tenantid'}}} host = fakes.FakeHostState('host1', 'compute', {}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) def _fake_pci_support_requests(self, pci_requests): self.pci_requests = pci_requests return self.pci_request_result def test_pci_passthrough_pass(self): filt_cls = self.class_map['PciPassthroughFilter']() requests = [{'count': 1, 'spec': [{'vendor_id': '8086'}]}] filter_properties = {'pci_requests': requests} self.stubs.Set(pci_stats.PciDeviceStats, 'support_requests', self._fake_pci_support_requests) host = fakes.FakeHostState( 'host1', 'node1', attribute_dict={'pci_stats': pci_stats.PciDeviceStats()}) self.pci_request_result = True self.assertTrue(filt_cls.host_passes(host, filter_properties)) self.assertEqual(self.pci_requests, requests) def test_pci_passthrough_fail(self): filt_cls = self.class_map['PciPassthroughFilter']() requests = [{'count': 1, 'spec': [{'vendor_id': '8086'}]}] filter_properties = {'pci_requests': requests} self.stubs.Set(pci_stats.PciDeviceStats, 'support_requests', self._fake_pci_support_requests) host = fakes.FakeHostState( 'host1', 'node1', attribute_dict={'pci_stats': pci_stats.PciDeviceStats()}) self.pci_request_result = False self.assertFalse(filt_cls.host_passes(host, filter_properties)) 
self.assertEqual(self.pci_requests, requests) def test_pci_passthrough_no_pci_request(self): filt_cls = self.class_map['PciPassthroughFilter']() filter_properties = {} host = fakes.FakeHostState('h1', 'n1', {}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_pci_passthrough_comopute_stats(self): filt_cls = self.class_map['PciPassthroughFilter']() requests = [{'count': 1, 'spec': [{'vendor_id': '8086'}]}] filter_properties = {'pci_requests': requests} self.stubs.Set(pci_stats.PciDeviceStats, 'support_requests', self._fake_pci_support_requests) host = fakes.FakeHostState( 'host1', 'node1', attribute_dict={}) self.pci_request_result = True self.assertRaises(AttributeError, filt_cls.host_passes, host, filter_properties)
codeparrot/github-code-clean
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Classes for storing ragged tensors and their values.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.client import session from tensorflow.python.framework import composite_tensor from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_spec from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gen_ragged_conversion_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.ragged import ragged_config from tensorflow.python.ops.ragged import ragged_tensor_value from tensorflow.python.ops.ragged import ragged_util from tensorflow.python.ops.ragged import segment_id_ops from tensorflow.python.util.tf_export import tf_export # pylint: disable=protected-access _eval_using_default_session = ops._eval_using_default_session # pylint: enable=protected-access 
#=============================================================================== # RaggedTensor #=============================================================================== @tf_export("RaggedTensor") class RaggedTensor(composite_tensor.CompositeTensor): """Represents a ragged tensor. A `RaggedTensor` is a tensor with one or more *ragged dimensions*, which are dimensions whose slices may have different lengths. For example, the inner (column) dimension of `rt=[[3, 1, 4, 1], [], [5, 9, 2], [6], []]` is ragged, since the column slices (`rt[0, :]`, ..., `rt[4, :]`) have different lengths. Dimensions whose slices all have the same length are called *uniform dimensions*. The outermost dimension of a `RaggedTensor` is always uniform, since it consists of a single slice (and so there is no possibility for differing slice lengths). The total number of dimensions in a `RaggedTensor` is called its *rank*, and the number of ragged dimensions in a `RaggedTensor` is called its *ragged-rank*. A `RaggedTensor`'s ragged-rank is fixed at graph creation time: it can't depend on the runtime values of `Tensor`s, and can't vary dynamically for different session runs. ### Potentially Ragged Tensors Many ops support both `Tensor`s and `RaggedTensor`s. The term "potentially ragged tensor" may be used to refer to a tensor that might be either a `Tensor` or a `RaggedTensor`. The ragged-rank of a `Tensor` is zero. ### Documenting RaggedTensor Shapes When documenting the shape of a RaggedTensor, ragged dimensions can be indicated by enclosing them in parentheses. For example, the shape of a 3-D `RaggedTensor` that stores the fixed-size word embedding for each word in a sentence, for each sentence in a batch, could be written as `[num_sentences, (num_words), embedding_size]`. The parentheses around `(num_words)` indicate that dimension is ragged, and that the length of each element list in that dimension may vary for each item. 
### Component Tensors Internally, a `RaggedTensor` consists of a concatenated list of values that are partitioned into variable-length rows. In particular, each `RaggedTensor` consists of: * A `values` tensor, which concatenates the variable-length rows into a flattened list. For example, the `values` tensor for `[[3, 1, 4, 1], [], [5, 9, 2], [6], []]` is `[3, 1, 4, 1, 5, 9, 2, 6]`. * A `row_splits` vector, which indicates how those flattened values are divided into rows. In particular, the values for row `rt[i]` are stored in the slice `rt.values[rt.row_splits[i]:rt.row_splits[i+1]]`. Example: ```python >>> print(tf.RaggedTensor.from_row_splits( ... values=[3, 1, 4, 1, 5, 9, 2, 6], ... row_splits=[0, 4, 4, 7, 8, 8])) <tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]> ``` ### Alternative Row-Partitioning Schemes In addition to `row_splits`, ragged tensors provide support for four other row-partitioning schemes: * `row_lengths`: a vector with shape `[nrows]`, which specifies the length of each row. * `value_rowids` and `nrows`: `value_rowids` is a vector with shape `[nvals]`, corresponding one-to-one with `values`, which specifies each value's row index. In particular, the row `rt[row]` consists of the values `rt.values[j]` where `value_rowids[j]==row`. `nrows` is an integer scalar that specifies the number of rows in the `RaggedTensor`. (`nrows` is used to indicate trailing empty rows.) * `row_starts`: a vector with shape `[nrows]`, which specifies the start offset of each row. Equivalent to `row_splits[:-1]`. * `row_limits`: a vector with shape `[nrows]`, which specifies the stop offset of each row. Equivalent to `row_splits[1:]`. Example: The following ragged tensors are equivalent, and all represent the nested list `[[3, 1, 4, 1], [], [5, 9, 2], [6], []]`. 
```python >>> values = [3, 1, 4, 1, 5, 9, 2, 6] >>> rt1 = RaggedTensor.from_row_splits(values, row_splits=[0, 4, 4, 7, 8, 8]) >>> rt2 = RaggedTensor.from_row_lengths(values, row_lengths=[4, 0, 3, 1, 0]) >>> rt3 = RaggedTensor.from_value_rowids( ... values, value_rowids=[0, 0, 0, 0, 2, 2, 2, 3], nrows=5) >>> rt4 = RaggedTensor.from_row_starts(values, row_starts=[0, 4, 4, 7, 8]) >>> rt5 = RaggedTensor.from_row_limits(values, row_limits=[4, 4, 7, 8, 8]) ``` ### Multiple Ragged Dimensions `RaggedTensor`s with multiple ragged dimensions can be defined by using a nested `RaggedTensor` for the `values` tensor. Each nested `RaggedTensor` adds a single ragged dimension. ```python >>> inner_rt = RaggedTensor.from_row_splits( # =rt1 from above ... values=[3, 1, 4, 1, 5, 9, 2, 6], row_splits=[0, 4, 4, 7, 8, 8]) >>> outer_rt = RaggedTensor.from_row_splits( ... values=inner_rt, row_splits=[0, 3, 3, 5]) >>> print outer_rt.to_list() [[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]] >>> print outer_rt.ragged_rank 2 ``` The factory function `RaggedTensor.from_nested_row_splits` may be used to construct a `RaggedTensor` with multiple ragged dimensions directly, by providing a list of `row_splits` tensors: ```python >>> RaggedTensor.from_nested_row_splits( ... flat_values=[3, 1, 4, 1, 5, 9, 2, 6], ... nested_row_splits=([0, 3, 3, 5], [0, 4, 4, 7, 8, 8])).to_list() [[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]] ``` ### Uniform Inner Dimensions `RaggedTensor`s with uniform inner dimensions can be defined by using a multidimensional `Tensor` for `values`. ```python >>> rt = RaggedTensor.from_row_splits(values=tf.ones([5, 3]), .. 
row_splits=[0, 2, 5]) >>> print rt.to_list() [[[1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1]]] >>> print rt.shape (2, ?, 3) ``` ### RaggedTensor Shape Restrictions The shape of a RaggedTensor is currently restricted to have the following form: * A single uniform dimension * Followed by one or more ragged dimensions * Followed by zero or more uniform dimensions. This restriction follows from the fact that each nested `RaggedTensor` replaces the uniform outermost dimension of its `values` with a uniform dimension followed by a ragged dimension. """ #============================================================================= # Constructor (private) #============================================================================= def __init__(self, values, row_splits, cached_row_lengths=None, cached_value_rowids=None, cached_nrows=None, internal=False): """Creates a `RaggedTensor` with a specified partitioning for `values`. This constructor is private -- please use one of the following ops to build `RaggedTensor`s: * `tf.RaggedTensor.from_row_lengths` * `tf.RaggedTensor.from_value_rowids` * `tf.RaggedTensor.from_row_splits` * `tf.RaggedTensor.from_row_starts` * `tf.RaggedTensor.from_row_limits` * `tf.RaggedTensor.from_nested_row_splits` * `tf.RaggedTensor.from_nested_row_lengths` * `tf.RaggedTensor.from_nested_value_rowids` Args: values: A potentially ragged tensor of any dtype and shape `[nvals, ...]`. row_splits: A 1-D integer tensor with shape `[nrows+1]`. cached_row_lengths: A 1-D integer tensor with shape `[nrows]` cached_value_rowids: A 1-D integer tensor with shape `[nvals]`. cached_nrows: A 1-D integer scalar tensor. internal: True if the constructor is being called by one of the factory methods. If false, an exception will be raised. Raises: TypeError: If a row partitioning tensor has an inappropriate dtype. TypeError: If exactly one row partitioning argument was not specified. ValueError: If a row partitioning tensor has an inappropriate shape. 
ValueError: If multiple partitioning arguments are specified. ValueError: If nrows is specified but value_rowids is not None. """ if not internal: raise ValueError("RaggedTensor constructor is private; please use one " "of the factory methods instead (e.g., " "RaggedTensor.from_row_lengths())") is_tensor_spec = isinstance(row_splits, tensor_spec.TensorSpec) if is_tensor_spec: if not (isinstance(values, tensor_spec.TensorSpec) or (isinstance(values, RaggedTensor) and isinstance(values.row_splits, tensor_spec.TensorSpec))): raise TypeError("Expected values to be a TensorSpec, got %r" % values) else: # Validate the arguments. if not isinstance(row_splits, ops.Tensor): raise TypeError("Row-partitioning argument must be a Tensor, got %r" % row_splits) if not isinstance(values, (RaggedTensor, ops.Tensor)): raise TypeError("values must be a Tensor or RaggedTensor, got %r" % values) if row_splits.dtype not in (dtypes.int32, dtypes.int64): raise ValueError("Row-partitioning argument must be int32 or int64") # Validate shapes & dtypes. row_splits.shape.assert_has_rank(1) values.shape.with_rank_at_least(1) if not is_tensor_spec: row_splits.set_shape([None]) if isinstance(values, RaggedTensor): assert row_splits.dtype == values.row_splits.dtype self._values = values self._row_splits = row_splits # Store any cached tensors. These are used to avoid unnecessary # round-trip conversions when a RaggedTensor is constructed from # lengths or rowids, and we later want those lengths/rowids back. 
for tensor in [cached_row_lengths, cached_value_rowids, cached_nrows]: if tensor is not None: if not isinstance(tensor, ops.Tensor): raise TypeError("Cached value must be a Tensor or None.") elif tensor.dtype not in (dtypes.int32, dtypes.int64): raise TypeError("Cached value must be int32 or int64.") self._cached_row_lengths = cached_row_lengths self._cached_value_rowids = cached_value_rowids self._cached_nrows = cached_nrows #============================================================================= # Factory Methods #============================================================================= @classmethod def from_value_rowids(cls, values, value_rowids, nrows=None, name=None, validate=True): """Creates a `RaggedTensor` with rows partitioned by `value_rowids`. The returned `RaggedTensor` corresponds with the python list defined by: ```python result = [[values[i] for i in range(len(values)) if value_rowids[i] == row] for row in range(nrows)] ``` Args: values: A potentially ragged tensor with shape `[nvals, ...]`. value_rowids: A 1-D integer tensor with shape `[nvals]`, which corresponds one-to-one with `values`, and specifies each value's row index. Must be nonnegative, and must be sorted in ascending order. nrows: An integer scalar specifying the number of rows. This should be specified if the `RaggedTensor` may containing empty training rows. Must be greater than `value_rowids[-1]` (or zero if `value_rowids` is empty). Defaults to `value_rowids[-1]` (or zero if `value_rowids` is empty). name: A name prefix for the RaggedTensor (optional). validate: If true, then use assertions to check that the arguments form a valid `RaggedTensor`. Returns: A `RaggedTensor`. `result.rank = values.rank + 1`. `result.ragged_rank = values.ragged_rank + 1`. Raises: ValueError: If `nrows` is incompatible with `value_rowids`. #### Example: ```python >>> print(tf.RaggedTensor.from_value_rowids( ... values=[3, 1, 4, 1, 5, 9, 2, 6], ... value_rowids=[0, 0, 0, 0, 2, 2, 2, 3], ... 
nrows=5)) <tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]> ``` """ if not isinstance(validate, bool): raise TypeError("validate must have type bool") with ops.name_scope(name, "RaggedFromValueRowIds", [values, value_rowids, nrows]): values, value_rowids = cls._convert_values_and_row_partition( values, value_rowids, "value_rowids") if nrows is None: const_rowids = tensor_util.constant_value(value_rowids) if const_rowids is None: nrows = array_ops.concat([value_rowids[-1:], [-1]], axis=0)[0] + 1 const_nrows = None else: const_nrows = const_rowids[-1] + 1 if const_rowids.size > 0 else 0 nrows = ops.convert_to_tensor(const_nrows, value_rowids.dtype, name="nrows") else: nrows = ops.convert_to_tensor(nrows, value_rowids.dtype, "nrows") const_nrows = tensor_util.constant_value(nrows) if const_nrows is not None: if const_nrows < 0: raise ValueError("Expected nrows >= 0; got %d" % const_nrows) const_rowids = tensor_util.constant_value(value_rowids) if const_rowids is not None and const_rowids.size > 0: if not const_nrows >= const_rowids[-1] + 1: raise ValueError( "Expected nrows >= value_rowids[-1] + 1; got nrows=%d, " "value_rowids[-1]=%d" % (const_nrows, const_rowids[-1])) value_rowids.shape.assert_has_rank(1) nrows.shape.assert_has_rank(0) values.shape[:1].assert_is_compatible_with(value_rowids.shape) if validate: msg = "Arguments to from_value_rowids do not form a valid RaggedTensor" nvals1 = _nrows(values) nvals2 = _nrows(value_rowids) checks = [ check_ops.assert_rank(value_rowids, 1, message=msg), check_ops.assert_rank(nrows, 0, message=msg), check_ops.assert_equal(nvals1, nvals2, message=msg), check_ops.assert_non_negative(value_rowids[:1], message=msg), _assert_monotonic_increasing(value_rowids, message=msg), check_ops.assert_less(value_rowids[-1:], nrows, message=msg), ] if not isinstance(values, RaggedTensor): checks.append(check_ops.assert_rank_at_least(values, 1)) value_rowids = control_flow_ops.with_dependencies(checks, value_rowids) # Convert 
value_rowids & nrows to row_splits. # Note: we don't use segment_ids_to_row_splits() here because we want # to save the intermediate value `row_lengths`, so we can cache it. # TODO(b/116708836) Upgrade bincount to accept int64 so we can skip the # cast. value_rowids_int32 = math_ops.cast(value_rowids, dtypes.int32) nrows_int32 = math_ops.cast(nrows, dtypes.int32) row_lengths = math_ops.bincount( value_rowids_int32, minlength=nrows_int32, maxlength=nrows_int32, dtype=value_rowids.dtype) row_splits = array_ops.concat([[0], math_ops.cumsum(row_lengths)], axis=0) if const_nrows is not None: row_lengths.set_shape([const_nrows]) row_splits.set_shape([const_nrows + 1]) return cls( values, row_splits, cached_row_lengths=row_lengths, cached_value_rowids=value_rowids, cached_nrows=nrows, internal=True) @classmethod def from_row_splits(cls, values, row_splits, name=None, validate=True): """Creates a `RaggedTensor` with rows partitioned by `row_splits`. The returned `RaggedTensor` corresponds with the python list defined by: ```python result = [values[row_splits[i]:row_splits[i + 1]] for i in range(len(row_splits) - 1)] ``` Args: values: A potentially ragged tensor with shape `[nvals, ...]`. row_splits: A 1-D integer tensor with shape `[nrows+1]`. Must not be empty, and must be sorted in ascending order. `row_splits[0]` must be zero and `row_splits[-1]` must be `nvals`. name: A name prefix for the RaggedTensor (optional). validate: If true, then use assertions to check that the arguments form a valid `RaggedTensor`. Returns: A `RaggedTensor`. `result.rank = values.rank + 1`. `result.ragged_rank = values.ragged_rank + 1`. Raises: ValueError: If `row_splits` is an empty list. #### Example: ```python >>> print(tf.RaggedTensor.from_row_splits( ... values=[3, 1, 4, 1, 5, 9, 2, 6], ... 
row_splits=[0, 4, 4, 7, 8, 8])) <tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]> ``` """ if not isinstance(validate, bool): raise TypeError("validate must have type bool") if isinstance(row_splits, (list, tuple)) and not row_splits: raise ValueError("row_splits tensor may not be empty.") if isinstance(row_splits, tensor_spec.TensorSpec): return cls(values=values, row_splits=row_splits, internal=True) with ops.name_scope(name, "RaggedFromRowSplits", [values, row_splits]): values, row_splits = cls._convert_values_and_row_partition( values, row_splits, "row_splits") row_splits.shape.assert_has_rank(1) if validate: msg = "Arguments to from_row_splits do not form a valid RaggedTensor" nvals = _nrows(values, row_splits.dtype) checks = [ check_ops.assert_rank(row_splits, 1, message=msg), _assert_zero(row_splits[0], message=msg), _assert_monotonic_increasing(row_splits, message=msg), check_ops.assert_equal(row_splits[-1], nvals, message=msg), ] if not isinstance(values, RaggedTensor): checks.append(check_ops.assert_rank_at_least(values, 1)) row_splits = control_flow_ops.with_dependencies(checks, row_splits) return cls(values=values, row_splits=row_splits, internal=True) @classmethod def from_row_lengths(cls, values, row_lengths, name=None, validate=True): """Creates a `RaggedTensor` with rows partitioned by `row_lengths`. The returned `RaggedTensor` corresponds with the python list defined by: ```python result = [[values.pop(0) for i in range(length)] for length in row_lengths] ``` Args: values: A potentially ragged tensor with shape `[nvals, ...]`. row_lengths: A 1-D integer tensor with shape `[nrows]`. Must be nonnegative. `sum(row_lengths)` must be `nvals`. name: A name prefix for the RaggedTensor (optional). validate: If true, then use assertions to check that the arguments form a valid `RaggedTensor`. Returns: A `RaggedTensor`. `result.rank = values.rank + 1`. `result.ragged_rank = values.ragged_rank + 1`. 
#### Example: ```python >>> print(tf.RaggedTensor.from_row_lengths( ... values=[3, 1, 4, 1, 5, 9, 2, 6], ... row_lengths=[4, 0, 3, 1, 0])) <tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []])> ``` """ if not isinstance(validate, bool): raise TypeError("validate must have type bool") with ops.name_scope(name, "RaggedFromRowLengths", [values, row_lengths]): values, row_lengths = cls._convert_values_and_row_partition( values, row_lengths, "row_lengths") row_lengths.shape.assert_has_rank(1) if validate: msg = "Arguments to from_row_lengths do not form a valid RaggedTensor" nvals1 = math_ops.reduce_sum(row_lengths) nvals2 = _nrows(values, row_lengths.dtype) checks = [ check_ops.assert_rank(row_lengths, 1, message=msg), check_ops.assert_non_negative(row_lengths, message=msg), check_ops.assert_equal(nvals1, nvals2, message=msg) ] if not isinstance(values, RaggedTensor): checks.append(check_ops.assert_rank_at_least(values, 1)) row_lengths = control_flow_ops.with_dependencies(checks, row_lengths) row_limits = math_ops.cumsum(row_lengths) row_splits = array_ops.concat([[0], row_limits], axis=0) return cls( values=values, row_splits=row_splits, cached_row_lengths=row_lengths, internal=True) @classmethod def from_row_starts(cls, values, row_starts, name=None, validate=True): """Creates a `RaggedTensor` with rows partitioned by `row_starts`. Equivalent to: `from_row_splits(values, concat([row_starts, nvals]))`. Args: values: A potentially ragged tensor with shape `[nvals, ...]`. row_starts: A 1-D integer tensor with shape `[nrows]`. Must be nonnegative and sorted in ascending order. If `nrows>0`, then `row_starts[0]` must be zero. name: A name prefix for the RaggedTensor (optional). validate: If true, then use assertions to check that the arguments form a valid `RaggedTensor`. Returns: A `RaggedTensor`. `result.rank = values.rank + 1`. `result.ragged_rank = values.ragged_rank + 1`. #### Example: ```python >>> print(tf.RaggedTensor.from_row_starts( ... 
values=[3, 1, 4, 1, 5, 9, 2, 6], ... row_starts=[0, 4, 4, 7, 8])) <tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]> ``` """ if not isinstance(validate, bool): raise TypeError("validate must have type bool") with ops.name_scope(name, "RaggedFromRowStarts", [values, row_starts]): values, row_starts = cls._convert_values_and_row_partition( values, row_starts, "row_starts") row_starts.shape.assert_has_rank(1) nvals = _nrows(values, row_starts.dtype) if validate: msg = "Arguments to from_row_starts do not form a valid RaggedTensor" checks = [ check_ops.assert_rank(row_starts, 1, message=msg), _assert_zero(row_starts[:1], message=msg), _assert_monotonic_increasing(row_starts, message=msg), check_ops.assert_less_equal(row_starts[-1:], nvals, message=msg), ] if not isinstance(values, RaggedTensor): checks.append(check_ops.assert_rank_at_least(values, 1)) row_starts = control_flow_ops.with_dependencies(checks, row_starts) row_splits = array_ops.concat([row_starts, [nvals]], axis=0) return cls(values=values, row_splits=row_splits, internal=True) @classmethod def from_row_limits(cls, values, row_limits, name=None, validate=True): """Creates a `RaggedTensor` with rows partitioned by `row_limits`. Equivalent to: `from_row_splits(values, concat([0, row_limits]))`. Args: values: A potentially ragged tensor with shape `[nvals, ...]`. row_limits: A 1-D integer tensor with shape `[nrows]`. Must be sorted in ascending order. If `nrows>0`, then `row_limits[-1]` must be `nvals`. name: A name prefix for the RaggedTensor (optional). validate: If true, then use assertions to check that the arguments form a valid `RaggedTensor`. Returns: A `RaggedTensor`. `result.rank = values.rank + 1`. `result.ragged_rank = values.ragged_rank + 1`. #### Example: ```python >>> print(tf.RaggedTensor.from_row_limits( ... values=[3, 1, 4, 1, 5, 9, 2, 6], ... 
row_limits=[4, 4, 7, 8, 8])) <tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]> ``` """ if not isinstance(validate, bool): raise TypeError("validate must have type bool") with ops.name_scope(name, "RaggedFromRowLimits", [values, row_limits]): values, row_limits = cls._convert_values_and_row_partition( values, row_limits, "row_limits") row_limits.shape.assert_has_rank(1) if validate: msg = "Arguments to from_row_limits do not form a valid RaggedTensor" nvals = _nrows(values, row_limits.dtype) checks = [ check_ops.assert_rank(row_limits, 1, message=msg), check_ops.assert_non_negative(row_limits[:1], message=msg), _assert_monotonic_increasing(row_limits, message=msg), check_ops.assert_equal(row_limits[-1:], nvals, message=msg) ] if not isinstance(values, RaggedTensor): checks.append(check_ops.assert_rank_at_least(values, 1)) row_limits = control_flow_ops.with_dependencies(checks, row_limits) zero = array_ops.zeros([1], row_limits.dtype) row_splits = array_ops.concat([zero, row_limits], axis=0) return cls(values=values, row_splits=row_splits, internal=True) @classmethod def from_nested_value_rowids(cls, flat_values, nested_value_rowids, nested_nrows=None, name=None, validate=True): """Creates a `RaggedTensor` from a nested list of `value_rowids` tensors. Equivalent to: ```python result = flat_values for (rowids, nrows) in reversed(zip(nested_value_rowids, nested_nrows)): result = from_value_rowids(result, rowids, nrows) ``` Args: flat_values: A potentially ragged tensor. nested_value_rowids: A list of 1-D integer tensors. The `i`th tensor is used as the `value_rowids` for the `i`th ragged dimension. nested_nrows: A list of integer scalars. The `i`th scalar is used as the `nrows` for the `i`th ragged dimension. name: A name prefix for the RaggedTensor (optional). validate: If true, then use assertions to check that the arguments form a valid `RaggedTensor`. Returns: A `RaggedTensor` (or `flat_values` if `nested_value_rowids` is empty). 
Raises: ValueError: If `len(nested_values_rowids) != len(nested_nrows)`. """ if not isinstance(validate, bool): raise TypeError("validate must have type bool") if isinstance(nested_value_rowids, ops.Tensor): raise TypeError("nested_value_rowids must be a list of Tensors") if nested_nrows is None: nested_nrows = [None] * len(nested_value_rowids) else: if isinstance(nested_nrows, ops.Tensor): raise TypeError("nested_nrows must be a list of Tensors") if len(nested_nrows) != len(nested_value_rowids): raise ValueError("nested_nrows must have the same length as " "nested_value_rowids") with ops.name_scope( name, "RaggedFromNestedValueRowIds", [flat_values] + list(nested_value_rowids) + list(nested_nrows)): result = flat_values for value_rowids, nrows in reversed( list(zip(nested_value_rowids, nested_nrows))): result = cls.from_value_rowids(result, value_rowids, nrows, validate=validate) return result @classmethod def from_nested_row_splits(cls, flat_values, nested_row_splits, name=None, validate=True): """Creates a `RaggedTensor` from a nested list of `row_splits` tensors. Equivalent to: ```python result = flat_values for row_splits in reversed(nested_row_splits): result = from_row_splits(result, row_splits) ``` Args: flat_values: A potentially ragged tensor. nested_row_splits: A list of 1-D integer tensors. The `i`th tensor is used as the `row_splits` for the `i`th ragged dimension. name: A name prefix for the RaggedTensor (optional). validate: If true, then use assertions to check that the arguments form a valid `RaggedTensor`. Returns: A `RaggedTensor` (or `flat_values` if `nested_row_splits` is empty). 
""" if not isinstance(validate, bool): raise TypeError("validate must have type bool") if isinstance(nested_row_splits, ops.Tensor): raise TypeError("nested_row_splits must be a list of Tensors") with ops.name_scope(name, "RaggedFromNestedRowSplits", [flat_values] + list(nested_row_splits)): result = flat_values for splits in reversed(nested_row_splits): result = cls.from_row_splits(result, splits, validate=validate) return result @classmethod def from_nested_row_lengths(cls, flat_values, nested_row_lengths, name=None, validate=True): """Creates a `RaggedTensor` from a nested list of `row_lengths` tensors. Equivalent to: ```python result = flat_values for row_lengths in reversed(nested_row_lengths): result = from_row_lengths(result, row_lengths) ``` Args: flat_values: A potentially ragged tensor. nested_row_lengths: A list of 1-D integer tensors. The `i`th tensor is used as the `row_lengths` for the `i`th ragged dimension. name: A name prefix for the RaggedTensor (optional). validate: If true, then use assertions to check that the arguments form a valid `RaggedTensor`. Returns: A `RaggedTensor` (or `flat_values` if `nested_row_lengths` is empty). """ if not isinstance(validate, bool): raise TypeError("validate must have type bool") if isinstance(nested_row_lengths, ops.Tensor): raise TypeError("nested_row_lengths must be a list of Tensors") with ops.name_scope(name, "RaggedFromNestedRowlengths", [flat_values] + list(nested_row_lengths)): result = flat_values for lengths in reversed(nested_row_lengths): result = cls.from_row_lengths(result, lengths, validate=validate) return result @classmethod def _convert_values_and_row_partition(cls, values, partition, name): """Converts `values` and `partition` to Tensors. If `values` is a `RaggedTensor`, then converts `values` and `partition` to have compatible row-partitioning dtypes. 
In particular, if any of the row partitioning tensors are `int64`, then all of the other row partitioning tensors wil be cast to `int64` (if auto_cast_partition_dtype() is true) or an error will be raised (if auto_cast_partition_dtype() is false). Args: values: The `values` for the `RaggedTensor` being constructed. partition: A row-partitioning tensor for the `RaggedTensor` being constructed. I.e., one of: row_splits, row_lengths, row_starts, row_limits, value_rowids. name: The name of the row-partitioning tensor. Returns: A tuple (values, partition). """ if isinstance(values, RaggedTensor): if isinstance(partition, ops.Tensor): if partition.dtype not in (dtypes.int32, dtypes.int64): raise ValueError("%s must have dtype int32 or int64" % name) if values.row_splits.dtype != partition.dtype: if not ragged_config.auto_cast_partition_dtype(): raise ValueError("dtype mismatch: %s (%s) vs values.row_splits (%s)" % (name, partition.dtype, values.row_splits.dtype)) partition = math_ops.cast(partition, dtypes.int64) values = values.with_row_splits_dtype(dtypes.int64) else: partition = ops.convert_to_tensor(partition, values.row_splits.dtype, name=name) else: values = ops.convert_to_tensor(values, name="values") partition = ops.convert_to_tensor( partition, preferred_dtype=dtypes.int64, name=name) if partition.dtype not in (dtypes.int32, dtypes.int64): raise ValueError("%s must have dtype int32 or int64" % name) return (values, partition) #============================================================================= # Accessors #============================================================================= @property def dtype(self): """The `DType` of values in this tensor.""" return self._values.dtype @property def shape(self): """The statically known shape of this ragged tensor. Returns: A `TensorShape` containing the statically known shape of this ragged tensor. Ragged dimensions have a size of `None`. 
    Examples:
      ```python
      >>> ragged.constant([[0], [1, 2]]).shape
      TensorShape([Dimension(2), Dimension(None)])

      >>> ragged.constant([[[0, 1]], [[1, 2], [3, 4]]], ragged_rank=1).shape
      TensorShape([Dimension(2), Dimension(None), Dimension(2)])
      ```
    """
    # Outermost dimension size: row_splits has nrows+1 boundary entries.
    nrows = tensor_shape.dimension_at_index(self._row_splits.shape, 0) - 1

    # Inner dimensions come from `values`, minus its own outer (flattened)
    # dimension; the ragged dimension itself is statically unknown (None).
    values_shape = self._values.shape
    value_shape = values_shape[1:]
    return tensor_shape.TensorShape([nrows, None]).concatenate(value_shape)

  @property
  def ragged_rank(self):
    """The number of ragged dimensions in this ragged tensor.

    Returns:
      A Python `int` indicating the number of ragged dimensions in this ragged
      tensor.  The outermost dimension is not considered ragged.
    """
    # Each nested RaggedTensor in `values` contributes one ragged dimension.
    values_is_ragged = isinstance(self._values, RaggedTensor)
    return self._values.ragged_rank + 1 if values_is_ragged else 1

  @property
  def values(self):
    """The concatenated rows for this ragged tensor.

    `rt.values` is a potentially ragged tensor formed by flattening the two
    outermost dimensions of `rt` into a single dimension.

    `rt.values.shape = [nvals] + rt.shape[2:]` (where `nvals` is the
    number of items in the outer two dimensions of `rt`).

    `rt.ragged_rank = self.ragged_rank - 1`

    Returns:
      A potentially ragged tensor.

    #### Example:
      ```python
      >>> rt = ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
      >>> print rt.values
      tf.Tensor([3, 1, 4, 1, 5, 9, 2, 6])
      ```
    """
    return self._values

  @property
  def row_splits(self):
    """The row-split indices for this ragged tensor's `values`.

    `rt.row_splits` specifies where the values for each row begin and end in
    `rt.values`.  In particular, the values for row `rt[i]` are stored in
    the slice `rt.values[rt.row_splits[i]:rt.row_splits[i+1]]`.

    Returns:
      A 1-D integer `Tensor` with shape `[self.nrows+1]`.
      The returned tensor is non-empty, and is sorted in ascending order.
      `self.row_splits[0]` is zero, and `self.row_splits[-1]` is equal to
      `self.values.shape[0]`.
#### Example: ```python >>> rt = ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []]) >>> print rt.row_splits # indices of row splits in rt.values tf.Tensor([0, 4, 4, 7, 8, 8]) ``` """ return self._row_splits @property def flat_values(self): """The innermost `values` tensor for this ragged tensor. Concretely, if `rt.values` is a `Tensor`, then `rt.flat_values` is `rt.values`; otherwise, `rt.flat_values` is `rt.values.flat_values`. Conceptually, `flat_values` is the tensor formed by flattening the outermost dimension and all of the ragged dimensions into a single dimension. `rt.flat_values.shape = [nvals] + rt.shape[rt.ragged_rank + 1:]` (where `nvals` is the number of items in the flattened dimensions). Returns: A `Tensor`. #### Example: ```python >>> rt = ragged.constant([[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]) >>> print rt.flat_values() tf.Tensor([3, 1, 4, 1, 5, 9, 2, 6]) ``` """ rt_values = self.values while isinstance(rt_values, RaggedTensor): rt_values = rt_values.values return rt_values @property def nested_row_splits(self): """A tuple containing the row_splits for all ragged dimensions. `rt.nested_row_splits` is a tuple containing the `row_splits` tensors for all ragged dimensions in `rt`, ordered from outermost to innermost. In particular, `rt.nested_row_splits = (rt.row_splits,) + value_splits` where: * `value_splits = ()` if `rt.values` is a `Tensor`. * `value_splits = rt.values.nested_row_splits` otherwise. Returns: A `tuple` of 1-D integer `Tensor`s. #### Example: ```python >>> rt = ragged.constant([[[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]]) >>> for i, splits in enumerate(rt.nested_row_splits()): ... 
print('Splits for dimension %d: %s' % (i+1, splits)) Splits for dimension 1: [0, 1] Splits for dimension 2: [0, 3, 3, 5] Splits for dimension 3: [0, 4, 4, 7, 8, 8] ``` """ rt_nested_splits = [self.row_splits] rt_values = self.values while isinstance(rt_values, RaggedTensor): rt_nested_splits.append(rt_values.row_splits) rt_values = rt_values.values return tuple(rt_nested_splits) def value_rowids(self, name=None): """Returns the row indices for the `values` in this ragged tensor. `rt.value_rowids()` corresponds one-to-one with the outermost dimension of `rt.values`, and specifies the row containing each value. In particular, the row `rt[row]` consists of the values `rt.values[j]` where `rt.value_rowids()[j] == row`. Args: name: A name prefix for the returned tensor (optional). Returns: A 1-D integer `Tensor` with shape `self.values.shape[:1]`. The returned tensor is nonnegative, and is sorted in ascending order. #### Example: ```python >>> rt = ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []]) >>> rt.values tf.Tensor([3, 1, 4, 1, 5, 9, 2, 6]) >>> rt.value_rowids() tf.Tensor([0, 0, 0, 0, 2, 2, 2, 3]) # corresponds 1:1 with rt.values ``` """ if self._cached_value_rowids is not None: return self._cached_value_rowids with ops.name_scope(name, "RaggedValueRowIds", [self]): return segment_id_ops.row_splits_to_segment_ids(self.row_splits) def nrows(self, out_type=None, name=None): """Returns the number of rows in this ragged tensor. I.e., the size of the outermost dimension of the tensor. Args: out_type: `dtype` for the returned tensor. Defaults to `self.row_splits.dtype`. name: A name prefix for the returned tensor (optional). Returns: A scalar `Tensor` with dtype `out_type`. #### Example: ```python >>> rt = ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []]) >>> rt.nrows() # rt has 5 rows. 
      5
      ```
    """
    if out_type is None:
      out_type = self._row_splits.dtype
    else:
      out_type = dtypes.as_dtype(out_type)
    if self._cached_nrows is not None:
      # Fast path: a row count supplied at construction time was cached.
      return math_ops.cast(self._cached_nrows, out_type)
    with ops.name_scope(name, "RaggedNRows", [self]):
      # nrows = len(row_splits) - 1: row_splits carries one extra boundary.
      return array_ops.shape(self.row_splits, out_type=out_type)[0] - 1

  def row_starts(self, name=None):
    """Returns the start indices for rows in this ragged tensor.

    These indices specify where the values for each row begin in
    `self.values`.  `rt.row_starts()` is equal to `rt.row_splits[:-1]`.

    Args:
      name: A name prefix for the returned tensor (optional).

    Returns:
      A 1-D integer Tensor with shape `[nrows]`.
      The returned tensor is nonnegative, and is sorted in ascending order.

    #### Example:
      ```python
      >>> rt = ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
      >>> rt.values
      tf.Tensor([3, 1, 4, 1, 5, 9, 2, 6])
      >>> rt.row_starts()  # indices of row starts in rt.values
      tf.Tensor([0, 4, 4, 7, 8])
      ```
    """
    with ops.name_scope(name, "RaggedRowStarts", [self]):
      return self.row_splits[:-1]

  def row_limits(self, name=None):
    """Returns the limit indices for rows in this ragged tensor.

    These indices specify where the values for each row end in
    `self.values`.  `rt.row_limits()` is equal to `rt.row_splits[1:]`.

    Args:
      name: A name prefix for the returned tensor (optional).

    Returns:
      A 1-D integer Tensor with shape `[nrows]`.
      The returned tensor is nonnegative, and is sorted in ascending order.

    #### Example:
      ```python
      >>> rt = ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
      >>> rt.values
      tf.Tensor([3, 1, 4, 1, 5, 9, 2, 6])
      >>> rt.row_limits()  # indices of row limits in rt.values
      tf.Tensor([4, 4, 7, 8, 8])
      ```
    """
    with ops.name_scope(name, "RaggedRowLimits", [self]):
      return self.row_splits[1:]

  def row_lengths(self, axis=1, name=None):
    """Returns the lengths of the rows in this ragged tensor.

    `rt.row_lengths()[i]` indicates the number of values in the
    `i`th row of `rt`.
Args: axis: An integer constant indicating the axis whose row lengths should be returned. name: A name prefix for the returned tensor (optional). Returns: A potentially ragged integer Tensor with shape `self.shape[:axis]`. Raises: ValueError: If `axis` is out of bounds. #### Example: ```python >>> rt = ragged.constant([[[3, 1, 4], [1]], [], [[5, 9], [2]], [[6]], []]) >>> rt.row_lengths(rt) # lengths of rows in rt tf.Tensor([2, 0, 2, 1, 0]) >>> rt.row_lengths(axis=2) # lengths of axis=2 rows. <tf.RaggedTensor [[3, 1], [], [2, 1], [1], []]> ``` """ if self._cached_row_lengths is not None: return self._cached_row_lengths with ops.name_scope(name, "RaggedRowLengths", [self]): axis = ragged_util.get_positive_axis(axis, self.shape.ndims) if axis == 0: return self.nrows() elif axis == 1: splits = self.row_splits return splits[1:] - splits[:-1] elif isinstance(self.values, RaggedTensor): return self.with_values(self.values.row_lengths(axis - 1)) else: shape = array_ops.shape(self.values, out_type=self._row_splits.dtype) return self.with_values( array_ops.ones(shape[:axis - 1], self._row_splits.dtype) * shape[axis - 1]) def nested_row_lengths(self, name=None): """Returns a tuple containing the row_lengths for all ragged dimensions. `rtnested_row_lengths()` is a tuple containing the `row_lengths` tensors for all ragged dimensions in `rt`, ordered from outermost to innermost. Args: name: A name prefix for the returned tensors (optional). Returns: A `tuple` of 1-D integer `Tensors`. The length of the tuple is equal to `self.ragged_rank`. """ with ops.name_scope(name, "RaggedNestedRowLengths", [self]): rt_nested_row_lengths = [] rt = self while isinstance(rt, RaggedTensor): rt_nested_row_lengths.append(rt.row_lengths()) rt = rt.values return tuple(rt_nested_row_lengths) def bounding_shape(self, axis=None, name=None, out_type=None): """Returns the tight bounding box shape for this `RaggedTensor`. 
Args: axis: An integer scalar or vector indicating which axes to return the bounding box for. If not specified, then the full bounding box is returned. name: A name prefix for the returned tensor (optional). out_type: `dtype` for the returned tensor. Defaults to `self.row_splits.dtype`. Returns: An integer `Tensor` (`dtype=self.row_splits.dtype`). If `axis` is not specified, then `output` is a vector with `output.shape=[self.shape.ndims]`. If `axis` is a scalar, then the `output` is a scalar. If `axis` is a vector, then `output` is a vector, where `output[i]` is the bounding size for dimension `axis[i]`. #### Example: ```python >>> rt = ragged.constant([[1, 2, 3, 4], [5], [], [6, 7, 8, 9], [10]]) >>> rt.bounding_shape() [5, 4] ``` """ if out_type is None: out_type = self._row_splits.dtype else: out_type = dtypes.as_dtype(out_type) with ops.name_scope(name, "RaggedBoundingBox", [self, axis]): nested_splits = self.nested_row_splits rt_flat_values = self.flat_values # Optimized special cases for when axis=0 or axis=1: if isinstance(axis, int): if axis == 0: return array_ops.shape(nested_splits[0], out_type=out_type)[0] - 1 elif axis == 1: return math_ops.maximum(math_ops.reduce_max(self.row_lengths()), 0) splits_shape = array_ops.shape(self.row_splits, out_type=out_type) flat_values_shape = array_ops.shape(rt_flat_values, out_type=out_type) ragged_dimensions = array_ops.stack([splits_shape[0] - 1] + [ math_ops.maximum(math_ops.reduce_max(splits[1:] - splits[:-1]), 0) for splits in nested_splits ]) inner_dimensions = flat_values_shape[1:] bbox = array_ops.concat([ragged_dimensions, inner_dimensions], axis=0) return bbox if axis is None else array_ops.gather(bbox, axis) #============================================================================= # Transformation #============================================================================= def with_values(self, new_values): """Returns a copy of `self` with `values` replaced by `new_value`. 
Preserves cached row-partitioning tensors such as `self.cached_nrows` and `self.cached_value_rowids` if they have values. Args: new_values: Potentially ragged tensor to use as the `values` for the returned `RaggedTensor`. Must have `rank > 0`, and must have the same number of rows as `self.values`. Returns: A `RaggedTensor`. `result.rank = 1 + new_values.rank`. `result.ragged_rank = 1 + new_values.ragged_rank` """ new_values.shape.with_rank_at_least(1) self.values.shape[:1].assert_is_compatible_with(new_values.shape[:1]) if (isinstance(new_values, RaggedTensor) and self._row_splits.dtype != new_values.row_splits.dtype): if not ragged_config.auto_cast_partition_dtype(): raise ValueError("self and new_values have mismatched row_splits " "dtypes; use RaggedTensor.with_row_splits_dtype() to " "convert them to compatible dtypes.") new_values = new_values.with_row_splits_dtype(dtypes.int64) return self.with_row_splits_dtype(dtypes.int64).with_values(new_values) return RaggedTensor( new_values, self._row_splits, self._cached_row_lengths, self._cached_value_rowids, self._cached_nrows, internal=True) def with_flat_values(self, new_values): """Returns a copy of `self` with `flat_values` replaced by `new_value`. Preserves cached row-partitioning tensors such as `self.cached_nrows` and `self.cached_value_rowids` if they have values. Args: new_values: Potentially ragged tensor that should replace `self.flat_values`. Must have `rank > 0`, and must have the same number of rows as `self.flat_values`. Returns: A `RaggedTensor`. `result.rank = self.ragged_rank + new_values.rank`. `result.ragged_rank = self.ragged_rank + new_values.ragged_rank`. """ if isinstance(self._values, ops.Tensor): return self.with_values(new_values) else: return self.with_values(self.values.with_flat_values(new_values)) def with_row_splits_dtype(self, dtype): """Returns a copy of this RaggedTensor with the given `row_splits` dtype. 
For RaggedTensors with multiple ragged dimensions, the `row_splits` for all nested `RaggedTensor` objects are cast to the given dtype. Args: dtype: The dtype for `row_splits`. One of `tf.int32` or `tf.int64`. Returns: A copy of this RaggedTensor, with the `row_splits` cast to the given type. """ dtype = dtypes.as_dtype(dtype) if dtype not in (dtypes.int32, dtypes.int64): raise ValueError("dtype must be int32 or int64") if self._row_splits.dtype == dtype: return self row_splits = math_ops.cast(self._row_splits, dtype) values = self._values if isinstance(values, RaggedTensor): values = values.with_row_splits_dtype(dtype) cached_row_lengths = self._cached_row_lengths if cached_row_lengths is not None: cached_row_lengths = math_ops.cast(cached_row_lengths, dtype) cached_value_rowids = self._cached_value_rowids if cached_value_rowids is not None: cached_value_rowids = math_ops.cast(cached_value_rowids, dtype) cached_nrows = self._cached_nrows if cached_value_rowids is not None: cached_value_rowids = math_ops.cast(cached_value_rowids, dtype) return RaggedTensor(values, row_splits, cached_row_lengths, cached_value_rowids, cached_nrows, internal=True) #============================================================================= # Tensor Type Conversions #============================================================================= @classmethod def from_tensor(cls, tensor, lengths=None, padding=None, ragged_rank=1, name=None, row_splits_dtype=dtypes.int64): """Converts a `tf.Tensor` into a `RaggedTensor`. The set of absent/default values may be specified using a vector of lengths or a padding value (but not both). If `lengths` is specified, then the output tensor will satisfy `output[row] = tensor[row][:lengths[row]]`. If 'lengths' is a list of lists or tuple of lists, those lists will be used as nested row lengths. If `padding` is specified, then any row *suffix* consisting entirely of `padding` will be excluded from the returned `RaggedTensor`. 
If neither `lengths` nor `padding` is specified, then the returned `RaggedTensor` will have no absent/default values. Examples: ```python >>> dt = tf.constant([[5, 7, 0], [0, 3, 0], [6, 0, 0]]) >>> tf.RaggedTensor.from_tensor(dt) <tf.RaggedTensor [[5, 7, 0], [0, 3, 0], [6, 0, 0]]> >>> tf.RaggedTensor.from_tensor(dt, lengths=[1, 0, 3]) <tf.RaggedTensor [[5], [], [6, 0, 0]]> >>> tf.RaggedTensor.from_tensor(dt, padding=0) <tf.RaggedTensor [[5, 7], [0, 3], [6]]> >>> dt = tf.constant([[[5, 0], [7, 0], [0, 0]], [[0, 0], [3, 0], [0, 0]], [[6, 0], [0, 0], [0, 0]]]) >>> tf.RaggedTensor.from_tensor(dt, lengths=([2, 0, 3], [1, 1, 2, 0, 1])) <tf.RaggedTensor [[[5], [7]], [], [[6, 0], [], [0]]]> ``` Args: tensor: The `Tensor` to convert. Must have rank `ragged_rank + 1` or higher. lengths: An optional set of row lengths, specified using a 1-D integer `Tensor` whose length is equal to `tensor.shape[0]` (the number of rows in `tensor`). If specified, then `output[row]` will contain `tensor[row][:lengths[row]]`. Negative lengths are treated as zero. You may optionally pass a list or tuple of lengths to this argument, which will be used as nested row lengths to construct a ragged tensor with multiple ragged dimensions. padding: An optional padding value. If specified, then any row suffix consisting entirely of `padding` will be excluded from the returned RaggedTensor. `padding` is a `Tensor` with the same dtype as `tensor` and with `shape=tensor.shape[ragged_rank + 1:]`. ragged_rank: Integer specifying the ragged rank for the returned `RaggedTensor`. Must be greater than zero. name: A name prefix for the returned tensors (optional). row_splits_dtype: `dtype` for the returned `RaggedTensor`'s `row_splits` tensor. One of `tf.int32` or `tf.int64`. Returns: A `RaggedTensor` with the specified `ragged_rank`. The shape of the returned ragged tensor is compatible with the shape of `tensor`. Raises: ValueError: If both `lengths` and `padding` are specified. 
""" row_splits_dtype = dtypes.as_dtype(row_splits_dtype) if lengths is not None and padding is not None: raise ValueError("Specify lengths or padding, but not both") if not isinstance(ragged_rank, int): raise TypeError("ragged_rank expected int, got %r" % ragged_rank) if ragged_rank <= 0: raise ValueError( "ragged_rank must be greater than 0; got %s" % ragged_rank) with ops.name_scope(name, "RaggedFromTensor", [tensor, lengths, padding]): tensor = ops.convert_to_tensor(tensor, name="tensor") tensor.shape.with_rank_at_least(ragged_rank + 1) input_shape = array_ops.shape(tensor, out_type=row_splits_dtype) ncols = input_shape[1] # Handle ragged_rank>1 via recursion: # If the output should have multiple ragged dimensions, then first # flatten the tensor to eliminate all but the last ragged dimension, # and recursively convert that flattened tensor. Then add on the splits # for the dimensions that we flattened out. if ragged_rank > 1: # Flatten `tensor` to eliminate all but the last ragged dimension. new_shape = array_ops.concat([ constant_op.constant([-1], row_splits_dtype), input_shape[ragged_rank:] ], axis=0) flattened = array_ops.reshape(tensor, new_shape) # Recursively convert the flattened tensor. values = cls.from_tensor(flattened, lengths, padding, row_splits_dtype=row_splits_dtype) # The total number of elements in each dimension. E.g., if # input_shape=[3, 4, 5, 6], then dim[2] has 3*4*5 elements in total. dim_size = math_ops.cumprod(input_shape) # Construct splits tensors for the dimensions that were flattened. new_splits = [ math_ops.range(0, dim_size[dim - 1] + 1) * input_shape[dim] for dim in range(1, ragged_rank) ] return cls.from_nested_row_splits(values, new_splits, validate=False) # If padding was specified, then use it to find row lengths. if padding is not None: padding = ops.convert_to_tensor( padding, name="padding", dtype=tensor.dtype) padding.shape.assert_is_compatible_with(tensor.shape[2:]) # Find places where the padding is equal to the tensor. 
(This will # broadcast `padding` across the outermost 2 dimensions of `tensor`, # so `has_default_value.shape = tensor.shape`.) has_default_value = math_ops.equal(padding, tensor) # If the padding isn't a scalar, then require that all values in the # padding match each item in the tensor. After this block of code, # `has_default.shape = tensor.shape[:2]`. (Unfortunately, we can't just # use reduce_all for both cases, becaue when you pass an empty `axis` # list to reduce_all, it reduces all axes; but we want it to reduce no # axes -- i.e., to be a no-op.) tensor_rank = array_ops.rank(tensor) reduce_axis = math_ops.range(2, tensor_rank) has_default = control_flow_ops.cond( tensor_rank > 2, lambda: math_ops.reduce_all(has_default_value, axis=reduce_axis), lambda: has_default_value) has_default.set_shape(tensor_shape.TensorShape([None, None])) has_default.set_shape(tensor.shape[:2]) # Use has_default to find the length of each row: for each # non-default item in a row, calculate the length that the row needs to # have to include that item; and then take the max of those values # (across each row). has_nondefault = math_ops.logical_not(has_default) has_nondefault = math_ops.cast(has_nondefault, row_splits_dtype) length_for_nondefault_value = ( has_nondefault * array_ops.expand_dims( math_ops.range(1, ncols + 1), 0)) lengths = math_ops.reduce_max(length_for_nondefault_value, axis=1) if lengths is not None: if isinstance(lengths, (list, tuple)) and len(lengths) and not isinstance( lengths[0], (int, float)): # In this case, we've been given nested row lengths. Rather than # reconstructing the tensor mask directly, we can recreate it as # a boolean RaggedTensor, then densify that and use that as the # mask to clear out the unused data in the passed tensor. 
tensor.shape.with_rank_at_least(len(lengths) + 1) num_tokens = math_ops.reduce_sum(lengths[-1]) ones_mask = array_ops.ones([num_tokens], dtype=dtypes.bool) ragged_mask = cls.from_nested_row_lengths( ones_mask, lengths, validate=False) dense_ragged_mask = ragged_mask.to_tensor(default_value=False) masked_data = array_ops.boolean_mask(tensor, dense_ragged_mask) return cls.from_nested_row_lengths( masked_data, lengths, validate=False) else: # If we have lengths (either directly supplied, or computed from # paddings), then use those to construct splits; and then use masking # to get the corresponding values. lengths = ragged_util.convert_to_int_tensor(lengths, "lengths", row_splits_dtype) lengths.shape.assert_has_rank(1) lengths = math_ops.minimum(lengths, ncols) lengths = math_ops.maximum(lengths, 0) limits = math_ops.cumsum(lengths) splits = array_ops.concat( [array_ops.zeros([1], row_splits_dtype), limits], axis=0) mask = array_ops.sequence_mask(lengths, maxlen=ncols) values = array_ops.boolean_mask(tensor, mask) return cls.from_row_splits(values, splits, validate=False) # If neither padding nor lengths were specified, then create a splits # vector that contains no default values, and reshape the input tensor # to form the values for the RaggedTensor. nrows = input_shape[0] nvals = nrows * ncols splits = math_ops.range(nrows + 1) * ncols values_shape = array_ops.concat([[nvals], input_shape[2:]], axis=0) values = array_ops.reshape(tensor, values_shape) return cls.from_row_splits(values, splits, validate=False) def to_tensor(self, default_value=None, name=None): """Converts this `RaggedTensor` into a `tf.Tensor`. Example: ```python >>> rt = ragged.constant([[9, 8, 7], [], [6, 5], [4]]) >>> print rt.to_tensor() [[9 8 7] [0 0 0] [6 5 0] [4 0 0]] ``` Args: default_value: Value to set for indices not specified in `self`. Defaults to zero. `default_value` must be broadcastable to `self.shape[self.ragged_rank + 1:]`. name: A name prefix for the returned tensors (optional). 
    Returns:
      A `Tensor` with shape `ragged.bounding_shape(self)` and the
      values specified by the non-empty values in `self`.  Empty values are
      assigned `default_value`.
    """
    with ops.name_scope(name, "RaggedToTensor", [self, default_value]):
      if default_value is not None:
        default_value = ops.convert_to_tensor(
            default_value, name="default_value", dtype=self.dtype)

      # If ragged_rank > 1, then recursively convert the ragged values into a
      # `Tensor` before we proceed.
      values = self.values
      if is_ragged(values):
        values = values.to_tensor(default_value)

      # Tile the default value, if necessary, so it broadcasts against a
      # single row of `values` (i.e. has shape values.shape[1:]).
      if default_value is not None:
        if values.shape.ndims is not None:
          default_value.shape.with_rank_at_most(values.shape.ndims - 1)
        if (values.shape.ndims is None or default_value.shape.ndims is None or
            values.shape.ndims != default_value.shape.ndims + 1):
          value_shape = array_ops.shape(values)[1:]
          default_value = array_ops.broadcast_to(default_value, value_shape)
        default_value.shape.assert_is_compatible_with(values.shape[1:])

      # Get the expected dense shape ([nrows, ncols] + value_shape).
      # NOTE(review): row lengths are derived from adjacent row_splits
      # differences; ncols is clamped to >= 0 for the all-empty case.
      rt_row_lengths = [self.row_splits[1:] - self.row_splits[:-1]]
      nrows = array_ops.shape(self.row_splits,
                              out_type=self._row_splits.dtype)[0] - 1
      ncols = math_ops.maximum(math_ops.reduce_max(rt_row_lengths), 0)
      values_shape = array_ops.shape(values, out_type=self._row_splits.dtype)
      value_shape = values_shape[1:]
      nvals = values_shape[0]

      # Build a default value if none was supplied.
      if default_value is None:
        default_value = array_ops.zeros(value_shape, dtype=values.dtype)
      default_value.shape.assert_is_compatible_with(values.shape[1:])
      default_value.set_shape(values.shape[1:])

      # Get the row start indices, and expand to shape=[nrows, 1].
      starts = array_ops.expand_dims(self.row_splits[:-1], 1)

      # Get the row limit indices, and expand to shape=[nrows, 1].
      limits = array_ops.expand_dims(self.row_splits[1:], 1)

      # Get the column indices, and expand to shape=[1, ncols].
      columns = array_ops.expand_dims(math_ops.range(0, ncols), 0)

      # Build a list containing the values plus the default value.  We will use
      # tf.gather to collect values from this list for the `Tensor` (using
      # nvals as the index for the default value).
      values_and_default = array_ops.concat(
          [values, array_ops.stack([default_value])], axis=0)

      # Construct a matrix "indices" pointing into values_and_default.  I.e.,
      # output[r, c] = values_and_default[indices[r, c]].
      nondefault_index = starts + columns
      has_value = nondefault_index < limits
      default_index = array_ops.fill(array_ops.stack([nrows, ncols]), nvals)
      indices = array_ops.where(has_value, nondefault_index, default_index)

      # Gather the results into a `Tensor`.
      return array_ops.gather(values_and_default, indices)

  @classmethod
  def from_sparse(cls, st_input, name=None, row_splits_dtype=dtypes.int64):
    """Converts a 2D `tf.SparseTensor` to a `RaggedTensor`.

    Each row of the `output` `RaggedTensor` will contain the explicit values
    from the same row in `st_input`.  `st_input` must be ragged-right.  If it
    is not ragged-right, then an error will be generated.

    Example:

    ```python
    >>> st = SparseTensor(indices=[[0, 1], [0, 2], [0, 3], [1, 0], [3, 0]],
    ...                   values=[1, 2, 3, 4, 5],
    ...                   dense_shape=[4, 3])
    >>> rt.RaggedTensor.from_sparse(st).eval().tolist()
    [[1, 2, 3], [4], [], [5]]
    ```

    Currently, only two-dimensional `SparseTensors` are supported.

    Args:
      st_input: The sparse tensor to convert.  Must have rank 2.
      name: A name prefix for the returned tensors (optional).
      row_splits_dtype: `dtype` for the returned `RaggedTensor`'s `row_splits`
        tensor.  One of `tf.int32` or `tf.int64`.

    Returns:
      A `RaggedTensor` with the same values as `st_input`.
      `output.ragged_rank = rank(st_input) - 1`.
      `output.shape = [st_input.dense_shape[0], None]`.
    Raises:
      ValueError: If the number of dimensions in `st_input` is not known
        statically, or is not two.
""" row_splits_dtype = dtypes.as_dtype(row_splits_dtype) if not sparse_tensor.is_sparse(st_input): raise TypeError("Expected SparseTensor, got %s" % type(st_input).__name__) with ops.name_scope(name, "RaggedFromSparse", [st_input]): st_input = sparse_tensor.convert_to_tensor_or_sparse_tensor( st_input, name="st_input") if st_input.dense_shape.shape.ndims is None: static_rank_from_dense_shape = None else: static_rank_from_dense_shape = st_input.dense_shape.shape.dims[0].value if st_input.indices.shape.ndims is None: static_rank_from_indices = None else: static_rank_from_indices = st_input.indices.shape.dims[1].value if static_rank_from_dense_shape != 2 and static_rank_from_indices != 2: raise ValueError("rank(st_input) must be 2") with ops.control_dependencies( _assert_sparse_indices_are_ragged_right(st_input.indices)): # Treat sparse row indices as segment ids to generate a splits tensor # thta we can pair with the sparse tensor values. (Ignore sparse column # indices.) segment_ids = math_ops.cast(st_input.indices[:, 0], row_splits_dtype) num_segments = math_ops.cast(st_input.dense_shape[0], row_splits_dtype) return cls.from_value_rowids( st_input.values, segment_ids, num_segments, validate=False) def to_sparse(self, name=None): """Converts this `RaggedTensor` into a `tf.SparseTensor`. Example: ```python >>> rt = ragged.constant([[1, 2, 3], [4], [], [5, 6]]) >>> rt.to_sparse().eval() SparseTensorValue(indices=[[0, 0], [0, 1], [0, 2], [1, 0], [3, 0], [3, 1]], values=[1, 2, 3, 4, 5, 6], dense_shape=[4, 3]) ``` Args: name: A name prefix for the returned tensors (optional). Returns: A SparseTensor with the same values as `self`. 
""" with ops.name_scope(name, "RaggedToSparse", [self]): result = gen_ragged_conversion_ops.ragged_tensor_to_sparse( self.nested_row_splits, self.flat_values, name=name) return sparse_tensor.SparseTensor(result.sparse_indices, result.sparse_values, result.sparse_dense_shape) @classmethod def _from_variant(cls, variant, dtype, output_ragged_rank, input_ragged_rank=None, name=None): """Converts a `variant` Tensor into a `RaggedTensor`. The input `variant` could be a scalar, meaning it encodes a single `RaggedTensor` with ragged_rank `output_ragged_rank`. Alternatively it could have an arbitrary rank, in which case each element is decoded into a `RaggedTensor` with ragged_rank `input_ragged_rank` and these are then stacked according to the input shape to output a single `RaggedTensor` with ragged_rank `output_ragged_rank`. If `input_ragged_rank` is not provided, it is inferred dynamically as `output_ragged_rank` - `rank(variant)`. If `input_ragged_rank` is provided, the following must be true: `output_ragged_rank` = `input_ragged_rank` + `rank(variant)`. Example: ```python >>> rt = ragged.constant([[0], [1, 2]]) >>> et = rt._to_variant() >>> stacked_et = ragged.stack([et, et]) >>> ragged.RaggedTensor._from_variant( # scalar input. et, dtype=tf.int32, output_ragged_rank=1).eval().tolist() [[0], [1, 2]] >>> ragged.RaggedTensor._from_variant( # batched input. stacked_et, dtype=tf.int32, output_ragged_rank=2).eval().tolist() [[[0], [1, 2]], [[0], [1, 2]]] ``` Args: variant: A `variant` Tensor representing an encoded (possibly nested-batched) `RaggedTensor`. dtype: The dtype of the encoded `RaggedTensor`. output_ragged_rank: The expected ragged rank of the output `RaggedTensor`. input_ragged_rank: The ragged rank of each encoded `RaggedTensor`. This is optional and inferred dynamically if not provided. name: A name prefix for the returned tensors (optional). Returns: A `RaggedTensor` of dtype `dtype` and ragged rank `output_ragged_rank`. 
Raises: ValueError: If the input rank is known, `input_ragged_rank` is provided and `output_ragged_rank` = `input_ragged_rank` + `rank(variant)` does not hold. """ variant = ops.convert_to_tensor( variant, name="variant", dtype=dtypes.variant) if (variant.shape.ndims is not None and input_ragged_rank is not None and output_ragged_rank != input_ragged_rank + variant.shape.ndims): raise ValueError( "output_ragged_rank must be equal to input_ragged_rank +" "variant.shape.ndims, found variant.shape.ndims: %d, " "input_ragged_rank: %d, output_ragged_rank: %d" % (variant.shape.ndims, input_ragged_rank, output_ragged_rank)) input_ragged_rank = -1 if input_ragged_rank is None else input_ragged_rank with ops.name_scope( name, "RaggedFromVariant", [variant, dtype, input_ragged_rank, output_ragged_rank]): result = gen_ragged_conversion_ops.ragged_tensor_from_variant( variant, input_ragged_rank, output_ragged_rank, dtype, dtypes.int64, name) return cls.from_nested_row_splits( result.output_dense_values, result.output_nested_splits, validate=False) def _to_variant(self, batched_input=False, name=None): """Converts this `RaggedTensor` into a `variant` Tensor. If `batched_input` is `True`, then the `RaggedTensor` is unbatched along the zero-th dimension, each component `RaggedTensor` is encoded into a scalar `variant` Tensor, and these are stacked to return a 1-D `variant` Tensor. If `batched_input` is `False`, then the `RaggedTensor` is encoded as is and a scalar `variant` Tensor is returned. Example: >>> rt = ragged.constant([[[0]], [[1]], [[2]]]) >>> rt._to_variant().shape.as_list() [] >>> rt._to_variant(batched_input=True).shape.as_list() [3] Args: batched_input: If `True`, the `RaggedTensor` is unbatched and converted to a `variant` vector. Set to `False` by default. name: A name prefix for the returned tensors (optional). Returns: A `variant` Tensor that encodes this `RaggedTensor`. 
""" with ops.name_scope(name, "RaggedToVariant", [self, batched_input]): return gen_ragged_conversion_ops.ragged_tensor_to_variant( self.nested_row_splits, self.flat_values, batched_input, name) #============================================================================= # String Encoding #============================================================================= def __str__(self): if self._is_eager(): return "<tf.RaggedTensor %s>" % self.to_list() else: return self.__repr__() def __repr__(self): return "tf.RaggedTensor(values=%s, row_splits=%s)" % (self._values, self._row_splits) #============================================================================= # Eager Execution Mode #============================================================================= def to_list(self): """Returns a nested Python `list` with the values for this `RaggedTensor`. Requires that `rt` was constructed in eager execution mode. Returns: A nested Python `list`. """ if self._is_eager(): return self._eager_value().to_list() else: raise ValueError("RaggedTensor.to_list() is only supported in eager " "mode; in graph mode, evaluate the RaggedTensor first " "and then use RaggedTensorValue.to_list().") def _eager_value(self): """Returns a RaggedTensorValue for self. 
Requires self._is_eager()=true.""" value = self.flat_values.numpy() for row_splits in reversed(self.nested_row_splits): value = ragged_tensor_value.RaggedTensorValue(value, row_splits.numpy()) return value def _is_eager(self): """Returns True if values & row_splits Tensors are all `EagerTensor`s.""" rt = self while isinstance(rt, RaggedTensor): if not isinstance(rt.row_splits, ops.EagerTensor): return False rt = rt.values return isinstance(rt, ops.EagerTensor) #============================================================================= # Indexing & Slicing #============================================================================= def __getitem__(self, key): """Returns the specified piece of this RaggedTensor.""" # See ragged_getitem.py for the documentation and implementation of this # method. # # Note: the imports in ragged/__init__.py ensure that this method always # gets overridden before it is called. #============================================================================= # Name Scope #============================================================================= # This private function is used by ops.name_scope to ensure that all of the # input tensors for the scope belong to the same graph. Defining this means # that you may include `RaggedTensor` objects in the name_scope `values` # list. 
def _as_graph_element(self): """Convert `self` to a graph element.""" values = self.values while isinstance(values, RaggedTensor): values = values.values return values #============================================================================= # Composite Tensor #============================================================================= def _to_components(self): return (self.flat_values,) + self.nested_row_splits @classmethod def _from_components(cls, components, metadata): return cls.from_nested_row_splits( components[0], components[1:], validate=False) def _shape_invariant_to_components(self, shape=None): ragged_rank = self.ragged_rank flat_values = self.flat_values if shape is None: # Default shape invariant value_shape = flat_values.shape[1:] values_shape = tensor_shape.TensorShape([None]).concatenate(value_shape) return ((values_shape, self._row_splits.shape) + tuple(tensor_shape.TensorShape([None]) for i in range(1, ragged_rank))) else: # Explicitly specified shape invariant if shape.ndims is not None and shape.ndims <= ragged_rank: raise ValueError("Shape invariant %s does not have sufficient rank " "for a RaggedTensor with %d ragged dimensions." 
% (shape, self.ragged_rank)) if any(tensor_shape.dimension_value(shape[dim]) is not None for dim in range(1, self.ragged_rank + 1)): raise ValueError("Shape invariant dimension size must be None for " "ragged dimenions.") nrows = tensor_shape.dimension_value(shape[0]) value_shape = shape[self.ragged_rank + 1:] values_shape = tensor_shape.TensorShape([None]).concatenate(value_shape) if nrows is None: outer_splits_shape = tensor_shape.TensorShape([None]) else: outer_splits_shape = tensor_shape.TensorShape([nrows + 1]) return ((values_shape, outer_splits_shape) + tuple(tensor_shape.TensorShape([None]) for i in range(1, ragged_rank))) @property def _is_graph_tensor(self): return hasattr(self._row_splits, "graph") def consumers(self): return self._consumers() def is_ragged(value): """Returns true if `value` is a ragged tensor or ragged tensor value.""" return isinstance(value, (RaggedTensor, ragged_tensor_value.RaggedTensorValue)) def match_row_splits_dtypes(*tensors, **kwargs): """Return a copy of `tensors` with row_splits all having the same dtype. Args: *tensors: A list of Tensors or RaggedTensors. **kwargs: If 'return_dtype=True', then return a tuple (dtype, tensors), where `dtype` is the data type used by row-splits, and `tensors` is the converted list of `Tensors` and `RaggedTensors`. Returns: The converted list of `Tensors` and `RaggedTensors`. 
""" return_dtype = kwargs.pop("return_dtype", False) if kwargs: raise ValueError("Unexpected keyword args %r" % kwargs) has_int32 = False has_int64 = False for tensor in tensors: if isinstance(tensor, RaggedTensor): if tensor.row_splits.dtype == dtypes.int32: has_int32 = True else: has_int64 = True if has_int32 and has_int64: if not ragged_config.auto_cast_partition_dtype(): raise ValueError("Input RaggedTensors have mismatched row_splits dtypes; " "use RaggedTensor.with_row_splits_dtype() to convert " "them to compatible dtypes.") dtype = dtypes.int64 tensors = tuple(t.with_row_splits_dtype(dtypes.int64) if isinstance(t, RaggedTensor) else t for t in tensors) elif has_int32: dtype = dtypes.int32 else: dtype = dtypes.int64 if return_dtype: return (dtype, tensors) else: return tensors #=============================================================================== # Convert value -> tensor #=============================================================================== def convert_to_tensor_or_ragged_tensor(value, dtype=None, preferred_dtype=None, name=None): """Converts value to a `RaggedTensor` or `Tensor`. * If `value` is a `RaggedTensor`, then return it as-is. * If `value` is a `RaggedTensorValue`, return a corresponding constant `RaggedTensor`. * Otherwise, use `convert_to_tensor` to convert `value` to a `Tensor`. Args: value: A `RaggedTensor`, a `RaggedTensorValue`, or an object whose type has a registered `Tensor` conversion function. dtype: Optional element type for the returned tensor. If missing the type is inferred from the type of `value`. preferred_dtype: Optional element type for the returned tensor, used when dtype is None. This argument has no effect if `value` is already a tensor, or when conversion is not possible. name: Optional name to use if a new `Tensor` is created. Returns: A `Tensor` or `RaggedTensor`. 
""" if isinstance(value, RaggedTensor): if dtype and not dtype.is_compatible_with(value.dtype): raise ValueError("Tensor conversion requested dtype %s for " "RaggedTensor with dtype %s: %r" % (dtype.name, value.dtype.name, value)) return value elif isinstance(value, ragged_tensor_value.RaggedTensorValue): with ops.name_scope(name, "ConvertToTensorOrRaggedTensor", []): flat_values = ops.convert_to_tensor( value=value.flat_values, dtype=dtype, preferred_dtype=preferred_dtype, name="flat_values") return RaggedTensor.from_nested_row_splits( flat_values, value.nested_row_splits, validate=False) else: return ops.convert_to_tensor( value=value, dtype=dtype, preferred_dtype=preferred_dtype, name=name) #=============================================================================== # Register RaggedTensor for use with session.run. #=============================================================================== def _ragged_tensor_value_from_components(components): components = list(components) value = components.pop() while components: value = ragged_tensor_value.RaggedTensorValue(value, components.pop()) return value def _ragged_tensor_session_fetch(rt): components = rt.nested_row_splits + (rt.flat_values,) return (components, _ragged_tensor_value_from_components) def _ragged_tensor_session_feed(feed_key, feed_val): key_components = feed_key.nested_row_splits + (feed_key.flat_values,) val_components = feed_val.nested_row_splits + (feed_val.flat_values,) return zip(key_components, val_components) def _ragged_tensor_session_feed_for_partial_run(feed_key): return feed_key.nested_row_splits + (feed_key.flat_values,) session.register_session_run_conversion_functions( RaggedTensor, _ragged_tensor_session_fetch, _ragged_tensor_session_feed, _ragged_tensor_session_feed_for_partial_run) #=============================================================================== # RaggedTensorType #=============================================================================== class 
RaggedTensorType(object): """Encoding of a static type for a `RaggedTensor`. Use this type to express/declare that an output must have the type of `RaggedTensor`. """ def __init__(self, dtype, ragged_rank, row_splits_dtype=dtypes.int64): """Initializes a RaggedTensorType object. Args: dtype: data type of the `RaggedTensor`'s inner values. ragged_rank: ragged_rank of the declared `RaggedTensor`. row_splits_dtype: data type for the `RaggedTensor`'s row splits. One of: `tf.int32` or `tf.int64`. """ row_splits_dtype = dtypes.as_dtype(row_splits_dtype) self._dtype = dtype self._ragged_rank = ragged_rank self._row_splits_dtype = row_splits_dtype dtype = property(lambda self: self._dtype) ragged_rank = property(lambda self: self._ragged_rank) row_splits_dtype = property(lambda self: self._row_splits_dtype) #=============================================================================== # Helper Functions #=============================================================================== def _assert_sparse_indices_are_ragged_right(indices): """Checks that the given SparseTensor.indices tensor is ragged-right. Example: `indices = [[0, 0], [0, 1], [2, 0], [3, 1]]` is not ragged right because the entry `[3, 1]` skips a cell. Args: indices: The SparseTensor indices to check. Returns: A list of control dependency op tensors. """ index_prefix = indices[:, :-1] index_suffix = indices[:, -1] # Check whether each index is starting a new row in the innermost dimension # (prefix[i] != prefix[i-1]) or continuing a row (prefix[i] == prefix[i-1]). # (Note: this skips the first index; we will check that separately below.) index_prefix_changed = math_ops.reduce_any( math_ops.not_equal(index_prefix[1:], index_prefix[:-1]), axis=1) # Check two cases: # * For indices that start a new row: index_suffix[i] must be zero. # * For indices that continue a row: index_suffix[i] must be equal to # index_suffix[i-1]+1. 
index_ok = array_ops.where( index_prefix_changed, math_ops.equal(index_suffix[1:], 0), math_ops.equal(index_suffix[1:], index_suffix[:-1] + 1)) # Also check that the very first index didn't skip any cells. The first # index starts a new row (by definition), so its suffix should be zero. sparse_indices_are_ragged_right = math_ops.logical_and( math_ops.reduce_all(math_ops.equal(index_suffix[:1], 0)), math_ops.reduce_all(index_ok)) message = [ "SparseTensor is not right-ragged", "SparseTensor.indices =", indices ] return [control_flow_ops.Assert(sparse_indices_are_ragged_right, message)] @ops.RegisterGradient("RaggedTensorToSparse") def _ragged_tensor_to_sparse_gradient(op, unused_sparse_indices_grad, sparse_values_grad, unused_sparse_shape_grad): """Gradient for RaggedTensorToSparse.""" op_inputs_nested_row_splits = op.inputs[:-1] op_inputs_flat_values = op.inputs[-1] # No gradient for the RaggedTensor's nested_row_splits. nested_row_splits_gradient = [None] * len(op_inputs_nested_row_splits) # Gradient for the RaggedTensor's flat_values is formed by reshaping # the gradient for the SparseTensor's values. flat_values_shape = array_ops.shape(op_inputs_flat_values) flat_values_gradient = array_ops.reshape(sparse_values_grad, flat_values_shape) return nested_row_splits_gradient + [flat_values_gradient] def _assert_monotonic_increasing(tensor, message=None): return check_ops.assert_non_negative( tensor[1:] - tensor[:-1], message=message) def _assert_zero(tensor, message=None): return check_ops.assert_equal( tensor, constant_op.constant(0, dtype=tensor.dtype), message=message) def _nrows(tensor, out_type=dtypes.int32): if isinstance(tensor, RaggedTensor): return tensor.nrows(out_type=out_type) else: return array_ops.shape(tensor, out_type=out_type)[0]
# codeparrot/github-code-clean
""" Cisco_IOS_XR_ipv4_acl_oper This module contains a collection of YANG definitions for Cisco IOS\-XR ipv4\-acl package operational data. This module contains definitions for the following management objects\: ipv4\-acl\-and\-prefix\-list\: Root class of IPv4 Oper schema tree Copyright (c) 2013\-2015 by Cisco Systems, Inc. All rights reserved. """ import re import collections from enum import Enum from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict from ydk.errors import YPYError, YPYModelError from ydk.models.cisco_ios_xr.Cisco_IOS_XR_common_acl_datatypes import AclUsageAppIdEnumEnum class AclAce1Enum(Enum): """ AclAce1Enum ACE Types .. data:: NORMAL = 0 This is Normal ACE .. data:: REMARK = 1 This is Remark ACE .. data:: ABF = 2 This is ABF ACE """ NORMAL = 0 REMARK = 1 ABF = 2 @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta return meta._meta_table['AclAce1Enum'] class AclAce1Enum(Enum): """ AclAce1Enum ACE Types .. data:: NORMAL = 0 This is Normal ACE .. data:: REMARK = 1 This is Remark ACE .. data:: ABF = 2 This is ABF ACE """ NORMAL = 0 REMARK = 1 ABF = 2 @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta return meta._meta_table['AclAce1Enum'] class AclActionEnum(Enum): """ AclActionEnum Acl action .. data:: DENY = 0 Deny .. data:: PERMIT = 1 Permit .. data:: ENCRYPT = 2 Encrypt .. data:: BYPASS = 3 Bypass .. data:: FALLTHROUGH = 4 Fallthrough .. data:: INVALID = 5 Invalid """ DENY = 0 PERMIT = 1 ENCRYPT = 2 BYPASS = 3 FALLTHROUGH = 4 INVALID = 5 @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta return meta._meta_table['AclActionEnum'] class AclLogEnum(Enum): """ AclLogEnum Acl log .. data:: LOG_NONE = 0 Log None .. data:: LOG = 1 Log Regular .. 
data:: LOG_INPUT = 2 Log Input """ LOG_NONE = 0 LOG = 1 LOG_INPUT = 2 @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta return meta._meta_table['AclLogEnum'] class AclPortOperatorEnum(Enum): """ AclPortOperatorEnum Acl port operator .. data:: NONE = 0 None .. data:: EQ = 1 Equal .. data:: GT = 2 Greater than .. data:: LT = 3 Less than .. data:: NEQ = 4 Not Equal .. data:: RANGE = 5 Range .. data:: ONEBYTE = 8 One Byte .. data:: TWOBYTES = 9 Two Bytes """ NONE = 0 EQ = 1 GT = 2 LT = 3 NEQ = 4 RANGE = 5 ONEBYTE = 8 TWOBYTES = 9 @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta return meta._meta_table['AclPortOperatorEnum'] class AclPortOperatorEnum(Enum): """ AclPortOperatorEnum Acl port operator .. data:: NONE = 0 None .. data:: EQ = 1 Equal .. data:: GT = 2 Greater than .. data:: LT = 3 Less than .. data:: NEQ = 4 Not Equal .. data:: RANGE = 5 Range .. data:: ONEBYTE = 8 One Byte .. data:: TWOBYTES = 9 Two Bytes """ NONE = 0 EQ = 1 GT = 2 LT = 3 NEQ = 4 RANGE = 5 ONEBYTE = 8 TWOBYTES = 9 @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta return meta._meta_table['AclPortOperatorEnum'] class AclPortOperatorEnum(Enum): """ AclPortOperatorEnum Acl port operator .. data:: NONE = 0 None .. data:: EQ = 1 Equal .. data:: GT = 2 Greater than .. data:: LT = 3 Less than .. data:: NEQ = 4 Not Equal .. data:: RANGE = 5 Range .. data:: ONEBYTE = 8 One Byte .. data:: TWOBYTES = 9 Two Bytes """ NONE = 0 EQ = 1 GT = 2 LT = 3 NEQ = 4 RANGE = 5 ONEBYTE = 8 TWOBYTES = 9 @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta return meta._meta_table['AclPortOperatorEnum'] class AclTcpflagsOperatorEnum(Enum): """ AclTcpflagsOperatorEnum Acl tcpflags operator .. data:: MATCH_NONE = 0 Match None .. data:: MATCH_ALL = 1 Match All .. 
data:: MATCH_ANY_OLD = 2 Match any old .. data:: MATCH_ANY = 3 Match any """ MATCH_NONE = 0 MATCH_ALL = 1 MATCH_ANY_OLD = 2 MATCH_ANY = 3 @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta return meta._meta_table['AclTcpflagsOperatorEnum'] class BagAclNhEnum(Enum): """ BagAclNhEnum Bag acl nh .. data:: NEXTHOP_NONE = 0 Next Hop None .. data:: NEXTHOP_DEFAULT = 1 Nexthop Default .. data:: NEXTHOP = 2 Nexthop """ NEXTHOP_NONE = 0 NEXTHOP_DEFAULT = 1 NEXTHOP = 2 @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta return meta._meta_table['BagAclNhEnum'] class BagAclNhStatusEnum(Enum): """ BagAclNhStatusEnum Bag acl nh status .. data:: NOT_PRESENT = 0 State Not Present .. data:: UNKNOWN = 1 State Unknown .. data:: DOWN = 2 State DOWN .. data:: UP = 3 State UP """ NOT_PRESENT = 0 UNKNOWN = 1 DOWN = 2 UP = 3 @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta return meta._meta_table['BagAclNhStatusEnum'] class Ipv4AclAndPrefixList(object): """ Root class of IPv4 Oper schema tree .. attribute:: access_list_manager Access list manager containing access lists and prefix lists **type**\: :py:class:`AccessListManager <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.Ipv4AclAndPrefixList.AccessListManager>` .. attribute:: oor Out Of Resources, Limits to the resources allocatable **type**\: :py:class:`Oor <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.Ipv4AclAndPrefixList.Oor>` """ _prefix = 'ipv4-acl-oper' _revision = '2015-11-09' def __init__(self): self.access_list_manager = Ipv4AclAndPrefixList.AccessListManager() self.access_list_manager.parent = self self.oor = Ipv4AclAndPrefixList.Oor() self.oor.parent = self class AccessListManager(object): """ Access list manager containing access lists and prefix lists .. 
attribute:: accesses Access listL class displaying Usage and Entries **type**\: :py:class:`Accesses <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.Ipv4AclAndPrefixList.AccessListManager.Accesses>` .. attribute:: prefixes Table of prefix lists **type**\: :py:class:`Prefixes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.Ipv4AclAndPrefixList.AccessListManager.Prefixes>` .. attribute:: usages Table of Usage statistics of access lists at different nodes **type**\: :py:class:`Usages <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.Ipv4AclAndPrefixList.AccessListManager.Usages>` """ _prefix = 'ipv4-acl-oper' _revision = '2015-11-09' def __init__(self): self.parent = None self.accesses = Ipv4AclAndPrefixList.AccessListManager.Accesses() self.accesses.parent = self self.prefixes = Ipv4AclAndPrefixList.AccessListManager.Prefixes() self.prefixes.parent = self self.usages = Ipv4AclAndPrefixList.AccessListManager.Usages() self.usages.parent = self class Prefixes(object): """ Table of prefix lists .. attribute:: prefix Name of the prefix list **type**\: list of :py:class:`Prefix <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.Ipv4AclAndPrefixList.AccessListManager.Prefixes.Prefix>` """ _prefix = 'ipv4-acl-oper' _revision = '2015-11-09' def __init__(self): self.parent = None self.prefix = YList() self.prefix.parent = self self.prefix.name = 'prefix' class Prefix(object): """ Name of the prefix list .. attribute:: prefix_list_name <key> Name of the prefix list **type**\: str .. 
attribute:: prefix_list_sequences

                    Table of all the SequenceNumbers per prefix list.
                    **type**: :py:class:`PrefixListSequences`
                """

                _prefix = 'ipv4-acl-oper'
                _revision = '2015-11-09'

                def __init__(self):
                    self.parent = None
                    # <key> name of this prefix list
                    self.prefix_list_name = None
                    self.prefix_list_sequences = Ipv4AclAndPrefixList.AccessListManager.Prefixes.Prefix.PrefixListSequences()
                    self.prefix_list_sequences.parent = self


                class PrefixListSequences(object):
                    """
                    Table of all the SequenceNumbers per prefix list.

                    .. attribute:: prefix_list_sequence

                        Sequence Number of a prefix list entry.
                        **type**: list of :py:class:`PrefixListSequence`
                    """

                    _prefix = 'ipv4-acl-oper'
                    _revision = '2015-11-09'

                    def __init__(self):
                        self.parent = None
                        self.prefix_list_sequence = YList()
                        self.prefix_list_sequence.parent = self
                        self.prefix_list_sequence.name = 'prefix_list_sequence'


                    class PrefixListSequence(object):
                        """
                        Sequence Number of a prefix list entry (operational data).

                        Attributes: sequence_number (<key>, int 1..2147483646),
                        acl_name (str), grant (AclActionEnum), hits (int),
                        item_type (AclAce1Enum), maximum_length (int),
                        minimum_length (int), operator_ (AclPortOperatorEnum),
                        prefix (IPv4 address str), prefix_length (int),
                        remark (str), sequence (int).
                        """

                        _prefix = 'ipv4-acl-oper'
                        _revision = '2015-11-09'

                        def __init__(self):
                            self.parent = None
                            self.sequence_number = None
                            self.acl_name = None
                            self.grant = None
                            self.hits = None
                            self.item_type = None
                            self.maximum_length = None
                            self.minimum_length = None
                            self.operator_ = None
                            self.prefix = None
                            self.prefix_length = None
                            self.remark = None
                            self.sequence = None

                        @property
                        def _common_path(self):
                            # Keyed list entry: needs both a parent path and the key leaf.
                            if self.parent is None:
                                raise YPYModelError('parent is not set . Cannot derive path.')
                            if self.sequence_number is None:
                                raise YPYModelError('Key property sequence_number is None')

                            return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-oper:prefix-list-sequence[Cisco-IOS-XR-ipv4-acl-oper:sequence-number = ' + str(self.sequence_number) + ']'

                        def is_config(self):
                            ''' Returns True if this instance represents config data else returns False '''
                            return False

                        def _has_data(self):
                            # NOTE(review): is_config() is False for this oper-data class, so
                            # the guard below makes _has_data() always return False — this is
                            # the standard generated-YDK pattern; confirm against ydkgen output.
                            if not self.is_config():
                                return False
                            if self.sequence_number is not None:
                                return True
                            if self.acl_name is not None:
                                return True
                            if self.grant is not None:
                                return True
                            if self.hits is not None:
                                return True
                            if self.item_type is not None:
                                return True
                            if self.maximum_length is not None:
                                return True
                            if self.minimum_length is not None:
                                return True
                            if self.operator_ is not None:
                                return True
                            if self.prefix is not None:
                                return True
                            if self.prefix_length is not None:
                                return True
                            if self.remark is not None:
                                return True
                            if self.sequence is not None:
                                return True

                            return False

                        @staticmethod
                        def _meta_info():
                            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
                            return meta._meta_table['Ipv4AclAndPrefixList.AccessListManager.Prefixes.Prefix.PrefixListSequences.PrefixListSequence']['meta_info']

                    @property
                    def _common_path(self):
                        if self.parent is None:
                            raise YPYModelError('parent is not set . Cannot derive path.')

                        return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-oper:prefix-list-sequences'

                    def is_config(self):
                        ''' Returns True if this instance represents config data else returns False '''
                        return False

                    def _has_data(self):
                        if not self.is_config():
                            return False
                        if self.prefix_list_sequence is not None:
                            for child_ref in self.prefix_list_sequence:
                                if child_ref._has_data():
                                    return True

                        return False

                    @staticmethod
                    def _meta_info():
                        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
                        return meta._meta_table['Ipv4AclAndPrefixList.AccessListManager.Prefixes.Prefix.PrefixListSequences']['meta_info']

                @property
                def _common_path(self):
                    # Absolute path; only the list key must be set.
                    if self.prefix_list_name is None:
                        raise YPYModelError('Key property prefix_list_name is None')

                    return '/Cisco-IOS-XR-ipv4-acl-oper:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-oper:access-list-manager/Cisco-IOS-XR-ipv4-acl-oper:prefixes/Cisco-IOS-XR-ipv4-acl-oper:prefix[Cisco-IOS-XR-ipv4-acl-oper:prefix-list-name = ' + str(self.prefix_list_name) + ']'

                def is_config(self):
                    ''' Returns True if this instance represents config data else returns False '''
                    return False

                def _has_data(self):
                    if not self.is_config():
                        return False
                    if self.prefix_list_name is not None:
                        return True
                    if self.prefix_list_sequences is not None and self.prefix_list_sequences._has_data():
                        return True

                    return False

                @staticmethod
                def _meta_info():
                    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
                    return meta._meta_table['Ipv4AclAndPrefixList.AccessListManager.Prefixes.Prefix']['meta_info']

            @property
            def _common_path(self):
                return '/Cisco-IOS-XR-ipv4-acl-oper:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-oper:access-list-manager/Cisco-IOS-XR-ipv4-acl-oper:prefixes'

            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return False

            def _has_data(self):
                if not self.is_config():
                    return False
                if self.prefix is not None:
                    for child_ref in self.prefix:
                        if child_ref._has_data():
                            return True

                return False

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
                return meta._meta_table['Ipv4AclAndPrefixList.AccessListManager.Prefixes']['meta_info']


        class Accesses(object):
            """
            Access listL class displaying Usage and Entries.

            .. attribute:: access

                Name of the Access List.
                **type**: list of :py:class:`Access`
            """

            _prefix = 'ipv4-acl-oper'
            _revision = '2015-11-09'

            def __init__(self):
                self.parent = None
                self.access = YList()
                self.access.parent = self
                self.access.name = 'access'


            class Access(object):
                """
                Name of the Access List.

                .. attribute:: access_list_name

                    <key> Name of the Access List.
                    **type**: str

                .. attribute:: access_list_sequences

                    Table of all the SequenceNumbers per access list.
                    **type**: :py:class:`AccessListSequences`
                """

                _prefix = 'ipv4-acl-oper'
                _revision = '2015-11-09'

                def __init__(self):
                    self.parent = None
                    self.access_list_name = None
                    self.access_list_sequences = Ipv4AclAndPrefixList.AccessListManager.Accesses.Access.AccessListSequences()
                    self.access_list_sequences.parent = self


                class AccessListSequences(object):
                    """
                    Table of all the SequenceNumbers per access list.

                    .. attribute:: access_list_sequence

                        Sequence Number of an access list entry.
                        **type**: list of :py:class:`AccessListSequence`
                    """

                    _prefix = 'ipv4-acl-oper'
                    _revision = '2015-11-09'

                    def __init__(self):
                        self.parent = None
                        self.access_list_sequence = YList()
                        self.access_list_sequence.parent = self
                        self.access_list_sequence.name = 'access_list_sequence'


                    class AccessListSequence(object):
                        """
                        Sequence Number of an access list entry (operational data).

                        Key: sequence_number (int 1..2147483646).

                        Match/result leaves (all optional): acl_name, capture,
                        counter_name, destination_address(_mask), destination_operator,
                        destination_port1/2, destination_port_group,
                        destination_prefix_group, dscp, dscp2, dscp_operator,
                        dscp_present, dynamic, fragments, grant, hits,
                        hw_next_hop_info (container), is_icmp_off, item_type,
                        log_option, next_hop_info (list), next_hop_type, no_stats,
                        port_length1/2, port_length_operator, precedence,
                        precedence_present, protocol, remark, sequence,
                        sorce_operator/port1/port2 (deprecated forms of source_*),
                        source_address(_mask), source_operator, source_port1/2,
                        source_port_group, source_prefix_group, tcp_flags,
                        tcp_flags_mask, tcp_flags_operator, ttl1, ttl2, ttl_operator.
                        """

                        _prefix = 'ipv4-acl-oper'
                        _revision = '2015-11-09'

                        def __init__(self):
                            self.parent = None
                            self.sequence_number = None
                            self.acl_name = None
                            self.capture = None
                            self.counter_name = None
                            self.destination_address = None
                            self.destination_address_mask = None
                            self.destination_operator = None
                            self.destination_port1 = None
                            self.destination_port2 = None
                            self.destination_port_group = None
                            self.destination_prefix_group = None
                            self.dscp = None
                            self.dscp2 = None
                            self.dscp_operator = None
                            self.dscp_present = None
                            self.dynamic = None
                            self.fragments = None
                            self.grant = None
                            self.hits = None
                            self.hw_next_hop_info = Ipv4AclAndPrefixList.AccessListManager.Accesses.Access.AccessListSequences.AccessListSequence.HwNextHopInfo()
                            self.hw_next_hop_info.parent = self
                            self.is_icmp_off = None
                            self.item_type = None
                            self.log_option = None
                            self.next_hop_info = YList()
                            self.next_hop_info.parent = self
                            self.next_hop_info.name = 'next_hop_info'
                            self.next_hop_type = None
                            self.no_stats = None
                            self.port_length1 = None
                            self.port_length2 = None
                            self.port_length_operator = None
                            self.precedence = None
                            self.precedence_present = None
                            self.protocol = None
                            self.remark = None
                            self.sequence = None
                            self.sorce_operator = None
                            self.sorce_port1 = None
                            self.sorce_port2 = None
                            self.source_address = None
                            self.source_address_mask = None
                            self.source_operator = None
                            self.source_port1 = None
                            self.source_port2 = None
                            self.source_port_group = None
                            self.source_prefix_group = None
                            self.tcp_flags = None
                            self.tcp_flags_mask = None
                            self.tcp_flags_operator = None
                            self.ttl1 = None
                            self.ttl2 = None
                            self.ttl_operator = None


                        class HwNextHopInfo(object):
                            """
                            HW Next hop info.

                            .. attribute:: next_hop

                                The Next Hop (int 0..4294967295).

                            .. attribute:: type

                                The next-hop type (BagAclNhEnum).

                            .. attribute:: vrf_name

                                VRF name (str).
                            """

                            _prefix = 'ipv4-acl-oper'
                            _revision = '2015-11-09'

                            def __init__(self):
                                self.parent = None
                                self.next_hop = None
                                self.type = None
                                self.vrf_name = None

                            @property
                            def _common_path(self):
                                if self.parent is None:
                                    raise YPYModelError('parent is not set . Cannot derive path.')

                                return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-oper:hw-next-hop-info'

                            def is_config(self):
                                ''' Returns True if this instance represents config data else returns False '''
                                return False

                            def _has_data(self):
                                if not self.is_config():
                                    return False
                                if self.next_hop is not None:
                                    return True
                                if self.type is not None:
                                    return True
                                if self.vrf_name is not None:
                                    return True

                                return False

                            @staticmethod
                            def _meta_info():
                                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
                                return meta._meta_table['Ipv4AclAndPrefixList.AccessListManager.Accesses.Access.AccessListSequences.AccessListSequence.HwNextHopInfo']['meta_info']


                        class NextHopInfo(object):
                            """
                            Next hop info.

                            .. attribute:: is_acl_next_hop_exist

                                The nexthop exist (bool).

                            .. attribute:: next_hop

                                The next hop (IPv4 address str).

                            .. attribute:: status

                                The next hop status (BagAclNhStatusEnum).

                            .. attribute:: track_name

                                Track name (str).
                            """

                            _prefix = 'ipv4-acl-oper'
                            _revision = '2015-11-09'

                            def __init__(self):
                                self.parent = None
                                self.is_acl_next_hop_exist = None
                                self.next_hop = None
                                self.status = None
                                self.track_name = None

                            @property
                            def _common_path(self):
                                if self.parent is None:
                                    raise YPYModelError('parent is not set . Cannot derive path.')

                                return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-oper:next-hop-info'

                            def is_config(self):
                                ''' Returns True if this instance represents config data else returns False '''
                                return False

                            def _has_data(self):
                                if not self.is_config():
                                    return False
                                if self.is_acl_next_hop_exist is not None:
                                    return True
                                if self.next_hop is not None:
                                    return True
                                if self.status is not None:
                                    return True
                                if self.track_name is not None:
                                    return True

                                return False

                            @staticmethod
                            def _meta_info():
                                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
                                return meta._meta_table['Ipv4AclAndPrefixList.AccessListManager.Accesses.Access.AccessListSequences.AccessListSequence.NextHopInfo']['meta_info']

                        @property
                        def _common_path(self):
                            if self.parent is None:
                                raise YPYModelError('parent is not set . Cannot derive path.')
                            if self.sequence_number is None:
                                raise YPYModelError('Key property sequence_number is None')

                            return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-oper:access-list-sequence[Cisco-IOS-XR-ipv4-acl-oper:sequence-number = ' + str(self.sequence_number) + ']'

                        def is_config(self):
                            ''' Returns True if this instance represents config data else returns False '''
                            return False

                        def _has_data(self):
                            if not self.is_config():
                                return False
                            if self.sequence_number is not None:
                                return True
                            if self.acl_name is not None:
                                return True
                            if self.capture is not None:
                                return True
                            if self.counter_name is not None:
                                return True
                            if self.destination_address is not None:
                                return True
                            if self.destination_address_mask is not None:
                                return True
                            if self.destination_operator is not None:
                                return True
                            if self.destination_port1 is not None:
                                return True
                            if self.destination_port2 is not None:
                                return True
                            if self.destination_port_group is not None:
                                return True
                            if self.destination_prefix_group is not None:
                                return True
                            if self.dscp is not None:
                                return True
                            if self.dscp2 is not None:
                                return True
                            if self.dscp_operator is not None:
                                return True
                            if self.dscp_present is not None:
                                return True
                            if self.dynamic is not None:
                                return True
                            if self.fragments is not None:
                                return True
                            if self.grant is not None:
                                return True
                            if self.hits is not None:
                                return True
                            if self.hw_next_hop_info is not None and self.hw_next_hop_info._has_data():
                                return True
                            if self.is_icmp_off is not None:
                                return True
                            if self.item_type is not None:
                                return True
                            if self.log_option is not None:
                                return True
                            if self.next_hop_info is not None:
                                for child_ref in self.next_hop_info:
                                    if child_ref._has_data():
                                        return True
                            if self.next_hop_type is not None:
                                return True
                            if self.no_stats is not None:
                                return True
                            if self.port_length1 is not None:
                                return True
                            if self.port_length2 is not None:
                                return True
                            if self.port_length_operator is not None:
                                return True
                            if self.precedence is not None:
                                return True
                            if self.precedence_present is not None:
                                return True
                            if self.protocol is not None:
                                return True
                            if self.remark is not None:
                                return True
                            if self.sequence is not None:
                                return True
                            if self.sorce_operator is not None:
                                return True
                            if self.sorce_port1 is not None:
                                return True
                            if self.sorce_port2 is not None:
                                return True
                            if self.source_address is not None:
                                return True
                            if self.source_address_mask is not None:
                                return True
                            if self.source_operator is not None:
                                return True
                            if self.source_port1 is not None:
                                return True
                            if self.source_port2 is not None:
                                return True
                            if self.source_port_group is not None:
                                return True
                            if self.source_prefix_group is not None:
                                return True
                            if self.tcp_flags is not None:
                                return True
                            if self.tcp_flags_mask is not None:
                                return True
                            if self.tcp_flags_operator is not None:
                                return True
                            if self.ttl1 is not None:
                                return True
                            if self.ttl2 is not None:
                                return True
                            if self.ttl_operator is not None:
                                return True

                            return False

                        @staticmethod
                        def _meta_info():
                            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
                            return meta._meta_table['Ipv4AclAndPrefixList.AccessListManager.Accesses.Access.AccessListSequences.AccessListSequence']['meta_info']

                    @property
                    def _common_path(self):
                        if self.parent is None:
                            raise YPYModelError('parent is not set . Cannot derive path.')

                        return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-oper:access-list-sequences'

                    def is_config(self):
                        ''' Returns True if this instance represents config data else returns False '''
                        return False

                    def _has_data(self):
                        if not self.is_config():
                            return False
                        if self.access_list_sequence is not None:
                            for child_ref in self.access_list_sequence:
                                if child_ref._has_data():
                                    return True

                        return False

                    @staticmethod
                    def _meta_info():
                        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
                        return meta._meta_table['Ipv4AclAndPrefixList.AccessListManager.Accesses.Access.AccessListSequences']['meta_info']

                @property
                def _common_path(self):
                    # Absolute path; only the list key must be set.
                    if self.access_list_name is None:
                        raise YPYModelError('Key property access_list_name is None')

                    return '/Cisco-IOS-XR-ipv4-acl-oper:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-oper:access-list-manager/Cisco-IOS-XR-ipv4-acl-oper:accesses/Cisco-IOS-XR-ipv4-acl-oper:access[Cisco-IOS-XR-ipv4-acl-oper:access-list-name = ' + str(self.access_list_name) + ']'

                def is_config(self):
                    ''' Returns True if this instance represents config data else returns False '''
                    return False

                def _has_data(self):
                    if not self.is_config():
                        return False
                    if self.access_list_name is not None:
                        return True
                    if self.access_list_sequences is not None and self.access_list_sequences._has_data():
                        return True

                    return False

                @staticmethod
                def _meta_info():
                    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
                    return meta._meta_table['Ipv4AclAndPrefixList.AccessListManager.Accesses.Access']['meta_info']

            @property
            def _common_path(self):
                return '/Cisco-IOS-XR-ipv4-acl-oper:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-oper:access-list-manager/Cisco-IOS-XR-ipv4-acl-oper:accesses'

            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return False

            def _has_data(self):
                if not self.is_config():
                    return False
                if self.access is not None:
                    for child_ref in self.access:
                        if child_ref._has_data():
                            return True

                return False

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
                return meta._meta_table['Ipv4AclAndPrefixList.AccessListManager.Accesses']['meta_info']


        class Usages(object):
            """
            Table of Usage statistics of access lists at different nodes.

            .. attribute:: usage

                Usage statistics of an access list at a node.
                **type**: list of :py:class:`Usage`
            """

            _prefix = 'ipv4-acl-oper'
            _revision = '2015-11-09'

            def __init__(self):
                self.parent = None
                self.usage = YList()
                self.usage.parent = self
                self.usage.name = 'usage'


            class Usage(object):
                """
                Usage statistics of an access list at a node.

                .. attribute:: access_list_name

                    Name of the access list (str).

                .. attribute:: application_id

                    Application ID (AclUsageAppIdEnumEnum).

                .. attribute:: node_name

                    Node where access list is applied (str).

                .. attribute:: usage_details

                    Usage Statistics Details (str, mandatory).
                """

                _prefix = 'ipv4-acl-oper'
                _revision = '2015-11-09'

                def __init__(self):
                    self.parent = None
                    self.access_list_name = None
                    self.application_id = None
                    self.node_name = None
                    self.usage_details = None

                @property
                def _common_path(self):
                    # NOTE(review): unlike the other list entries, this path carries
                    # no key predicate — this matches the generated source as-is.
                    return '/Cisco-IOS-XR-ipv4-acl-oper:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-oper:access-list-manager/Cisco-IOS-XR-ipv4-acl-oper:usages/Cisco-IOS-XR-ipv4-acl-oper:usage'

                def is_config(self):
                    ''' Returns True if this instance represents config data else returns False '''
                    return False

                def _has_data(self):
                    if not self.is_config():
                        return False
                    if self.access_list_name is not None:
                        return True
                    if self.application_id is not None:
                        return True
                    if self.node_name is not None:
                        return True
                    if self.usage_details is not None:
                        return True

                    return False

                @staticmethod
                def _meta_info():
                    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
                    return meta._meta_table['Ipv4AclAndPrefixList.AccessListManager.Usages.Usage']['meta_info']

            @property
            def _common_path(self):
                return '/Cisco-IOS-XR-ipv4-acl-oper:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-oper:access-list-manager/Cisco-IOS-XR-ipv4-acl-oper:usages'

            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return False

            def _has_data(self):
                if not self.is_config():
                    return False
                if self.usage is not None:
                    for child_ref in self.usage:
                        if child_ref._has_data():
                            return True

                return False

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
                return meta._meta_table['Ipv4AclAndPrefixList.AccessListManager.Usages']['meta_info']

        @property
        def _common_path(self):
            return '/Cisco-IOS-XR-ipv4-acl-oper:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-oper:access-list-manager'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return False

        def _has_data(self):
            if not self.is_config():
                return False
            if self.accesses is not None and self.accesses._has_data():
                return True
            if self.prefixes is not None and self.prefixes._has_data():
                return True
            if self.usages is not None and self.usages._has_data():
                return True

            return False

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
            return meta._meta_table['Ipv4AclAndPrefixList.AccessListManager']['meta_info']


    class Oor(object):
        """
        Out Of Resources, Limits to the resources allocatable.

        .. attribute:: access_list_summary

            Resource limits pertaining to access lists only.
            **type**: :py:class:`AccessListSummary`

        .. attribute:: details

            Details of the Overall Out Of Resources Limits.
            **type**: :py:class:`Details`

        .. attribute:: oor_accesses

            Resource occupation details for access lists.
            **type**: :py:class:`OorAccesses`

        .. attribute:: oor_prefixes

            Resource occupation details for prefix lists.
            **type**: :py:class:`OorPrefixes`

        .. attribute:: prefix_list_summary

            Summary of the prefix Lists resource utilization.
            **type**: :py:class:`PrefixListSummary`
        """

        _prefix = 'ipv4-acl-oper'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.access_list_summary = Ipv4AclAndPrefixList.Oor.AccessListSummary()
            self.access_list_summary.parent = self
            self.details = Ipv4AclAndPrefixList.Oor.Details()
            self.details.parent = self
            self.oor_accesses = Ipv4AclAndPrefixList.Oor.OorAccesses()
            self.oor_accesses.parent = self
            self.oor_prefixes = Ipv4AclAndPrefixList.Oor.OorPrefixes()
            self.oor_prefixes.parent = self
            self.prefix_list_summary = Ipv4AclAndPrefixList.Oor.PrefixListSummary()
            self.prefix_list_summary.parent = self


        class Details(object):
            """
            Details of the Overall Out Of Resources Limits.

            Counters (all int 0..4294967295): current_configured_ac_es,
            current_configured_ac_ls, current_max_configurable_ac_es,
            current_max_configurable_ac_ls, default_max_ac_es,
            default_max_ac_ls, max_configurable_ac_es, max_configurable_ac_ls.
            """

            _prefix = 'ipv4-acl-oper'
            _revision = '2015-11-09'

            def __init__(self):
                self.parent = None
                self.current_configured_ac_es = None
                self.current_configured_ac_ls = None
                self.current_max_configurable_ac_es = None
                self.current_max_configurable_ac_ls = None
                self.default_max_ac_es = None
                self.default_max_ac_ls = None
                self.max_configurable_ac_es = None
                self.max_configurable_ac_ls = None

            @property
            def _common_path(self):
                return '/Cisco-IOS-XR-ipv4-acl-oper:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-oper:oor/Cisco-IOS-XR-ipv4-acl-oper:details'

            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return False

            def _has_data(self):
                if not self.is_config():
                    return False
                if self.current_configured_ac_es is not None:
                    return True
                if self.current_configured_ac_ls is not None:
                    return True
                if self.current_max_configurable_ac_es is not None:
                    return True
                if self.current_max_configurable_ac_ls is not None:
                    return True
                if self.default_max_ac_es is not None:
                    return True
                if self.default_max_ac_ls is not None:
                    return True
                if self.max_configurable_ac_es is not None:
                    return True
                if self.max_configurable_ac_ls is not None:
                    return True

                return False

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
                return meta._meta_table['Ipv4AclAndPrefixList.Oor.Details']['meta_info']


        class OorPrefixes(object):
            """
            Resource occupation details for prefix lists.

            .. attribute:: oor_prefix

                Resource occupation details for a particular prefix list.
                **type**: list of :py:class:`OorPrefix`
            """

            _prefix = 'ipv4-acl-oper'
            _revision = '2015-11-09'

            def __init__(self):
                self.parent = None
                self.oor_prefix = YList()
                self.oor_prefix.parent = self
                self.oor_prefix.name = 'oor_prefix'


            class OorPrefix(object):
                """
                Resource occupation details for a particular prefix list.

                Key: prefix_list_name (str).
                Counters (all int 0..4294967295): current_configured_ac_es,
                current_configured_ac_ls, current_max_configurable_ac_es,
                current_max_configurable_ac_ls, default_max_ac_es,
                default_max_ac_ls, max_configurable_ac_es, max_configurable_ac_ls.
                """

                _prefix = 'ipv4-acl-oper'
                _revision = '2015-11-09'

                def __init__(self):
                    self.parent = None
                    self.prefix_list_name = None
                    self.current_configured_ac_es = None
                    self.current_configured_ac_ls = None
                    self.current_max_configurable_ac_es = None
                    self.current_max_configurable_ac_ls = None
                    self.default_max_ac_es = None
                    self.default_max_ac_ls = None
                    self.max_configurable_ac_es = None
                    self.max_configurable_ac_ls = None

                @property
                def _common_path(self):
                    if self.prefix_list_name is None:
                        raise YPYModelError('Key property prefix_list_name is None')

                    return '/Cisco-IOS-XR-ipv4-acl-oper:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-oper:oor/Cisco-IOS-XR-ipv4-acl-oper:oor-prefixes/Cisco-IOS-XR-ipv4-acl-oper:oor-prefix[Cisco-IOS-XR-ipv4-acl-oper:prefix-list-name = ' + str(self.prefix_list_name) + ']'

                def is_config(self):
                    ''' Returns True if this instance represents config data else returns False '''
                    return False

                def _has_data(self):
                    if not self.is_config():
                        return False
                    if self.prefix_list_name is not None:
                        return True
                    if self.current_configured_ac_es is not None:
                        return True
                    if self.current_configured_ac_ls is not None:
                        return True
                    if self.current_max_configurable_ac_es is not None:
                        return True
                    if self.current_max_configurable_ac_ls is not None:
                        return True
                    if self.default_max_ac_es is not None:
                        return True
                    if self.default_max_ac_ls is not None:
                        return True
                    if self.max_configurable_ac_es is not None:
                        return True
                    if self.max_configurable_ac_ls is not None:
                        return True

                    return False

                @staticmethod
                def _meta_info():
                    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
                    return meta._meta_table['Ipv4AclAndPrefixList.Oor.OorPrefixes.OorPrefix']['meta_info']

            @property
            def _common_path(self):
                return '/Cisco-IOS-XR-ipv4-acl-oper:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-oper:oor/Cisco-IOS-XR-ipv4-acl-oper:oor-prefixes'

            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return False

            def _has_data(self):
                if not self.is_config():
                    return False
                if self.oor_prefix is not None:
                    for child_ref in self.oor_prefix:
                        if child_ref._has_data():
                            return True

                return False

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
                return meta._meta_table['Ipv4AclAndPrefixList.Oor.OorPrefixes']['meta_info']


        class OorAccesses(object):
            """
            Resource occupation details for access lists.

            .. attribute:: oor_access

                Resource occupation details for a particular access list.
                **type**: list of :py:class:`OorAccess`
            """

            _prefix = 'ipv4-acl-oper'
            _revision = '2015-11-09'

            def __init__(self):
                self.parent = None
                self.oor_access = YList()
                self.oor_access.parent = self
                self.oor_access.name = 'oor_access'


            class OorAccess(object):
                """
                Resource occupation details for a particular access list.

                Key: access_list_name (str).
                Counters (all int 0..4294967295): current_configured_ac_es,
                current_configured_ac_ls, current_max_configurable_ac_es,
                current_max_configurable_ac_ls, default_max_ac_es,
                default_max_ac_ls, max_configurable_ac_es, max_configurable_ac_ls.
                """

                _prefix = 'ipv4-acl-oper'
                _revision = '2015-11-09'

                def __init__(self):
                    self.parent = None
                    self.access_list_name = None
                    self.current_configured_ac_es = None
                    self.current_configured_ac_ls = None
                    self.current_max_configurable_ac_es = None
                    self.current_max_configurable_ac_ls = None
                    self.default_max_ac_es = None
                    self.default_max_ac_ls = None
                    self.max_configurable_ac_es = None
                    self.max_configurable_ac_ls = None

                @property
                def _common_path(self):
                    if self.access_list_name is None:
                        raise YPYModelError('Key property access_list_name is None')

                    return '/Cisco-IOS-XR-ipv4-acl-oper:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-oper:oor/Cisco-IOS-XR-ipv4-acl-oper:oor-accesses/Cisco-IOS-XR-ipv4-acl-oper:oor-access[Cisco-IOS-XR-ipv4-acl-oper:access-list-name = ' + str(self.access_list_name) + ']'

                def is_config(self):
                    ''' Returns True if this instance represents config data else returns False '''
                    return False

                def _has_data(self):
                    if not self.is_config():
                        return False
                    if self.access_list_name is not None:
                        return True
                    if self.current_configured_ac_es is not None:
                        return True
                    if self.current_configured_ac_ls is not None:
                        return True
                    if self.current_max_configurable_ac_es is not None:
                        return True
                    if self.current_max_configurable_ac_ls is not None:
                        return True
                    if self.default_max_ac_es is not None:
                        return True
                    if self.default_max_ac_ls is not None:
                        return True
                    if self.max_configurable_ac_es is not None:
                        return True
                    if self.max_configurable_ac_ls is not None:
                        return True

                    return False

                @staticmethod
                def _meta_info():
                    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
                    return meta._meta_table['Ipv4AclAndPrefixList.Oor.OorAccesses.OorAccess']['meta_info']

            @property
            def _common_path(self):
                return '/Cisco-IOS-XR-ipv4-acl-oper:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-oper:oor/Cisco-IOS-XR-ipv4-acl-oper:oor-accesses'

            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return False

            def _has_data(self):
                if not self.is_config():
                    return False
                if self.oor_access is not None:
                    for child_ref in self.oor_access:
                        if child_ref._has_data():
                            return True

                return False

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta
                return meta._meta_table['Ipv4AclAndPrefixList.Oor.OorAccesses']['meta_info']


        class AccessListSummary(object):
            """
            Resource limits pertaining to access lists only.

            .. attribute:: details

                Details containing the resource limits of the access lists.
                **type**: :py:class:`Details`
            """

            _prefix = 'ipv4-acl-oper'
            _revision = '2015-11-09'

            def __init__(self):
                self.parent = None
                self.details = Ipv4AclAndPrefixList.Oor.AccessListSummary.Details()
                self.details.parent = self


            class Details(object):
                """
                Details containing the resource limits of the access lists.

                Counters (all int 0..4294967295): current_configured_ac_es,
                current_configured_ac_ls, current_max_configurable_ac_es,
                current_max_configurable_ac_ls, default_max_ac_es,
                default_max_ac_ls, max_configurable_ac_es ..
attribute:: max_configurable_ac_ls max configurable acls **type**\: int **range:** 0..4294967295 """ _prefix = 'ipv4-acl-oper' _revision = '2015-11-09' def __init__(self): self.parent = None self.current_configured_ac_es = None self.current_configured_ac_ls = None self.current_max_configurable_ac_es = None self.current_max_configurable_ac_ls = None self.default_max_ac_es = None self.default_max_ac_ls = None self.max_configurable_ac_es = None self.max_configurable_ac_ls = None @property def _common_path(self): return '/Cisco-IOS-XR-ipv4-acl-oper:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-oper:oor/Cisco-IOS-XR-ipv4-acl-oper:access-list-summary/Cisco-IOS-XR-ipv4-acl-oper:details' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return False def _has_data(self): if not self.is_config(): return False if self.current_configured_ac_es is not None: return True if self.current_configured_ac_ls is not None: return True if self.current_max_configurable_ac_es is not None: return True if self.current_max_configurable_ac_ls is not None: return True if self.default_max_ac_es is not None: return True if self.default_max_ac_ls is not None: return True if self.max_configurable_ac_es is not None: return True if self.max_configurable_ac_ls is not None: return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta return meta._meta_table['Ipv4AclAndPrefixList.Oor.AccessListSummary.Details']['meta_info'] @property def _common_path(self): return '/Cisco-IOS-XR-ipv4-acl-oper:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-oper:oor/Cisco-IOS-XR-ipv4-acl-oper:access-list-summary' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return False def _has_data(self): if not self.is_config(): return False if self.details is not None and self.details._has_data(): return True return False @staticmethod def _meta_info(): 
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta return meta._meta_table['Ipv4AclAndPrefixList.Oor.AccessListSummary']['meta_info'] class PrefixListSummary(object): """ Summary of the prefix Lists resource utilization .. attribute:: details Summary Detail of the prefix list Resource Utilisation **type**\: :py:class:`Details <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_oper.Ipv4AclAndPrefixList.Oor.PrefixListSummary.Details>` """ _prefix = 'ipv4-acl-oper' _revision = '2015-11-09' def __init__(self): self.parent = None self.details = Ipv4AclAndPrefixList.Oor.PrefixListSummary.Details() self.details.parent = self class Details(object): """ Summary Detail of the prefix list Resource Utilisation .. attribute:: current_configured_ac_es Current configured aces **type**\: int **range:** 0..4294967295 .. attribute:: current_configured_ac_ls Current configured acls **type**\: int **range:** 0..4294967295 .. attribute:: current_max_configurable_ac_es Current max configurable aces **type**\: int **range:** 0..4294967295 .. attribute:: current_max_configurable_ac_ls Current max configurable acls **type**\: int **range:** 0..4294967295 .. attribute:: default_max_ac_es default max configurable aces **type**\: int **range:** 0..4294967295 .. attribute:: default_max_ac_ls default max configurable acls **type**\: int **range:** 0..4294967295 .. attribute:: max_configurable_ac_es max configurable aces **type**\: int **range:** 0..4294967295 .. 
attribute:: max_configurable_ac_ls max configurable acls **type**\: int **range:** 0..4294967295 """ _prefix = 'ipv4-acl-oper' _revision = '2015-11-09' def __init__(self): self.parent = None self.current_configured_ac_es = None self.current_configured_ac_ls = None self.current_max_configurable_ac_es = None self.current_max_configurable_ac_ls = None self.default_max_ac_es = None self.default_max_ac_ls = None self.max_configurable_ac_es = None self.max_configurable_ac_ls = None @property def _common_path(self): return '/Cisco-IOS-XR-ipv4-acl-oper:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-oper:oor/Cisco-IOS-XR-ipv4-acl-oper:prefix-list-summary/Cisco-IOS-XR-ipv4-acl-oper:details' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return False def _has_data(self): if not self.is_config(): return False if self.current_configured_ac_es is not None: return True if self.current_configured_ac_ls is not None: return True if self.current_max_configurable_ac_es is not None: return True if self.current_max_configurable_ac_ls is not None: return True if self.default_max_ac_es is not None: return True if self.default_max_ac_ls is not None: return True if self.max_configurable_ac_es is not None: return True if self.max_configurable_ac_ls is not None: return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta return meta._meta_table['Ipv4AclAndPrefixList.Oor.PrefixListSummary.Details']['meta_info'] @property def _common_path(self): return '/Cisco-IOS-XR-ipv4-acl-oper:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-oper:oor/Cisco-IOS-XR-ipv4-acl-oper:prefix-list-summary' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return False def _has_data(self): if not self.is_config(): return False if self.details is not None and self.details._has_data(): return True return False @staticmethod def _meta_info(): 
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta return meta._meta_table['Ipv4AclAndPrefixList.Oor.PrefixListSummary']['meta_info'] @property def _common_path(self): return '/Cisco-IOS-XR-ipv4-acl-oper:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-oper:oor' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return False def _has_data(self): if not self.is_config(): return False if self.access_list_summary is not None and self.access_list_summary._has_data(): return True if self.details is not None and self.details._has_data(): return True if self.oor_accesses is not None and self.oor_accesses._has_data(): return True if self.oor_prefixes is not None and self.oor_prefixes._has_data(): return True if self.prefix_list_summary is not None and self.prefix_list_summary._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta return meta._meta_table['Ipv4AclAndPrefixList.Oor']['meta_info'] @property def _common_path(self): return '/Cisco-IOS-XR-ipv4-acl-oper:ipv4-acl-and-prefix-list' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return False def _has_data(self): if not self.is_config(): return False if self.access_list_manager is not None and self.access_list_manager._has_data(): return True if self.oor is not None and self.oor._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_oper as meta return meta._meta_table['Ipv4AclAndPrefixList']['meta_info']
codeparrot/github-code-clean
input = """ 1 2 0 0 1 3 0 0 1 4 0 0 1 5 0 0 1 6 0 0 1 7 0 0 1 8 0 0 1 9 0 0 1 10 0 0 1 11 0 0 1 12 0 0 1 13 0 0 1 14 0 0 1 15 0 0 1 16 0 0 1 17 0 0 1 18 0 0 1 19 0 0 1 20 0 0 1 21 0 0 1 22 0 0 1 23 0 0 1 24 0 0 1 25 0 0 1 26 0 0 1 27 0 0 1 28 0 0 1 29 0 0 1 30 0 0 1 31 0 0 1 32 0 0 1 33 0 0 1 34 0 0 1 35 0 0 1 36 0 0 1 37 0 0 1 38 0 0 1 39 0 0 1 40 0 0 1 41 0 0 1 42 0 0 1 43 0 0 1 44 0 0 1 45 0 0 1 46 0 0 1 47 0 0 1 48 0 0 1 49 0 0 1 50 0 0 1 51 0 0 1 52 0 0 1 53 0 0 1 54 0 0 1 55 0 0 1 56 0 0 1 57 0 0 1 58 0 0 1 59 0 0 1 60 0 0 1 61 0 0 1 62 0 0 1 63 0 0 1 64 0 0 1 65 0 0 1 66 0 0 1 67 0 0 1 68 0 0 1 69 0 0 1 70 0 0 1 71 0 0 1 72 0 0 1 73 0 0 1 74 0 0 1 75 0 0 1 76 0 0 1 77 0 0 1 78 0 0 1 79 0 0 1 80 0 0 1 81 0 0 1 82 0 0 1 83 0 0 1 84 0 0 1 85 0 0 1 86 0 0 1 87 0 0 1 88 0 0 1 89 0 0 1 90 0 0 1 91 0 0 1 92 0 0 1 93 0 0 1 94 0 0 1 95 0 0 1 96 0 0 1 97 0 0 1 98 0 0 1 99 0 0 1 100 0 0 1 101 0 0 1 102 0 0 1 103 0 0 1 104 0 0 1 105 0 0 1 106 0 0 1 107 0 0 1 108 0 0 1 109 0 0 1 110 0 0 1 111 0 0 1 112 0 0 1 113 0 0 1 114 0 0 1 115 0 0 1 116 0 0 1 117 0 0 1 118 0 0 1 119 0 0 1 120 0 0 1 121 0 0 1 122 0 0 1 123 0 0 1 124 0 0 1 125 0 0 1 126 0 0 1 127 0 0 1 128 0 0 1 129 0 0 1 130 0 0 1 131 0 0 1 132 0 0 1 133 0 0 1 134 0 0 1 135 0 0 1 136 0 0 1 137 0 0 1 138 0 0 1 139 0 0 1 140 0 0 1 141 0 0 1 142 0 0 1 143 0 0 1 144 0 0 1 145 0 0 1 146 0 0 1 147 0 0 1 148 0 0 1 149 0 0 1 150 0 0 1 151 0 0 1 152 0 0 1 153 0 0 1 154 0 0 1 155 0 0 1 156 0 0 1 157 0 0 1 158 0 0 1 159 0 0 1 160 0 0 1 161 0 0 1 162 0 0 1 163 0 0 1 164 0 0 1 165 0 0 1 166 0 0 1 167 0 0 1 168 0 0 1 169 0 0 1 170 0 0 1 171 0 0 1 172 0 0 1 173 0 0 1 174 0 0 1 175 0 0 1 176 0 0 1 177 0 0 1 178 0 0 1 179 0 0 1 180 0 0 1 181 0 0 1 182 0 0 1 183 0 0 1 184 0 0 1 185 0 0 1 186 0 0 1 187 0 0 1 188 0 0 1 189 0 0 1 190 0 0 1 191 0 0 1 192 0 0 1 193 0 0 1 194 0 0 1 195 0 0 1 196 0 0 1 197 0 0 1 198 0 0 1 199 0 0 1 200 0 0 1 201 0 0 1 202 0 0 1 203 0 0 1 204 0 0 1 205 0 0 1 206 0 0 1 207 0 0 1 208 0 0 1 209 0 0 1 210 0 0 1 
211 0 0 1 212 0 0 1 213 0 0 1 214 0 0 1 215 0 0 1 216 0 0 1 217 0 0 1 218 0 0 1 219 0 0 1 220 0 0 1 221 0 0 1 222 0 0 1 223 0 0 1 224 0 0 1 225 0 0 1 226 0 0 1 227 0 0 1 228 0 0 1 229 0 0 1 230 0 0 1 231 0 0 1 232 0 0 1 233 0 0 1 234 0 0 1 235 0 0 1 236 0 0 1 237 0 0 1 238 0 0 1 239 0 0 1 240 0 0 1 241 0 0 1 242 0 0 1 243 0 0 1 244 0 0 1 245 0 0 1 246 0 0 1 247 0 0 1 248 0 0 1 249 0 0 1 250 0 0 1 251 0 0 1 252 0 0 1 253 0 0 1 254 0 0 1 255 0 0 1 256 0 0 1 257 0 0 1 258 0 0 1 259 0 0 1 260 0 0 1 261 0 0 1 262 0 0 1 263 0 0 1 264 0 0 1 265 0 0 1 266 0 0 1 267 0 0 1 268 0 0 1 269 0 0 1 270 0 0 1 271 0 0 1 272 0 0 1 273 0 0 1 274 0 0 1 275 0 0 1 276 0 0 1 277 0 0 1 278 0 0 1 279 0 0 1 280 0 0 1 281 0 0 1 282 0 0 1 283 0 0 1 284 0 0 1 285 0 0 1 286 0 0 1 287 0 0 1 288 0 0 1 289 0 0 1 290 0 0 1 291 0 0 1 292 0 0 1 293 0 0 1 294 0 0 1 295 0 0 1 296 0 0 1 297 0 0 1 298 0 0 1 299 0 0 1 300 0 0 1 301 0 0 1 302 0 0 1 303 0 0 1 304 0 0 1 305 0 0 1 306 0 0 1 307 0 0 1 308 0 0 1 309 0 0 1 310 0 0 1 311 0 0 1 312 0 0 1 313 0 0 1 314 0 0 1 315 0 0 1 316 0 0 1 317 0 0 1 318 0 0 1 319 0 0 1 320 0 0 1 321 0 0 1 322 0 0 1 323 0 0 1 324 0 0 1 325 0 0 1 326 0 0 1 327 0 0 1 328 0 0 1 329 0 0 1 330 0 0 1 331 2 1 332 333 1 332 2 1 331 333 1 333 0 0 1 334 2 1 335 336 1 335 2 1 334 336 1 336 0 0 1 337 2 1 338 339 1 338 2 1 337 339 1 339 0 0 1 340 2 1 341 342 1 341 2 1 340 342 1 342 0 0 1 343 2 1 344 345 1 344 2 1 343 345 1 345 0 0 1 346 2 1 347 348 1 347 2 1 346 348 1 348 0 0 1 349 2 1 350 351 1 350 2 1 349 351 1 351 0 0 1 352 2 1 353 354 1 353 2 1 352 354 1 354 0 0 1 355 2 1 356 357 1 356 2 1 355 357 1 357 0 0 1 358 2 1 359 360 1 359 2 1 358 360 1 360 0 0 1 361 2 1 362 363 1 362 2 1 361 363 1 363 0 0 1 364 2 1 365 366 1 365 2 1 364 366 1 366 0 0 1 367 2 1 368 369 1 368 2 1 367 369 1 369 0 0 1 370 2 1 371 372 1 371 2 1 370 372 1 372 0 0 1 373 2 1 374 375 1 374 2 1 373 375 1 375 0 0 1 376 2 1 377 378 1 377 2 1 376 378 1 378 0 0 1 379 2 1 380 381 1 380 2 1 379 381 1 381 0 0 1 382 2 1 383 384 1 
383 2 1 382 384 1 384 0 0 1 385 2 1 386 387 1 386 2 1 385 387 1 387 0 0 1 388 2 1 389 390 1 389 2 1 388 390 1 390 0 0 1 391 2 1 392 393 1 392 2 1 391 393 1 393 0 0 1 394 2 1 395 396 1 395 2 1 394 396 1 396 0 0 1 397 2 1 398 399 1 398 2 1 397 399 1 399 0 0 1 400 2 1 401 402 1 401 2 1 400 402 1 402 0 0 1 403 2 1 404 405 1 404 2 1 403 405 1 405 0 0 1 406 2 1 407 408 1 407 2 1 406 408 1 408 0 0 1 409 2 1 410 411 1 410 2 1 409 411 1 411 0 0 1 412 2 1 413 414 1 413 2 1 412 414 1 414 0 0 1 415 2 1 416 417 1 416 2 1 415 417 1 417 0 0 1 418 2 1 419 420 1 419 2 1 418 420 1 420 0 0 1 421 2 1 422 423 1 422 2 1 421 423 1 423 0 0 1 424 2 1 425 426 1 425 2 1 424 426 1 426 0 0 1 427 2 1 428 429 1 428 2 1 427 429 1 429 0 0 1 430 2 1 431 432 1 431 2 1 430 432 1 432 0 0 1 433 2 1 434 435 1 434 2 1 433 435 1 435 0 0 1 436 2 1 437 438 1 437 2 1 436 438 1 438 0 0 1 439 2 1 440 441 1 440 2 1 439 441 1 441 0 0 1 442 2 1 443 444 1 443 2 1 442 444 1 444 0 0 1 445 2 1 446 447 1 446 2 1 445 447 1 447 0 0 1 448 2 1 449 450 1 449 2 1 448 450 1 450 0 0 1 451 2 1 452 453 1 452 2 1 451 453 1 453 0 0 1 454 2 1 455 456 1 455 2 1 454 456 1 456 0 0 1 457 2 1 458 459 1 458 2 1 457 459 1 459 0 0 1 460 2 1 461 462 1 461 2 1 460 462 1 462 0 0 1 463 2 1 464 465 1 464 2 1 463 465 1 465 0 0 1 466 2 1 467 468 1 467 2 1 466 468 1 468 0 0 1 469 2 1 470 471 1 470 2 1 469 471 1 471 0 0 1 472 2 1 473 474 1 473 2 1 472 474 1 474 0 0 1 475 2 1 476 477 1 476 2 1 475 477 1 477 0 0 1 478 2 1 479 480 1 479 2 1 478 480 1 480 0 0 1 481 2 1 482 483 1 482 2 1 481 483 1 483 0 0 1 484 2 1 485 486 1 485 2 1 484 486 1 486 0 0 1 487 2 1 488 489 1 488 2 1 487 489 1 489 0 0 1 490 2 1 491 492 1 491 2 1 490 492 1 492 0 0 1 493 2 1 494 495 1 494 2 1 493 495 1 495 0 0 1 496 2 1 497 498 1 497 2 1 496 498 1 498 0 0 1 499 2 1 500 501 1 500 2 1 499 501 1 501 0 0 1 502 2 1 503 504 1 503 2 1 502 504 1 504 0 0 1 505 2 1 506 507 1 506 2 1 505 507 1 507 0 0 1 508 2 1 509 510 1 509 2 1 508 510 1 510 0 0 1 511 2 1 512 513 1 512 2 1 511 513 1 513 
0 0 1 514 2 1 515 516 1 515 2 1 514 516 1 516 0 0 1 517 2 1 518 519 1 518 2 1 517 519 1 519 0 0 1 520 2 1 521 522 1 521 2 1 520 522 1 522 0 0 1 523 2 1 524 525 1 524 2 1 523 525 1 525 0 0 1 526 2 1 527 528 1 527 2 1 526 528 1 528 0 0 1 529 2 1 530 531 1 530 2 1 529 531 1 531 0 0 1 532 2 1 533 534 1 533 2 1 532 534 1 534 0 0 1 535 2 1 536 537 1 536 2 1 535 537 1 537 0 0 1 538 2 1 539 540 1 539 2 1 538 540 1 540 0 0 1 541 2 1 542 543 1 542 2 1 541 543 1 543 0 0 1 544 2 1 545 546 1 545 2 1 544 546 1 546 0 0 1 547 2 1 548 549 1 548 2 1 547 549 1 549 0 0 1 550 2 1 551 552 1 551 2 1 550 552 1 552 0 0 1 553 2 1 554 555 1 554 2 1 553 555 1 555 0 0 1 556 2 1 557 558 1 557 2 1 556 558 1 558 0 0 1 559 2 1 560 561 1 560 2 1 559 561 1 561 0 0 1 562 2 1 563 564 1 563 2 1 562 564 1 564 0 0 1 565 2 1 566 567 1 566 2 1 565 567 1 567 0 0 1 568 2 1 569 570 1 569 2 1 568 570 1 570 0 0 1 571 2 1 572 573 1 572 2 1 571 573 1 573 0 0 1 574 2 1 575 576 1 575 2 1 574 576 1 576 0 0 1 577 2 1 578 579 1 578 2 1 577 579 1 579 0 0 1 580 2 1 581 582 1 581 2 1 580 582 1 582 0 0 1 583 2 1 584 585 1 584 2 1 583 585 1 585 0 0 1 586 2 1 587 588 1 587 2 1 586 588 1 588 0 0 1 589 2 1 590 591 1 590 2 1 589 591 1 591 0 0 1 592 2 1 593 594 1 593 2 1 592 594 1 594 0 0 1 595 2 1 596 597 1 596 2 1 595 597 1 597 0 0 1 598 2 1 599 600 1 599 2 1 598 600 1 600 0 0 1 601 2 1 602 603 1 602 2 1 601 603 1 603 0 0 1 604 2 1 605 606 1 605 2 1 604 606 1 606 0 0 1 607 2 1 608 609 1 608 2 1 607 609 1 609 0 0 1 610 2 1 611 612 1 611 2 1 610 612 1 612 0 0 1 613 2 1 614 615 1 614 2 1 613 615 1 615 0 0 1 616 2 1 617 618 1 617 2 1 616 618 1 618 0 0 1 619 2 1 620 621 1 620 2 1 619 621 1 621 0 0 1 622 2 1 623 624 1 623 2 1 622 624 1 624 0 0 1 625 2 1 626 627 1 626 2 1 625 627 1 627 0 0 1 628 2 1 629 630 1 629 2 1 628 630 1 630 0 0 1 631 2 1 632 633 1 632 2 1 631 633 1 633 0 0 1 634 2 1 635 636 1 635 2 1 634 636 1 636 0 0 1 637 2 1 638 639 1 638 2 1 637 639 1 639 0 0 1 640 2 1 641 642 1 641 2 1 640 642 1 642 0 0 1 643 2 1 644 645 
1 644 2 1 643 645 1 645 0 0 1 646 2 1 647 648 1 647 2 1 646 648 1 648 0 0 1 649 2 1 650 651 1 650 2 1 649 651 1 651 0 0 1 652 2 1 653 654 1 653 2 1 652 654 1 654 0 0 1 655 2 1 656 657 1 656 2 1 655 657 1 657 0 0 1 658 2 1 659 660 1 659 2 1 658 660 1 660 0 0 1 661 2 1 662 663 1 662 2 1 661 663 1 663 0 0 1 664 2 1 665 666 1 665 2 1 664 666 1 666 0 0 1 667 2 1 668 669 1 668 2 1 667 669 1 669 0 0 1 670 2 1 671 672 1 671 2 1 670 672 1 672 0 0 1 673 2 1 674 675 1 674 2 1 673 675 1 675 0 0 1 676 2 1 677 678 1 677 2 1 676 678 1 678 0 0 1 679 2 1 680 681 1 680 2 1 679 681 1 681 0 0 1 682 2 1 683 684 1 683 2 1 682 684 1 684 0 0 1 685 2 1 686 687 1 686 2 1 685 687 1 687 0 0 1 688 2 1 689 690 1 689 2 1 688 690 1 690 0 0 1 691 1 0 688 1 692 1 0 685 1 693 1 0 682 1 694 1 0 679 1 695 1 0 676 1 696 1 0 673 1 697 1 0 670 1 698 1 0 667 1 699 1 0 664 1 700 1 0 661 1 701 1 0 658 1 702 1 0 655 1 703 1 0 652 1 704 1 0 649 1 705 1 0 646 1 706 1 0 643 1 707 1 0 640 1 708 1 0 637 1 709 1 0 634 1 710 1 0 631 1 711 1 0 628 1 712 1 0 625 1 713 1 0 622 1 714 1 0 619 1 715 1 0 616 1 716 1 0 613 1 717 1 0 610 1 718 1 0 607 1 719 1 0 604 1 720 1 0 601 1 721 1 0 598 1 722 1 0 595 1 723 1 0 592 1 724 1 0 589 1 725 1 0 586 1 726 1 0 583 1 727 1 0 580 1 728 1 0 577 1 729 1 0 574 1 730 1 0 571 1 691 1 0 568 1 692 1 0 565 1 693 1 0 562 1 694 1 0 559 1 695 1 0 556 1 696 1 0 553 1 697 1 0 550 1 698 1 0 547 1 699 1 0 544 1 700 1 0 541 1 701 1 0 538 1 702 1 0 535 1 703 1 0 532 1 704 1 0 529 1 705 1 0 526 1 706 1 0 523 1 707 1 0 520 1 708 1 0 517 1 709 1 0 514 1 710 1 0 511 1 711 1 0 508 1 712 1 0 505 1 713 1 0 502 1 714 1 0 499 1 715 1 0 496 1 716 1 0 493 1 717 1 0 490 1 718 1 0 487 1 719 1 0 484 1 720 1 0 481 1 721 1 0 478 1 722 1 0 475 1 723 1 0 472 1 724 1 0 469 1 725 1 0 466 1 726 1 0 463 1 727 1 0 460 1 728 1 0 457 1 729 1 0 454 1 730 1 0 451 1 691 1 0 448 1 692 1 0 445 1 693 1 0 442 1 694 1 0 439 1 695 1 0 436 1 696 1 0 433 1 697 1 0 430 1 698 1 0 427 1 699 1 0 424 1 700 1 0 421 1 701 1 0 418 1 702 1 
0 415 1 703 1 0 412 1 704 1 0 409 1 705 1 0 406 1 706 1 0 403 1 707 1 0 400 1 708 1 0 397 1 709 1 0 394 1 710 1 0 391 1 711 1 0 388 1 712 1 0 385 1 713 1 0 382 1 714 1 0 379 1 715 1 0 376 1 716 1 0 373 1 717 1 0 370 1 718 1 0 367 1 719 1 0 364 1 720 1 0 361 1 721 1 0 358 1 722 1 0 355 1 723 1 0 352 1 724 1 0 349 1 725 1 0 346 1 726 1 0 343 1 727 1 0 340 1 728 1 0 337 1 729 1 0 334 1 730 1 0 331 1 1 1 1 730 1 1 1 1 729 1 1 1 1 728 1 1 1 1 727 1 1 1 1 726 1 1 1 1 725 1 1 1 1 724 1 1 1 1 723 1 1 1 1 722 1 1 1 1 721 1 1 1 1 720 1 1 1 1 719 1 1 1 1 718 1 1 1 1 717 1 1 1 1 716 1 1 1 1 715 1 1 1 1 714 1 1 1 1 713 1 1 1 1 712 1 1 1 1 711 1 1 1 1 710 1 1 1 1 709 1 1 1 1 708 1 1 1 1 707 1 1 1 1 706 1 1 1 1 705 1 1 1 1 704 1 1 1 1 703 1 1 1 1 702 1 1 1 1 701 1 1 1 1 700 1 1 1 1 699 1 1 1 1 698 1 1 1 1 697 1 1 1 1 696 1 1 1 1 695 1 1 1 1 694 1 1 1 1 693 1 1 1 1 692 1 1 1 1 691 1 1 2 0 688 568 1 1 2 0 688 448 1 1 2 0 685 565 1 1 2 0 685 445 1 1 2 0 682 562 1 1 2 0 682 442 1 1 2 0 679 559 1 1 2 0 679 439 1 1 2 0 676 556 1 1 2 0 676 436 1 1 2 0 673 553 1 1 2 0 673 433 1 1 2 0 670 550 1 1 2 0 670 430 1 1 2 0 667 547 1 1 2 0 667 427 1 1 2 0 664 544 1 1 2 0 664 424 1 1 2 0 661 541 1 1 2 0 661 421 1 1 2 0 658 538 1 1 2 0 658 418 1 1 2 0 655 535 1 1 2 0 655 415 1 1 2 0 652 532 1 1 2 0 652 412 1 1 2 0 649 529 1 1 2 0 649 409 1 1 2 0 646 526 1 1 2 0 646 406 1 1 2 0 643 523 1 1 2 0 643 403 1 1 2 0 640 520 1 1 2 0 640 400 1 1 2 0 637 517 1 1 2 0 637 397 1 1 2 0 634 514 1 1 2 0 634 394 1 1 2 0 631 511 1 1 2 0 631 391 1 1 2 0 628 508 1 1 2 0 628 388 1 1 2 0 625 505 1 1 2 0 625 385 1 1 2 0 622 502 1 1 2 0 622 382 1 1 2 0 619 499 1 1 2 0 619 379 1 1 2 0 616 496 1 1 2 0 616 376 1 1 2 0 613 493 1 1 2 0 613 373 1 1 2 0 610 490 1 1 2 0 610 370 1 1 2 0 607 487 1 1 2 0 607 367 1 1 2 0 604 484 1 1 2 0 604 364 1 1 2 0 601 481 1 1 2 0 601 361 1 1 2 0 598 478 1 1 2 0 598 358 1 1 2 0 595 475 1 1 2 0 595 355 1 1 2 0 592 472 1 1 2 0 592 352 1 1 2 0 589 469 1 1 2 0 589 349 1 1 2 0 586 466 1 1 2 0 586 346 1 
1 2 0 583 463 1 1 2 0 583 343 1 1 2 0 580 460 1 1 2 0 580 340 1 1 2 0 577 457 1 1 2 0 577 337 1 1 2 0 574 454 1 1 2 0 574 334 1 1 2 0 571 451 1 1 2 0 571 331 1 1 2 0 568 688 1 1 2 0 568 448 1 1 2 0 565 685 1 1 2 0 565 445 1 1 2 0 562 682 1 1 2 0 562 442 1 1 2 0 559 679 1 1 2 0 559 439 1 1 2 0 556 676 1 1 2 0 556 436 1 1 2 0 553 673 1 1 2 0 553 433 1 1 2 0 550 670 1 1 2 0 550 430 1 1 2 0 547 667 1 1 2 0 547 427 1 1 2 0 544 664 1 1 2 0 544 424 1 1 2 0 541 661 1 1 2 0 541 421 1 1 2 0 538 658 1 1 2 0 538 418 1 1 2 0 535 655 1 1 2 0 535 415 1 1 2 0 532 652 1 1 2 0 532 412 1 1 2 0 529 649 1 1 2 0 529 409 1 1 2 0 526 646 1 1 2 0 526 406 1 1 2 0 523 643 1 1 2 0 523 403 1 1 2 0 520 640 1 1 2 0 520 400 1 1 2 0 517 637 1 1 2 0 517 397 1 1 2 0 514 634 1 1 2 0 514 394 1 1 2 0 511 631 1 1 2 0 511 391 1 1 2 0 508 628 1 1 2 0 508 388 1 1 2 0 505 625 1 1 2 0 505 385 1 1 2 0 502 622 1 1 2 0 502 382 1 1 2 0 499 619 1 1 2 0 499 379 1 1 2 0 496 616 1 1 2 0 496 376 1 1 2 0 493 613 1 1 2 0 493 373 1 1 2 0 490 610 1 1 2 0 490 370 1 1 2 0 487 607 1 1 2 0 487 367 1 1 2 0 484 604 1 1 2 0 484 364 1 1 2 0 481 601 1 1 2 0 481 361 1 1 2 0 478 598 1 1 2 0 478 358 1 1 2 0 475 595 1 1 2 0 475 355 1 1 2 0 472 592 1 1 2 0 472 352 1 1 2 0 469 589 1 1 2 0 469 349 1 1 2 0 466 586 1 1 2 0 466 346 1 1 2 0 463 583 1 1 2 0 463 343 1 1 2 0 460 580 1 1 2 0 460 340 1 1 2 0 457 577 1 1 2 0 457 337 1 1 2 0 454 574 1 1 2 0 454 334 1 1 2 0 451 571 1 1 2 0 451 331 1 1 2 0 448 688 1 1 2 0 448 568 1 1 2 0 445 685 1 1 2 0 445 565 1 1 2 0 442 682 1 1 2 0 442 562 1 1 2 0 439 679 1 1 2 0 439 559 1 1 2 0 436 676 1 1 2 0 436 556 1 1 2 0 433 673 1 1 2 0 433 553 1 1 2 0 430 670 1 1 2 0 430 550 1 1 2 0 427 667 1 1 2 0 427 547 1 1 2 0 424 664 1 1 2 0 424 544 1 1 2 0 421 661 1 1 2 0 421 541 1 1 2 0 418 658 1 1 2 0 418 538 1 1 2 0 415 655 1 1 2 0 415 535 1 1 2 0 412 652 1 1 2 0 412 532 1 1 2 0 409 649 1 1 2 0 409 529 1 1 2 0 406 646 1 1 2 0 406 526 1 1 2 0 403 643 1 1 2 0 403 523 1 1 2 0 400 640 1 1 2 0 400 520 1 1 2 0 397 637 1 
1 2 0 397 517 1 1 2 0 394 634 1 1 2 0 394 514 1 1 2 0 391 631 1 1 2 0 391 511 1 1 2 0 388 628 1 1 2 0 388 508 1 1 2 0 385 625 1 1 2 0 385 505 1 1 2 0 382 622 1 1 2 0 382 502 1 1 2 0 379 619 1 1 2 0 379 499 1 1 2 0 376 616 1 1 2 0 376 496 1 1 2 0 373 613 1 1 2 0 373 493 1 1 2 0 370 610 1 1 2 0 370 490 1 1 2 0 367 607 1 1 2 0 367 487 1 1 2 0 364 604 1 1 2 0 364 484 1 1 2 0 361 601 1 1 2 0 361 481 1 1 2 0 358 598 1 1 2 0 358 478 1 1 2 0 355 595 1 1 2 0 355 475 1 1 2 0 352 592 1 1 2 0 352 472 1 1 2 0 349 589 1 1 2 0 349 469 1 1 2 0 346 586 1 1 2 0 346 466 1 1 2 0 343 583 1 1 2 0 343 463 1 1 2 0 340 580 1 1 2 0 340 460 1 1 2 0 337 577 1 1 2 0 337 457 1 1 2 0 334 574 1 1 2 0 334 454 1 1 2 0 331 571 1 1 2 0 331 451 1 1 2 0 688 685 1 1 2 0 688 658 1 1 2 0 688 646 1 1 2 0 688 643 1 1 2 0 688 589 1 1 2 0 688 574 1 1 2 0 688 571 1 1 2 0 685 670 1 1 2 0 685 652 1 1 2 0 685 646 1 1 2 0 685 643 1 1 2 0 685 622 1 1 2 0 685 619 1 1 2 0 685 574 1 1 2 0 682 679 1 1 2 0 682 670 1 1 2 0 682 640 1 1 2 0 682 625 1 1 2 0 682 604 1 1 2 0 682 601 1 1 2 0 682 580 1 1 2 0 679 676 1 1 2 0 679 673 1 1 2 0 679 655 1 1 2 0 679 634 1 1 2 0 679 610 1 1 2 0 679 577 1 1 2 0 676 664 1 1 2 0 676 658 1 1 2 0 676 652 1 1 2 0 676 613 1 1 2 0 676 610 1 1 2 0 676 595 1 1 2 0 676 592 1 1 2 0 673 670 1 1 2 0 673 649 1 1 2 0 673 643 1 1 2 0 673 607 1 1 2 0 670 655 1 1 2 0 670 649 1 1 2 0 670 637 1 1 2 0 670 631 1 1 2 0 667 652 1 1 2 0 667 649 1 1 2 0 667 646 1 1 2 0 667 643 1 1 2 0 667 607 1 1 2 0 667 601 1 1 2 0 667 577 1 1 2 0 667 571 1 1 2 0 664 643 1 1 2 0 664 640 1 1 2 0 664 625 1 1 2 0 664 589 1 1 2 0 664 586 1 1 2 0 661 655 1 1 2 0 661 631 1 1 2 0 661 628 1 1 2 0 661 625 1 1 2 0 661 613 1 1 2 0 661 574 1 1 2 0 658 652 1 1 2 0 658 646 1 1 2 0 658 613 1 1 2 0 658 610 1 1 2 0 658 601 1 1 2 0 655 652 1 1 2 0 655 637 1 1 2 0 655 634 1 1 2 0 655 613 1 1 2 0 655 601 1 1 2 0 655 595 1 1 2 0 652 649 1 1 2 0 652 625 1 1 2 0 652 622 1 1 2 0 652 616 1 1 2 0 652 613 1 1 2 0 652 604 1 1 2 0 649 637 1 1 2 0 649 601 1 
1 2 0 649 583 1 1 2 0 649 574 1 1 2 0 646 607 1 1 2 0 646 574 1 1 2 0 646 571 1 1 2 0 643 637 1 1 2 0 643 616 1 1 2 0 643 598 1 1 2 0 643 595 1 1 2 0 643 589 1 1 2 0 640 637 1 1 2 0 637 631 1 1 2 0 637 619 1 1 2 0 637 613 1 1 2 0 637 598 1 1 2 0 637 592 1 1 2 0 637 583 1 1 2 0 637 574 1 1 2 0 634 583 1 1 2 0 634 571 1 1 2 0 631 628 1 1 2 0 631 625 1 1 2 0 631 619 1 1 2 0 631 598 1 1 2 0 631 583 1 1 2 0 628 625 1 1 2 0 628 604 1 1 2 0 628 598 1 1 2 0 628 577 1 1 2 0 625 592 1 1 2 0 625 577 1 1 2 0 625 574 1 1 2 0 622 604 1 1 2 0 622 598 1 1 2 0 622 571 1 1 2 0 619 616 1 1 2 0 619 613 1 1 2 0 619 610 1 1 2 0 619 604 1 1 2 0 616 610 1 1 2 0 616 598 1 1 2 0 616 583 1 1 2 0 613 610 1 1 2 0 613 604 1 1 2 0 613 601 1 1 2 0 613 574 1 1 2 0 610 607 1 1 2 0 610 577 1 1 2 0 607 601 1 1 2 0 604 601 1 1 2 0 604 595 1 1 2 0 604 586 1 1 2 0 604 577 1 1 2 0 601 589 1 1 2 0 601 574 1 1 2 0 601 571 1 1 2 0 598 595 1 1 2 0 598 577 1 1 2 0 595 574 1 1 2 0 592 574 1 1 2 0 589 580 1 1 2 0 589 571 1 1 2 0 586 577 1 1 2 0 583 580 1 1 2 0 577 574 1 1 2 0 574 571 1 1 2 0 568 565 1 1 2 0 568 538 1 1 2 0 568 526 1 1 2 0 568 523 1 1 2 0 568 469 1 1 2 0 568 454 1 1 2 0 568 451 1 1 2 0 565 550 1 1 2 0 565 532 1 1 2 0 565 526 1 1 2 0 565 523 1 1 2 0 565 502 1 1 2 0 565 499 1 1 2 0 565 454 1 1 2 0 562 559 1 1 2 0 562 550 1 1 2 0 562 520 1 1 2 0 562 505 1 1 2 0 562 484 1 1 2 0 562 481 1 1 2 0 562 460 1 1 2 0 559 556 1 1 2 0 559 553 1 1 2 0 559 535 1 1 2 0 559 514 1 1 2 0 559 490 1 1 2 0 559 457 1 1 2 0 556 544 1 1 2 0 556 538 1 1 2 0 556 532 1 1 2 0 556 493 1 1 2 0 556 490 1 1 2 0 556 475 1 1 2 0 556 472 1 1 2 0 553 550 1 1 2 0 553 529 1 1 2 0 553 523 1 1 2 0 553 487 1 1 2 0 550 535 1 1 2 0 550 529 1 1 2 0 550 517 1 1 2 0 550 511 1 1 2 0 547 532 1 1 2 0 547 529 1 1 2 0 547 526 1 1 2 0 547 523 1 1 2 0 547 487 1 1 2 0 547 481 1 1 2 0 547 457 1 1 2 0 547 451 1 1 2 0 544 523 1 1 2 0 544 520 1 1 2 0 544 505 1 1 2 0 544 469 1 1 2 0 544 466 1 1 2 0 541 535 1 1 2 0 541 511 1 1 2 0 541 508 1 1 2 0 541 505 1 
1 2 0 541 493 1 1 2 0 541 454 1 1 2 0 538 532 1 1 2 0 538 526 1 1 2 0 538 493 1 1 2 0 538 490 1 1 2 0 538 481 1 1 2 0 535 532 1 1 2 0 535 517 1 1 2 0 535 514 1 1 2 0 535 493 1 1 2 0 535 481 1 1 2 0 535 475 1 1 2 0 532 529 1 1 2 0 532 505 1 1 2 0 532 502 1 1 2 0 532 496 1 1 2 0 532 493 1 1 2 0 532 484 1 1 2 0 529 517 1 1 2 0 529 481 1 1 2 0 529 463 1 1 2 0 529 454 1 1 2 0 526 487 1 1 2 0 526 454 1 1 2 0 526 451 1 1 2 0 523 517 1 1 2 0 523 496 1 1 2 0 523 478 1 1 2 0 523 475 1 1 2 0 523 469 1 1 2 0 520 517 1 1 2 0 517 511 1 1 2 0 517 499 1 1 2 0 517 493 1 1 2 0 517 478 1 1 2 0 517 472 1 1 2 0 517 463 1 1 2 0 517 454 1 1 2 0 514 463 1 1 2 0 514 451 1 1 2 0 511 508 1 1 2 0 511 505 1 1 2 0 511 499 1 1 2 0 511 478 1 1 2 0 511 463 1 1 2 0 508 505 1 1 2 0 508 484 1 1 2 0 508 478 1 1 2 0 508 457 1 1 2 0 505 472 1 1 2 0 505 457 1 1 2 0 505 454 1 1 2 0 502 484 1 1 2 0 502 478 1 1 2 0 502 451 1 1 2 0 499 496 1 1 2 0 499 493 1 1 2 0 499 490 1 1 2 0 499 484 1 1 2 0 496 490 1 1 2 0 496 478 1 1 2 0 496 463 1 1 2 0 493 490 1 1 2 0 493 484 1 1 2 0 493 481 1 1 2 0 493 454 1 1 2 0 490 487 1 1 2 0 490 457 1 1 2 0 487 481 1 1 2 0 484 481 1 1 2 0 484 475 1 1 2 0 484 466 1 1 2 0 484 457 1 1 2 0 481 469 1 1 2 0 481 454 1 1 2 0 481 451 1 1 2 0 478 475 1 1 2 0 478 457 1 1 2 0 475 454 1 1 2 0 472 454 1 1 2 0 469 460 1 1 2 0 469 451 1 1 2 0 466 457 1 1 2 0 463 460 1 1 2 0 457 454 1 1 2 0 454 451 1 1 2 0 448 445 1 1 2 0 448 418 1 1 2 0 448 406 1 1 2 0 448 403 1 1 2 0 448 349 1 1 2 0 448 334 1 1 2 0 448 331 1 1 2 0 445 430 1 1 2 0 445 412 1 1 2 0 445 406 1 1 2 0 445 403 1 1 2 0 445 382 1 1 2 0 445 379 1 1 2 0 445 334 1 1 2 0 442 439 1 1 2 0 442 430 1 1 2 0 442 400 1 1 2 0 442 385 1 1 2 0 442 364 1 1 2 0 442 361 1 1 2 0 442 340 1 1 2 0 439 436 1 1 2 0 439 433 1 1 2 0 439 415 1 1 2 0 439 394 1 1 2 0 439 370 1 1 2 0 439 337 1 1 2 0 436 424 1 1 2 0 436 418 1 1 2 0 436 412 1 1 2 0 436 373 1 1 2 0 436 370 1 1 2 0 436 355 1 1 2 0 436 352 1 1 2 0 433 430 1 1 2 0 433 409 1 1 2 0 433 403 1 1 2 0 433 367 1 
1 2 0 430 415 1 1 2 0 430 409 1 1 2 0 430 397 1 1 2 0 430 391 1 1 2 0 427 412 1 1 2 0 427 409 1 1 2 0 427 406 1 1 2 0 427 403 1 1 2 0 427 367 1 1 2 0 427 361 1 1 2 0 427 337 1 1 2 0 427 331 1 1 2 0 424 403 1 1 2 0 424 400 1 1 2 0 424 385 1 1 2 0 424 349 1 1 2 0 424 346 1 1 2 0 421 415 1 1 2 0 421 391 1 1 2 0 421 388 1 1 2 0 421 385 1 1 2 0 421 373 1 1 2 0 421 334 1 1 2 0 418 412 1 1 2 0 418 406 1 1 2 0 418 373 1 1 2 0 418 370 1 1 2 0 418 361 1 1 2 0 415 412 1 1 2 0 415 397 1 1 2 0 415 394 1 1 2 0 415 373 1 1 2 0 415 361 1 1 2 0 415 355 1 1 2 0 412 409 1 1 2 0 412 385 1 1 2 0 412 382 1 1 2 0 412 376 1 1 2 0 412 373 1 1 2 0 412 364 1 1 2 0 409 397 1 1 2 0 409 361 1 1 2 0 409 343 1 1 2 0 409 334 1 1 2 0 406 367 1 1 2 0 406 334 1 1 2 0 406 331 1 1 2 0 403 397 1 1 2 0 403 376 1 1 2 0 403 358 1 1 2 0 403 355 1 1 2 0 403 349 1 1 2 0 400 397 1 1 2 0 397 391 1 1 2 0 397 379 1 1 2 0 397 373 1 1 2 0 397 358 1 1 2 0 397 352 1 1 2 0 397 343 1 1 2 0 397 334 1 1 2 0 394 343 1 1 2 0 394 331 1 1 2 0 391 388 1 1 2 0 391 385 1 1 2 0 391 379 1 1 2 0 391 358 1 1 2 0 391 343 1 1 2 0 388 385 1 1 2 0 388 364 1 1 2 0 388 358 1 1 2 0 388 337 1 1 2 0 385 352 1 1 2 0 385 337 1 1 2 0 385 334 1 1 2 0 382 364 1 1 2 0 382 358 1 1 2 0 382 331 1 1 2 0 379 376 1 1 2 0 379 373 1 1 2 0 379 370 1 1 2 0 379 364 1 1 2 0 376 370 1 1 2 0 376 358 1 1 2 0 376 343 1 1 2 0 373 370 1 1 2 0 373 364 1 1 2 0 373 361 1 1 2 0 373 334 1 1 2 0 370 367 1 1 2 0 370 337 1 1 2 0 367 361 1 1 2 0 364 361 1 1 2 0 364 355 1 1 2 0 364 346 1 1 2 0 364 337 1 1 2 0 361 349 1 1 2 0 361 334 1 1 2 0 361 331 1 1 2 0 358 355 1 1 2 0 358 337 1 1 2 0 355 334 1 1 2 0 352 334 1 1 2 0 349 340 1 1 2 0 349 331 1 1 2 0 346 337 1 1 2 0 343 340 1 1 2 0 337 334 1 1 2 0 334 331 0 331 col(39,3) 334 col(38,3) 337 col(37,3) 340 col(36,3) 343 col(35,3) 346 col(34,3) 349 col(33,3) 352 col(32,3) 355 col(31,3) 358 col(30,3) 361 col(29,3) 364 col(28,3) 367 col(27,3) 370 col(26,3) 373 col(25,3) 376 col(24,3) 379 col(23,3) 382 col(22,3) 385 col(21,3) 388 
col(20,3) 391 col(19,3) 394 col(18,3) 397 col(17,3) 400 col(16,3) 403 col(15,3) 406 col(14,3) 409 col(13,3) 412 col(12,3) 415 col(11,3) 418 col(10,3) 421 col(9,3) 424 col(8,3) 427 col(7,3) 430 col(6,3) 433 col(5,3) 436 col(4,3) 439 col(3,3) 442 col(2,3) 445 col(1,3) 448 col(0,3) 451 col(39,2) 454 col(38,2) 457 col(37,2) 460 col(36,2) 463 col(35,2) 466 col(34,2) 469 col(33,2) 472 col(32,2) 475 col(31,2) 478 col(30,2) 481 col(29,2) 484 col(28,2) 487 col(27,2) 490 col(26,2) 493 col(25,2) 496 col(24,2) 499 col(23,2) 502 col(22,2) 505 col(21,2) 508 col(20,2) 511 col(19,2) 514 col(18,2) 517 col(17,2) 520 col(16,2) 523 col(15,2) 526 col(14,2) 529 col(13,2) 532 col(12,2) 535 col(11,2) 538 col(10,2) 541 col(9,2) 544 col(8,2) 547 col(7,2) 550 col(6,2) 553 col(5,2) 556 col(4,2) 559 col(3,2) 562 col(2,2) 565 col(1,2) 568 col(0,2) 571 col(39,1) 574 col(38,1) 577 col(37,1) 580 col(36,1) 583 col(35,1) 586 col(34,1) 589 col(33,1) 592 col(32,1) 595 col(31,1) 598 col(30,1) 601 col(29,1) 604 col(28,1) 607 col(27,1) 610 col(26,1) 613 col(25,1) 616 col(24,1) 619 col(23,1) 622 col(22,1) 625 col(21,1) 628 col(20,1) 631 col(19,1) 634 col(18,1) 637 col(17,1) 640 col(16,1) 643 col(15,1) 646 col(14,1) 649 col(13,1) 652 col(12,1) 655 col(11,1) 658 col(10,1) 661 col(9,1) 664 col(8,1) 667 col(7,1) 670 col(6,1) 673 col(5,1) 676 col(4,1) 679 col(3,1) 682 col(2,1) 685 col(1,1) 688 col(0,1) 0 B+ 0 B- 1 0 1 """ output = """ INCOHERENT """
codeparrot/github-code-clean
""" @package mi.instrument.seabird.sbe26plus_v2.driver @file mi/instrument/seabird/sbe16plus_v2/driver.py @author David Everett @brief Driver base class for sbe16plus V2 CTD instrument. """ __author__ = 'David Everett' __license__ = 'Apache 2.0' import time import re from mi.core.log import get_logger, get_logging_metaclass log = get_logger() from mi.core.common import BaseEnum, Units from mi.core.util import dict_equal from mi.core.instrument.protocol_param_dict import ParameterDictType from mi.core.instrument.instrument_fsm import InstrumentFSM from mi.core.instrument.instrument_driver import DriverParameter from mi.core.instrument.data_particle import CommonDataParticleType from mi.core.instrument.chunker import StringChunker from mi.core.instrument.driver_dict import DriverDictKey from mi.core.instrument.instrument_protocol import CommandResponseInstrumentProtocol, InitializationType from mi.core.instrument.instrument_driver import SingleConnectionInstrumentDriver from mi.core.instrument.data_particle import DataParticle from mi.core.instrument.data_particle import DataParticleKey from mi.core.instrument.protocol_param_dict import ParameterDictVisibility from mi.core.instrument.instrument_driver import DriverProtocolState from mi.core.instrument.instrument_driver import ResourceAgentState from mi.core.instrument.instrument_driver import DriverAsyncEvent from mi.core.instrument.instrument_driver import DriverEvent from mi.core.exceptions import InstrumentProtocolException from mi.core.exceptions import InstrumentParameterException from mi.core.exceptions import NotImplementedException from mi.core.exceptions import SampleException from xml.dom.minidom import parseString from mi.core.time_tools import get_timestamp_delayed WAKEUP_TIMEOUT = 3 NEWLINE = '\r\n' SBE_EPOCH = 946713600 # Unix time for SBE epoch 2000-01-01 00:00:00 TIMEOUT = 20 DEFAULT_ENCODER_KEY = '__default__' ERROR_PATTERN = r"<ERROR type='(.*?)' msg='(.*?)'\/>" ERROR_REGEX = 
re.compile(ERROR_PATTERN, re.DOTALL) class ScheduledJob(BaseEnum): ACQUIRE_STATUS = 'acquire_status' CLOCK_SYNC = 'clock_sync' class Command(BaseEnum): GET_CD = 'GetCD' GET_SD = 'GetSD' GET_CC = 'GetCC' GET_EC = 'GetEC' RESET_EC = 'ResetEC' GET_HD = 'GetHD' #DS = 'ds' #Superceded by GetCD and GetSD, do not use! #DCAL = 'dcal' #Superceded by GetCC, do not use! TS = 'ts' STARTNOW = 'StartNow' STOP = 'Stop' SET = 'set' class ProtocolState(BaseEnum): """ Protocol states for SBE16. Cherry picked from DriverProtocolState enum. """ UNKNOWN = DriverProtocolState.UNKNOWN COMMAND = DriverProtocolState.COMMAND AUTOSAMPLE = DriverProtocolState.AUTOSAMPLE DIRECT_ACCESS = DriverProtocolState.DIRECT_ACCESS class ProtocolEvent(BaseEnum): """ Protocol events for SBE16. Cherry picked from DriverEvent enum. """ ENTER = DriverEvent.ENTER EXIT = DriverEvent.EXIT GET = DriverEvent.GET SET = DriverEvent.SET DISCOVER = DriverEvent.DISCOVER ACQUIRE_SAMPLE = DriverEvent.ACQUIRE_SAMPLE START_AUTOSAMPLE = DriverEvent.START_AUTOSAMPLE STOP_AUTOSAMPLE = DriverEvent.STOP_AUTOSAMPLE EXECUTE_DIRECT = DriverEvent.EXECUTE_DIRECT START_DIRECT = DriverEvent.START_DIRECT STOP_DIRECT = DriverEvent.STOP_DIRECT CLOCK_SYNC = DriverEvent.CLOCK_SYNC SCHEDULED_CLOCK_SYNC = DriverEvent.SCHEDULED_CLOCK_SYNC ACQUIRE_STATUS = DriverEvent.ACQUIRE_STATUS SCHEDULED_ACQUIRED_STATUS = 'PROTOCOL_EVENT_SCHEDULED_ACQUIRE_STATUS' class Capability(BaseEnum): """ Capabilities that are exposed to the user (subset of above) """ GET = DriverEvent.GET SET = DriverEvent.SET START_AUTOSAMPLE = DriverEvent.START_AUTOSAMPLE STOP_AUTOSAMPLE = DriverEvent.STOP_AUTOSAMPLE CLOCK_SYNC = DriverEvent.CLOCK_SYNC ACQUIRE_STATUS = DriverEvent.ACQUIRE_STATUS ACQUIRE_SAMPLE = DriverEvent.ACQUIRE_SAMPLE START_DIRECT = DriverEvent.START_DIRECT STOP_DIRECT = DriverEvent.STOP_DIRECT class CommonParameter(DriverParameter): DATE_TIME = "DateTime" PTYPE = "PType" VOLT0 = "Volt0" VOLT1 = "Volt1" VOLT2 = "Volt2" VOLT3 = "Volt3" VOLT4 = "Volt4" VOLT5 = 
"Volt5" SBE38 = "SBE38" SBE63 = "SBE63" WETLABS = "WetLabs" GTD = "GTD" DUAL_GTD = "DualGTD" OPTODE = "OPTODE" OUTPUT_FORMAT = "OutputFormat" LOGGING = "logging" class Parameter(CommonParameter): """ Device parameters for SBE16. """ INTERVAL = 'SampleInterval' TXREALTIME = 'TXREALTIME' ECHO = "echo" OUTPUT_EXEC_TAG = 'OutputExecutedTag' PUMP_MODE = "PumpMode" NCYCLES = "NCycles" BIOWIPER = "Biowiper" DELAY_BEFORE_SAMPLE = "DelayBeforeSampling" DELAY_AFTER_SAMPLE = "DelayAfterSampling" SBE50 = "SBE50" SYNCMODE = "SyncMode" SYNCWAIT = "SyncWait" class ConfirmedParameter(BaseEnum): """ List of all parameters that require confirmation i.e. set sent twice to confirm. """ PTYPE = Parameter.PTYPE SBE63 = Parameter.SBE63 SBE38 = Parameter.SBE38 SBE50 = Parameter.SBE50 GTD = Parameter.GTD DUAL_GTD = Parameter.DUAL_GTD OPTODE = Parameter.OPTODE WETLABS = Parameter.WETLABS VOLT0 = Parameter.VOLT0 VOLT1 = Parameter.VOLT1 VOLT2 = Parameter.VOLT2 VOLT3 = Parameter.VOLT3 VOLT4 = Parameter.VOLT4 VOLT5 = Parameter.VOLT5 # Device prompts. class Prompt(BaseEnum): """ SBE16 io prompts. """ COMMAND = NEWLINE + 'S>' BAD_COMMAND = '?cmd S>' AUTOSAMPLE = NEWLINE + 'S>' EXECUTED = '<Executed/>' class DataParticleType(BaseEnum): RAW = CommonDataParticleType.RAW CTD_PARSED = 'ctdbp_cdef_sample' DEVICE_STATUS = 'ctdbp_cdef_status' DEVICE_CALIBRATION = 'ctdbp_cdef_calibration_coefficients' class Sbe16plusBaseParticle(DataParticle): """ Overload the base particle to add in some common parsing logic for SBE instruments. Add regex methods to help identify and parse multi-line strings. """ @staticmethod def regex(): """ Return a regex string to use in matching functions. This can be used for parsing too if more complex parsing isn't needed. 
@return: uncompiled regex string """ NotImplementedException() @staticmethod def regex_compiled(): """ Return a regex compiled regex of the regex @return: compiled regex """ NotImplementedException() def regex_multiline(self): """ return a dictionary containing uncompiled regex used to match patterns in SBE multiline results. includes an encoder method. @return: dictionary of uncompiled regexs """ NotImplementedException() def regex_multiline_compiled(self): """ return a dictionary containing compiled regex used to match patterns in SBE multiline results. @return: dictionary of compiled regexs """ result = {} for (key, regex) in self.regex_multiline().iteritems(): result[key] = re.compile(regex, re.DOTALL) return result def encoders(self): """ return a dictionary containing encoder methods for parameters a special key 'default' can be used to name the default mechanism @return: dictionary containing encoder callbacks """ NotImplementedException() def _get_multiline_values(self, split_fun=None): """ return a dictionary containing keys and found values from a multiline sample using the multiline regex @param: split_fun - function to which splits sample into lines @return: dictionary of compiled regexs """ result = [] if split_fun is None: split_fun = self._split_on_newline matchers = self.regex_multiline_compiled() regexs = self.regex_multiline() for line in split_fun(self.raw_data): log.trace("Line: %s" % line) for key in matchers.keys(): log.trace("match: %s" % regexs.get(key)) match = matchers[key].search(line) if match: encoder = self._get_encoder(key) if encoder: log.debug("encoding value %s (%s)" % (key, match.group(1))) value = encoder(match.group(1)) else: value = match.group(1) log.trace("multiline match %s = %s (%s)" % (key, match.group(1), value)) result.append({ DataParticleKey.VALUE_ID: key, DataParticleKey.VALUE: value }) return result def _split_on_newline(self, value): """ default split method for multiline regex matches @param: value string to split 
@return: list of line split on NEWLINE """ return value.split(NEWLINE) def _get_encoder(self, key): """ Get an encoder for a key, if one isn't specified look for a default. Can return None for no encoder @param: key encoder we are looking for @return: dictionary of encoders. """ encoder = self.encoders().get(key) if not encoder: encoder = self.encoders().get(DEFAULT_ENCODER_KEY) return encoder def _map_param_to_xml_tag(self, parameter_name): """ @return: a string containing the xml tag name for a parameter """ NotImplementedException() def _extract_xml_elements(self, node, tag, raise_exception_if_none_found=True): """ extract elements with tag from an XML node @param: node - XML node to look in @param: tag - tag of elements to look for @param: raise_exception_if_none_found - raise an exception if no element is found @return: return list of elements found; empty list if none found """ elements = node.getElementsByTagName(tag) if raise_exception_if_none_found and len(elements) == 0: raise SampleException("_extract_xml_elements: No %s in input data: [%s]" % (tag, self.raw_data)) return elements def _extract_xml_element_value(self, node, tag, raise_exception_if_none_found=True): """ extract element value that has tag from an XML node @param: node - XML node to look in @param: tag - tag of elements to look for @param: raise_exception_if_none_found - raise an exception if no value is found @return: return value of element """ elements = self._extract_xml_elements(node, tag, raise_exception_if_none_found) if elements is None: return None children = elements[0].childNodes if len(children) == 0 and raise_exception_if_none_found: raise SampleException("_extract_xml_element_value: No value for %s in input data: [%s]" % (tag, self.raw_data)) return children[0].nodeValue def _get_xml_parameter(self, xml_element, parameter_name, dtype=float, raise_exception_if_none_found=True): try: value = dtype(self._extract_xml_element_value(xml_element, 
self._map_param_to_xml_tag(parameter_name))) except SampleException: if raise_exception_if_none_found: raise SampleException value = None return {DataParticleKey.VALUE_ID: parameter_name, DataParticleKey.VALUE: value} ######################################################################## # Static helpers. ######################################################################## @staticmethod def hex2value(hex_value, divisor=None): """ Convert a SBE hex value to a value. Some hex values are converted from raw counts to volts using a divisor. If passed the value will be calculated, otherwise return an int. @param hex_value: string to convert @param divisor: conversion value @return: int or float of the converted value """ if not isinstance(hex_value, str): raise InstrumentParameterException("hex value not a string") if divisor is not None and divisor == 0: raise InstrumentParameterException("divisor can not be 0") value = int(hex_value, 16) if divisor is not None: return float(value) / divisor return value @staticmethod def yesno2bool(value): """ convert a yes no response to a bool @param value: string to convert @return: bool """ if not (isinstance(value, str) or isinstance(value, unicode)): raise InstrumentParameterException("value not a string") if value.lower() == 'no': return 0 elif value.lower() == 'yes': return 1 raise InstrumentParameterException("Could not convert '%s' to bool" % value) @staticmethod def sbetime2unixtime(value): """ Convert an SBE integer time (epoch 1-1-2000) to unix time @param value: sbe integer time @return: unix time """ if not isinstance(value, int): raise InstrumentParameterException("value not a int") return SBE_EPOCH + value @staticmethod def float_to_int(val): return int(float(val)) class SBE16DataParticleKey(BaseEnum): TEMP = "temperature" CONDUCTIVITY = "conductivity" PRESSURE = "pressure" PRESSURE_TEMP = "pressure_temp" TIME = "ctd_time" class SBE16DataParticle(Sbe16plusBaseParticle): """ Routines for parsing raw data into a 
class SBE16DataParticleKey(BaseEnum):
    """Keys for the parsed CTD sample particle."""
    TEMP = "temperature"
    CONDUCTIVITY = "conductivity"
    PRESSURE = "pressure"
    PRESSURE_TEMP = "pressure_temp"
    TIME = "ctd_time"


class SBE16DataParticle(Sbe16plusBaseParticle):
    """
    Routines for parsing raw data into a data particle structure. Override
    the building of values, and the rest should come along for free.

    Sample:
       #03EC1F0A738A81736187100004000B2CFDC618B859BE

    Format:
       #ttttttccccccppppppvvvvvvvvvvvvssssssss

       Temperature = tttttt = 0A5371 (676721 decimal); temperature A/D counts = 676721
       Conductivity = 1BC722 (1820450 decimal); conductivity frequency = 1820450 / 256 = 7111.133 Hz
       Internally mounted strain gauge pressure = pppppp = 0C14C1 (791745 decimal);
           Strain gauge pressure A/D counts = 791745
       Internally mounted strain gauge temperature compensation = vvvv = 7D82 (32,130 decimal);
           Strain gauge temperature = 32,130 / 13,107 = 2.4514 volts
       First external voltage = vvvv = 0305 (773 decimal); voltage = 773 / 13,107 = 0.0590 volts
       Second external voltage = vvvv = 0594 (1428 decimal); voltage = 1428 / 13,107 = 0.1089 volts
       Time = ssssssss = 0EC4270B (247,736,075 decimal); seconds since January 1, 2000 = 247,736,075
    """
    _data_particle_type = DataParticleType.CTD_PARSED

    @staticmethod
    def regex():
        """
        Regular expression to match a sample pattern
        @return: regex string
        """
        # ttttttccccccppppppvvvvvvvvvvvvssssssss
        fields = (
            r'#? *',           # pattern may or may not start with a '#'
            r'([0-9A-F]{6})',  # temperature
            r'([0-9A-F]{6})',  # conductivity
            r'([0-9A-F]{6})',  # pressure
            r'([0-9A-F]{4})',  # pressure temp
            r'[0-9A-F]*',      # consume extra voltage measurements
            r'([0-9A-F]{8})',  # time
            NEWLINE,
        )
        return ''.join(fields)

    @staticmethod
    def regex_compiled():
        """
        get the compiled regex pattern
        @return: compiled re
        """
        return re.compile(SBE16DataParticle.regex())

    def _build_parsed_values(self):
        """
        Take something in the autosample/TS format and split it into
        C, T, and D values (with appropriate tags)
        @throws SampleException If there is a problem with sample creation
        """
        sample = SBE16DataParticle.regex_compiled().match(self.raw_data)
        if not sample:
            raise SampleException("No regex match of parsed sample data: [%s]" % self.raw_data)

        try:
            # Groups 1-5: temperature, conductivity, pressure, pressure
            # temperature compensation, elapsed time (SBE epoch seconds).
            temperature, conductivity, pressure, pressure_temp, elapse_time = \
                [self.hex2value(sample.group(g)) for g in range(1, 6)]
            self.set_internal_timestamp(unix_time=self.sbetime2unixtime(elapse_time))
        except ValueError:
            raise SampleException("ValueError while converting data: [%s]" % self.raw_data)

        return [
            {DataParticleKey.VALUE_ID: SBE16DataParticleKey.TEMP,
             DataParticleKey.VALUE: temperature},
            {DataParticleKey.VALUE_ID: SBE16DataParticleKey.CONDUCTIVITY,
             DataParticleKey.VALUE: conductivity},
            {DataParticleKey.VALUE_ID: SBE16DataParticleKey.PRESSURE,
             DataParticleKey.VALUE: pressure},
            {DataParticleKey.VALUE_ID: SBE16DataParticleKey.PRESSURE_TEMP,
             DataParticleKey.VALUE: pressure_temp},
            {DataParticleKey.VALUE_ID: SBE16DataParticleKey.TIME,
             DataParticleKey.VALUE: elapse_time},
        ]
= "pump_current" LOGGING_STATUS = "logging_status" SAMPLES = "num_samples" MEM_FREE = "mem_free" SAMPLE_INTERVAL = "sample_interval" MEASUREMENTS_PER_SAMPLE = "measurements_per_sample" PUMP_MODE = "pump_mode" DELAY_BEFORE_SAMPLING = "delay_before_sampling" DELAY_AFTER_SAMPLING = "delay_after_sampling" TX_REAL_TIME = "tx_real_time" BATTERY_CUTOFF = "battery_cutoff" PRESSURE_SENSOR = "pressure_sensor_type" RANGE = "pressure_sensor_range" SBE38 = "sbe38" SBE50 = "sbe50" WETLABS = "wetlabs" OPTODE = "optode" GAS_TENSION_DEVICE = "gas_tension_device" EXT_VOLT_0 = "ext_volt_0" EXT_VOLT_1 = "ext_volt_1" EXT_VOLT_2 = "ext_volt_2" EXT_VOLT_3 = "ext_volt_3" EXT_VOLT_4 = "ext_volt_4" EXT_VOLT_5 = "ext_volt_5" ECHO_CHARACTERS = "echo_characters" OUTPUT_FORMAT = "output_format" OUTPUT_SALINITY = "output_salinity" OUTPUT_SOUND_VELOCITY = "output_sound_velocity" SERIAL_SYNC_MODE = "serial_sync_mode" class SBE16StatusParticle(Sbe16plusBaseParticle): """ Routines for parsing raw data into a data particle structure. Override the building of values, and the rest should come along for free. """ _data_particle_type = DataParticleType.DEVICE_STATUS @staticmethod def regex(): pattern = r'(<StatusData.*?</StatusData>).*?(<HardwareData.*?</HardwareData>).*?(<ConfigurationData.*?' 
\ r'</ConfigurationData>)' return pattern @staticmethod def regex_compiled(): return re.compile(SBE16StatusParticle.regex(), re.DOTALL) def _map_param_to_xml_tag(self, parameter_name): map_param_to_tag = { #GetSD response SBE16StatusParticleKey.DATE_TIME: 'DateTime', SBE16StatusParticleKey.VBATT : 'vMain', SBE16StatusParticleKey.VLITH : 'vLith', SBE16StatusParticleKey.IOPER : 'iMain', SBE16StatusParticleKey.IPUMP : 'iPump', SBE16StatusParticleKey.LOGGING_STATUS : 'LoggingState', SBE16StatusParticleKey.SAMPLES : 'Samples', SBE16StatusParticleKey.MEM_FREE : 'SamplesFree', #GetHD response SBE16StatusParticleKey.FIRMWARE_VERSION: 'FirmwareVersion', #GetCD response SBE16StatusParticleKey.PUMP_MODE : 'AutoRun', SBE16StatusParticleKey.DELAY_BEFORE_SAMPLING : 'PumpDelay', SBE16StatusParticleKey.DELAY_AFTER_SAMPLING : 'PumpDelay', SBE16StatusParticleKey.SBE38 : 'SBE38', SBE16StatusParticleKey.SBE50 : 'SBE50', SBE16StatusParticleKey.WETLABS : 'WETLABS', SBE16StatusParticleKey.OPTODE : 'OPTODE', SBE16StatusParticleKey.GAS_TENSION_DEVICE : 'GTD', SBE16StatusParticleKey.EXT_VOLT_0 : 'ExtVolt0', SBE16StatusParticleKey.EXT_VOLT_1 : 'ExtVolt1', SBE16StatusParticleKey.EXT_VOLT_2 : 'ExtVolt2', SBE16StatusParticleKey.EXT_VOLT_3 : 'ExtVolt3', SBE16StatusParticleKey.EXT_VOLT_4 : 'ExtVolt4', SBE16StatusParticleKey.EXT_VOLT_5 : 'ExtVolt5', SBE16StatusParticleKey.ECHO_CHARACTERS : 'EchoCharacters', SBE16StatusParticleKey.OUTPUT_FORMAT : 'OutputFormat', #not sure where these values are coming from SBE16StatusParticleKey.OUTPUT_SALINITY : 'OutputSal', SBE16StatusParticleKey.OUTPUT_SOUND_VELOCITY : 'OutputSV', SBE16StatusParticleKey.SERIAL_SYNC_MODE : 'SyncMode', SBE16StatusParticleKey.RANGE : 'PRange', SBE16StatusParticleKey.TX_REAL_TIME : 'TxRealTime', SBE16StatusParticleKey.BATTERY_CUTOFF : 'CutOff', SBE16StatusParticleKey.PRESSURE_SENSOR : 'type', SBE16StatusParticleKey.SAMPLE_INTERVAL : 'SampleInterval', SBE16StatusParticleKey.MEASUREMENTS_PER_SAMPLE : 'NCycles', } return 
map_param_to_tag[parameter_name] def _build_parsed_values(self): """ Take something in the autosample/TS format and split it into C, T, and D values (with appropriate tags) @throws SampleException If there is a problem with sample creation """ match = SBE16StatusParticle.regex_compiled().match(self.raw_data) if not match: raise SampleException("No regex match of parsed status data: [%s]" % self.raw_data) dom = parseString(match.group(1)) root = dom.documentElement serial_number = root.getAttribute("SerialNumber") result = [{DataParticleKey.VALUE_ID: SBE16StatusParticleKey.SERIAL_NUMBER, DataParticleKey.VALUE: serial_number}] result.append(self._get_xml_parameter(root, SBE16StatusParticleKey.DATE_TIME, str)) result.append(self._get_xml_parameter(root, SBE16StatusParticleKey.VBATT)) result.append(self._get_xml_parameter(root, SBE16StatusParticleKey.VLITH)) result.append(self._get_xml_parameter(root, SBE16StatusParticleKey.IOPER)) result.append(self._get_xml_parameter(root, SBE16StatusParticleKey.IPUMP)) result.append(self._get_xml_parameter(root, SBE16StatusParticleKey.LOGGING_STATUS, str)) result.append(self._get_xml_parameter(root, SBE16StatusParticleKey.SAMPLES, int)) result.append(self._get_xml_parameter(root, SBE16StatusParticleKey.MEM_FREE, int)) result.append(self._get_xml_parameter(root, SBE16StatusParticleKey.SAMPLE_INTERVAL, int, False)), result.append(self._get_xml_parameter(root, SBE16StatusParticleKey.MEASUREMENTS_PER_SAMPLE, int, False)), dom = parseString(match.group(2)) root = dom.documentElement sensors = self._extract_xml_elements(root, "Sensor") for sensor in sensors: sensor_id = sensor.getAttribute("id") log.debug('SENSOR ID %r', sensor_id) if sensor_id == "Main Pressure": result.append(self._get_xml_parameter(sensor, SBE16StatusParticleKey.PRESSURE_SENSOR, str)) result.append(self._get_xml_parameter(root, SBE16StatusParticleKey.FIRMWARE_VERSION, str)) result.append(self._get_xml_parameter(root, SBE16StatusParticleKey.RANGE, int, False)) dom = 
parseString(match.group(3)) root = dom.documentElement result.append(self._get_xml_parameter(root, SBE16StatusParticleKey.PUMP_MODE, str)) result.append(self._get_xml_parameter(root, SBE16StatusParticleKey.DELAY_BEFORE_SAMPLING)) result.append(self._get_xml_parameter(root, SBE16StatusParticleKey.DELAY_AFTER_SAMPLING)), result.append(self._get_xml_parameter(root, SBE16StatusParticleKey.SBE38, self.yesno2bool)) result.append(self._get_xml_parameter(root, SBE16StatusParticleKey.SBE50, self.yesno2bool)) result.append(self._get_xml_parameter(root, SBE16StatusParticleKey.WETLABS, self.yesno2bool)) result.append(self._get_xml_parameter(root, SBE16StatusParticleKey.OPTODE, self.yesno2bool)) result.append(self._get_xml_parameter(root, SBE16StatusParticleKey.GAS_TENSION_DEVICE, self.yesno2bool)) result.append(self._get_xml_parameter(root, SBE16StatusParticleKey.EXT_VOLT_0, self.yesno2bool)) result.append(self._get_xml_parameter(root, SBE16StatusParticleKey.EXT_VOLT_1, self.yesno2bool)) result.append(self._get_xml_parameter(root, SBE16StatusParticleKey.EXT_VOLT_2, self.yesno2bool)) result.append(self._get_xml_parameter(root, SBE16StatusParticleKey.EXT_VOLT_3, self.yesno2bool)) result.append(self._get_xml_parameter(root, SBE16StatusParticleKey.EXT_VOLT_4, self.yesno2bool)) result.append(self._get_xml_parameter(root, SBE16StatusParticleKey.EXT_VOLT_5, self.yesno2bool)) result.append(self._get_xml_parameter(root, SBE16StatusParticleKey.ECHO_CHARACTERS, self.yesno2bool)) result.append(self._get_xml_parameter(root, SBE16StatusParticleKey.OUTPUT_FORMAT, str)) result.append(self._get_xml_parameter(root, SBE16StatusParticleKey.OUTPUT_SALINITY, self.yesno2bool, False)) result.append(self._get_xml_parameter(root, SBE16StatusParticleKey.OUTPUT_SOUND_VELOCITY, self.yesno2bool, False)) result.append(self._get_xml_parameter(root, SBE16StatusParticleKey.BATTERY_CUTOFF)), result.append(self._get_xml_parameter(root, SBE16StatusParticleKey.TX_REAL_TIME, self.yesno2bool, False)) 
result.append(self._get_xml_parameter(root, SBE16StatusParticleKey.SERIAL_SYNC_MODE, self.yesno2bool, False)) return result class SBE16CalibrationParticleKey(BaseEnum): FIRMWARE_VERSION = "firmware_version" SERIAL_NUMBER = "serial_number" DATE_TIME = "date_time_string" TEMP_CAL_DATE = "calibration_date_temperature" TA0 = "temp_coeff_ta0" TA1 = "temp_coeff_ta1" TA2 = "temp_coeff_ta2" TA3 = "temp_coeff_ta3" TOFFSET = "temp_coeff_offset" COND_CAL_DATE = "calibration_date_conductivity" CONDG = "cond_coeff_cg" CONDH = "cond_coeff_ch" CONDI = "cond_coeff_ci" CONDJ = "cond_coeff_cj" CPCOR = "cond_coeff_cpcor" CTCOR = "cond_coeff_ctcor" CSLOPE = "cond_coeff_cslope" PRES_SERIAL_NUMBER = "pressure_sensor_serial_number" PRES_RANGE = "pressure_sensor_range" PRES_CAL_DATE = "calibration_date_pressure" # Quartz PC1 = "press_coeff_pc1" PC2 = "press_coeff_pc2" PC3 = "press_coeff_pc3" PD1 = "press_coeff_pd1" PD2 = "press_coeff_pd2" PT1 = "press_coeff_pt1" PT2 = "press_coeff_pt2" PT3 = "press_coeff_pt3" PT4 = "press_coeff_pt4" PSLOPE = "press_coeff_pslope" # strain gauge PA0 = "press_coeff_pa0" PA1 = "press_coeff_pa1" PA2 = "press_coeff_pa2" PTCA0 = "press_coeff_ptca0" PTCA1 = "press_coeff_ptca1" PTCA2 = "press_coeff_ptca2" PTCB0 = "press_coeff_ptcb0" PTCB1 = "press_coeff_ptcb1" PTCB2 = "press_coeff_ptcb2" PTEMPA0 = "press_coeff_ptempa0" PTEMPA1 = "press_coeff_ptempa1" PTEMPA2 = "press_coeff_ptempa2" POFFSET = "press_coeff_poffset" EXT_VOLT0_OFFSET = "ext_volt0_offset" EXT_VOLT0_SLOPE = "ext_volt0_slope" EXT_VOLT1_OFFSET = "ext_volt1_offset" EXT_VOLT1_SLOPE = "ext_volt1_slope" EXT_VOLT2_OFFSET = "ext_volt2_offset" EXT_VOLT2_SLOPE = "ext_volt2_slope" EXT_VOLT3_OFFSET = "ext_volt3_offset" EXT_VOLT3_SLOPE = "ext_volt3_slope" EXT_VOLT4_OFFSET = "ext_volt4_offset" EXT_VOLT4_SLOPE = "ext_volt4_slope" EXT_VOLT5_OFFSET = "ext_volt5_offset" EXT_VOLT5_SLOPE = "ext_volt5_slope" EXT_FREQ = "ext_freq_sf" class SBE16CalibrationParticle(Sbe16plusBaseParticle): """ Routines for parsing raw data 
into a data particle structure. Override the building of values, and the rest should come along for free. """ _data_particle_type = DataParticleType.DEVICE_CALIBRATION @staticmethod def regex(): pattern = r'(<CalibrationCoefficients.*?</CalibrationCoefficients>)' + NEWLINE return pattern @staticmethod def regex_compiled(): return re.compile(SBE16CalibrationParticle.regex(), re.DOTALL) @staticmethod def resp_regex(): pattern = r'(<CalibrationCoefficients.*?</CalibrationCoefficients>)' return pattern @staticmethod def resp_regex_compiled(): return re.compile(SBE16CalibrationParticle.resp_regex(), re.DOTALL) def _map_param_to_xml_tag(self, parameter_name): map_param_to_tag = { SBE16CalibrationParticleKey.FIRMWARE_VERSION : "DeviceType", SBE16CalibrationParticleKey.SERIAL_NUMBER : "SerialNum", SBE16CalibrationParticleKey.DATE_TIME : "CalDate", SBE16CalibrationParticleKey.TEMP_CAL_DATE : "CalDate", SBE16CalibrationParticleKey.TA0 : "TA0", SBE16CalibrationParticleKey.TA1 : "TA1", SBE16CalibrationParticleKey.TA2 : "TA2", SBE16CalibrationParticleKey.TA3 : "TA3", SBE16CalibrationParticleKey.TOFFSET : "TOFFSET", SBE16CalibrationParticleKey.COND_CAL_DATE : "CalDate", SBE16CalibrationParticleKey.CONDG : "G", SBE16CalibrationParticleKey.CONDH : "H", SBE16CalibrationParticleKey.CONDI : "I", SBE16CalibrationParticleKey.CONDJ : "J", SBE16CalibrationParticleKey.CPCOR : "CPCOR", SBE16CalibrationParticleKey.CTCOR : "CTCOR", SBE16CalibrationParticleKey.CSLOPE : "CSLOPE", SBE16CalibrationParticleKey.PRES_SERIAL_NUMBER : "SerialNum", SBE16CalibrationParticleKey.PRES_RANGE : r'PRANGE', SBE16CalibrationParticleKey.PRES_CAL_DATE : "CalDate", SBE16CalibrationParticleKey.PA0 : "PA0", SBE16CalibrationParticleKey.PA1 : "PA1", SBE16CalibrationParticleKey.PA2 : "PA2", SBE16CalibrationParticleKey.PTCA0 : "PTCA0", SBE16CalibrationParticleKey.PTCA1 : "PTCA1", SBE16CalibrationParticleKey.PTCA2 : "PTCA2", SBE16CalibrationParticleKey.PTCB0 : "PTCB0", SBE16CalibrationParticleKey.PTCB1 : "PTCB1", 
SBE16CalibrationParticleKey.PTCB2 : "PTCB2", SBE16CalibrationParticleKey.PTEMPA0 : "PTEMPA0", SBE16CalibrationParticleKey.PTEMPA1 : "PTEMPA1", SBE16CalibrationParticleKey.PTEMPA2 : "PTEMPA2", # Quartz SBE16CalibrationParticleKey.PC1 : "PC1", SBE16CalibrationParticleKey.PC2 : "PC2", SBE16CalibrationParticleKey.PC3 : "PC3", SBE16CalibrationParticleKey.PD1 : "PD1", SBE16CalibrationParticleKey.PD2 : "PD2", SBE16CalibrationParticleKey.PT1 : "PT1", SBE16CalibrationParticleKey.PT2 : "PT2", SBE16CalibrationParticleKey.PT3 : "PT3", SBE16CalibrationParticleKey.PT4 : "PT4", SBE16CalibrationParticleKey.PSLOPE : "PSLOPE", SBE16CalibrationParticleKey.POFFSET : "POFFSET", SBE16CalibrationParticleKey.EXT_VOLT0_OFFSET : "OFFSET", SBE16CalibrationParticleKey.EXT_VOLT0_SLOPE : "SLOPE", SBE16CalibrationParticleKey.EXT_VOLT1_OFFSET : "OFFSET", SBE16CalibrationParticleKey.EXT_VOLT1_SLOPE : "SLOPE", SBE16CalibrationParticleKey.EXT_VOLT2_OFFSET : "OFFSET", SBE16CalibrationParticleKey.EXT_VOLT2_SLOPE : "SLOPE", SBE16CalibrationParticleKey.EXT_VOLT3_OFFSET : "OFFSET", SBE16CalibrationParticleKey.EXT_VOLT3_SLOPE : "SLOPE", SBE16CalibrationParticleKey.EXT_VOLT4_OFFSET : "OFFSET", SBE16CalibrationParticleKey.EXT_VOLT4_SLOPE : "SLOPE", SBE16CalibrationParticleKey.EXT_VOLT5_OFFSET : "OFFSET", SBE16CalibrationParticleKey.EXT_VOLT5_SLOPE : "SLOPE", SBE16CalibrationParticleKey.EXT_FREQ : "EXTFREQSF" } return map_param_to_tag[parameter_name] def _build_parsed_values(self): """ Parse the output of the calibration command @throws SampleException If there is a problem with sample creation """ match = SBE16CalibrationParticle.regex_compiled().match(self.raw_data) if not match: raise SampleException("No regex match of parsed status data: [%s]" % self.raw_data) SERIAL_NUMBER = "SerialNumber" CALIBRATION = "Calibration" ID = "id" TEMPERATURE_SENSOR_ID = "Main Temperature" CONDUCTIVITY_SENSOR_ID = "Main Conductivity" PRESSURE_SENSOR_ID = "Main Pressure" VOLT0 = "Volt 0" VOLT1 = "Volt 1" VOLT2 = "Volt 2" 
VOLT3 = "Volt 3" VOLT4 = "Volt 4" VOLT5 = "Volt 5" EXTERNAL_FREQUENCY_CHANNEL = "external frequency channel" dom = parseString(self.raw_data) root = dom.documentElement serial_number = root.getAttribute(SERIAL_NUMBER) firmware_version = root.getAttribute("DeviceType") result = [{DataParticleKey.VALUE_ID: SBE16CalibrationParticleKey.SERIAL_NUMBER, DataParticleKey.VALUE: serial_number}, {DataParticleKey.VALUE_ID: SBE16CalibrationParticleKey.FIRMWARE_VERSION, DataParticleKey.VALUE: firmware_version}] calibration_elements = self._extract_xml_elements(root, CALIBRATION) for calibration in calibration_elements: id_attr = calibration.getAttribute(ID) if id_attr == TEMPERATURE_SENSOR_ID: result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.DATE_TIME, str)) result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.TEMP_CAL_DATE, str)) result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.TA0)) result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.TA1)) result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.TA2)) result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.TA3)) result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.TOFFSET)) elif id_attr == CONDUCTIVITY_SENSOR_ID: result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.COND_CAL_DATE, str)) result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.CONDG)) result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.CONDH)) result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.CONDI)) result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.CONDJ)) result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.CPCOR)) result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.CTCOR)) 
result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.CSLOPE)) elif id_attr == PRESSURE_SENSOR_ID: result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.PRES_SERIAL_NUMBER, str)) result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.PRES_CAL_DATE, str)) result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.PA0, float, False)) result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.PA1, float, False)) result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.PA2, float, False)) result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.PTCA0, float, False)) result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.PTCA1, float, False)) result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.PTCA2, float, False)) result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.PTCB0, float, False)) result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.PTCB1, float, False)) result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.PTCB2, float, False)) result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.PTEMPA0, float, False)) result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.PTEMPA1, float, False)) result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.PTEMPA2, float, False)) result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.POFFSET)) result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.PRES_RANGE, self.float_to_int, False)) result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.PC1, float, False)) result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.PC2, float, False)) result.append(self._get_xml_parameter(calibration, 
SBE16CalibrationParticleKey.PC3, float, False)) result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.PD1, float, False)) result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.PD2, float, False)) result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.PT1, float, False)) result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.PT2, float, False)) result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.PT3, float, False)) result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.PT4, float, False)) result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.PSLOPE, float, False)) elif id_attr == VOLT0: result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.EXT_VOLT0_OFFSET)) result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.EXT_VOLT0_SLOPE)) elif id_attr == VOLT1: result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.EXT_VOLT1_OFFSET)) result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.EXT_VOLT1_SLOPE)) elif id_attr == VOLT2: result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.EXT_VOLT2_OFFSET)) result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.EXT_VOLT2_SLOPE)) elif id_attr == VOLT3: result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.EXT_VOLT3_OFFSET)) result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.EXT_VOLT3_SLOPE)) elif id_attr == VOLT4: result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.EXT_VOLT4_OFFSET)) result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.EXT_VOLT4_SLOPE)) elif id_attr == VOLT5: result.append(self._get_xml_parameter(calibration, SBE16CalibrationParticleKey.EXT_VOLT5_OFFSET)) 
                result.append(self._get_xml_parameter(calibration,
                                                      SBE16CalibrationParticleKey.EXT_VOLT5_SLOPE))
            elif id_attr == EXTERNAL_FREQUENCY_CHANNEL:
                result.append(self._get_xml_parameter(calibration,
                                                      SBE16CalibrationParticleKey.EXT_FREQ))

        log.debug('RESULT = %r', result)
        return result


###############################################################################
# Driver
###############################################################################

class SBE16InstrumentDriver(SingleConnectionInstrumentDriver):
    """
    InstrumentDriver subclass for SBE16 driver.
    Subclasses SingleConnectionInstrumentDriver with connection state machine.
    """
    def __init__(self, evt_callback):
        """
        InstrumentDriver constructor.
        @param evt_callback Driver process event callback.
        """
        SingleConnectionInstrumentDriver.__init__(self, evt_callback)

    def get_resource_params(self):
        """
        Return list of device parameters available.
        """
        return Parameter.list()

    ########################################################################
    # Protocol builder.
    ########################################################################

    def _build_protocol(self):
        """
        Construct the driver protocol state machine.
        """
        self._protocol = SBE16Protocol(Prompt, NEWLINE, self._driver_event)


###############################################################################
# Seabird Electronics SBE16 protocol.
###############################################################################

class SBE16Protocol(CommandResponseInstrumentProtocol):
    """
    Instrument protocol class for SBE16 driver.
    Subclasses SeaBirdProtocol
    """
    __metaclass__ = get_logging_metaclass(log_level='debug')

    # Set True by _got_chunk whenever a data particle is extracted; used by
    # _handler_unknown_discover to detect an instrument already streaming.
    _sampling = False

    def __init__(self, prompts, newline, driver_event):
        """
        SBE16Protocol constructor.
        @param prompts A BaseEnum class containing instrument prompts.
        @param newline The SBE16 newline.
        @param driver_event Driver process event callback.
        """
        CommandResponseInstrumentProtocol.__init__(self, prompts, newline, driver_event)

        # Build SBE16 protocol state machine.
        self._protocol_fsm = InstrumentFSM(ProtocolState, ProtocolEvent,
                                           ProtocolEvent.ENTER, ProtocolEvent.EXIT)

        # Add event handlers for protocol state machine.
        handlers = {
            ProtocolState.UNKNOWN: [
                (ProtocolEvent.ENTER, self._handler_unknown_enter),
                (ProtocolEvent.EXIT, self._handler_generic_exit),
                (ProtocolEvent.DISCOVER, self._handler_unknown_discover),
            ],
            ProtocolState.COMMAND: [
                (ProtocolEvent.ENTER, self._handler_command_enter),
                (ProtocolEvent.EXIT, self._handler_generic_exit),
                (ProtocolEvent.ACQUIRE_SAMPLE, self._handler_command_acquire_sample),
                (ProtocolEvent.START_AUTOSAMPLE, self._handler_command_start_autosample),
                (ProtocolEvent.GET, self._handler_get),
                (ProtocolEvent.SET, self._handler_command_set),
                (ProtocolEvent.START_DIRECT, self._handler_command_start_direct),
                (ProtocolEvent.CLOCK_SYNC, self._handler_command_clock_sync_clock),
                (ProtocolEvent.ACQUIRE_STATUS, self._handler_command_acquire_status),
                (ProtocolEvent.SCHEDULED_ACQUIRED_STATUS, self._handler_command_acquire_status),
                (ProtocolEvent.SCHEDULED_CLOCK_SYNC, self._handler_command_clock_sync_clock)
            ],
            ProtocolState.DIRECT_ACCESS: [
                (ProtocolEvent.ENTER, self._handler_direct_access_enter),
                (ProtocolEvent.EXIT, self._handler_generic_exit),
                (ProtocolEvent.EXECUTE_DIRECT, self._handler_direct_access_execute_direct),
                (ProtocolEvent.STOP_DIRECT, self._handler_direct_access_stop_direct)
            ],
            ProtocolState.AUTOSAMPLE: [
                (ProtocolEvent.ENTER, self._handler_autosample_enter),
                (ProtocolEvent.EXIT, self._handler_generic_exit),
                (ProtocolEvent.GET, self._handler_get),
                (ProtocolEvent.STOP_AUTOSAMPLE, self._handler_autosample_stop_autosample),
                (ProtocolEvent.SCHEDULED_ACQUIRED_STATUS, self._handler_autosample_acquire_status),
                (ProtocolEvent.SCHEDULED_CLOCK_SYNC, self._handler_autosample_clock_sync)
            ]
        }

        for state in handlers:
            for event, handler in handlers[state]:
                self._protocol_fsm.add_handler(state, event, handler)

        # Construct the parameter dictionary containing device parameters,
        # current parameter values, and set formatting functions.
        self._build_driver_dict()
        self._build_command_dict()
        self._build_param_dict()

        # Add build handlers for device commands.  SET gets a dedicated
        # builder and response parser; everything else uses the simple
        # command builder.
        for cmd in Command.list():
            if cmd == Command.SET:
                self._add_build_handler(Command.SET, self._build_set_command)
                self._add_response_handler(Command.SET, self._parse_set_response)
            else:
                self._add_build_handler(cmd, self._build_simple_command)

        # Add response handlers for device commands.
        self._add_response_handler(Command.GET_SD, self._parse_status_response)
        self._add_response_handler(Command.GET_HD, self._parse_status_response)
        self._add_response_handler(Command.GET_CD, self._parse_status_response)
        self._add_response_handler(Command.GET_CC, self._parse_status_response)
        self._add_response_handler(Command.GET_EC, self._parse_status_response)

        # Start the state machine in the UNKNOWN state.
        self._protocol_fsm.start(ProtocolState.UNKNOWN)

        self._chunker = StringChunker(self.sieve_function)

        self._add_scheduler_event(ScheduledJob.ACQUIRE_STATUS, ProtocolEvent.ACQUIRE_STATUS)
        self._add_scheduler_event(ScheduledJob.CLOCK_SYNC, ProtocolEvent.SCHEDULED_CLOCK_SYNC)

    @staticmethod
    def sieve_function(raw_data):
        """
        The method that splits samples.  Returns a list of (start, end)
        index pairs for every data/status/calibration particle match in
        raw_data.
        """
        matchers = []
        return_list = []

        matchers.append(SBE16DataParticle.regex_compiled())
        matchers.append(SBE16StatusParticle.regex_compiled())
        matchers.append(SBE16CalibrationParticle.regex_compiled())

        for matcher in matchers:
            for match in matcher.finditer(raw_data):
                return_list.append((match.start(), match.end()))

        return return_list

    def _filter_capabilities(self, events):
        """
        Return only the events that are valid driver capabilities.
        """
        return [x for x in events if Capability.has(x)]

    def _got_chunk(self, chunk, timestamp):
        """
        The base class got_data has gotten a chunk from the chunker.
        Pass it to extract_sample with the appropriate particle
        objects and REGEXes.
        """
        # A successful data-particle extraction flags that the instrument is
        # streaming (consumed by _handler_unknown_discover).
        if self._extract_sample(SBE16DataParticle, SBE16DataParticle.regex_compiled(), chunk, timestamp):
            self._sampling = True

        # any() is used only to evaluate both extraction calls for their
        # side effects (publishing particles); its result is discarded.
        any([
            self._extract_sample(SBE16StatusParticle, SBE16StatusParticle.regex_compiled(), chunk, timestamp),
            self._extract_sample(SBE16CalibrationParticle, SBE16CalibrationParticle.regex_compiled(), chunk, timestamp)])

    def _build_driver_dict(self):
        """
        Populate the driver dictionary with options
        """
        self._driver_dict.add(DriverDictKey.VENDOR_SW_COMPATIBLE, True)

    def _build_command_dict(self):
        """
        Populate the command dictionary with command.
        """
        self._cmd_dict.add(Capability.START_AUTOSAMPLE, display_name="Start Autosample")
        self._cmd_dict.add(Capability.STOP_AUTOSAMPLE, display_name="Stop Autosample")
        self._cmd_dict.add(Capability.CLOCK_SYNC, display_name="Synchronize Clock")
        self._cmd_dict.add(Capability.ACQUIRE_STATUS, display_name="Acquire Status")
        self._cmd_dict.add(Capability.ACQUIRE_SAMPLE, display_name="Acquire Sample")

    ########################################################################
    # Unknown handlers.
    ########################################################################

    def _handler_unknown_enter(self, *args, **kwargs):
        """
        Enter unknown state.
        """
        self._driver_event(DriverAsyncEvent.STATE_CHANGE)

    def _handler_unknown_discover(self, *args, **kwargs):
        """
        Discover current state; can be COMMAND or AUTOSAMPLE.
        @retval (next_state, next_agent_state), COMMAND or AUTOSAMPLE
        @throws InstrumentProtocolException if the device response does not correspond to
        an expected state.
        """
        # Check for a sample particle: listen passively for up to 2 seconds;
        # _got_chunk sets _sampling when a streamed data particle arrives.
        self._sampling = False
        timeout = 2
        end_time = time.time() + timeout
        while time.time() < end_time:
            time.sleep(.1)
            if self._sampling:
                return ProtocolState.AUTOSAMPLE, ResourceAgentState.STREAMING

        return ProtocolState.COMMAND, ResourceAgentState.IDLE

    ########################################################################
    # Command handlers.
    ########################################################################

    def _handler_command_enter(self, *args, **kwargs):
        """
        Enter command state.
        @throws InstrumentTimeoutException if the device cannot be woken.
        @throws InstrumentProtocolException if the update commands are not recognized.
        """
        if self._init_type != InitializationType.NONE:
            self._update_params()
        self._init_params()
        self._driver_event(DriverAsyncEvent.STATE_CHANGE)

    def _handler_command_acquire_status(self, *args, **kwargs):
        """
        Get device status.  Issues the five status commands (GetSD, GetHD,
        GetCD, GetCC, GetEC) and returns their concatenated responses.
        """
        result = []
        result.append(self._do_cmd_resp(Command.GET_SD, timeout=TIMEOUT))
        log.debug("_handler_command_acquire_status: GetSD Response: %s", result)
        result.append(self._do_cmd_resp(Command.GET_HD, timeout=TIMEOUT))
        log.debug("_handler_command_acquire_status: GetHD Response: %s", result)
        result.append(self._do_cmd_resp(Command.GET_CD, timeout=TIMEOUT))
        log.debug("_handler_command_acquire_status: GetCD Response: %s", result)
        result.append(self._do_cmd_resp(Command.GET_CC, timeout=TIMEOUT))
        log.debug("_handler_command_acquire_status: GetCC Response: %s", result)
        result.append(self._do_cmd_resp(Command.GET_EC, timeout=TIMEOUT))
        log.debug("_handler_command_acquire_status: GetEC Response: %s", result)

        # Reset the event counter right after getEC
        self._do_cmd_resp(Command.RESET_EC, timeout=TIMEOUT)

        return None, (None, ''.join(result))

    def _handler_command_set(self, *args, **kwargs):
        """
        Perform a set command.
        @param args[0] parameter : value dict.
        @retval (next_state, result) tuple, (None, None).
        @throws InstrumentParameterException if missing set parameters, if set parameters not ALL and
        not a dict, or if parameter can't be properly formatted.
        """
        startup = False

        # Retrieve required parameter.
        # Raise if no parameter provided, or not a dict.
        try:
            params = args[0]
        except IndexError:
            raise InstrumentParameterException('Set command requires a parameter dict.')

        if not isinstance(params, dict):
            raise InstrumentParameterException('Set parameters not a dict.')

        # Optional second positional argument flags a startup-time set.
        try:
            startup = args[1]
        except IndexError:
            pass

        self._set_params(params, startup)

        return None, None

    def _set_params(self, *args, **kwargs):
        """
        Issue commands to the instrument to set various parameters
        """
        try:
            params = args[0]
        except IndexError:
            raise InstrumentParameterException('Set command requires a parameter dict.')

        self._verify_not_readonly(*args, **kwargs)
        update_params = False

        # Pump Mode is the only parameter set by the driver whose input
        # isn't validated by the instrument, so we do a quick range check
        # before we start all sets.
        for (key, val) in params.iteritems():
            if key == Parameter.PUMP_MODE and val not in [0, 1, 2]:
                raise InstrumentParameterException("pump mode out of range")

        for (key, val) in params.iteritems():
            old_val = self._param_dict.format(key)
            new_val = self._param_dict.format(key, val)
            log.debug("KEY = %r OLD VALUE = %r NEW VALUE = %r", key, old_val, new_val)
            if old_val != new_val:
                update_params = True
                if ConfirmedParameter.has(key):
                    # We add a write delay here because this command has to be sent
                    # twice, the write delay allows it to process the first command
                    # before it receives the beginning of the second.
                    self._do_cmd_resp(Command.SET, key, val, write_delay=0.2)
                else:
                    self._do_cmd_resp(Command.SET, key, val, **kwargs)

        log.debug("set complete, update params")
        if update_params:
            self._update_params()

    def _handler_command_acquire_sample(self, *args, **kwargs):
        """
        Acquire sample from SBE16.
        @retval next_state, (next_agent_state, result) tuple
        """
        result = self._do_cmd_resp(Command.TS, *args, **kwargs)

        return None, (None, result)

    def _handler_command_start_autosample(self, *args, **kwargs):
        """
        Switch into autosample mode.
        @retval (next_state, result) tuple, (ProtocolState.AUTOSAMPLE,
        (next_agent_state, None) if successful.
        @throws InstrumentTimeoutException if device cannot be woken for command.
        @throws InstrumentProtocolException if command could not be built or misunderstood.
        """
        self._start_logging(*args, **kwargs)

        return ProtocolState.AUTOSAMPLE, (ResourceAgentState.STREAMING, None)

    def _handler_command_start_direct(self):
        """
        Start direct access
        """
        return ProtocolState.DIRECT_ACCESS, (ResourceAgentState.DIRECT_ACCESS, None)

    def _handler_command_clock_sync_clock(self, *args, **kwargs):
        """
        sync clock close to a second edge
        @retval (next_state, result) tuple, (None, None) if successful.
        @throws InstrumentTimeoutException if device cannot be woken for command.
        @throws InstrumentProtocolException if command could not be built or misunderstood.
        """
        self._wakeup(timeout=TIMEOUT)
        self._sync_clock(Command.SET, Parameter.DATE_TIME, TIMEOUT, time_format='%m%d%Y%H%M%S')

        return None, (None, None)

    ########################################################################
    # Autosample handlers.
    ########################################################################

    def _handler_autosample_clock_sync(self, *args, **kwargs):
        """
        execute a clock sync on the leading edge of a second change from
        autosample mode.  For this command we have to move the instrument
        into command mode, do the clock sync, then switch back.  If an
        exception is thrown we will try to get ourselves back into
        streaming and then raise that exception.
        @retval (next_state, result) tuple, (ProtocolState.AUTOSAMPLE, None) if successful.
        @throws InstrumentTimeoutException if device cannot be woken for command.
        @throws InstrumentProtocolException if command could not be built or misunderstood.
        """
        try:
            # Switch to command mode,
            self._stop_logging(*args, **kwargs)
            # Sync the clock
            self._sync_clock(Command.SET, Parameter.DATE_TIME, TIMEOUT, time_format='%m%d%Y%H%M%S')
        finally:
            # Switch back to streaming
            self._start_logging(*args, **kwargs)

        return None, (None, None)

    def _handler_autosample_enter(self, *args, **kwargs):
        """
        Enter autosample state.
        """
        # Applying startup config requires command mode, so briefly stop
        # logging, apply, then resume.
        if self._init_type != InitializationType.NONE:
            self._stop_logging()
            self._update_params()
            self._init_params()
            self._start_logging()

        self._driver_event(DriverAsyncEvent.STATE_CHANGE)

    def _handler_autosample_stop_autosample(self, *args, **kwargs):
        """
        Stop autosample and switch back to command mode.
        @retval (next_state, result) tuple
        @throws InstrumentTimeoutException if device cannot be woken for command.
        @throws InstrumentProtocolException if command misunderstood or incorrect prompt received.
        """
        self._stop_logging(*args, **kwargs)

        return ProtocolState.COMMAND, (ResourceAgentState.COMMAND, None)

    def _handler_autosample_acquire_status(self, *args, **kwargs):
        """
        Get device status
        """
        # When in autosample this command requires two wakeups to get to the right prompt
        self._wakeup(timeout=WAKEUP_TIMEOUT, delay=0.3)
        self._wakeup(timeout=WAKEUP_TIMEOUT, delay=0.3)

        # NOTE(review): the log labels below say "_handler_command_acquire_status";
        # looks like a copy-paste from the command-state handler — confirm before
        # relying on these messages when tracing.
        result = []
        result.append(self._do_cmd_resp(Command.GET_SD, timeout=TIMEOUT))
        log.debug("_handler_command_acquire_status: GetSD Response: %s", result)
        result.append(self._do_cmd_resp(Command.GET_HD, timeout=TIMEOUT))
        log.debug("_handler_command_acquire_status: GetHD Response: %s", result)
        result.append(self._do_cmd_resp(Command.GET_CD, timeout=TIMEOUT))
        log.debug("_handler_command_acquire_status: GetCD Response: %s", result)
        result.append(self._do_cmd_resp(Command.GET_CC, timeout=TIMEOUT))
        log.debug("_handler_command_acquire_status: GetCC Response: %s", result)
        result.append(self._do_cmd_resp(Command.GET_EC, timeout=TIMEOUT))
        log.debug("_handler_command_acquire_status: GetEC Response: %s", result)

        # Reset the event counter right after getEC
        self._do_cmd_resp(Command.RESET_EC, timeout=TIMEOUT)

        return None, (None, ''.join(result))

    ########################################################################
    # Common handlers.
    ########################################################################

    def _sync_clock(self, command, date_time_param, timeout=TIMEOUT, delay=1, time_format="%m%d%Y%H%M%S"):
        """
        Send the command to the instrument to synchronize the clock
        @param command: command to set date time
        @param date_time_param: date time parameter that we want to set
        @param timeout: command timeout
        @param delay: wakeup delay
        @param time_format: time format string for set command
        @raise: InstrumentProtocolException if command fails
        """
        # Clear out any past data so it doesn't confuse the command.
        self._linebuf = ''
        self._promptbuf = ''

        log.debug("Set time format(%s) '%s''", time_format, date_time_param)
        str_val = get_timestamp_delayed(time_format)
        log.debug("Set time value == '%s'", str_val)

        self._do_cmd_resp(command, date_time_param, str_val)

    def _handler_generic_exit(self, *args, **kwargs):
        """
        Exit unknown state.  Shared no-op exit handler for all states.
        """
        pass

    ########################################################################
    # Direct access handlers.
    ########################################################################

    def _handler_direct_access_enter(self, *args, **kwargs):
        """
        Enter direct access state.
        """
        self._driver_event(DriverAsyncEvent.STATE_CHANGE)

        self._sent_cmds = []

    def _handler_direct_access_execute_direct(self, data):
        """
        Pass raw bytes straight through to the instrument.
        """
        self._do_cmd_direct(data)

        # add sent command to list for 'echo' filtering in callback
        self._sent_cmds.append(data)

        return None, (None, None)

    def _handler_direct_access_stop_direct(self):
        """
        @throw InstrumentProtocolException on invalid command
        """
        # Re-discover the instrument state on exit from direct access.
        next_state, next_agent_state = self._handler_unknown_discover()
        if next_state == DriverProtocolState.COMMAND:
            next_agent_state = ResourceAgentState.COMMAND

        return next_state, (next_agent_state, None)

    ########################################################################
    # Private helpers.
    ########################################################################

    def _start_logging(self, *args, **kwargs):
        """
        Command the instrument to start logging
        @param timeout: how long to wait for a prompt
        @return: True if successful
        @raise: InstrumentProtocolException if failed to start logging
        """
        self._do_cmd_resp(Command.STARTNOW, *args, **kwargs)

    def _stop_logging(self, *args, **kwargs):
        """
        Command the instrument to stop logging
        @param timeout: how long to wait for a prompt
        @return: True if successful
        @raise: InstrumentTimeoutException if prompt isn't seen
        @raise: InstrumentProtocolException failed to stop logging
        """
        kwargs['timeout'] = TIMEOUT
        self._do_cmd_resp(Command.STOP, *args, **kwargs)

    def _send_wakeup(self):
        """
        Send a newline to attempt to wake the SBE16 device.
        """
        self._connection.send(NEWLINE)

    def _update_params(self, *args, **kwargs):
        """
        Update the parameter dictionary.  Wake the device then issue
        display status and display calibration commands.  The parameter
        dict will match line output and update itself.
        @throws InstrumentTimeoutException if device cannot be timely woken.
        @throws InstrumentProtocolException if ds/dc misunderstood.
        """
        self._wakeup(timeout=WAKEUP_TIMEOUT, delay=0.3)
        # For some reason when in streaming we require a second wakeup
        self._wakeup(timeout=WAKEUP_TIMEOUT, delay=0.3)

        # Get old param dict config.
        old_config = self._param_dict.get_config()

        # Issue status commands
        self._do_cmd_resp(Command.GET_SD, timeout=TIMEOUT)
        self._do_cmd_resp(Command.GET_CD, timeout=TIMEOUT)
        self._do_cmd_resp(Command.GET_HD, timeout=TIMEOUT)

        # Get new param dict config. If it differs from the old config,
        # tell driver superclass to publish a config change event.
        new_config = self._param_dict.get_config()
        log.debug("Old Config: %s", old_config)
        log.debug("New Config: %s", new_config)
        if not dict_equal(new_config, old_config) and self._protocol_fsm.get_current_state() != ProtocolState.UNKNOWN:
            log.debug("parameters updated, sending event")
            self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)

    def _build_set_command(self, cmd, param, val):
        """
        Build handler for set commands. param=val followed by newline.
        String val constructed by param dict formatting function.
        @param param the parameter key to set.
        @param val the parameter value to set.
        @retval The set command to be sent to the device.
        @throws InstrumentProtocolException if the parameter is not valid or
        if the formatting function could not accept the value passed.
        """
        try:
            # DATE_TIME is sent as-is; every other value goes through the
            # param dict's formatting function.
            if param is Parameter.DATE_TIME:
                set_cmd = '%s=%s%s' % (param, val, NEWLINE)
            else:
                str_val = self._param_dict.format(param, val)
                set_cmd = '%s=%s%s' % (param, str_val, NEWLINE)

            # Some set commands need to be sent twice to confirm
            if param in ConfirmedParameter.list():
                set_cmd += set_cmd
        except KeyError:
            raise InstrumentParameterException('Unknown driver parameter %s' % param)

        return set_cmd

    def _find_error(self, response):
        """
        Find an error xml message in a response
        @param response command response string.
        @return tuple with type and message, None otherwise
        """
        match = re.search(ERROR_REGEX, response)
        if match:
            return match.group(1), match.group(2)

        return None

    def _parse_set_response(self, response, prompt):
        """
        Parse handler for set command.
        @param response command response string.
        @param prompt prompt following command response.
        @throws InstrumentProtocolException if set command misunderstood.
        """
        error = self._find_error(response)

        if error:
            log.error("Set command encountered error; type='%s' msg='%s'", error[0], error[1])
            raise InstrumentParameterException('Set command failure: type="%s" msg="%s"' % (error[0], error[1]))

        if prompt not in [Prompt.EXECUTED, Prompt.COMMAND]:
            log.error("Set command encountered error; instrument returned: %s", response)
            raise InstrumentProtocolException('Set command not recognized: %s' % response)

    def _parse_status_response(self, response, prompt):
        """
        Parse handler for status commands.
        @param response command response string.
        @param prompt prompt following command response.
        @throws InstrumentProtocolException if command misunderstood.
        """
        if prompt not in [Prompt.COMMAND, Prompt.EXECUTED]:
            raise InstrumentProtocolException('Command not recognized: %s.'
                                              % response)

        # Feed each response line to the param dict so matching regexes
        # update their parameter values.
        for line in response.split(NEWLINE):
            self._param_dict.update(line)

        return response

    def _build_common_param_dict(self):
        """
        Populate the parameter dictionary entries shared by SBE16 variants:
        logging state, the six external voltage channels, attached-sensor
        flags and the output format.
        """
        self._param_dict.add(Parameter.LOGGING,
                             r'LoggingState>(not )?logging</LoggingState',
                             lambda match: False if (match.group(1)) else True,
                             self._true_false_to_string,
                             type=ParameterDictType.BOOL,
                             display_name="Logging",
                             description="Enable logging: (true | false)",
                             visibility=ParameterDictVisibility.READ_ONLY)
        self._param_dict.add(Parameter.VOLT0,
                             r'ExtVolt0>(.*)</ExtVolt0',
                             lambda match: True if match.group(1) == 'yes' else False,
                             self._true_false_to_string,
                             type=ParameterDictType.BOOL,
                             display_name="Volt 0",
                             description="Enable external voltage 0: (true | false)",
                             startup_param=True,
                             direct_access=True,
                             default_value=True,
                             visibility=ParameterDictVisibility.IMMUTABLE)
        self._param_dict.add(Parameter.VOLT1,
                             r'ExtVolt1>(.*)</ExtVolt1',
                             lambda match: True if match.group(1) == 'yes' else False,
                             self._true_false_to_string,
                             type=ParameterDictType.BOOL,
                             display_name="Volt 1",
                             description="Enable external voltage 1: (true | false)",
                             startup_param=True,
                             direct_access=True,
                             default_value=False,
                             visibility=ParameterDictVisibility.IMMUTABLE)
        self._param_dict.add(Parameter.VOLT2,
                             r'ExtVolt2>(.*)</ExtVolt2',
                             lambda match: True if match.group(1) == 'yes' else False,
                             self._true_false_to_string,
                             type=ParameterDictType.BOOL,
                             display_name="Volt 2",
                             description="Enable external voltage 2: (true | false)",
                             startup_param=True,
                             direct_access=True,
                             default_value=False,
                             visibility=ParameterDictVisibility.IMMUTABLE)
        self._param_dict.add(Parameter.VOLT3,
                             r'ExtVolt3>(.*)</ExtVolt3',
                             lambda match: True if match.group(1) == 'yes' else False,
                             self._true_false_to_string,
                             type=ParameterDictType.BOOL,
                             display_name="Volt 3",
                             description="Enable external voltage 3: (true | false)",
                             startup_param=True,
                             direct_access=True,
                             default_value=False,
                             visibility=ParameterDictVisibility.IMMUTABLE)
        self._param_dict.add(Parameter.VOLT4,
                             r'ExtVolt4>(.*)</ExtVolt4',
                             lambda match: True if match.group(1) == 'yes' else False,
                             self._true_false_to_string,
                             type=ParameterDictType.BOOL,
                             display_name="Volt 4",
                             description="Enable external voltage 4: (true | false)",
                             startup_param=True,
                             direct_access=True,
                             default_value=False,
                             visibility=ParameterDictVisibility.IMMUTABLE)
        self._param_dict.add(Parameter.VOLT5,
                             r'ExtVolt5>(.*)</ExtVolt5',
                             lambda match: True if match.group(1) == 'yes' else False,
                             self._true_false_to_string,
                             type=ParameterDictType.BOOL,
                             display_name="Volt 5",
                             description="Enable external voltage 5: (true | false)",
                             startup_param=True,
                             direct_access=True,
                             default_value=False,
                             visibility=ParameterDictVisibility.IMMUTABLE)
        self._param_dict.add(Parameter.SBE38,
                             r'SBE38>(.*)</SBE38',
                             lambda match: True if match.group(1) == 'yes' else False,
                             self._true_false_to_string,
                             type=ParameterDictType.BOOL,
                             display_name="SBE38 Attached",
                             description="Enable SBE38: (true | false)",
                             startup_param=True,
                             direct_access=True,
                             default_value=False,
                             visibility=ParameterDictVisibility.IMMUTABLE)
        self._param_dict.add(Parameter.SBE63,
                             r'SBE63>(.*)</SBE63',
                             lambda match: True if match.group(1) == 'yes' else False,
                             self._true_false_to_string,
                             type=ParameterDictType.BOOL,
                             display_name="SBE63 Attached",
                             description="Enable SBE63: (true | false)",
                             startup_param=True,
                             direct_access=True,
                             default_value=False,
                             visibility=ParameterDictVisibility.IMMUTABLE)
        self._param_dict.add(Parameter.WETLABS,
                             r'WETLABS>(.*)</WETLABS',
                             lambda match: True if match.group(1) == 'yes' else False,
                             self._true_false_to_string,
                             type=ParameterDictType.BOOL,
                             display_name="Wetlabs Sensor Attached",
                             description="Enable Wetlabs sensor: (true | false)",
                             startup_param=True,
                             direct_access=True,
                             default_value=False,
                             visibility=ParameterDictVisibility.IMMUTABLE)
        self._param_dict.add(Parameter.GTD,
                             r'GTD>(.*)</GTD',
                             lambda match: True if match.group(1) == 'yes' else False,
                             self._true_false_to_string,
                             type=ParameterDictType.BOOL,
                             display_name="GTD Attached",
                             description="Enable GTD: (true | false)",
                             startup_param=True,
                             direct_access=True,
                             default_value=False,
                             visibility=ParameterDictVisibility.IMMUTABLE)
        # NOTE(review): DUAL_GTD uses the same regex as GTD above; it looks
        # like a copy-paste and would track the first GTD tag, not a second
        # one — confirm against the instrument's GetCD XML output.
        self._param_dict.add(Parameter.DUAL_GTD,
                             r'GTD>(.*)</GTD',
                             lambda match: True if match.group(1) == 'yes' else False,
                             self._true_false_to_string,
                             type=ParameterDictType.BOOL,
                             display_name="Dual GTD Attached",
                             description="Enable second GTD: (true | false)",
                             startup_param=True,
                             direct_access=True,
                             default_value=False,
                             visibility=ParameterDictVisibility.IMMUTABLE)
        self._param_dict.add(Parameter.OUTPUT_FORMAT,
                             r'OutputFormat>(.*)</OutputFormat',
                             self._output_format_string_2_int,
                             int,
                             type=ParameterDictType.INT,
                             display_name="Output Format",
                             description="Format for the instrument output: (0:raw hex | 1:converted hex | 2:raw decimal | "
                                         "3:converted decimal | 4:converted hex for afm | 5:converted xml uvic)",
                             startup_param=True,
                             direct_access=True,
                             default_value=0,
                             visibility=ParameterDictVisibility.IMMUTABLE)
        self._param_dict.add(Parameter.OPTODE,
                             r'OPTODE>(.*)</OPTODE',
                             lambda match: True if match.group(1) == 'yes' else False,
                             self._true_false_to_string,
                             type=ParameterDictType.BOOL,
                             display_name="Optode Attached",
                             description="Enable optode: (true | false)",
                             startup_param=True,
                             direct_access=True,
                             default_value=False,
                             visibility=ParameterDictVisibility.IMMUTABLE)

    def _build_param_dict(self):
        """
        Populate the parameter dictionary with SBE16 parameters.
        For each parameter key, add match string, match lambda function,
        and value formatting function for set commands.
""" self._build_common_param_dict() self._param_dict.add(Parameter.PTYPE, r"<Sensor id = 'Main Pressure'>.*?<type>(.*?)</type>.*?</Sensor>", self._pressure_sensor_to_int, str, type=ParameterDictType.INT, display_name="Pressure Sensor Type", startup_param=True, direct_access=True, default_value=1, description="Sensor type: (1:strain gauge | 3:quartz with temp comp)", visibility=ParameterDictVisibility.IMMUTABLE, regex_flags=re.DOTALL) self._param_dict.add(Parameter.ECHO, r'<EchoCharacters>(.*)</EchoCharacters>', lambda match : True if match.group(1) == 'yes' else False, self._true_false_to_string, type=ParameterDictType.BOOL, display_name="Echo Characters", description="Enable characters to be echoed as typed (true | false)", startup_param=True, direct_access=True, default_value=True, visibility=ParameterDictVisibility.IMMUTABLE) self._param_dict.add(Parameter.OUTPUT_EXEC_TAG, r'<OutputExecutedTag>(.*)</OutputExecutedTag>', lambda match : True, self._true_false_to_string, type=ParameterDictType.BOOL, display_name="Output Execute Tag", description="Enable display of XML executing and executed tags (true | false)", startup_param=True, direct_access=True, default_value=True, visibility=ParameterDictVisibility.IMMUTABLE) self._param_dict.add(Parameter.PUMP_MODE, r'<AutoRun>(.*)</AutoRun>', self._pump_mode_to_int, str, type=ParameterDictType.INT, display_name="Pump Mode", description="Mode: (0:no pump | 1:run pump for 0.5 sec | 2:run pump during sample)", startup_param=True, direct_access=True, default_value=2) self._param_dict.add(Parameter.SBE50, r'SBE50>(.*)</SBE50', lambda match : True if match.group(1) == 'yes' else False, self._true_false_to_string, type=ParameterDictType.BOOL, display_name="SBE50 Attached", description="Enabled SBE50: (true | false)", startup_param=True, direct_access=True, default_value=False, visibility=ParameterDictVisibility.IMMUTABLE) self._param_dict.add(Parameter.DELAY_BEFORE_SAMPLE, r'DelayBeforeSampling>(.*?)</DelayBeforeSampling', lambda 
match : float(match.group(1)), self._float_to_string, type=ParameterDictType.FLOAT, display_name="Delay Before Sample", description=" Time to wait after switching on external voltages and RS-232 sensors " "before sampling: (0-600).", startup_param=True, direct_access=True, default_value=0.0, units=Units.SECOND, visibility=ParameterDictVisibility.IMMUTABLE) self._param_dict.add(Parameter.DELAY_AFTER_SAMPLE, r'DelayAfterSample>(.*?)</DelayBeforeSampling', lambda match : float(match.group(1)), str, type=ParameterDictType.FLOAT, display_name="Delay After Sample", description="Time to wait after sampling is completed, before turning off power " "to external voltages and RS-232 sensors.", startup_param=True, direct_access=True, default_value=0.0, units=Units.SECOND, visibility=ParameterDictVisibility.IMMUTABLE) self._param_dict.add(Parameter.SYNCMODE, r'SyncMode>(dis|en)abled</SyncMode', lambda match : True if match.group(1) == 'en' else False, self._true_false_to_string, type=ParameterDictType.BOOL, display_name="Enable Serial Sync", description="Enable serial line sync mode: (true | false)", startup_param=True, direct_access=True, default_value=False, visibility=ParameterDictVisibility.IMMUTABLE) self._param_dict.add(Parameter.NCYCLES, r'NCycles>(.*?)</NCycles', lambda match : int(match.group(1)), str, type=ParameterDictType.INT, display_name="Ncycles", description="Number of measurements to take and average every SampleInterval seconds.", startup_param=True, direct_access=False, default_value=4) self._param_dict.add(Parameter.INTERVAL, r'SampleInterval>(.*?)</SampleInterval', lambda match : int(match.group(1)), str, type=ParameterDictType.INT, display_name="Sample Interval", description="Interval between samples: (10 - 14,400).", startup_param=True, direct_access=False, units=Units.SECOND, default_value=10) self._param_dict.add(Parameter.BIOWIPER, r'Biowiper>(.*?)</Biowiper', lambda match : False, self._true_false_to_string, type=ParameterDictType.BOOL, 
display_name="Biowiper", description="Enable ECO-FL fluorometer with Bio-Wiper: (true | false)", startup_param=True, direct_access=True, default_value=False, visibility=ParameterDictVisibility.IMMUTABLE) self._param_dict.add(Parameter.TXREALTIME, r'TxRealTime>(yes|no)</TxRealTime', lambda match : True if match.group(1) == 'yes' else False, self._true_false_to_string, type=ParameterDictType.BOOL, display_name="Transmit Real-Time", description="Enable real-time data output: (true | false)", startup_param=True, direct_access=True, default_value=True, visibility=ParameterDictVisibility.IMMUTABLE) ######################################################################## # Static helpers to format set commands. ######################################################################## @staticmethod def _pressure_sensor_to_int(match): """ map a pressure sensor string into an int representation @param match: regex match @return: mode 1, 2, 3 or None for no match """ v = match.group(1) log.debug("get pressure type from: %s", v) if v == "strain gauge" or v == "strain-0": return 1 elif v == "quartz without temp comp": return 2 elif v == "quartz with temp comp" or v == "quartzTC-0": return 3 else: return None @staticmethod def _pump_mode_to_int(match): """ map a pump mode string into an int representation @param match: regex match @return: mode 0, 1, 2 or None for no match """ v = match.group(1) log.debug("get pump mode from: %s", v) if v == "no pump": return 0 elif v == "run pump for 0.5 sec": return 1 elif v == "run pump during sample": return 2 else: return None @staticmethod def _true_false_to_string(v): """ Write a boolean value to string formatted for sbe16 set operations. @param v a boolean value. @retval A yes/no string formatted for sbe16 set operations. @throws InstrumentParameterException if value not a bool. """ if not isinstance(v,bool): raise InstrumentParameterException('Value %s is not a bool.' 
% str(v)) if v: return 'y' else: return 'n' @staticmethod def _string_to_numeric_date_time_string(date_time_string): """ convert string from "21 AUG 2012 09:51:55" to numeric "mmddyyyyhhmmss" """ return time.strftime("%m%d%Y%H%M%S", time.strptime(date_time_string, "%d %b %Y %H:%M:%S")) @staticmethod def _output_format_string_2_int(format_string): """ Convert an output format from an string to an int @param format_string sbe output format as string or regex match @retval int representation of output format @raise InstrumentParameterException if format unknown """ if not isinstance(format_string, str): format_string = format_string.group(1) if format_string.lower() == "raw hex": return 0 elif format_string.lower() == "converted hex": return 1 elif format_string.lower() == "raw decimal": return 2 elif format_string.lower() == "converted decimal": return 3 elif format_string.lower() == "converted hex for afm": return 4 elif format_string.lower() == "converted xml uvic": return 5 raise InstrumentParameterException("output format unknown: %s" % format_string)
codeparrot/github-code-clean
# Author: Travis Oliphant # 1999 -- 2002 from __future__ import division, print_function, absolute_import import operator import math import sys import timeit from scipy.spatial import cKDTree from . import sigtools, dlti from ._upfirdn import upfirdn, _output_len, _upfirdn_modes from scipy import linalg, fft as sp_fft from scipy.fft._helper import _init_nd_shape_and_axes import numpy as np from scipy.special import lambertw from .windows import get_window from ._arraytools import axis_slice, axis_reverse, odd_ext, even_ext, const_ext from .filter_design import cheby1, _validate_sos from .fir_filter_design import firwin from ._sosfilt import _sosfilt __all__ = ['correlate', 'correlate2d', 'convolve', 'convolve2d', 'fftconvolve', 'oaconvolve', 'order_filter', 'medfilt', 'medfilt2d', 'wiener', 'lfilter', 'lfiltic', 'sosfilt', 'deconvolve', 'hilbert', 'hilbert2', 'cmplx_sort', 'unique_roots', 'invres', 'invresz', 'residue', 'residuez', 'resample', 'resample_poly', 'detrend', 'lfilter_zi', 'sosfilt_zi', 'sosfiltfilt', 'choose_conv_method', 'filtfilt', 'decimate', 'vectorstrength'] _modedict = {'valid': 0, 'same': 1, 'full': 2} _boundarydict = {'fill': 0, 'pad': 0, 'wrap': 2, 'circular': 2, 'symm': 1, 'symmetric': 1, 'reflect': 4} def _valfrommode(mode): try: return _modedict[mode] except KeyError: raise ValueError("Acceptable mode flags are 'valid'," " 'same', or 'full'.") def _bvalfromboundary(boundary): try: return _boundarydict[boundary] << 2 except KeyError: raise ValueError("Acceptable boundary flags are 'fill', 'circular' " "(or 'wrap'), and 'symmetric' (or 'symm').") def _inputs_swap_needed(mode, shape1, shape2, axes=None): """Determine if inputs arrays need to be swapped in `"valid"` mode. If in `"valid"` mode, returns whether or not the input arrays need to be swapped depending on whether `shape1` is at least as large as `shape2` in every calculated dimension. 
This is important for some of the correlation and convolution implementations in this module, where the larger array input needs to come before the smaller array input when operating in this mode. Note that if the mode provided is not 'valid', False is immediately returned. """ if mode != 'valid': return False if not shape1: return False if axes is None: axes = range(len(shape1)) ok1 = all(shape1[i] >= shape2[i] for i in axes) ok2 = all(shape2[i] >= shape1[i] for i in axes) if not (ok1 or ok2): raise ValueError("For 'valid' mode, one must be at least " "as large as the other in every dimension") return not ok1 def correlate(in1, in2, mode='full', method='auto'): r""" Cross-correlate two N-dimensional arrays. Cross-correlate `in1` and `in2`, with the output size determined by the `mode` argument. Parameters ---------- in1 : array_like First input. in2 : array_like Second input. Should have the same number of dimensions as `in1`. mode : str {'full', 'valid', 'same'}, optional A string indicating the size of the output: ``full`` The output is the full discrete linear cross-correlation of the inputs. (Default) ``valid`` The output consists only of those elements that do not rely on the zero-padding. In 'valid' mode, either `in1` or `in2` must be at least as large as the other in every dimension. ``same`` The output is the same size as `in1`, centered with respect to the 'full' output. method : str {'auto', 'direct', 'fft'}, optional A string indicating which method to use to calculate the correlation. ``direct`` The correlation is determined directly from sums, the definition of correlation. ``fft`` The Fast Fourier Transform is used to perform the correlation more quickly (only available for numerical arrays.) ``auto`` Automatically chooses direct or Fourier method based on an estimate of which is faster (default). See `convolve` Notes for more detail. .. 
versionadded:: 0.19.0 Returns ------- correlate : array An N-dimensional array containing a subset of the discrete linear cross-correlation of `in1` with `in2`. See Also -------- choose_conv_method : contains more documentation on `method`. Notes ----- The correlation z of two d-dimensional arrays x and y is defined as:: z[...,k,...] = sum[..., i_l, ...] x[..., i_l,...] * conj(y[..., i_l - k,...]) This way, if x and y are 1-D arrays and ``z = correlate(x, y, 'full')`` then .. math:: z[k] = (x * y)(k - N + 1) = \sum_{l=0}^{||x||-1}x_l y_{l-k+N-1}^{*} for :math:`k = 0, 1, ..., ||x|| + ||y|| - 2` where :math:`||x||` is the length of ``x``, :math:`N = \max(||x||,||y||)`, and :math:`y_m` is 0 when m is outside the range of y. ``method='fft'`` only works for numerical arrays as it relies on `fftconvolve`. In certain cases (i.e., arrays of objects or when rounding integers can lose precision), ``method='direct'`` is always used. When using "same" mode with even-length inputs, the outputs of `correlate` and `correlate2d` differ: There is a 1-index offset between them. Examples -------- Implement a matched filter using cross-correlation, to recover a signal that has passed through a noisy channel. 
>>> from scipy import signal >>> sig = np.repeat([0., 1., 1., 0., 1., 0., 0., 1.], 128) >>> sig_noise = sig + np.random.randn(len(sig)) >>> corr = signal.correlate(sig_noise, np.ones(128), mode='same') / 128 >>> import matplotlib.pyplot as plt >>> clock = np.arange(64, len(sig), 128) >>> fig, (ax_orig, ax_noise, ax_corr) = plt.subplots(3, 1, sharex=True) >>> ax_orig.plot(sig) >>> ax_orig.plot(clock, sig[clock], 'ro') >>> ax_orig.set_title('Original signal') >>> ax_noise.plot(sig_noise) >>> ax_noise.set_title('Signal with noise') >>> ax_corr.plot(corr) >>> ax_corr.plot(clock, corr[clock], 'ro') >>> ax_corr.axhline(0.5, ls=':') >>> ax_corr.set_title('Cross-correlated with rectangular pulse') >>> ax_orig.margins(0, 0.1) >>> fig.tight_layout() >>> fig.show() """ in1 = np.asarray(in1) in2 = np.asarray(in2) if in1.ndim == in2.ndim == 0: return in1 * in2.conj() elif in1.ndim != in2.ndim: raise ValueError("in1 and in2 should have the same dimensionality") # Don't use _valfrommode, since correlate should not accept numeric modes try: val = _modedict[mode] except KeyError: raise ValueError("Acceptable mode flags are 'valid'," " 'same', or 'full'.") # this either calls fftconvolve or this function with method=='direct' if method in ('fft', 'auto'): return convolve(in1, _reverse_and_conj(in2), mode, method) elif method == 'direct': # fastpath to faster numpy.correlate for 1d inputs when possible if _np_conv_ok(in1, in2, mode): return np.correlate(in1, in2, mode) # _correlateND is far slower when in2.size > in1.size, so swap them # and then undo the effect afterward if mode == 'full'. Also, it fails # with 'valid' mode if in2 is larger than in1, so swap those, too. # Don't swap inputs for 'same' mode, since shape of in1 matters. 
swapped_inputs = ((mode == 'full') and (in2.size > in1.size) or _inputs_swap_needed(mode, in1.shape, in2.shape)) if swapped_inputs: in1, in2 = in2, in1 if mode == 'valid': ps = [i - j + 1 for i, j in zip(in1.shape, in2.shape)] out = np.empty(ps, in1.dtype) z = sigtools._correlateND(in1, in2, out, val) else: ps = [i + j - 1 for i, j in zip(in1.shape, in2.shape)] # zero pad input in1zpadded = np.zeros(ps, in1.dtype) sc = tuple(slice(0, i) for i in in1.shape) in1zpadded[sc] = in1.copy() if mode == 'full': out = np.empty(ps, in1.dtype) elif mode == 'same': out = np.empty(in1.shape, in1.dtype) z = sigtools._correlateND(in1zpadded, in2, out, val) if swapped_inputs: # Reverse and conjugate to undo the effect of swapping inputs z = _reverse_and_conj(z) return z else: raise ValueError("Acceptable method flags are 'auto'," " 'direct', or 'fft'.") def _centered(arr, newshape): # Return the center newshape portion of the array. newshape = np.asarray(newshape) currshape = np.array(arr.shape) startind = (currshape - newshape) // 2 endind = startind + newshape myslice = [slice(startind[k], endind[k]) for k in range(len(endind))] return arr[tuple(myslice)] def _init_freq_conv_axes(in1, in2, mode, axes, sorted_axes=False): """Handle the axes argument for frequency-domain convolution. Returns the inputs and axes in a standard form, eliminating redundant axes, swapping the inputs if necessary, and checking for various potential errors. Parameters ---------- in1 : array First input. in2 : array Second input. mode : str {'full', 'valid', 'same'}, optional A string indicating the size of the output. See the documentation `fftconvolve` for more information. axes : list of ints Axes over which to compute the FFTs. sorted_axes : bool, optional If `True`, sort the axes. Default is `False`, do not sort. Returns ------- in1 : array The first input, possible swapped with the second input. in2 : array The second input, possible swapped with the first input. 
axes : list of ints Axes over which to compute the FFTs. """ s1 = in1.shape s2 = in2.shape noaxes = axes is None _, axes = _init_nd_shape_and_axes(in1, shape=None, axes=axes) if not noaxes and not len(axes): raise ValueError("when provided, axes cannot be empty") # Axes of length 1 can rely on broadcasting rules for multipy, # no fft needed. axes = [a for a in axes if s1[a] != 1 and s2[a] != 1] if sorted_axes: axes.sort() if not all(s1[a] == s2[a] or s1[a] == 1 or s2[a] == 1 for a in range(in1.ndim) if a not in axes): raise ValueError("incompatible shapes for in1 and in2:" " {0} and {1}".format(s1, s2)) # Check that input sizes are compatible with 'valid' mode. if _inputs_swap_needed(mode, s1, s2, axes=axes): # Convolution is commutative; order doesn't have any effect on output. in1, in2 = in2, in1 return in1, in2, axes def _freq_domain_conv(in1, in2, axes, shape, calc_fast_len=False): """Convolve two arrays in the frequency domain. This function implements only base the FFT-related operations. Specifically, it converts the signals to the frequency domain, multiplies them, then converts them back to the time domain. Calculations of axes, shapes, convolution mode, etc. are implemented in higher level-functions, such as `fftconvolve` and `oaconvolve`. Those functions should be used instead of this one. Parameters ---------- in1 : array_like First input. in2 : array_like Second input. Should have the same number of dimensions as `in1`. axes : array_like of ints Axes over which to compute the FFTs. shape : array_like of ints The sizes of the FFTs. calc_fast_len : bool, optional If `True`, set each value of `shape` to the next fast FFT length. Default is `False`, use `axes` as-is. Returns ------- out : array An N-dimensional array containing the discrete linear convolution of `in1` with `in2`. """ if not len(axes): return in1 * in2 complex_result = (in1.dtype.kind == 'c' or in2.dtype.kind == 'c') if calc_fast_len: # Speed up FFT by padding to optimal size. 
fshape = [ sp_fft.next_fast_len(shape[a], not complex_result) for a in axes] else: fshape = shape if not complex_result: fft, ifft = sp_fft.rfftn, sp_fft.irfftn else: fft, ifft = sp_fft.fftn, sp_fft.ifftn sp1 = fft(in1, fshape, axes=axes) sp2 = fft(in2, fshape, axes=axes) ret = ifft(sp1 * sp2, fshape, axes=axes) if calc_fast_len: fslice = tuple([slice(sz) for sz in shape]) ret = ret[fslice] return ret def _apply_conv_mode(ret, s1, s2, mode, axes): """Calculate the convolution result shape based on the `mode` argument. Returns the result sliced to the correct size for the given mode. Parameters ---------- ret : array The result array, with the appropriate shape for the 'full' mode. s1 : list of int The shape of the first input. s2 : list of int The shape of the second input. mode : str {'full', 'valid', 'same'} A string indicating the size of the output. See the documentation `fftconvolve` for more information. axes : list of ints Axes over which to compute the convolution. Returns ------- ret : array A copy of `res`, sliced to the correct size for the given `mode`. """ if mode == "full": return ret.copy() elif mode == "same": return _centered(ret, s1).copy() elif mode == "valid": shape_valid = [ret.shape[a] if a not in axes else s1[a] - s2[a] + 1 for a in range(ret.ndim)] return _centered(ret, shape_valid).copy() else: raise ValueError("acceptable mode flags are 'valid'," " 'same', or 'full'") def fftconvolve(in1, in2, mode="full", axes=None): """Convolve two N-dimensional arrays using FFT. Convolve `in1` and `in2` using the fast Fourier transform method, with the output size determined by the `mode` argument. This is generally much faster than `convolve` for large arrays (n > ~500), but can be slower when only a few output values are needed, and can only output float arrays (int or object array inputs will be cast to float). As of v0.19, `convolve` automatically chooses this method or the direct method based on an estimation of which is faster. 
Parameters ---------- in1 : array_like First input. in2 : array_like Second input. Should have the same number of dimensions as `in1`. mode : str {'full', 'valid', 'same'}, optional A string indicating the size of the output: ``full`` The output is the full discrete linear convolution of the inputs. (Default) ``valid`` The output consists only of those elements that do not rely on the zero-padding. In 'valid' mode, either `in1` or `in2` must be at least as large as the other in every dimension. ``same`` The output is the same size as `in1`, centered with respect to the 'full' output. axes : int or array_like of ints or None, optional Axes over which to compute the convolution. The default is over all axes. Returns ------- out : array An N-dimensional array containing a subset of the discrete linear convolution of `in1` with `in2`. See Also -------- convolve : Uses the direct convolution or FFT convolution algorithm depending on which is faster. oaconvolve : Uses the overlap-add method to do convolution, which is generally faster when the input arrays are large and significantly different in size. Examples -------- Autocorrelation of white noise is an impulse. >>> from scipy import signal >>> sig = np.random.randn(1000) >>> autocorr = signal.fftconvolve(sig, sig[::-1], mode='full') >>> import matplotlib.pyplot as plt >>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1) >>> ax_orig.plot(sig) >>> ax_orig.set_title('White noise') >>> ax_mag.plot(np.arange(-len(sig)+1,len(sig)), autocorr) >>> ax_mag.set_title('Autocorrelation') >>> fig.tight_layout() >>> fig.show() Gaussian blur implemented using FFT convolution. Notice the dark borders around the image, due to the zero-padding beyond its boundaries. The `convolve2d` function allows for other types of image boundaries, but is far slower. 
>>> from scipy import misc >>> face = misc.face(gray=True) >>> kernel = np.outer(signal.gaussian(70, 8), signal.gaussian(70, 8)) >>> blurred = signal.fftconvolve(face, kernel, mode='same') >>> fig, (ax_orig, ax_kernel, ax_blurred) = plt.subplots(3, 1, ... figsize=(6, 15)) >>> ax_orig.imshow(face, cmap='gray') >>> ax_orig.set_title('Original') >>> ax_orig.set_axis_off() >>> ax_kernel.imshow(kernel, cmap='gray') >>> ax_kernel.set_title('Gaussian kernel') >>> ax_kernel.set_axis_off() >>> ax_blurred.imshow(blurred, cmap='gray') >>> ax_blurred.set_title('Blurred') >>> ax_blurred.set_axis_off() >>> fig.show() """ in1 = np.asarray(in1) in2 = np.asarray(in2) if in1.ndim == in2.ndim == 0: # scalar inputs return in1 * in2 elif in1.ndim != in2.ndim: raise ValueError("in1 and in2 should have the same dimensionality") elif in1.size == 0 or in2.size == 0: # empty arrays return np.array([]) in1, in2, axes = _init_freq_conv_axes(in1, in2, mode, axes, sorted_axes=False) s1 = in1.shape s2 = in2.shape shape = [max((s1[i], s2[i])) if i not in axes else s1[i] + s2[i] - 1 for i in range(in1.ndim)] ret = _freq_domain_conv(in1, in2, axes, shape, calc_fast_len=True) return _apply_conv_mode(ret, s1, s2, mode, axes) def _calc_oa_lens(s1, s2): """Calculate the optimal FFT lengths for overlapp-add convolution. The calculation is done for a single dimension. Parameters ---------- s1 : int Size of the dimension for the first array. s2 : int Size of the dimension for the second array. Returns ------- block_size : int The size of the FFT blocks. overlap : int The amount of overlap between two blocks. in1_step : int The size of each step for the first array. in2_step : int The size of each step for the first array. """ # Set up the arguments for the conventional FFT approach. fallback = (s1+s2-1, None, s1, s2) # Use conventional FFT convolve if sizes are same. 
if s1 == s2 or s1 == 1 or s2 == 1: return fallback if s2 > s1: s1, s2 = s2, s1 swapped = True else: swapped = False # There cannot be a useful block size if s2 is more than half of s1. if s2 >= s1/2: return fallback # Derivation of optimal block length # For original formula see: # https://en.wikipedia.org/wiki/Overlap-add_method # # Formula: # K = overlap = s2-1 # N = block_size # C = complexity # e = exponential, exp(1) # # C = (N*(log2(N)+1))/(N-K) # C = (N*log2(2N))/(N-K) # C = N/(N-K) * log2(2N) # C1 = N/(N-K) # C2 = log2(2N) = ln(2N)/ln(2) # # dC1/dN = (1*(N-K)-N)/(N-K)^2 = -K/(N-K)^2 # dC2/dN = 2/(2*N*ln(2)) = 1/(N*ln(2)) # # dC/dN = dC1/dN*C2 + dC2/dN*C1 # dC/dN = -K*ln(2N)/(ln(2)*(N-K)^2) + N/(N*ln(2)*(N-K)) # dC/dN = -K*ln(2N)/(ln(2)*(N-K)^2) + 1/(ln(2)*(N-K)) # dC/dN = -K*ln(2N)/(ln(2)*(N-K)^2) + (N-K)/(ln(2)*(N-K)^2) # dC/dN = (-K*ln(2N) + (N-K)/(ln(2)*(N-K)^2) # dC/dN = (N - K*ln(2N) - K)/(ln(2)*(N-K)^2) # # Solve for minimum, where dC/dN = 0 # 0 = (N - K*ln(2N) - K)/(ln(2)*(N-K)^2) # 0 * ln(2)*(N-K)^2 = N - K*ln(2N) - K # 0 = N - K*ln(2N) - K # 0 = N - K*(ln(2N) + 1) # 0 = N - K*ln(2Ne) # N = K*ln(2Ne) # N/K = ln(2Ne) # # e^(N/K) = e^ln(2Ne) # e^(N/K) = 2Ne # 1/e^(N/K) = 1/(2*N*e) # e^(N/-K) = 1/(2*N*e) # e^(N/-K) = K/N*1/(2*K*e) # N/K*e^(N/-K) = 1/(2*e*K) # N/-K*e^(N/-K) = -1/(2*e*K) # # Using Lambert W function # https://en.wikipedia.org/wiki/Lambert_W_function # x = W(y) It is the solution to y = x*e^x # x = N/-K # y = -1/(2*e*K) # # N/-K = W(-1/(2*e*K)) # # N = -K*W(-1/(2*e*K)) overlap = s2-1 opt_size = -overlap*lambertw(-1/(2*math.e*overlap), k=-1).real block_size = sp_fft.next_fast_len(math.ceil(opt_size)) # Use conventional FFT convolve if there is only going to be one block. 
if block_size >= s1: return fallback if not swapped: in1_step = block_size-s2+1 in2_step = s2 else: in1_step = s2 in2_step = block_size-s2+1 return block_size, overlap, in1_step, in2_step def oaconvolve(in1, in2, mode="full", axes=None): """Convolve two N-dimensional arrays using the overlap-add method. Convolve `in1` and `in2` using the overlap-add method, with the output size determined by the `mode` argument. This is generally much faster than `convolve` for large arrays (n > ~500), and generally much faster than `fftconvolve` when one array is much larger than the other, but can be slower when only a few output values are needed or when the arrays are very similar in shape, and can only output float arrays (int or object array inputs will be cast to float). Parameters ---------- in1 : array_like First input. in2 : array_like Second input. Should have the same number of dimensions as `in1`. mode : str {'full', 'valid', 'same'}, optional A string indicating the size of the output: ``full`` The output is the full discrete linear convolution of the inputs. (Default) ``valid`` The output consists only of those elements that do not rely on the zero-padding. In 'valid' mode, either `in1` or `in2` must be at least as large as the other in every dimension. ``same`` The output is the same size as `in1`, centered with respect to the 'full' output. axes : int or array_like of ints or None, optional Axes over which to compute the convolution. The default is over all axes. Returns ------- out : array An N-dimensional array containing a subset of the discrete linear convolution of `in1` with `in2`. See Also -------- convolve : Uses the direct convolution or FFT convolution algorithm depending on which is faster. fftconvolve : An implementation of convolution using FFT. Notes ----- .. versionadded:: 1.4.0 Examples -------- Convolve a 100,000 sample signal with a 512-sample filter. 
>>> from scipy import signal >>> sig = np.random.randn(100000) >>> filt = signal.firwin(512, 0.01) >>> fsig = signal.oaconvolve(sig, filt) >>> import matplotlib.pyplot as plt >>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1) >>> ax_orig.plot(sig) >>> ax_orig.set_title('White noise') >>> ax_mag.plot(fsig) >>> ax_mag.set_title('Filtered noise') >>> fig.tight_layout() >>> fig.show() References ---------- .. [1] Wikipedia, "Overlap-add_method". https://en.wikipedia.org/wiki/Overlap-add_method .. [2] Richard G. Lyons. Understanding Digital Signal Processing, Third Edition, 2011. Chapter 13.10. ISBN 13: 978-0137-02741-5 """ in1 = np.asarray(in1) in2 = np.asarray(in2) if in1.ndim == in2.ndim == 0: # scalar inputs return in1 * in2 elif in1.ndim != in2.ndim: raise ValueError("in1 and in2 should have the same dimensionality") elif in1.size == 0 or in2.size == 0: # empty arrays return np.array([]) elif in1.shape == in2.shape: # Equivalent to fftconvolve return fftconvolve(in1, in2, mode=mode, axes=axes) in1, in2, axes = _init_freq_conv_axes(in1, in2, mode, axes, sorted_axes=True) if not axes: return in1*in2 s1 = in1.shape s2 = in2.shape # Calculate this now since in1 is changed later shape_final = [None if i not in axes else s1[i] + s2[i] - 1 for i in range(in1.ndim)] # Calculate the block sizes for the output, steps, first and second inputs. # It is simpler to calculate them all together than doing them in separate # loops due to all the special cases that need to be handled. optimal_sizes = ((-1, -1, s1[i], s2[i]) if i not in axes else _calc_oa_lens(s1[i], s2[i]) for i in range(in1.ndim)) block_size, overlaps, \ in1_step, in2_step = zip(*optimal_sizes) # Fall back to fftconvolve if there is only one block in every dimension. if in1_step == s1 and in2_step == s2: return fftconvolve(in1, in2, mode=mode, axes=axes) # Figure out the number of steps and padding. # This would get too complicated in a list comprehension. 
nsteps1 = [] nsteps2 = [] pad_size1 = [] pad_size2 = [] for i in range(in1.ndim): if i not in axes: pad_size1 += [(0, 0)] pad_size2 += [(0, 0)] continue if s1[i] > in1_step[i]: curnstep1 = math.ceil((s1[i]+1)/in1_step[i]) if (block_size[i] - overlaps[i])*curnstep1 < shape_final[i]: curnstep1 += 1 curpad1 = curnstep1*in1_step[i] - s1[i] else: curnstep1 = 1 curpad1 = 0 if s2[i] > in2_step[i]: curnstep2 = math.ceil((s2[i]+1)/in2_step[i]) if (block_size[i] - overlaps[i])*curnstep2 < shape_final[i]: curnstep2 += 1 curpad2 = curnstep2*in2_step[i] - s2[i] else: curnstep2 = 1 curpad2 = 0 nsteps1 += [curnstep1] nsteps2 += [curnstep2] pad_size1 += [(0, curpad1)] pad_size2 += [(0, curpad2)] # Pad the array to a size that can be reshaped to the desired shape # if necessary. if not all(curpad == (0, 0) for curpad in pad_size1): in1 = np.pad(in1, pad_size1, mode='constant', constant_values=0) if not all(curpad == (0, 0) for curpad in pad_size2): in2 = np.pad(in2, pad_size2, mode='constant', constant_values=0) # Reshape the overlap-add parts to input block sizes. split_axes = [iax+i for i, iax in enumerate(axes)] fft_axes = [iax+1 for iax in split_axes] # We need to put each new dimension before the corresponding dimension # being reshaped in order to get the data in the right layout at the end. reshape_size1 = list(in1_step) reshape_size2 = list(in2_step) for i, iax in enumerate(split_axes): reshape_size1.insert(iax, nsteps1[i]) reshape_size2.insert(iax, nsteps2[i]) in1 = in1.reshape(*reshape_size1) in2 = in2.reshape(*reshape_size2) # Do the convolution. fft_shape = [block_size[i] for i in axes] ret = _freq_domain_conv(in1, in2, fft_axes, fft_shape, calc_fast_len=False) # Do the overlap-add. 
for ax, ax_fft, ax_split in zip(axes, fft_axes, split_axes): overlap = overlaps[ax] if overlap is None: continue ret, overpart = np.split(ret, [-overlap], ax_fft) overpart = np.split(overpart, [-1], ax_split)[0] ret_overpart = np.split(ret, [overlap], ax_fft)[0] ret_overpart = np.split(ret_overpart, [1], ax_split)[1] ret_overpart += overpart # Reshape back to the correct dimensionality. shape_ret = [ret.shape[i] if i not in fft_axes else ret.shape[i]*ret.shape[i-1] for i in range(ret.ndim) if i not in split_axes] ret = ret.reshape(*shape_ret) # Slice to the correct size. slice_final = tuple([slice(islice) for islice in shape_final]) ret = ret[slice_final] return _apply_conv_mode(ret, s1, s2, mode, axes) def _numeric_arrays(arrays, kinds='buifc'): """ See if a list of arrays are all numeric. Parameters ---------- ndarrays : array or list of arrays arrays to check if numeric. numeric_kinds : string-like The dtypes of the arrays to be checked. If the dtype.kind of the ndarrays are not in this string the function returns False and otherwise returns True. """ if type(arrays) == np.ndarray: return arrays.dtype.kind in kinds for array_ in arrays: if array_.dtype.kind not in kinds: return False return True def _prod(iterable): """ Product of a list of numbers. Faster than np.prod for short lists like array shapes. """ product = 1 for x in iterable: product *= x return product def _conv_ops(x_shape, h_shape, mode): """ Find the number of operations required for direct/fft methods of convolution. The direct operations were recorded by making a dummy class to record the number of operations by overriding ``__mul__`` and ``__add__``. The FFT operations rely on the (well-known) computational complexity of the FFT (and the implementation of ``_freq_domain_conv``). 
""" if mode == "full": out_shape = [n + k - 1 for n, k in zip(x_shape, h_shape)] elif mode == "valid": out_shape = [abs(n - k) + 1 for n, k in zip(x_shape, h_shape)] elif mode == "same": out_shape = x_shape else: raise ValueError("Acceptable mode flags are 'valid'," " 'same', or 'full', not mode={}".format(mode)) s1, s2 = x_shape, h_shape if len(x_shape) == 1: s1, s2 = s1[0], s2[0] if mode == "full": direct_ops = s1 * s2 elif mode == "valid": direct_ops = (s2 - s1 + 1) * s1 if s2 >= s1 else (s1 - s2 + 1) * s2 elif mode == "same": direct_ops = s1 * s2 if s1 < s2 else s1 * s2 - (s2 // 2) * ((s2 + 1) // 2) else: if mode == "full": direct_ops = min(_prod(s1), _prod(s2)) * _prod(out_shape) elif mode == "valid": direct_ops = min(_prod(s1), _prod(s2)) * _prod(out_shape) elif mode == "same": direct_ops = _prod(s1) * _prod(s2) full_out_shape = [n + k - 1 for n, k in zip(x_shape, h_shape)] N = _prod(full_out_shape) fft_ops = 3 * N * np.log(N) # 3 separate FFTs of size full_out_shape return fft_ops, direct_ops def _fftconv_faster(x, h, mode): """ See if using fftconvolve or convolve is faster. Parameters ---------- x : np.ndarray Signal h : np.ndarray Kernel mode : str Mode passed to convolve Returns ------- fft_faster : bool Notes ----- See docstring of `choose_conv_method` for details on tuning hardware. See pull request 11031 for more detail: https://github.com/scipy/scipy/pull/11031. 
""" fft_ops, direct_ops = _conv_ops(x.shape, h.shape, mode) offset = -1e-3 if x.ndim == 1 else -1e-4 constants = { "valid": (1.89095737e-9, 2.1364985e-10, offset), "full": (1.7649070e-9, 2.1414831e-10, offset), "same": (3.2646654e-9, 2.8478277e-10, offset) if h.size <= x.size else (3.21635404e-9, 1.1773253e-8, -1e-5), } if x.ndim == 1 else { "valid": (1.85927e-9, 2.11242e-8, offset), "full": (1.99817e-9, 1.66174e-8, offset), "same": (2.04735e-9, 1.55367e-8, offset), } O_fft, O_direct, O_offset = constants[mode] return O_fft * fft_ops < O_direct * direct_ops + O_offset def _reverse_and_conj(x): """ Reverse array `x` in all dimensions and perform the complex conjugate """ reverse = (slice(None, None, -1),) * x.ndim return x[reverse].conj() def _np_conv_ok(volume, kernel, mode): """ See if numpy supports convolution of `volume` and `kernel` (i.e. both are 1D ndarrays and of the appropriate shape). NumPy's 'same' mode uses the size of the larger input, while SciPy's uses the size of the first input. Invalid mode strings will return False and be caught by the calling func. """ if volume.ndim == kernel.ndim == 1: if mode in ('full', 'valid'): return True elif mode == 'same': return volume.size >= kernel.size else: return False def _timeit_fast(stmt="pass", setup="pass", repeat=3): """ Returns the time the statement/function took, in seconds. Faster, less precise version of IPython's timeit. `stmt` can be a statement written as a string or a callable. Will do only 1 loop (like IPython's timeit) with no repetitions (unlike IPython) for very slow functions. For fast functions, only does enough loops to take 5 ms, which seems to produce similar results (on Windows at least), and avoids doing an extraneous cycle that isn't measured. 
""" timer = timeit.Timer(stmt, setup) # determine number of calls per rep so total time for 1 rep >= 5 ms x = 0 for p in range(0, 10): number = 10**p x = timer.timeit(number) # seconds if x >= 5e-3 / 10: # 5 ms for final test, 1/10th that for this one break if x > 1: # second # If it's macroscopic, don't bother with repetitions best = x else: number *= 10 r = timer.repeat(repeat, number) best = min(r) sec = best / number return sec def choose_conv_method(in1, in2, mode='full', measure=False): """ Find the fastest convolution/correlation method. This primarily exists to be called during the ``method='auto'`` option in `convolve` and `correlate`. It can also be used to determine the value of ``method`` for many different convolutions of the same dtype/shape. In addition, it supports timing the convolution to adapt the value of ``method`` to a particular set of inputs and/or hardware. Parameters ---------- in1 : array_like The first argument passed into the convolution function. in2 : array_like The second argument passed into the convolution function. mode : str {'full', 'valid', 'same'}, optional A string indicating the size of the output: ``full`` The output is the full discrete linear convolution of the inputs. (Default) ``valid`` The output consists only of those elements that do not rely on the zero-padding. ``same`` The output is the same size as `in1`, centered with respect to the 'full' output. measure : bool, optional If True, run and time the convolution of `in1` and `in2` with both methods and return the fastest. If False (default), predict the fastest method using precomputed values. Returns ------- method : str A string indicating which convolution method is fastest, either 'direct' or 'fft' times : dict, optional A dictionary containing the times (in seconds) needed for each method. This value is only returned if ``measure=True``. 
    See Also
    --------
    convolve
    correlate

    Notes
    -----
    Generally, this method is 99% accurate for 2D signals and 85% accurate
    for 1D signals for randomly chosen input sizes. For precision, use
    ``measure=True`` to find the fastest method by timing the convolution.
    This can be used to avoid the minimal overhead of finding the fastest
    ``method`` later, or to adapt the value of ``method`` to a particular
    set of inputs.

    Experiments were run on an Amazon EC2 r5a.2xlarge machine to test this
    function. These experiments measured the ratio between the time required
    when using ``method='auto'`` and the time required for the fastest method
    (i.e., ``ratio = time_auto / min(time_fft, time_direct)``). In these
    experiments, we found:

    * There is a 95% chance of this ratio being less than 1.5 for 1D signals
      and a 99% chance of being less than 2.5 for 2D signals.
    * The ratio was always less than 2.5/5 for 1D/2D signals respectively.
    * This function is most inaccurate for 1D convolutions that take between 1
      and 10 milliseconds with ``method='direct'``. A good proxy for this
      (at least in our experiments) is ``1e6 <= in1.size * in2.size <= 1e7``.

    The 2D results almost certainly generalize to 3D/4D/etc because the
    implementation is the same (the 1D implementation is different).

    All the numbers above are specific to the EC2 machine. However, we did
    find that this function generalizes fairly decently across hardware. The
    speed tests were of similar quality (and even slightly better) than the
    same tests performed on the machine to tune this function's numbers (a
    mid-2014 15-inch MacBook Pro with 16GB RAM and a 2.5GHz Intel i7
    processor).

    There are cases when `fftconvolve` supports the inputs but this function
    returns `direct` (e.g., to protect against floating point integer
    precision).

    .. versionadded:: 0.19

    Examples
    --------
    Estimate the fastest method for a given input:

    >>> from scipy import signal
    >>> img = np.random.rand(32, 32)
    >>> filter = np.random.rand(8, 8)
    >>> method = signal.choose_conv_method(img, filter, mode='same')
    >>> method
    'fft'

    This can then be applied to other arrays of the same dtype and shape:

    >>> img2 = np.random.rand(32, 32)
    >>> filter2 = np.random.rand(8, 8)
    >>> corr2 = signal.correlate(img2, filter2, mode='same', method=method)
    >>> conv2 = signal.convolve(img2, filter2, mode='same', method=method)

    The output of this function (``method``) works with `correlate` and
    `convolve`.

    """
    volume = np.asarray(in1)
    kernel = np.asarray(in2)

    if measure:
        # Empirical path: actually time both methods on these inputs.
        times = {}
        for method in ['fft', 'direct']:
            times[method] = _timeit_fast(lambda: convolve(volume, kernel,
                                         mode=mode, method=method))

        chosen_method = 'fft' if times['fft'] < times['direct'] else 'direct'
        return chosen_method, times

    # for integer input,
    # catch when more precision required than float provides (representing an
    # integer as float can lose precision in fftconvolve if larger than 2**52)
    if any([_numeric_arrays([x], kinds='ui') for x in [volume, kernel]]):
        max_value = int(np.abs(volume).max()) * int(np.abs(kernel).max())
        max_value *= int(min(volume.size, kernel.size))
        if max_value > 2**np.finfo('float').nmant - 1:
            return 'direct'

    # Boolean inputs: FFT would require a round-trip through float; direct
    # is always used.
    if _numeric_arrays([volume, kernel], kinds='b'):
        return 'direct'

    if _numeric_arrays([volume, kernel]):
        if _fftconv_faster(volume, kernel, mode):
            return 'fft'

    return 'direct'


def convolve(in1, in2, mode='full', method='auto'):
    """
    Convolve two N-dimensional arrays.

    Convolve `in1` and `in2`, with the output size determined by the
    `mode` argument.

    Parameters
    ----------
    in1 : array_like
        First input.
    in2 : array_like
        Second input. Should have the same number of dimensions as `in1`.
    mode : str {'full', 'valid', 'same'}, optional
        A string indicating the size of the output:

        ``full``
           The output is the full discrete linear convolution
           of the inputs. (Default)
        ``valid``
           The output consists only of those elements that do not
           rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
           must be at least as large as the other in every dimension.
        ``same``
           The output is the same size as `in1`, centered
           with respect to the 'full' output.
    method : str {'auto', 'direct', 'fft'}, optional
        A string indicating which method to use to calculate the convolution.

        ``direct``
           The convolution is determined directly from sums, the definition of
           convolution.
        ``fft``
           The Fourier Transform is used to perform the convolution by calling
           `fftconvolve`.
        ``auto``
           Automatically chooses direct or Fourier method based on an estimate
           of which is faster (default).  See Notes for more detail.

           .. versionadded:: 0.19.0

    Returns
    -------
    convolve : array
        An N-dimensional array containing a subset of the discrete linear
        convolution of `in1` with `in2`.

    See Also
    --------
    numpy.polymul : performs polynomial multiplication (same operation, but
                    also accepts poly1d objects)
    choose_conv_method : chooses the fastest appropriate convolution method
    fftconvolve : Always uses the FFT method.
    oaconvolve : Uses the overlap-add method to do convolution, which is
                 generally faster when the input arrays are large and
                 significantly different in size.

    Notes
    -----
    By default, `convolve` and `correlate` use ``method='auto'``, which calls
    `choose_conv_method` to choose the fastest method using pre-computed
    values (`choose_conv_method` can also measure real-world timing with a
    keyword argument). Because `fftconvolve` relies on floating point numbers,
    there are certain constraints that may force `method=direct` (more detail
    in `choose_conv_method` docstring).
    Examples
    --------
    Smooth a square pulse using a Hann window:

    >>> from scipy import signal
    >>> sig = np.repeat([0., 1., 0.], 100)
    >>> win = signal.hann(50)
    >>> filtered = signal.convolve(sig, win, mode='same') / sum(win)

    >>> import matplotlib.pyplot as plt
    >>> fig, (ax_orig, ax_win, ax_filt) = plt.subplots(3, 1, sharex=True)
    >>> ax_orig.plot(sig)
    >>> ax_orig.set_title('Original pulse')
    >>> ax_orig.margins(0, 0.1)
    >>> ax_win.plot(win)
    >>> ax_win.set_title('Filter impulse response')
    >>> ax_win.margins(0, 0.1)
    >>> ax_filt.plot(filtered)
    >>> ax_filt.set_title('Filtered signal')
    >>> ax_filt.margins(0, 0.1)
    >>> fig.tight_layout()
    >>> fig.show()

    """
    volume = np.asarray(in1)
    kernel = np.asarray(in2)

    # Two scalars degenerate to plain multiplication.
    if volume.ndim == kernel.ndim == 0:
        return volume * kernel
    elif volume.ndim != kernel.ndim:
        raise ValueError("volume and kernel should have the same "
                         "dimensionality")

    if _inputs_swap_needed(mode, volume.shape, kernel.shape):
        # Convolution is commutative; order doesn't have any effect on output
        volume, kernel = kernel, volume

    if method == 'auto':
        method = choose_conv_method(volume, kernel, mode=mode)

    if method == 'fft':
        out = fftconvolve(volume, kernel, mode=mode)
        result_type = np.result_type(volume, kernel)
        if result_type.kind in {'u', 'i'}:
            # FFT path computes in float; round back for integer inputs.
            out = np.around(out)
        return out.astype(result_type)
    elif method == 'direct':
        # fastpath to faster numpy.convolve for 1d inputs when possible
        if _np_conv_ok(volume, kernel, mode):
            return np.convolve(volume, kernel, mode)

        # Convolution == correlation with a reversed, conjugated kernel.
        return correlate(volume, _reverse_and_conj(kernel), mode, 'direct')
    else:
        raise ValueError("Acceptable method flags are 'auto',"
                         " 'direct', or 'fft'.")


def order_filter(a, domain, rank):
    """
    Perform an order filter on an N-D array.

    Perform an order filter on the array in.  The domain argument acts as a
    mask centered over each pixel.  The non-zero elements of domain are
    used to select elements surrounding each input pixel which are placed
    in a list.
The list is sorted, and the output for that pixel is the element corresponding to rank in the sorted list. Parameters ---------- a : ndarray The N-dimensional input array. domain : array_like A mask array with the same number of dimensions as `a`. Each dimension should have an odd number of elements. rank : int A non-negative integer which selects the element from the sorted list (0 corresponds to the smallest element, 1 is the next smallest element, etc.). Returns ------- out : ndarray The results of the order filter in an array with the same shape as `a`. Examples -------- >>> from scipy import signal >>> x = np.arange(25).reshape(5, 5) >>> domain = np.identity(3) >>> x array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], [20, 21, 22, 23, 24]]) >>> signal.order_filter(x, domain, 0) array([[ 0., 0., 0., 0., 0.], [ 0., 0., 1., 2., 0.], [ 0., 5., 6., 7., 0.], [ 0., 10., 11., 12., 0.], [ 0., 0., 0., 0., 0.]]) >>> signal.order_filter(x, domain, 2) array([[ 6., 7., 8., 9., 4.], [ 11., 12., 13., 14., 9.], [ 16., 17., 18., 19., 14.], [ 21., 22., 23., 24., 19.], [ 20., 21., 22., 23., 24.]]) """ domain = np.asarray(domain) size = domain.shape for k in range(len(size)): if (size[k] % 2) != 1: raise ValueError("Each dimension of domain argument " " should have an odd number of elements.") return sigtools._order_filterND(a, domain, rank) def medfilt(volume, kernel_size=None): """ Perform a median filter on an N-dimensional array. Apply a median filter to the input array using a local window-size given by `kernel_size`. The array will automatically be zero-padded. Parameters ---------- volume : array_like An N-dimensional input array. kernel_size : array_like, optional A scalar or an N-length list giving the size of the median filter window in each dimension. Elements of `kernel_size` should be odd. If `kernel_size` is a scalar, then this scalar is used as the size in each dimension. Default size is 3 for each dimension. 
    Returns
    -------
    out : ndarray
        An array the same size as input containing the median filtered
        result.

    See also
    --------
    scipy.ndimage.median_filter

    Notes
    -------
    The more general function `scipy.ndimage.median_filter` has a more
    efficient implementation of a median filter and therefore runs much
    faster.
    """
    volume = np.atleast_1d(volume)
    if kernel_size is None:
        kernel_size = [3] * volume.ndim
    kernel_size = np.asarray(kernel_size)
    if kernel_size.shape == ():
        # Scalar kernel size: replicate along every axis of the input.
        kernel_size = np.repeat(kernel_size.item(), volume.ndim)

    for k in range(volume.ndim):
        if (kernel_size[k] % 2) != 1:
            raise ValueError("Each element of kernel_size should be odd.")

    domain = np.ones(kernel_size)

    numels = np.prod(kernel_size, axis=0)
    # With all-odd sizes, numels is odd, so rank numels // 2 is the median.
    order = numels // 2
    return sigtools._order_filterND(volume, domain, order)


def wiener(im, mysize=None, noise=None):
    """
    Perform a Wiener filter on an N-dimensional array.

    Apply a Wiener filter to the N-dimensional array `im`.

    Parameters
    ----------
    im : ndarray
        An N-dimensional array.
    mysize : int or array_like, optional
        A scalar or an N-length list giving the size of the Wiener filter
        window in each dimension.  Elements of mysize should be odd.
        If mysize is a scalar, then this scalar is used as the size
        in each dimension.
    noise : float, optional
        The noise-power to use. If None, then noise is estimated as the
        average of the local variance of the input.

    Returns
    -------
    out : ndarray
        Wiener filtered result with the same shape as `im`.

    Examples
    --------

    >>> from scipy.misc import face
    >>> from scipy.signal.signaltools import wiener
    >>> import matplotlib.pyplot as plt
    >>> import numpy as np
    >>> img = np.random.random((40, 40))    #Create a random image
    >>> filtered_img = wiener(img, (5, 5))  #Filter the image
    >>> f, (plot1, plot2) = plt.subplots(1, 2)
    >>> plot1.imshow(img)
    >>> plot2.imshow(filtered_img)
    >>> plt.show()

    Notes
    -----
    This implementation is similar to wiener2 in Matlab/Octave.
    For more details see [1]_

    References
    ----------
    ..
    [1] Lim, Jae S., Two-Dimensional Signal and Image Processing,
        Englewood Cliffs, NJ, Prentice Hall, 1990, p. 548.

    """
    im = np.asarray(im)
    if mysize is None:
        mysize = [3] * im.ndim
    mysize = np.asarray(mysize)
    if mysize.shape == ():
        # Scalar window size: replicate along every axis.
        mysize = np.repeat(mysize.item(), im.ndim)

    # Estimate the local mean
    lMean = correlate(im, np.ones(mysize), 'same') / np.prod(mysize, axis=0)

    # Estimate the local variance
    lVar = (correlate(im ** 2, np.ones(mysize), 'same') /
            np.prod(mysize, axis=0) - lMean ** 2)

    # Estimate the noise power if needed.
    if noise is None:
        noise = np.mean(np.ravel(lVar), axis=0)

    # Wiener estimate: shrink toward the local mean by (1 - noise/lVar).
    res = (im - lMean)
    res *= (1 - noise / lVar)
    res += lMean
    # Where the local variance is below the noise floor, use the mean alone.
    out = np.where(lVar < noise, lMean, res)

    return out


def convolve2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
    """
    Convolve two 2-dimensional arrays.

    Convolve `in1` and `in2` with output size determined by `mode`, and
    boundary conditions determined by `boundary` and `fillvalue`.

    Parameters
    ----------
    in1 : array_like
        First input.
    in2 : array_like
        Second input. Should have the same number of dimensions as `in1`.
    mode : str {'full', 'valid', 'same'}, optional
        A string indicating the size of the output:

        ``full``
           The output is the full discrete linear convolution
           of the inputs. (Default)
        ``valid``
           The output consists only of those elements that do not
           rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
           must be at least as large as the other in every dimension.
        ``same``
           The output is the same size as `in1`, centered
           with respect to the 'full' output.
    boundary : str {'fill', 'wrap', 'symm'}, optional
        A flag indicating how to handle boundaries:

        ``fill``
           pad input arrays with fillvalue. (default)
        ``wrap``
           circular boundary conditions.
        ``symm``
           symmetrical boundary conditions.
    fillvalue : scalar, optional
        Value to fill pad input arrays with. Default is 0.

    Returns
    -------
    out : ndarray
        A 2-dimensional array containing a subset of the discrete linear
        convolution of `in1` with `in2`.
    Examples
    --------
    Compute the gradient of an image by 2D convolution with a complex Scharr
    operator.  (Horizontal operator is real, vertical is imaginary.)  Use
    symmetric boundary condition to avoid creating edges at the image
    boundaries.

    >>> from scipy import signal
    >>> from scipy import misc
    >>> ascent = misc.ascent()
    >>> scharr = np.array([[ -3-3j, 0-10j,  +3 -3j],
    ...                    [-10+0j, 0+ 0j, +10 +0j],
    ...                    [ -3+3j, 0+10j,  +3 +3j]]) # Gx + j*Gy
    >>> grad = signal.convolve2d(ascent, scharr, boundary='symm', mode='same')

    >>> import matplotlib.pyplot as plt
    >>> fig, (ax_orig, ax_mag, ax_ang) = plt.subplots(3, 1, figsize=(6, 15))
    >>> ax_orig.imshow(ascent, cmap='gray')
    >>> ax_orig.set_title('Original')
    >>> ax_orig.set_axis_off()
    >>> ax_mag.imshow(np.absolute(grad), cmap='gray')
    >>> ax_mag.set_title('Gradient magnitude')
    >>> ax_mag.set_axis_off()
    >>> ax_ang.imshow(np.angle(grad), cmap='hsv') # hsv is cyclic, like angles
    >>> ax_ang.set_title('Gradient orientation')
    >>> ax_ang.set_axis_off()
    >>> fig.show()

    """
    in1 = np.asarray(in1)
    in2 = np.asarray(in2)

    if not in1.ndim == in2.ndim == 2:
        raise ValueError('convolve2d inputs must both be 2-D arrays')

    # Convolution is commutative, so swapping is safe here (unlike in
    # correlate2d below, where the output must be flipped back).
    if _inputs_swap_needed(mode, in1.shape, in2.shape):
        in1, in2 = in2, in1

    val = _valfrommode(mode)
    bval = _bvalfromboundary(boundary)
    # flip flag = 1 selects convolution in the C helper.
    out = sigtools._convolve2d(in1, in2, 1, val, bval, fillvalue)
    return out


def correlate2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
    """
    Cross-correlate two 2-dimensional arrays.

    Cross correlate `in1` and `in2` with output size determined by `mode`, and
    boundary conditions determined by `boundary` and `fillvalue`.

    Parameters
    ----------
    in1 : array_like
        First input.
    in2 : array_like
        Second input. Should have the same number of dimensions as `in1`.
    mode : str {'full', 'valid', 'same'}, optional
        A string indicating the size of the output:

        ``full``
           The output is the full discrete linear cross-correlation
           of the inputs. (Default)
        ``valid``
           The output consists only of those elements that do not
           rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
           must be at least as large as the other in every dimension.
        ``same``
           The output is the same size as `in1`, centered
           with respect to the 'full' output.
    boundary : str {'fill', 'wrap', 'symm'}, optional
        A flag indicating how to handle boundaries:

        ``fill``
           pad input arrays with fillvalue. (default)
        ``wrap``
           circular boundary conditions.
        ``symm``
           symmetrical boundary conditions.
    fillvalue : scalar, optional
        Value to fill pad input arrays with. Default is 0.

    Returns
    -------
    correlate2d : ndarray
        A 2-dimensional array containing a subset of the discrete linear
        cross-correlation of `in1` with `in2`.

    Notes
    -----
    When using "same" mode with even-length inputs, the outputs of `correlate`
    and `correlate2d` differ: There is a 1-index offset between them.

    Examples
    --------
    Use 2D cross-correlation to find the location of a template in a noisy
    image:

    >>> from scipy import signal
    >>> from scipy import misc
    >>> face = misc.face(gray=True) - misc.face(gray=True).mean()
    >>> template = np.copy(face[300:365, 670:750])  # right eye
    >>> template -= template.mean()
    >>> face = face + np.random.randn(*face.shape) * 50  # add noise
    >>> corr = signal.correlate2d(face, template, boundary='symm', mode='same')
    >>> y, x = np.unravel_index(np.argmax(corr), corr.shape)  # find the match

    >>> import matplotlib.pyplot as plt
    >>> fig, (ax_orig, ax_template, ax_corr) = plt.subplots(3, 1,
    ...                                                     figsize=(6, 15))
    >>> ax_orig.imshow(face, cmap='gray')
    >>> ax_orig.set_title('Original')
    >>> ax_orig.set_axis_off()
    >>> ax_template.imshow(template, cmap='gray')
    >>> ax_template.set_title('Template')
    >>> ax_template.set_axis_off()
    >>> ax_corr.imshow(corr, cmap='gray')
    >>> ax_corr.set_title('Cross-correlation')
    >>> ax_corr.set_axis_off()
    >>> ax_orig.plot(x, y, 'ro')
    >>> fig.show()

    """
    in1 = np.asarray(in1)
    in2 = np.asarray(in2)

    if not in1.ndim == in2.ndim == 2:
        raise ValueError('correlate2d inputs must both be 2-D arrays')

    # Correlation is NOT commutative: remember whether we swapped so the
    # output can be un-flipped afterwards.
    swapped_inputs = _inputs_swap_needed(mode, in1.shape, in2.shape)
    if swapped_inputs:
        in1, in2 = in2, in1

    val = _valfrommode(mode)
    bval = _bvalfromboundary(boundary)
    # flip flag = 0 selects correlation; the kernel is conjugated here.
    out = sigtools._convolve2d(in1, in2.conj(), 0, val, bval, fillvalue)

    if swapped_inputs:
        out = out[::-1, ::-1]

    return out


def medfilt2d(input, kernel_size=3):
    """
    Median filter a 2-dimensional array.

    Apply a median filter to the `input` array using a local window-size
    given by `kernel_size` (must be odd). The array is zero-padded
    automatically.

    Parameters
    ----------
    input : array_like
        A 2-dimensional input array.
    kernel_size : array_like, optional
        A scalar or a list of length 2, giving the size of the
        median filter window in each dimension.  Elements of
        `kernel_size` should be odd.  If `kernel_size` is a scalar,
        then this scalar is used as the size in each dimension.
        Default is a kernel of size (3, 3).

    Returns
    -------
    out : ndarray
        An array the same size as input containing the median filtered
        result.

    See also
    --------
    scipy.ndimage.median_filter

    Notes
    -------
    The more general function `scipy.ndimage.median_filter` has a more
    efficient implementation of a median filter and therefore runs much
    faster.
""" image = np.asarray(input) if kernel_size is None: kernel_size = [3] * 2 kernel_size = np.asarray(kernel_size) if kernel_size.shape == (): kernel_size = np.repeat(kernel_size.item(), 2) for size in kernel_size: if (size % 2) != 1: raise ValueError("Each element of kernel_size should be odd.") return sigtools._medfilt2d(image, kernel_size) def lfilter(b, a, x, axis=-1, zi=None): """ Filter data along one-dimension with an IIR or FIR filter. Filter a data sequence, `x`, using a digital filter. This works for many fundamental data types (including Object type). The filter is a direct form II transposed implementation of the standard difference equation (see Notes). The function `sosfilt` (and filter design using ``output='sos'``) should be preferred over `lfilter` for most filtering tasks, as second-order sections have fewer numerical problems. Parameters ---------- b : array_like The numerator coefficient vector in a 1-D sequence. a : array_like The denominator coefficient vector in a 1-D sequence. If ``a[0]`` is not 1, then both `a` and `b` are normalized by ``a[0]``. x : array_like An N-dimensional input array. axis : int, optional The axis of the input data array along which to apply the linear filter. The filter is applied to each subarray along this axis. Default is -1. zi : array_like, optional Initial conditions for the filter delays. It is a vector (or array of vectors for an N-dimensional input) of length ``max(len(a), len(b)) - 1``. If `zi` is None or is not given then initial rest is assumed. See `lfiltic` for more information. Returns ------- y : array The output of the digital filter. zf : array, optional If `zi` is None, this is not returned, otherwise, `zf` holds the final filter delay values. See Also -------- lfiltic : Construct initial conditions for `lfilter`. lfilter_zi : Compute initial state (steady state of step response) for `lfilter`. filtfilt : A forward-backward filter, to obtain a filter with linear phase. 
    savgol_filter : A Savitzky-Golay filter.
    sosfilt: Filter data using cascaded second-order sections.
    sosfiltfilt: A forward-backward filter using second-order sections.

    Notes
    -----
    The filter function is implemented as a direct II transposed structure.
    This means that the filter implements::

       a[0]*y[n] = b[0]*x[n] + b[1]*x[n-1] + ... + b[M]*x[n-M]
                             - a[1]*y[n-1] - ... - a[N]*y[n-N]

    where `M` is the degree of the numerator, `N` is the degree of the
    denominator, and `n` is the sample number.  It is implemented using
    the following difference equations (assuming M = N)::

         a[0]*y[n] = b[0] * x[n]               + d[0][n-1]
           d[0][n] = b[1] * x[n] - a[1] * y[n] + d[1][n-1]
           d[1][n] = b[2] * x[n] - a[2] * y[n] + d[2][n-1]
         ...
         d[N-2][n] = b[N-1]*x[n] - a[N-1]*y[n] + d[N-1][n-1]
         d[N-1][n] = b[N] * x[n] - a[N] * y[n]

    where `d` are the state variables.

    The rational transfer function describing this filter in the
    z-transform domain is::

                             -1              -M
                 b[0] + b[1]z  + ... + b[M] z
         Y(z) = -------------------------------- X(z)
                             -1              -N
                 a[0] + a[1]z  + ... + a[N] z

    Examples
    --------
    Generate a noisy signal to be filtered:

    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> t = np.linspace(-1, 1, 201)
    >>> x = (np.sin(2*np.pi*0.75*t*(1-t) + 2.1) +
    ...      0.1*np.sin(2*np.pi*1.25*t + 1) +
    ...      0.18*np.cos(2*np.pi*3.85*t))
    >>> xn = x + np.random.randn(len(t)) * 0.08

    Create an order 3 lowpass butterworth filter:

    >>> b, a = signal.butter(3, 0.05)

    Apply the filter to xn.  Use lfilter_zi to choose the initial condition
    of the filter:

    >>> zi = signal.lfilter_zi(b, a)
    >>> z, _ = signal.lfilter(b, a, xn, zi=zi*xn[0])

    Apply the filter again, to have a result filtered at an order the same as
    filtfilt:

    >>> z2, _ = signal.lfilter(b, a, z, zi=zi*z[0])

    Use filtfilt to apply the filter:

    >>> y = signal.filtfilt(b, a, xn)

    Plot the original signal and the various filtered versions:

    >>> plt.figure
    >>> plt.plot(t, xn, 'b', alpha=0.75)
    >>> plt.plot(t, z, 'r--', t, z2, 'r', t, y, 'k')
    >>> plt.legend(('noisy signal', 'lfilter, once', 'lfilter, twice',
    ...             'filtfilt'), loc='best')
    >>> plt.grid(True)
    >>> plt.show()

    """
    a = np.atleast_1d(a)
    if len(a) == 1:
        # FIR-only case (scalar denominator): implemented in Python via
        # np.convolve rather than the C _linear_filter.
        # This path only supports types fdgFDGO to mirror _linear_filter
        # below. Any of b, a, x, or zi can set the dtype, but there is no
        # default casting of other types; instead a NotImplementedError is
        # raised.
        b = np.asarray(b)
        a = np.asarray(a)
        if b.ndim != 1 and a.ndim != 1:
            raise ValueError('object of too small depth for desired array')
        x = _validate_x(x)
        inputs = [b, a, x]
        if zi is not None:
            # _linear_filter does not broadcast zi, but does do expansion of
            # singleton dims.
            zi = np.asarray(zi)
            if zi.ndim != x.ndim:
                raise ValueError('object of too small depth for desired '
                                 'array')
            expected_shape = list(x.shape)
            expected_shape[axis] = b.shape[0] - 1
            expected_shape = tuple(expected_shape)
            # check the trivial case where zi is the right shape first
            if zi.shape != expected_shape:
                # Emulate broadcasting of singleton dims with stride 0 views.
                strides = zi.ndim * [None]
                if axis < 0:
                    axis += zi.ndim
                for k in range(zi.ndim):
                    if k == axis and zi.shape[k] == expected_shape[k]:
                        strides[k] = zi.strides[k]
                    elif k != axis and zi.shape[k] == expected_shape[k]:
                        strides[k] = zi.strides[k]
                    elif k != axis and zi.shape[k] == 1:
                        strides[k] = 0
                    else:
                        raise ValueError('Unexpected shape for zi: expected '
                                         '%s, found %s.' %
                                         (expected_shape, zi.shape))
                zi = np.lib.stride_tricks.as_strided(zi, expected_shape,
                                                     strides)
            inputs.append(zi)
        dtype = np.result_type(*inputs)

        if dtype.char not in 'fdgFDGO':
            raise NotImplementedError("input type '%s' not supported" % dtype)

        b = np.array(b, dtype=dtype)
        a = np.array(a, dtype=dtype, copy=False)
        # Normalize by a[0] so the scalar denominator becomes 1.
        b /= a[0]
        x = np.array(x, dtype=dtype, copy=False)

        out_full = np.apply_along_axis(lambda y: np.convolve(b, y), axis, x)
        ind = out_full.ndim * [slice(None)]
        if zi is not None:
            # Fold the initial conditions into the leading samples.
            ind[axis] = slice(zi.shape[axis])
            out_full[tuple(ind)] += zi

        ind[axis] = slice(out_full.shape[axis] - len(b) + 1)
        out = out_full[tuple(ind)]

        if zi is None:
            return out
        else:
            # The convolution tail holds the final filter state.
            ind[axis] = slice(out_full.shape[axis] - len(b) + 1, None)
            zf = out_full[tuple(ind)]
            return out, zf
    else:
        # General IIR case: defer to the C implementation.
        if zi is None:
            return sigtools._linear_filter(b, a, x, axis)
        else:
            return sigtools._linear_filter(b, a, x, axis, zi)


def lfiltic(b, a, y, x=None):
    """
    Construct initial conditions for lfilter given input and output vectors.

    Given a linear filter (b, a) and initial conditions on the output `y`
    and the input `x`, return the initial conditions on the state vector zi
    which is used by `lfilter` to generate the output given the input.

    Parameters
    ----------
    b : array_like
        Linear filter term.
    a : array_like
        Linear filter term.
    y : array_like
        Initial conditions.

        If ``N = len(a) - 1``, then ``y = {y[-1], y[-2], ..., y[-N]}``.

        If `y` is too short, it is padded with zeros.
    x : array_like, optional
        Initial conditions.

        If ``M = len(b) - 1``, then ``x = {x[-1], x[-2], ..., x[-M]}``.

        If `x` is not given, its initial conditions are assumed zero.

        If `x` is too short, it is padded with zeros.

    Returns
    -------
    zi : ndarray
        The state vector ``zi = {z_0[-1], z_1[-1], ..., z_K-1[-1]}``,
        where ``K = max(M, N)``.
    See Also
    --------
    lfilter, lfilter_zi
    """
    N = np.size(a) - 1
    M = np.size(b) - 1
    K = max(M, N)
    y = np.asarray(y)

    if y.dtype.kind in 'bui':
        # ensure calculations are floating point
        y = y.astype(np.float64)
    zi = np.zeros(K, y.dtype)

    if x is None:
        x = np.zeros(M, y.dtype)
    else:
        x = np.asarray(x)
        L = np.size(x)
        if L < M:
            # too-short x is padded with zeros per the docstring
            x = np.r_[x, np.zeros(M - L)]

    L = np.size(y)
    if L < N:
        # too-short y is padded with zeros per the docstring
        y = np.r_[y, np.zeros(N - L)]

    # State m accumulates the contribution of past inputs ...
    for m in range(M):
        zi[m] = np.sum(b[m + 1:] * x[:M - m], axis=0)

    # ... minus the contribution of past outputs.
    for m in range(N):
        zi[m] -= np.sum(a[m + 1:] * y[:N - m], axis=0)

    return zi


def deconvolve(signal, divisor):
    """Deconvolves ``divisor`` out of ``signal`` using inverse filtering.

    Returns the quotient and remainder such that
    ``signal = convolve(divisor, quotient) + remainder``

    Parameters
    ----------
    signal : array_like
        Signal data, typically a recorded signal
    divisor : array_like
        Divisor data, typically an impulse response or filter that was
        applied to the original signal

    Returns
    -------
    quotient : ndarray
        Quotient, typically the recovered original signal
    remainder : ndarray
        Remainder

    Examples
    --------
    Deconvolve a signal that's been filtered:

    >>> from scipy import signal
    >>> original = [0, 1, 0, 0, 1, 1, 0, 0]
    >>> impulse_response = [2, 1]
    >>> recorded = signal.convolve(impulse_response, original)
    >>> recorded
    array([0, 2, 1, 0, 2, 3, 1, 0, 0])
    >>> recovered, remainder = signal.deconvolve(recorded, impulse_response)
    >>> recovered
    array([ 0.,  1.,  0.,  0.,  1.,  1.,  0.,  0.])

    See Also
    --------
    numpy.polydiv : performs polynomial division (same operation, but
                    also accepts poly1d objects)

    """
    num = np.atleast_1d(signal)
    den = np.atleast_1d(divisor)
    N = len(num)
    D = len(den)
    if D > N:
        # Divisor longer than signal: quotient is empty, remainder is the
        # whole signal.  NOTE(review): this branch returns a plain list for
        # the quotient while the other returns an ndarray — callers relying
        # on len() work either way.
        quot = []
        rem = num
    else:
        # Deconvolution via inverse filtering: filter a unit impulse with
        # (num, den); `input` shadows the builtin (kept for compatibility).
        input = np.zeros(N - D + 1, float)
        input[0] = 1
        quot = lfilter(num, den, input)
        rem = num - convolve(den, quot, mode='full')
    return quot, rem


def hilbert(x, N=None, axis=-1):
    """
    Compute the analytic signal, using the Hilbert transform.

    The transformation is done along the last axis by default.
Parameters ---------- x : array_like Signal data. Must be real. N : int, optional Number of Fourier components. Default: ``x.shape[axis]`` axis : int, optional Axis along which to do the transformation. Default: -1. Returns ------- xa : ndarray Analytic signal of `x`, of each 1-D array along `axis` Notes ----- The analytic signal ``x_a(t)`` of signal ``x(t)`` is: .. math:: x_a = F^{-1}(F(x) 2U) = x + i y where `F` is the Fourier transform, `U` the unit step function, and `y` the Hilbert transform of `x`. [1]_ In other words, the negative half of the frequency spectrum is zeroed out, turning the real-valued signal into a complex signal. The Hilbert transformed signal can be obtained from ``np.imag(hilbert(x))``, and the original signal from ``np.real(hilbert(x))``. Examples --------- In this example we use the Hilbert transform to determine the amplitude envelope and instantaneous frequency of an amplitude-modulated signal. >>> import numpy as np >>> import matplotlib.pyplot as plt >>> from scipy.signal import hilbert, chirp >>> duration = 1.0 >>> fs = 400.0 >>> samples = int(fs*duration) >>> t = np.arange(samples) / fs We create a chirp of which the frequency increases from 20 Hz to 100 Hz and apply an amplitude modulation. >>> signal = chirp(t, 20.0, t[-1], 100.0) >>> signal *= (1.0 + 0.5 * np.sin(2.0*np.pi*3.0*t) ) The amplitude envelope is given by magnitude of the analytic signal. The instantaneous frequency can be obtained by differentiating the instantaneous phase in respect to time. The instantaneous phase corresponds to the phase angle of the analytic signal. >>> analytic_signal = hilbert(signal) >>> amplitude_envelope = np.abs(analytic_signal) >>> instantaneous_phase = np.unwrap(np.angle(analytic_signal)) >>> instantaneous_frequency = (np.diff(instantaneous_phase) / ... 
(2.0*np.pi) * fs) >>> fig = plt.figure() >>> ax0 = fig.add_subplot(211) >>> ax0.plot(t, signal, label='signal') >>> ax0.plot(t, amplitude_envelope, label='envelope') >>> ax0.set_xlabel("time in seconds") >>> ax0.legend() >>> ax1 = fig.add_subplot(212) >>> ax1.plot(t[1:], instantaneous_frequency) >>> ax1.set_xlabel("time in seconds") >>> ax1.set_ylim(0.0, 120.0) References ---------- .. [1] Wikipedia, "Analytic signal". https://en.wikipedia.org/wiki/Analytic_signal .. [2] Leon Cohen, "Time-Frequency Analysis", 1995. Chapter 2. .. [3] Alan V. Oppenheim, Ronald W. Schafer. Discrete-Time Signal Processing, Third Edition, 2009. Chapter 12. ISBN 13: 978-1292-02572-8 """ x = np.asarray(x) if np.iscomplexobj(x): raise ValueError("x must be real.") if N is None: N = x.shape[axis] if N <= 0: raise ValueError("N must be positive.") Xf = sp_fft.fft(x, N, axis=axis) h = np.zeros(N) if N % 2 == 0: h[0] = h[N // 2] = 1 h[1:N // 2] = 2 else: h[0] = 1 h[1:(N + 1) // 2] = 2 if x.ndim > 1: ind = [np.newaxis] * x.ndim ind[axis] = slice(None) h = h[tuple(ind)] x = sp_fft.ifft(Xf * h, axis=axis) return x def hilbert2(x, N=None): """ Compute the '2-D' analytic signal of `x` Parameters ---------- x : array_like 2-D signal data. N : int or tuple of two ints, optional Number of Fourier components. Default is ``x.shape`` Returns ------- xa : ndarray Analytic signal of `x` taken along axes (0,1). References ---------- .. 
[1] Wikipedia, "Analytic signal", https://en.wikipedia.org/wiki/Analytic_signal """ x = np.atleast_2d(x) if x.ndim > 2: raise ValueError("x must be 2-D.") if np.iscomplexobj(x): raise ValueError("x must be real.") if N is None: N = x.shape elif isinstance(N, int): if N <= 0: raise ValueError("N must be positive.") N = (N, N) elif len(N) != 2 or np.any(np.asarray(N) <= 0): raise ValueError("When given as a tuple, N must hold exactly " "two positive integers") Xf = sp_fft.fft2(x, N, axes=(0, 1)) h1 = np.zeros(N[0], 'd') h2 = np.zeros(N[1], 'd') for p in range(2): h = eval("h%d" % (p + 1)) N1 = N[p] if N1 % 2 == 0: h[0] = h[N1 // 2] = 1 h[1:N1 // 2] = 2 else: h[0] = 1 h[1:(N1 + 1) // 2] = 2 exec("h%d = h" % (p + 1), globals(), locals()) h = h1[:, np.newaxis] * h2[np.newaxis, :] k = x.ndim while k > 2: h = h[:, np.newaxis] k -= 1 x = sp_fft.ifft2(Xf * h, axes=(0, 1)) return x def cmplx_sort(p): """Sort roots based on magnitude. Parameters ---------- p : array_like The roots to sort, as a 1-D array. Returns ------- p_sorted : ndarray Sorted roots. indx : ndarray Array of indices needed to sort the input `p`. Examples -------- >>> from scipy import signal >>> vals = [1, 4, 1+1.j, 3] >>> p_sorted, indx = signal.cmplx_sort(vals) >>> p_sorted array([1.+0.j, 1.+1.j, 3.+0.j, 4.+0.j]) >>> indx array([0, 2, 3, 1]) """ p = np.asarray(p) indx = np.argsort(abs(p)) return np.take(p, indx, 0), indx def unique_roots(p, tol=1e-3, rtype='min'): """Determine unique roots and their multiplicities from a list of roots. Parameters ---------- p : array_like The list of roots. tol : float, optional The tolerance for two roots to be considered equal in terms of the distance between them. Default is 1e-3. Refer to Notes about the details on roots grouping. rtype : {'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}, optional How to determine the returned root if multiple roots are within `tol` of each other. 
        - 'max', 'maximum': pick the maximum of those roots
        - 'min', 'minimum': pick the minimum of those roots
        - 'avg', 'mean': take the average of those roots

        When finding minimum or maximum among complex roots they are compared
        first by the real part and then by the imaginary part.

    Returns
    -------
    unique : ndarray
        The list of unique roots.
    multiplicity : ndarray
        The multiplicity of each root.

    Notes
    -----
    If we have 3 roots ``a``, ``b`` and ``c``, such that ``a`` is close to
    ``b`` and ``b`` is close to ``c`` (distance is less than `tol`), then it
    doesn't necessarily mean that ``a`` is close to ``c``. It means that
    roots grouping is not unique. In this function we use "greedy" grouping
    going through the roots in the order they are given in the input `p`.

    This utility function is not specific to roots but can be used for any
    sequence of values for which uniqueness and multiplicity has to be
    determined. For a more general routine, see `numpy.unique`.

    Examples
    --------
    >>> from scipy import signal
    >>> vals = [0, 1.3, 1.31, 2.8, 1.25, 2.2, 10.3]
    >>> uniq, mult = signal.unique_roots(vals, tol=2e-2, rtype='avg')

    Check which roots have multiplicity larger than 1:

    >>> uniq[mult > 1]
    array([ 1.305])
    """
    # NOTE(review): `reduce` shadows the functools builtin-style name; it is
    # a local alias for the chosen aggregation function.
    if rtype in ['max', 'maximum']:
        reduce = np.max
    elif rtype in ['min', 'minimum']:
        reduce = np.min
    elif rtype in ['avg', 'mean']:
        reduce = np.mean
    else:
        raise ValueError("`rtype` must be one of "
                         "{'max', 'maximum', 'min', 'minimum', 'avg', "
                         "'mean'}")

    p = np.asarray(p)

    # Embed the roots in the plane so a cKDTree can answer "all roots
    # within tol of this one" queries efficiently.
    points = np.empty((len(p), 2))
    points[:, 0] = np.real(p)
    points[:, 1] = np.imag(p)
    tree = cKDTree(points)

    p_unique = []
    p_multiplicity = []
    used = np.zeros(len(p), dtype=bool)
    for i in range(len(p)):
        if used[i]:
            continue

        # Greedy grouping in input order; already-claimed roots are skipped.
        group = tree.query_ball_point(points[i], tol)
        group = [x for x in group if not used[x]]

        p_unique.append(reduce(p[group]))
        p_multiplicity.append(len(group))

        used[group] = True

    return np.asarray(p_unique), np.asarray(p_multiplicity)


def invres(r, p, k, tol=1e-3, rtype='avg'):
    """Compute b(s) and a(s)
from partial fraction expansion. If `M` is the degree of numerator `b` and `N` the degree of denominator `a`:: b(s) b[0] s**(M) + b[1] s**(M-1) + ... + b[M] H(s) = ------ = ------------------------------------------ a(s) a[0] s**(N) + a[1] s**(N-1) + ... + a[N] then the partial-fraction expansion H(s) is defined as:: r[0] r[1] r[-1] = -------- + -------- + ... + --------- + k(s) (s-p[0]) (s-p[1]) (s-p[-1]) If there are any repeated roots (closer together than `tol`), then H(s) has terms like:: r[i] r[i+1] r[i+n-1] -------- + ----------- + ... + ----------- (s-p[i]) (s-p[i])**2 (s-p[i])**n This function is used for polynomials in positive powers of s or z, such as analog filters or digital filters in controls engineering. For negative powers of z (typical for digital filters in DSP), use `invresz`. Parameters ---------- r : array_like Residues corresponding to the poles. For repeated poles, the residues must be ordered to correspond to ascending by power fractions. p : array_like Poles. Equal poles must be adjacent. k : array_like Coefficients of the direct polynomial term. tol : float, optional The tolerance for two roots to be considered equal in terms of the distance between them. Default is 1e-3. See `unique_roots` for further details. rtype : {'avg', 'min', 'max'}, optional Method for computing a root to represent a group of identical roots. Default is 'avg'. See `unique_roots` for further details. Returns ------- b : ndarray Numerator polynomial coefficients. a : ndarray Denominator polynomial coefficients. 
See Also
    --------
    residue, invresz, unique_roots

    """
    r = np.atleast_1d(r)
    p = np.atleast_1d(p)
    # Drop leading ('f'ront) zeros: coefficients are in descending powers.
    k = np.trim_zeros(np.atleast_1d(k), 'f')

    unique_poles, multiplicity = _group_poles(p, tol, rtype)
    # `factors` holds denominator/(s-p)**j for every pole power, so each
    # residue multiplies its own factor; `denominator` is the full product.
    factors, denominator = _compute_factors(unique_poles, multiplicity,
                                            include_powers=True)

    if len(k) == 0:
        numerator = 0
    else:
        numerator = np.polymul(k, denominator)

    # NOTE(review): the loop variable `residue` shadows the module-level
    # `residue` function; harmless here, but easy to trip over.
    for residue, factor in zip(r, factors):
        numerator = np.polyadd(numerator, residue * factor)

    return numerator, denominator


def _compute_factors(roots, multiplicity, include_powers=False):
    """Compute the total polynomial divided by factors for each root.

    For each root ``roots[i]`` with multiplicity ``m_i``, produce the
    polynomial ``prod_j (x - roots[j])**m_j / (x - roots[i])**m_i``
    (and, when ``include_powers`` is true, the same divided by every
    intermediate power ``(x - roots[i])**k`` for ``k = 1 .. m_i``).

    Returns ``(factors, current)`` where ``current`` is the full product
    polynomial over all roots.
    """
    # `suffixes[i]` is the product of the factors strictly to the right of
    # root i; built by scanning the roots from last to first.
    current = np.array([1])
    suffixes = [current]
    for pole, mult in zip(roots[-1:0:-1], multiplicity[-1:0:-1]):
        monomial = np.array([1, -pole])
        for _ in range(mult):
            current = np.polymul(current, monomial)
        suffixes.append(current)
    suffixes = suffixes[::-1]

    # Forward pass: `current` accumulates the product of factors to the
    # left, so prefix * suffix skips exactly this root's factor(s).
    factors = []
    current = np.array([1])
    for pole, mult, suffix in zip(roots, multiplicity, suffixes):
        monomial = np.array([1, -pole])
        block = []
        for i in range(mult):
            if i == 0 or include_powers:
                block.append(np.polymul(current, suffix))
            current = np.polymul(current, monomial)
        # Reverse so powers come out in ascending order per pole.
        factors.extend(reversed(block))

    return factors, current


def _compute_residues(poles, multiplicity, numerator):
    """Compute residues of ``numerator / prod((x - p)**m)`` per pole power.

    Simple poles use the direct evaluation formula; repeated poles are
    handled by successive polynomial deflation (divide out ``(x - p)``,
    subtract the recovered term, repeat).
    """
    denominator_factors, _ = _compute_factors(poles, multiplicity)
    numerator = numerator.astype(poles.dtype)

    residues = []
    for pole, mult, factor in zip(poles, multiplicity,
                                  denominator_factors):
        if mult == 1:
            # r = b(p) / [a(x)/(x-p)] evaluated at x = p.
            residues.append(np.polyval(numerator, pole) /
                            np.polyval(factor, pole))
        else:
            numer = numerator.copy()
            monomial = np.array([1, -pole])
            factor, d = np.polydiv(factor, monomial)

            block = []
            for _ in range(mult):
                numer, n = np.polydiv(numer, monomial)
                r = n[0] / d[0]
                numer = np.polysub(numer, r * factor)
                block.append(r)

            # Deflation recovers highest power first; store ascending.
            residues.extend(reversed(block))

    return np.asarray(residues)


def residue(b, a, tol=1e-3, rtype='avg'):
    """Compute partial-fraction expansion of b(s) / a(s).
If `M` is the degree of numerator `b` and `N` the degree of denominator `a`:: b(s) b[0] s**(M) + b[1] s**(M-1) + ... + b[M] H(s) = ------ = ------------------------------------------ a(s) a[0] s**(N) + a[1] s**(N-1) + ... + a[N] then the partial-fraction expansion H(s) is defined as:: r[0] r[1] r[-1] = -------- + -------- + ... + --------- + k(s) (s-p[0]) (s-p[1]) (s-p[-1]) If there are any repeated roots (closer together than `tol`), then H(s) has terms like:: r[i] r[i+1] r[i+n-1] -------- + ----------- + ... + ----------- (s-p[i]) (s-p[i])**2 (s-p[i])**n This function is used for polynomials in positive powers of s or z, such as analog filters or digital filters in controls engineering. For negative powers of z (typical for digital filters in DSP), use `residuez`. See Notes for details about the algorithm. Parameters ---------- b : array_like Numerator polynomial coefficients. a : array_like Denominator polynomial coefficients. tol : float, optional The tolerance for two roots to be considered equal in terms of the distance between them. Default is 1e-3. See `unique_roots` for further details. rtype : {'avg', 'min', 'max'}, optional Method for computing a root to represent a group of identical roots. Default is 'avg'. See `unique_roots` for further details. Returns ------- r : ndarray Residues corresponding to the poles. For repeated poles, the residues are ordered to correspond to ascending by power fractions. p : ndarray Poles ordered by magnitude in ascending order. k : ndarray Coefficients of the direct polynomial term. See Also -------- invres, residuez, numpy.poly, unique_roots Notes ----- The "deflation through subtraction" algorithm is used for computations --- method 6 in [1]_. The form of partial fraction expansion depends on poles multiplicity in the exact mathematical sense. However there is no way to exactly determine multiplicity of roots of a polynomial in numerical computing. 
Thus you should think of the result of `residue` with given `tol` as partial fraction expansion computed for the denominator composed of the computed poles with empirically determined multiplicity. The choice of `tol` can drastically change the result if there are close poles. References ---------- .. [1] J. F. Mahoney, B. D. Sivazlian, "Partial fractions expansion: a review of computational methodology and efficiency", Journal of Computational and Applied Mathematics, Vol. 9, 1983. """ b = np.asarray(b) a = np.asarray(a) if (np.issubdtype(b.dtype, np.complexfloating) or np.issubdtype(a.dtype, np.complexfloating)): b = b.astype(complex) a = a.astype(complex) else: b = b.astype(float) a = a.astype(float) b = np.trim_zeros(np.atleast_1d(b), 'f') a = np.trim_zeros(np.atleast_1d(a), 'f') if a.size == 0: raise ValueError("Denominator `a` is zero.") poles = np.roots(a) if b.size == 0: return np.zeros(poles.shape), cmplx_sort(poles)[0], np.array([]) if len(b) < len(a): k = np.empty(0) else: k, b = np.polydiv(b, a) unique_poles, multiplicity = unique_roots(poles, tol=tol, rtype=rtype) unique_poles, order = cmplx_sort(unique_poles) multiplicity = multiplicity[order] residues = _compute_residues(unique_poles, multiplicity, b) index = 0 for pole, mult in zip(unique_poles, multiplicity): poles[index:index + mult] = pole index += mult return residues / a[0], poles, k def residuez(b, a, tol=1e-3, rtype='avg'): """Compute partial-fraction expansion of b(z) / a(z). If `M` is the degree of numerator `b` and `N` the degree of denominator `a`:: b(z) b[0] + b[1] z**(-1) + ... + b[M] z**(-M) H(z) = ------ = ------------------------------------------ a(z) a[0] + a[1] z**(-1) + ... + a[N] z**(-N) then the partial-fraction expansion H(z) is defined as:: r[0] r[-1] = --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ... 
(1-p[0]z**(-1)) (1-p[-1]z**(-1)) If there are any repeated roots (closer than `tol`), then the partial fraction expansion has terms like:: r[i] r[i+1] r[i+n-1] -------------- + ------------------ + ... + ------------------ (1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n This function is used for polynomials in negative powers of z, such as digital filters in DSP. For positive powers, use `residue`. See Notes of `residue` for details about the algorithm. Parameters ---------- b : array_like Numerator polynomial coefficients. a : array_like Denominator polynomial coefficients. tol : float, optional The tolerance for two roots to be considered equal in terms of the distance between them. Default is 1e-3. See `unique_roots` for further details. rtype : {'avg', 'min', 'max'}, optional Method for computing a root to represent a group of identical roots. Default is 'avg'. See `unique_roots` for further details. Returns ------- r : ndarray Residues corresponding to the poles. For repeated poles, the residues are ordered to correspond to ascending by power fractions. p : ndarray Poles ordered by magnitude in ascending order. k : ndarray Coefficients of the direct polynomial term. 
See Also
    --------
    invresz, residue, unique_roots

    """
    b = np.asarray(b)
    a = np.asarray(a)
    # Promote both to a common real or complex float type up front.
    if (np.issubdtype(b.dtype, np.complexfloating)
            or np.issubdtype(a.dtype, np.complexfloating)):
        b = b.astype(complex)
        a = a.astype(complex)
    else:
        b = b.astype(float)
        a = a.astype(float)

    # Coefficients are in ascending powers of z**-1 here, so trailing
    # ('b'ack) zeros are the redundant ones.
    b = np.trim_zeros(np.atleast_1d(b), 'b')
    a = np.trim_zeros(np.atleast_1d(a), 'b')

    if a.size == 0:
        raise ValueError("Denominator `a` is zero.")
    elif a[0] == 0:
        # NOTE(review): "determinant" in this message looks like a typo
        # for "denominator" (message text left unchanged in this pass).
        raise ValueError("First coefficient of determinant `a` must be "
                         "non-zero.")

    poles = np.roots(a)
    if b.size == 0:
        return np.zeros(poles.shape), cmplx_sort(poles)[0], np.array([])

    # Reverse coefficient order to reuse the positive-power machinery
    # (polynomials in z**-1 become ordinary polynomials when reversed).
    b_rev = b[::-1]
    a_rev = a[::-1]

    if len(b_rev) < len(a_rev):
        k_rev = np.empty(0)
    else:
        k_rev, b_rev = np.polydiv(b_rev, a_rev)

    unique_poles, multiplicity = unique_roots(poles, tol=tol, rtype=rtype)
    unique_poles, order = cmplx_sort(unique_poles)
    multiplicity = multiplicity[order]

    # Residues are computed at the reciprocal poles (z**-1 substitution).
    residues = _compute_residues(1 / unique_poles, multiplicity, b_rev)

    # Expand grouped poles back to a flat array and record each term's
    # power 1..mult for the scaling below.
    index = 0
    powers = np.empty(len(residues), dtype=int)
    for pole, mult in zip(unique_poles, multiplicity):
        poles[index:index + mult] = pole
        powers[index:index + mult] = 1 + np.arange(mult)
        index += mult

    residues *= (-poles) ** powers / a_rev[0]

    return residues, poles, k_rev[::-1]


def _group_poles(poles, tol, rtype):
    """Group adjacent poles that lie within `tol` of each other.

    Unlike `unique_roots`, equal poles must already be adjacent in
    `poles`; a single left-to-right scan closes a group as soon as a
    pole farther than `tol` from the group's first pole is seen.

    Returns ``(unique, multiplicity)`` as ndarrays.
    """
    if rtype in ['max', 'maximum']:
        reduce = np.max
    elif rtype in ['min', 'minimum']:
        reduce = np.min
    elif rtype in ['avg', 'mean']:
        reduce = np.mean
    else:
        raise ValueError("`rtype` must be one of "
                         "{'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}")

    unique = []
    multiplicity = []

    pole = poles[0]
    block = [pole]
    for i in range(1, len(poles)):
        if abs(poles[i] - pole) <= tol:
            # NOTE(review): this appends the group's *first* pole again,
            # not poles[i] -- so `reduce(block)` always yields that first
            # pole regardless of rtype. Verify whether that is intended.
            block.append(pole)
        else:
            unique.append(reduce(block))
            multiplicity.append(len(block))
            pole = poles[i]
            block = [pole]

    unique.append(reduce(block))
    multiplicity.append(len(block))

    return np.asarray(unique), np.asarray(multiplicity)


def invresz(r, p, k, tol=1e-3, rtype='avg'):
    """Compute b(z) and a(z) from partial fraction expansion.
If `M` is the degree of numerator `b` and `N` the degree of denominator `a`:: b(z) b[0] + b[1] z**(-1) + ... + b[M] z**(-M) H(z) = ------ = ------------------------------------------ a(z) a[0] + a[1] z**(-1) + ... + a[N] z**(-N) then the partial-fraction expansion H(z) is defined as:: r[0] r[-1] = --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ... (1-p[0]z**(-1)) (1-p[-1]z**(-1)) If there are any repeated roots (closer than `tol`), then the partial fraction expansion has terms like:: r[i] r[i+1] r[i+n-1] -------------- + ------------------ + ... + ------------------ (1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n This function is used for polynomials in negative powers of z, such as digital filters in DSP. For positive powers, use `invres`. Parameters ---------- r : array_like Residues corresponding to the poles. For repeated poles, the residues must be ordered to correspond to ascending by power fractions. p : array_like Poles. Equal poles must be adjacent. k : array_like Coefficients of the direct polynomial term. tol : float, optional The tolerance for two roots to be considered equal in terms of the distance between them. Default is 1e-3. See `unique_roots` for further details. rtype : {'avg', 'min', 'max'}, optional Method for computing a root to represent a group of identical roots. Default is 'avg'. See `unique_roots` for further details. Returns ------- b : ndarray Numerator polynomial coefficients. a : ndarray Denominator polynomial coefficients. 
See Also -------- residuez, unique_roots, invres """ r = np.atleast_1d(r) p = np.atleast_1d(p) k = np.trim_zeros(np.atleast_1d(k), 'b') unique_poles, multiplicity = _group_poles(p, tol, rtype) factors, denominator = _compute_factors(unique_poles, multiplicity, include_powers=True) if len(k) == 0: numerator = 0 else: numerator = np.polymul(k[::-1], denominator[::-1]) for residue, factor in zip(r, factors): numerator = np.polyadd(numerator, residue * factor[::-1]) return numerator[::-1], denominator def resample(x, num, t=None, axis=0, window=None): """ Resample `x` to `num` samples using Fourier method along the given axis. The resampled signal starts at the same value as `x` but is sampled with a spacing of ``len(x) / num * (spacing of x)``. Because a Fourier method is used, the signal is assumed to be periodic. Parameters ---------- x : array_like The data to be resampled. num : int The number of samples in the resampled signal. t : array_like, optional If `t` is given, it is assumed to be the equally spaced sample positions associated with the signal data in `x`. axis : int, optional The axis of `x` that is resampled. Default is 0. window : array_like, callable, string, float, or tuple, optional Specifies the window applied to the signal in the Fourier domain. See below for details. Returns ------- resampled_x or (resampled_x, resampled_t) Either the resampled array, or, if `t` was given, a tuple containing the resampled array and the corresponding resampled positions. See Also -------- decimate : Downsample the signal after applying an FIR or IIR filter. resample_poly : Resample using polyphase filtering and an FIR filter. Notes ----- The argument `window` controls a Fourier-domain window that tapers the Fourier spectrum before zero-padding to alleviate ringing in the resampled values for sampled signals you didn't intend to be interpreted as band-limited. If `window` is a function, then it is called with a vector of inputs indicating the frequency bins (i.e. 
fftfreq(x.shape[axis]) ). If `window` is an array of the same length as `x.shape[axis]` it is assumed to be the window to be applied directly in the Fourier domain (with dc and low-frequency first). For any other type of `window`, the function `scipy.signal.get_window` is called to generate the window. The first sample of the returned vector is the same as the first sample of the input vector. The spacing between samples is changed from ``dx`` to ``dx * len(x) / num``. If `t` is not None, then it is used solely to calculate the resampled positions `resampled_t` As noted, `resample` uses FFT transformations, which can be very slow if the number of input or output samples is large and prime; see `scipy.fft.fft`. Examples -------- Note that the end of the resampled data rises to meet the first sample of the next cycle: >>> from scipy import signal >>> x = np.linspace(0, 10, 20, endpoint=False) >>> y = np.cos(-x**2/6.0) >>> f = signal.resample(y, 100) >>> xnew = np.linspace(0, 10, 100, endpoint=False) >>> import matplotlib.pyplot as plt >>> plt.plot(x, y, 'go-', xnew, f, '.-', 10, y[0], 'ro') >>> plt.legend(['data', 'resampled'], loc='best') >>> plt.show() """ x = np.asarray(x) Nx = x.shape[axis] # Check if we can use faster real FFT real_input = np.isrealobj(x) # Forward transform if real_input: X = sp_fft.rfft(x, axis=axis) else: # Full complex FFT X = sp_fft.fft(x, axis=axis) # Apply window to spectrum if window is not None: if callable(window): W = window(sp_fft.fftfreq(Nx)) elif isinstance(window, np.ndarray): if window.shape != (Nx,): raise ValueError('window must have the same length as data') W = window else: W = sp_fft.ifftshift(get_window(window, Nx)) newshape_W = [1] * x.ndim newshape_W[axis] = X.shape[axis] if real_input: # Fold the window back on itself to mimic complex behavior W_real = W.copy() W_real[1:] += W_real[-1:0:-1] W_real[1:] *= 0.5 X *= W_real[:newshape_W[axis]].reshape(newshape_W) else: X *= W.reshape(newshape_W) # Copy each half of the 
original spectrum to the output spectrum, either # truncating high frequences (downsampling) or zero-padding them # (upsampling) # Placeholder array for output spectrum newshape = list(x.shape) if real_input: newshape[axis] = num // 2 + 1 else: newshape[axis] = num Y = np.zeros(newshape, X.dtype) # Copy positive frequency components (and Nyquist, if present) N = min(num, Nx) nyq = N // 2 + 1 # Slice index that includes Nyquist if present sl = [slice(None)] * x.ndim sl[axis] = slice(0, nyq) Y[tuple(sl)] = X[tuple(sl)] if not real_input: # Copy negative frequency components if N > 2: # (slice expression doesn't collapse to empty array) sl[axis] = slice(nyq - N, None) Y[tuple(sl)] = X[tuple(sl)] # Split/join Nyquist component(s) if present # So far we have set Y[+N/2]=X[+N/2] if N % 2 == 0: if num < Nx: # downsampling if real_input: sl[axis] = slice(N//2, N//2 + 1) Y[tuple(sl)] *= 2. else: # select the component of Y at frequency +N/2, # add the component of X at -N/2 sl[axis] = slice(-N//2, -N//2 + 1) Y[tuple(sl)] += X[tuple(sl)] elif Nx < num: # upsampling # select the component at frequency +N/2 and halve it sl[axis] = slice(N//2, N//2 + 1) Y[tuple(sl)] *= 0.5 if not real_input: temp = Y[tuple(sl)] # set the component at -N/2 equal to the component at +N/2 sl[axis] = slice(num-N//2, num-N//2 + 1) Y[tuple(sl)] = temp # Inverse transform if real_input: y = sp_fft.irfft(Y, num, axis=axis) else: y = sp_fft.ifft(Y, axis=axis, overwrite_x=True) y *= (float(num) / float(Nx)) if t is None: return y else: new_t = np.arange(0, num) * (t[1] - t[0]) * Nx / float(num) + t[0] return y, new_t def resample_poly(x, up, down, axis=0, window=('kaiser', 5.0), padtype='constant', cval=None): """ Resample `x` along the given axis using polyphase filtering. The signal `x` is upsampled by the factor `up`, a zero-phase low-pass FIR filter is applied, and then it is downsampled by the factor `down`. The resulting sample rate is ``up / down`` times the original sample rate. 
By default, values beyond the boundary of the signal are assumed to be zero during the filtering step. Parameters ---------- x : array_like The data to be resampled. up : int The upsampling factor. down : int The downsampling factor. axis : int, optional The axis of `x` that is resampled. Default is 0. window : string, tuple, or array_like, optional Desired window to use to design the low-pass filter, or the FIR filter coefficients to employ. See below for details. padtype : string, optional `constant`, `line`, `mean`, `median`, `maximum`, `minimum` or any of the other signal extension modes supported by `scipy.signal.upfirdn`. Changes assumptions on values beyond the boundary. If `constant`, assumed to be `cval` (default zero). If `line` assumed to continue a linear trend defined by the first and last points. `mean`, `median`, `maximum` and `minimum` work as in `np.pad` and assume that the values beyond the boundary are the mean, median, maximum or minimum respectively of the array along the axis. .. versionadded:: 1.4.0 cval : float, optional Value to use if `padtype='constant'`. Default is zero. .. versionadded:: 1.4.0 Returns ------- resampled_x : array The resampled array. See Also -------- decimate : Downsample the signal after applying an FIR or IIR filter. resample : Resample up or down using the FFT method. Notes ----- This polyphase method will likely be faster than the Fourier method in `scipy.signal.resample` when the number of samples is large and prime, or when the number of samples is large and `up` and `down` share a large greatest common denominator. The length of the FIR filter used will depend on ``max(up, down) // gcd(up, down)``, and the number of operations during polyphase filtering will depend on the filter length and `down` (see `scipy.signal.upfirdn` for details). The argument `window` specifies the FIR low-pass filter design. If `window` is an array_like it is assumed to be the FIR filter coefficients. 
Note that the FIR filter is applied after the upsampling step, so it should be designed to operate on a signal at a sampling frequency higher than the original by a factor of `up//gcd(up, down)`. This function's output will be centered with respect to this array, so it is best to pass a symmetric filter with an odd number of samples if, as is usually the case, a zero-phase filter is desired. For any other type of `window`, the functions `scipy.signal.get_window` and `scipy.signal.firwin` are called to generate the appropriate filter coefficients. The first sample of the returned vector is the same as the first sample of the input vector. The spacing between samples is changed from ``dx`` to ``dx * down / float(up)``. Examples -------- By default, the end of the resampled data rises to meet the first sample of the next cycle for the FFT method, and gets closer to zero for the polyphase method: >>> from scipy import signal >>> x = np.linspace(0, 10, 20, endpoint=False) >>> y = np.cos(-x**2/6.0) >>> f_fft = signal.resample(y, 100) >>> f_poly = signal.resample_poly(y, 100, 20) >>> xnew = np.linspace(0, 10, 100, endpoint=False) >>> import matplotlib.pyplot as plt >>> plt.plot(xnew, f_fft, 'b.-', xnew, f_poly, 'r.-') >>> plt.plot(x, y, 'ko-') >>> plt.plot(10, y[0], 'bo', 10, 0., 'ro') # boundaries >>> plt.legend(['resample', 'resamp_poly', 'data'], loc='best') >>> plt.show() This default behaviour can be changed by using the padtype option: >>> import numpy as np >>> from scipy import signal >>> N = 5 >>> x = np.linspace(0, 1, N, endpoint=False) >>> y = 2 + x**2 - 1.7*np.sin(x) + .2*np.cos(11*x) >>> y2 = 1 + x**3 + 0.1*np.sin(x) + .1*np.cos(11*x) >>> Y = np.stack([y, y2], axis=-1) >>> up = 4 >>> xr = np.linspace(0, 1, N*up, endpoint=False) >>> y2 = signal.resample_poly(Y, up, 1, padtype='constant') >>> y3 = signal.resample_poly(Y, up, 1, padtype='mean') >>> y4 = signal.resample_poly(Y, up, 1, padtype='line') >>> import matplotlib.pyplot as plt >>> for i in [0,1]: ... 
plt.figure() ... plt.plot(xr, y4[:,i], 'g.', label='line') ... plt.plot(xr, y3[:,i], 'y.', label='mean') ... plt.plot(xr, y2[:,i], 'r.', label='constant') ... plt.plot(x, Y[:,i], 'k-') ... plt.legend() >>> plt.show() """ x = np.asarray(x) if up != int(up): raise ValueError("up must be an integer") if down != int(down): raise ValueError("down must be an integer") up = int(up) down = int(down) if up < 1 or down < 1: raise ValueError('up and down must be >= 1') if cval is not None and padtype != 'constant': raise ValueError('cval has no effect when padtype is ', padtype) # Determine our up and down factors # Use a rational approximation to save computation time on really long # signals g_ = math.gcd(up, down) up //= g_ down //= g_ if up == down == 1: return x.copy() n_in = x.shape[axis] n_out = n_in * up n_out = n_out // down + bool(n_out % down) if isinstance(window, (list, np.ndarray)): window = np.array(window) # use array to force a copy (we modify it) if window.ndim > 1: raise ValueError('window must be 1-D') half_len = (window.size - 1) // 2 h = window else: # Design a linear-phase low-pass FIR filter max_rate = max(up, down) f_c = 1. / max_rate # cutoff of FIR filter (rel. to Nyquist) half_len = 10 * max_rate # reasonable cutoff for our sinc-like function h = firwin(2 * half_len + 1, f_c, window=window) h *= up # Zero-pad our filter to put the output samples at the center n_pre_pad = (down - half_len % down) n_post_pad = 0 n_pre_remove = (half_len + n_pre_pad) // down # We should rarely need to do this given our filter lengths... 
while _output_len(len(h) + n_pre_pad + n_post_pad, n_in, up, down) < n_out + n_pre_remove: n_post_pad += 1 h = np.concatenate((np.zeros(n_pre_pad, dtype=h.dtype), h, np.zeros(n_post_pad, dtype=h.dtype))) n_pre_remove_end = n_pre_remove + n_out # Remove background depending on the padtype option funcs = {'mean': np.mean, 'median': np.median, 'minimum': np.amin, 'maximum': np.amax} upfirdn_kwargs = {'mode': 'constant', 'cval': 0} if padtype in funcs: background_values = funcs[padtype](x, axis=axis, keepdims=True) elif padtype in _upfirdn_modes: upfirdn_kwargs = {'mode': padtype} if padtype == 'constant': if cval is None: cval = 0 upfirdn_kwargs['cval'] = cval else: raise ValueError( 'padtype must be one of: maximum, mean, median, minimum, ' + ', '.join(_upfirdn_modes)) if padtype in funcs: x = x - background_values # filter then remove excess y = upfirdn(h, x, up, down, axis=axis, **upfirdn_kwargs) keep = [slice(None), ]*x.ndim keep[axis] = slice(n_pre_remove, n_pre_remove_end) y_keep = y[tuple(keep)] # Add background back if padtype in funcs: y_keep += background_values return y_keep def vectorstrength(events, period): ''' Determine the vector strength of the events corresponding to the given period. The vector strength is a measure of phase synchrony, how well the timing of the events is synchronized to a single period of a periodic signal. If multiple periods are used, calculate the vector strength of each. This is called the "resonating vector strength". Parameters ---------- events : 1D array_like An array of time points containing the timing of the events. period : float or array_like The period of the signal that the events should synchronize to. The period is in the same units as `events`. It can also be an array of periods, in which case the outputs are arrays of the same length. Returns ------- strength : float or 1D array The strength of the synchronization. 1.0 is perfect synchronization and 0.0 is no synchronization. 
If `period` is an array, this is also an array with each element containing the vector strength at the corresponding period. phase : float or array The phase that the events are most strongly synchronized to in radians. If `period` is an array, this is also an array with each element containing the phase for the corresponding period. References ---------- van Hemmen, JL, Longtin, A, and Vollmayr, AN. Testing resonating vector strength: Auditory system, electric fish, and noise. Chaos 21, 047508 (2011); :doi:`10.1063/1.3670512`. van Hemmen, JL. Vector strength after Goldberg, Brown, and von Mises: biological and mathematical perspectives. Biol Cybern. 2013 Aug;107(4):385-96. :doi:`10.1007/s00422-013-0561-7`. van Hemmen, JL and Vollmayr, AN. Resonating vector strength: what happens when we vary the "probing" frequency while keeping the spike times fixed. Biol Cybern. 2013 Aug;107(4):491-94. :doi:`10.1007/s00422-013-0560-8`. ''' events = np.asarray(events) period = np.asarray(period) if events.ndim > 1: raise ValueError('events cannot have dimensions more than 1') if period.ndim > 1: raise ValueError('period cannot have dimensions more than 1') # we need to know later if period was originally a scalar scalarperiod = not period.ndim events = np.atleast_2d(events) period = np.atleast_2d(period) if (period <= 0).any(): raise ValueError('periods must be positive') # this converts the times to vectors vectors = np.exp(np.dot(2j*np.pi/period.T, events)) # the vector strength is just the magnitude of the mean of the vectors # the vector phase is the angle of the mean of the vectors vectormean = np.mean(vectors, axis=1) strength = abs(vectormean) phase = np.angle(vectormean) # if the original period was a scalar, return scalars if scalarperiod: strength = strength[0] phase = phase[0] return strength, phase def detrend(data, axis=-1, type='linear', bp=0, overwrite_data=False): """ Remove linear trend along axis from data. Parameters ---------- data : array_like The input data. 
axis : int, optional The axis along which to detrend the data. By default this is the last axis (-1). type : {'linear', 'constant'}, optional The type of detrending. If ``type == 'linear'`` (default), the result of a linear least-squares fit to `data` is subtracted from `data`. If ``type == 'constant'``, only the mean of `data` is subtracted. bp : array_like of ints, optional A sequence of break points. If given, an individual linear fit is performed for each part of `data` between two break points. Break points are specified as indices into `data`. This parameter only has an effect when ``type == 'linear'``. overwrite_data : bool, optional If True, perform in place detrending and avoid a copy. Default is False Returns ------- ret : ndarray The detrended input data. Examples -------- >>> from scipy import signal >>> randgen = np.random.RandomState(9) >>> npoints = 1000 >>> noise = randgen.randn(npoints) >>> x = 3 + 2*np.linspace(0, 1, npoints) + noise >>> (signal.detrend(x) - noise).max() < 0.01 True """ if type not in ['linear', 'l', 'constant', 'c']: raise ValueError("Trend type must be 'linear' or 'constant'.") data = np.asarray(data) dtype = data.dtype.char if dtype not in 'dfDF': dtype = 'd' if type in ['constant', 'c']: ret = data - np.expand_dims(np.mean(data, axis), axis) return ret else: dshape = data.shape N = dshape[axis] bp = np.sort(np.unique(np.r_[0, bp, N])) if np.any(bp > N): raise ValueError("Breakpoints must be less than length " "of data along given axis.") Nreg = len(bp) - 1 # Restructure data so that axis is along first dimension and # all other dimensions are collapsed into second dimension rnk = len(dshape) if axis < 0: axis = axis + rnk newdims = np.r_[axis, 0:axis, axis + 1:rnk] newdata = np.reshape(np.transpose(data, tuple(newdims)), (N, _prod(dshape) // N)) if not overwrite_data: newdata = newdata.copy() # make sure we have a copy if newdata.dtype.char not in 'dfDF': newdata = newdata.astype(dtype) # Find leastsq fit and remove it for 
each piece for m in range(Nreg): Npts = bp[m + 1] - bp[m] A = np.ones((Npts, 2), dtype) A[:, 0] = np.cast[dtype](np.arange(1, Npts + 1) * 1.0 / Npts) sl = slice(bp[m], bp[m + 1]) coef, resids, rank, s = linalg.lstsq(A, newdata[sl]) newdata[sl] = newdata[sl] - np.dot(A, coef) # Put data back in original shape. tdshape = np.take(dshape, newdims, 0) ret = np.reshape(newdata, tuple(tdshape)) vals = list(range(1, rnk)) olddims = vals[:axis] + [0] + vals[axis:] ret = np.transpose(ret, tuple(olddims)) return ret def lfilter_zi(b, a): """ Construct initial conditions for lfilter for step response steady-state. Compute an initial state `zi` for the `lfilter` function that corresponds to the steady state of the step response. A typical use of this function is to set the initial state so that the output of the filter starts at the same value as the first element of the signal to be filtered. Parameters ---------- b, a : array_like (1-D) The IIR filter coefficients. See `lfilter` for more information. Returns ------- zi : 1-D ndarray The initial state for the filter. See Also -------- lfilter, lfiltic, filtfilt Notes ----- A linear filter with order m has a state space representation (A, B, C, D), for which the output y of the filter can be expressed as:: z(n+1) = A*z(n) + B*x(n) y(n) = C*z(n) + D*x(n) where z(n) is a vector of length m, A has shape (m, m), B has shape (m, 1), C has shape (1, m) and D has shape (1, 1) (assuming x(n) is a scalar). lfilter_zi solves:: zi = A*zi + B In other words, it finds the initial condition for which the response to an input of all ones is a constant. Given the filter coefficients `a` and `b`, the state space matrices for the transposed direct form II implementation of the linear filter, which is the implementation used by scipy.signal.lfilter, are:: A = scipy.linalg.companion(a).T B = b[1:] - a[1:]*b[0] assuming `a[0]` is 1.0; if `a[0]` is not 1, `a` and `b` are first divided by a[0]. 
Examples -------- The following code creates a lowpass Butterworth filter. Then it applies that filter to an array whose values are all 1.0; the output is also all 1.0, as expected for a lowpass filter. If the `zi` argument of `lfilter` had not been given, the output would have shown the transient signal. >>> from numpy import array, ones >>> from scipy.signal import lfilter, lfilter_zi, butter >>> b, a = butter(5, 0.25) >>> zi = lfilter_zi(b, a) >>> y, zo = lfilter(b, a, ones(10), zi=zi) >>> y array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]) Another example: >>> x = array([0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0]) >>> y, zf = lfilter(b, a, x, zi=zi*x[0]) >>> y array([ 0.5 , 0.5 , 0.5 , 0.49836039, 0.48610528, 0.44399389, 0.35505241]) Note that the `zi` argument to `lfilter` was computed using `lfilter_zi` and scaled by `x[0]`. Then the output `y` has no transient until the input drops from 0.5 to 0.0. """ # FIXME: Can this function be replaced with an appropriate # use of lfiltic? For example, when b,a = butter(N,Wn), # lfiltic(b, a, y=numpy.ones_like(a), x=numpy.ones_like(b)). # # We could use scipy.signal.normalize, but it uses warnings in # cases where a ValueError is more appropriate, and it allows # b to be 2D. b = np.atleast_1d(b) if b.ndim != 1: raise ValueError("Numerator b must be 1-D.") a = np.atleast_1d(a) if a.ndim != 1: raise ValueError("Denominator a must be 1-D.") while len(a) > 1 and a[0] == 0.0: a = a[1:] if a.size < 1: raise ValueError("There must be at least one nonzero `a` coefficient.") if a[0] != 1.0: # Normalize the coefficients so a[0] == 1. b = b / a[0] a = a / a[0] n = max(len(a), len(b)) # Pad a or b with zeros so they are the same length. 
if len(a) < n: a = np.r_[a, np.zeros(n - len(a))] elif len(b) < n: b = np.r_[b, np.zeros(n - len(b))] IminusA = np.eye(n - 1) - linalg.companion(a).T B = b[1:] - a[1:] * b[0] # Solve zi = A*zi + B zi = np.linalg.solve(IminusA, B) # For future reference: we could also use the following # explicit formulas to solve the linear system: # # zi = np.zeros(n - 1) # zi[0] = B.sum() / IminusA[:,0].sum() # asum = 1.0 # csum = 0.0 # for k in range(1,n-1): # asum += a[k] # csum += b[k] - a[k]*b[0] # zi[k] = asum*zi[0] - csum return zi def sosfilt_zi(sos): """ Construct initial conditions for sosfilt for step response steady-state. Compute an initial state `zi` for the `sosfilt` function that corresponds to the steady state of the step response. A typical use of this function is to set the initial state so that the output of the filter starts at the same value as the first element of the signal to be filtered. Parameters ---------- sos : array_like Array of second-order filter coefficients, must have shape ``(n_sections, 6)``. See `sosfilt` for the SOS filter format specification. Returns ------- zi : ndarray Initial conditions suitable for use with ``sosfilt``, shape ``(n_sections, 2)``. See Also -------- sosfilt, zpk2sos Notes ----- .. versionadded:: 0.16.0 Examples -------- Filter a rectangular pulse that begins at time 0, with and without the use of the `zi` argument of `scipy.signal.sosfilt`. >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> sos = signal.butter(9, 0.125, output='sos') >>> zi = signal.sosfilt_zi(sos) >>> x = (np.arange(250) < 100).astype(int) >>> f1 = signal.sosfilt(sos, x) >>> f2, zo = signal.sosfilt(sos, x, zi=zi) >>> plt.plot(x, 'k--', label='x') >>> plt.plot(f1, 'b', alpha=0.5, linewidth=2, label='filtered') >>> plt.plot(f2, 'g', alpha=
codeparrot/github-code-clean
# NOTE(review): pybindgen API-definition file for the ns-3 'stats' Python
# bindings.  Files of this shape are machine-generated by ns-3's binding
# scanner; statement order and the literal C++ type strings are significant
# to the generator, so this review adds documentation only and leaves every
# statement intact.
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers


import pybindgen.settings
import warnings


class ErrorHandler(pybindgen.settings.ErrorHandler):
    """Downgrade wrapper-generation failures to warnings so generation continues."""
    def handle_error(self, wrapper, exception, traceback_):
        # Emit a warning naming the failing wrapper; returning True tells
        # pybindgen the error was handled, so the offending wrapper is simply
        # skipped instead of aborting code generation.
        warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
        return True
pybindgen.settings.error_handler = ErrorHandler()


import sys


def module_init():
    """Create and return the root pybindgen Module for ns.stats (C++ namespace ::ns3)."""
    root_module = Module('ns.stats', cpp_namespace='::ns3')
    return root_module

def register_types(module):
    """Register every class, enum, container and type alias exposed by the stats module.

    Entries marked import_from_module='ns.core' are declared in the ns.core
    extension module and only referenced here; the rest are generated into
    this module.
    """
    root_module = module.get_root()
    ## log.h (module 'core'): ns3::LogLevel [enumeration]
    module.add_enum('LogLevel', ['LOG_NONE', 'LOG_ERROR', 'LOG_LEVEL_ERROR', 'LOG_WARN', 'LOG_LEVEL_WARN', 'LOG_DEBUG', 'LOG_LEVEL_DEBUG', 'LOG_INFO', 'LOG_LEVEL_INFO', 'LOG_FUNCTION', 'LOG_LEVEL_FUNCTION', 'LOG_LOGIC', 'LOG_LEVEL_LOGIC', 'LOG_ALL', 'LOG_LEVEL_ALL', 'LOG_PREFIX_FUNC', 'LOG_PREFIX_TIME', 'LOG_PREFIX_NODE', 'LOG_PREFIX_LEVEL', 'LOG_PREFIX_ALL'], import_from_module='ns.core')
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
    module.add_class('AttributeConstructionList', import_from_module='ns.core')
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
    module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
    typehandlers.add_type_alias('std::list< ns3::AttributeConstructionList::Item > const_iterator', 'ns3::AttributeConstructionList::CIterator')
    typehandlers.add_type_alias('std::list< ns3::AttributeConstructionList::Item > const_iterator*', 'ns3::AttributeConstructionList::CIterator*')
    typehandlers.add_type_alias('std::list< ns3::AttributeConstructionList::Item > const_iterator&', 'ns3::AttributeConstructionList::CIterator&')
    ## callback.h (module 'core'): ns3::CallbackBase [class]
    module.add_class('CallbackBase', import_from_module='ns.core')
    ## data-output-interface.h (module 'stats'): ns3::DataOutputCallback [class]
    module.add_class('DataOutputCallback', allow_subclassing=True)
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeAccessor> [struct]
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor'])
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeChecker> [struct]
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeChecker'])
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeValue> [struct]
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeValue'])
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::CallbackImplBase> [struct]
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase'])
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::EventImpl> [struct]
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::EventImpl'])
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::Hash::Implementation> [struct]
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation'])
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::SQLiteOutput> [struct]
    module.add_class('DefaultDeleter', template_parameters=['ns3::SQLiteOutput'])
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::TraceSourceAccessor> [struct]
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor'])
    ## event-id.h (module 'core'): ns3::EventId [class]
    module.add_class('EventId', import_from_module='ns.core')
    ## event-id.h (module 'core'): ns3::EventId::UID [enumeration]
    module.add_enum('UID', ['INVALID', 'NOW', 'DESTROY', 'RESERVED', 'VALID'], outer_class=root_module['ns3::EventId'], import_from_module='ns.core')
    ## file-helper.h (module 'stats'): ns3::FileHelper [class]
    module.add_class('FileHelper')
    ## gnuplot.h (module 'stats'): ns3::Gnuplot [class]
    module.add_class('Gnuplot')
    ## gnuplot.h (module 'stats'): ns3::GnuplotCollection [class]
    module.add_class('GnuplotCollection')
    ## gnuplot.h (module 'stats'): ns3::GnuplotDataset [class]
    module.add_class('GnuplotDataset')
    ## gnuplot-helper.h (module 'stats'): ns3::GnuplotHelper [class]
    module.add_class('GnuplotHelper')
    ## hash.h (module 'core'): ns3::Hasher [class]
    module.add_class('Hasher', import_from_module='ns.core')
    ## histogram.h (module 'stats'): ns3::Histogram [class]
    module.add_class('Histogram')
    ## log.h (module 'core'): ns3::LogComponent [class]
    module.add_class('LogComponent', import_from_module='ns.core')
    typehandlers.add_type_alias('std::map< std::string, ns3::LogComponent * >', 'ns3::LogComponent::ComponentList')
    typehandlers.add_type_alias('std::map< std::string, ns3::LogComponent * >*', 'ns3::LogComponent::ComponentList*')
    typehandlers.add_type_alias('std::map< std::string, ns3::LogComponent * >&', 'ns3::LogComponent::ComponentList&')
    ## object-base.h (module 'core'): ns3::ObjectBase [class]
    module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
    ## object.h (module 'core'): ns3::ObjectDeleter [struct]
    module.add_class('ObjectDeleter', import_from_module='ns.core')
    ## object-factory.h (module 'core'): ns3::ObjectFactory [class]
    module.add_class('ObjectFactory', import_from_module='ns.core')
    ## log.h (module 'core'): ns3::ParameterLogger [class]
    module.add_class('ParameterLogger', import_from_module='ns.core')
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::ObjectBase'], template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'])
    ## simulator.h (module 'core'): ns3::Simulator [class]
    module.add_class('Simulator', destructor_visibility='private', import_from_module='ns.core')
    ## simulator.h (module 'core'): ns3::Simulator [enumeration]
    module.add_enum('', ['NO_CONTEXT'], outer_class=root_module['ns3::Simulator'], import_from_module='ns.core')
    ## data-calculator.h (module 'stats'): ns3::StatisticalSummary [class]
    module.add_class('StatisticalSummary', allow_subclassing=True)
    ## nstime.h (module 'core'): ns3::Time [class]
    module.add_class('Time', import_from_module='ns.core')
    ## nstime.h (module 'core'): ns3::Time::Unit [enumeration]
    module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST', 'AUTO'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
    typehandlers.add_type_alias('void ( * ) ( ns3::Time )', 'ns3::Time::TracedCallback')
    typehandlers.add_type_alias('void ( * ) ( ns3::Time )*', 'ns3::Time::TracedCallback*')
    typehandlers.add_type_alias('void ( * ) ( ns3::Time )&', 'ns3::Time::TracedCallback&')
    ## nstime.h (module 'core'): ns3::TimeWithUnit [class]
    module.add_class('TimeWithUnit', import_from_module='ns.core')
    ## traced-value.h (module 'core'): ns3::TracedValue<bool> [class]
    module.add_class('TracedValue', import_from_module='ns.core', template_parameters=['bool'])
    ## traced-value.h (module 'core'): ns3::TracedValue<double> [class]
    module.add_class('TracedValue', import_from_module='ns.core', template_parameters=['double'])
    ## traced-value.h (module 'core'): ns3::TracedValue<unsigned char> [class]
    module.add_class('TracedValue', import_from_module='ns.core', template_parameters=['unsigned char'])
    ## traced-value.h (module 'core'): ns3::TracedValue<unsigned int> [class]
    module.add_class('TracedValue', import_from_module='ns.core', template_parameters=['unsigned int'])
    ## traced-value.h (module 'core'): ns3::TracedValue<unsigned short> [class]
    module.add_class('TracedValue', import_from_module='ns.core', template_parameters=['unsigned short'])
    ## type-id.h (module 'core'): ns3::TypeId [class]
    module.add_class('TypeId', import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
    module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId::SupportLevel [enumeration]
    module.add_enum('SupportLevel', ['SUPPORTED', 'DEPRECATED', 'OBSOLETE'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
    module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
    module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
    typehandlers.add_type_alias('uint32_t', 'ns3::TypeId::hash_t')
    typehandlers.add_type_alias('uint32_t*', 'ns3::TypeId::hash_t*')
    typehandlers.add_type_alias('uint32_t&', 'ns3::TypeId::hash_t&')
    ## empty.h (module 'core'): ns3::empty [class]
    module.add_class('empty', import_from_module='ns.core')
    ## int64x64-128.h (module 'core'): ns3::int64x64_t [class]
    module.add_class('int64x64_t', import_from_module='ns.core')
    ## int64x64-128.h (module 'core'): ns3::int64x64_t::impl_type [enumeration]
    module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core')
    ## gnuplot.h (module 'stats'): ns3::Gnuplot2dDataset [class]
    module.add_class('Gnuplot2dDataset', parent=root_module['ns3::GnuplotDataset'])
    ## gnuplot.h (module 'stats'): ns3::Gnuplot2dDataset::Style [enumeration]
    module.add_enum('Style', ['LINES', 'POINTS', 'LINES_POINTS', 'DOTS', 'IMPULSES', 'STEPS', 'FSTEPS', 'HISTEPS'], outer_class=root_module['ns3::Gnuplot2dDataset'])
    ## gnuplot.h (module 'stats'): ns3::Gnuplot2dDataset::ErrorBars [enumeration]
    module.add_enum('ErrorBars', ['NONE', 'X', 'Y', 'XY'], outer_class=root_module['ns3::Gnuplot2dDataset'])
    ## gnuplot.h (module 'stats'): ns3::Gnuplot2dFunction [class]
    module.add_class('Gnuplot2dFunction', parent=root_module['ns3::GnuplotDataset'])
    ## gnuplot.h (module 'stats'): ns3::Gnuplot3dDataset [class]
    module.add_class('Gnuplot3dDataset', parent=root_module['ns3::GnuplotDataset'])
    ## gnuplot.h (module 'stats'): ns3::Gnuplot3dFunction [class]
    module.add_class('Gnuplot3dFunction', parent=root_module['ns3::GnuplotDataset'])
    ## object.h (module 'core'): ns3::Object [class]
    module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    ## object.h (module 'core'): ns3::Object::AggregateIterator [class]
    module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::SQLiteOutput, ns3::empty, ns3::DefaultDeleter<ns3::SQLiteOutput> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::SQLiteOutput', 'ns3::empty', 'ns3::DefaultDeleter<ns3::SQLiteOutput>'])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'])
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
    module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    ## attribute.h (module 'core'): ns3::AttributeAccessor [class]
    module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    ## attribute.h (module 'core'): ns3::AttributeChecker [class]
    module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    ## attribute.h (module 'core'): ns3::AttributeValue [class]
    module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    ## boolean.h (module 'core'): ns3::BooleanChecker [class]
    module.add_class('BooleanChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## boolean.h (module 'core'): ns3::BooleanValue [class]
    module.add_class('BooleanValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## callback.h (module 'core'): ns3::CallbackChecker [class]
    module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## callback.h (module 'core'): ns3::CallbackImplBase [class]
    module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    ## callback.h (module 'core'): ns3::CallbackValue [class]
    module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## data-calculator.h (module 'stats'): ns3::DataCalculator [class]
    module.add_class('DataCalculator', parent=root_module['ns3::Object'])
    ## data-collection-object.h (module 'stats'): ns3::DataCollectionObject [class]
    module.add_class('DataCollectionObject', parent=root_module['ns3::Object'])
    ## data-collector.h (module 'stats'): ns3::DataCollector [class]
    module.add_class('DataCollector', parent=root_module['ns3::Object'])
    ## data-output-interface.h (module 'stats'): ns3::DataOutputInterface [class]
    module.add_class('DataOutputInterface', parent=root_module['ns3::Object'])
    ## double.h (module 'core'): ns3::DoubleValue [class]
    module.add_class('DoubleValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## attribute.h (module 'core'): ns3::EmptyAttributeAccessor [class]
    module.add_class('EmptyAttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::AttributeAccessor'])
    ## attribute.h (module 'core'): ns3::EmptyAttributeChecker [class]
    module.add_class('EmptyAttributeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
    module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## enum.h (module 'core'): ns3::EnumChecker [class]
    module.add_class('EnumChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## enum.h (module 'core'): ns3::EnumValue [class]
    module.add_class('EnumValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## event-impl.h (module 'core'): ns3::EventImpl [class]
    module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    ## file-aggregator.h (module 'stats'): ns3::FileAggregator [class]
    module.add_class('FileAggregator', parent=root_module['ns3::DataCollectionObject'])
    ## file-aggregator.h (module 'stats'): ns3::FileAggregator::FileType [enumeration]
    module.add_enum('FileType', ['FORMATTED', 'SPACE_SEPARATED', 'COMMA_SEPARATED', 'TAB_SEPARATED'], outer_class=root_module['ns3::FileAggregator'])
    ## gnuplot-aggregator.h (module 'stats'): ns3::GnuplotAggregator [class]
    module.add_class('GnuplotAggregator', parent=root_module['ns3::DataCollectionObject'])
    ## gnuplot-aggregator.h (module 'stats'): ns3::GnuplotAggregator::KeyLocation [enumeration]
    module.add_enum('KeyLocation', ['NO_KEY', 'KEY_INSIDE', 'KEY_ABOVE', 'KEY_BELOW'], outer_class=root_module['ns3::GnuplotAggregator'])
    ## integer.h (module 'core'): ns3::IntegerValue [class]
    module.add_class('IntegerValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## basic-data-calculators.h (module 'stats'): ns3::MinMaxAvgTotalCalculator<double> [class]
    module.add_class('MinMaxAvgTotalCalculator', parent=[root_module['ns3::DataCalculator'], root_module['ns3::StatisticalSummary']], template_parameters=['double'])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class]
    module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class]
    module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## omnet-data-output.h (module 'stats'): ns3::OmnetDataOutput [class]
    module.add_class('OmnetDataOutput', parent=root_module['ns3::DataOutputInterface'])
    ## probe.h (module 'stats'): ns3::Probe [class]
    module.add_class('Probe', parent=root_module['ns3::DataCollectionObject'])
    ## sqlite-output.h (module 'stats'): ns3::SQLiteOutput [class]
    module.add_class('SQLiteOutput', parent=root_module['ns3::SimpleRefCount< ns3::SQLiteOutput, ns3::empty, ns3::DefaultDeleter<ns3::SQLiteOutput> >'])
    ## sqlite-data-output.h (module 'stats'): ns3::SqliteDataOutput [class]
    module.add_class('SqliteDataOutput', parent=root_module['ns3::DataOutputInterface'])
    ## time-data-calculators.h (module 'stats'): ns3::TimeMinMaxAvgTotalCalculator [class]
    module.add_class('TimeMinMaxAvgTotalCalculator', parent=root_module['ns3::DataCalculator'])
    ## time-probe.h (module 'stats'): ns3::TimeProbe [class]
    module.add_class('TimeProbe', parent=root_module['ns3::Probe'])
    ## time-series-adaptor.h (module 'stats'): ns3::TimeSeriesAdaptor [class]
    module.add_class('TimeSeriesAdaptor', parent=root_module['ns3::DataCollectionObject'])
    typehandlers.add_type_alias('void ( * ) ( double const, double const )', 'ns3::TimeSeriesAdaptor::OutputTracedCallback')
    typehandlers.add_type_alias('void ( * ) ( double const, double const )*', 'ns3::TimeSeriesAdaptor::OutputTracedCallback*')
    typehandlers.add_type_alias('void ( * ) ( double const, double const )&', 'ns3::TimeSeriesAdaptor::OutputTracedCallback&')
    ## nstime.h (module 'core'): ns3::TimeValue [class]
    module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## type-id.h (module 'core'): ns3::TypeIdChecker [class]
    module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## type-id.h (module 'core'): ns3::TypeIdValue [class]
    module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## uinteger-16-probe.h (module 'stats'): ns3::Uinteger16Probe [class]
    module.add_class('Uinteger16Probe', parent=root_module['ns3::Probe'])
    ## uinteger-32-probe.h (module 'stats'): ns3::Uinteger32Probe [class]
    module.add_class('Uinteger32Probe', parent=root_module['ns3::Probe'])
    ## uinteger-8-probe.h (module 'stats'): ns3::Uinteger8Probe [class]
    module.add_class('Uinteger8Probe', parent=root_module['ns3::Probe'])
    ## uinteger.h (module 'core'): ns3::UintegerValue [class]
    module.add_class('UintegerValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## boolean-probe.h (module 'stats'): ns3::BooleanProbe [class]
    module.add_class('BooleanProbe', parent=root_module['ns3::Probe'])
    ## callback.h (module 'core'): ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> [class]
    module.add_class('CallbackImpl', import_from_module='ns.core', parent=root_module['ns3::CallbackImplBase'], template_parameters=['ns3::ObjectBase *', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'])
    ## callback.h (module 'core'): ns3::CallbackImpl<void, bool, bool, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> [class]
    module.add_class('CallbackImpl', import_from_module='ns.core', parent=root_module['ns3::CallbackImplBase'], template_parameters=['void', 'bool', 'bool', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'])
    ## callback.h (module 'core'): ns3::CallbackImpl<void, double, double, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> [class]
    module.add_class('CallbackImpl', import_from_module='ns.core', parent=root_module['ns3::CallbackImplBase'], template_parameters=['void', 'double', 'double', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'])
    ## callback.h (module 'core'): ns3::CallbackImpl<void, unsigned char, unsigned char, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> [class]
    module.add_class('CallbackImpl', import_from_module='ns.core', parent=root_module['ns3::CallbackImplBase'], template_parameters=['void', 'unsigned char', 'unsigned char', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'])
    ## callback.h (module 'core'): ns3::CallbackImpl<void, unsigned int, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> [class]
    module.add_class('CallbackImpl', import_from_module='ns.core', parent=root_module['ns3::CallbackImplBase'], template_parameters=['void', 'unsigned int', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'])
    ## callback.h (module 'core'): ns3::CallbackImpl<void, unsigned short, unsigned short, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> [class]
    module.add_class('CallbackImpl', import_from_module='ns.core', parent=root_module['ns3::CallbackImplBase'], template_parameters=['void', 'unsigned short', 'unsigned short', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'])
    ## double-probe.h (module 'stats'): ns3::DoubleProbe [class]
    module.add_class('DoubleProbe', parent=root_module['ns3::Probe'])
    module.add_container('std::map< std::string, ns3::LogComponent * >', ('std::string', 'ns3::LogComponent *'), container_type='map')
    typehandlers.add_type_alias('std::list< ns3::Ptr< ns3::DataCalculator > >', 'ns3::DataCalculatorList')
    typehandlers.add_type_alias('std::list< ns3::Ptr< ns3::DataCalculator > >*', 'ns3::DataCalculatorList*')
    typehandlers.add_type_alias('std::list< ns3::Ptr< ns3::DataCalculator > >&', 'ns3::DataCalculatorList&')
    typehandlers.add_type_alias('std::list< std::pair< std::string, std::string > >', 'ns3::MetadataList')
    typehandlers.add_type_alias('std::list< std::pair< std::string, std::string > >*', 'ns3::MetadataList*')
    typehandlers.add_type_alias('std::list< std::pair< std::string, std::string > >&', 'ns3::MetadataList&')
    typehandlers.add_type_alias('void ( * ) ( std::ostream & )', 'ns3::TimePrinter')
    typehandlers.add_type_alias('void ( * ) ( std::ostream & )*', 'ns3::TimePrinter*')
    typehandlers.add_type_alias('void ( * ) ( std::ostream & )&', 'ns3::TimePrinter&')
    typehandlers.add_type_alias('void ( * ) ( std::ostream & )', 'ns3::NodePrinter')
    typehandlers.add_type_alias('void ( * ) ( std::ostream & )*', 'ns3::NodePrinter*')
    typehandlers.add_type_alias('void ( * ) ( std::ostream & )&', 'ns3::NodePrinter&')
    ## Register a nested module for the namespace FatalImpl
    nested_module = module.add_cpp_namespace('FatalImpl')
    register_types_ns3_FatalImpl(nested_module)
    ## Register a nested module for the namespace Hash
    nested_module = module.add_cpp_namespace('Hash')
    register_types_ns3_Hash(nested_module)
    ## Register a nested module for the namespace TracedValueCallback
    nested_module = module.add_cpp_namespace('TracedValueCallback')
    register_types_ns3_TracedValueCallback(nested_module)
    ## Register a nested module for the namespace internal
    nested_module = module.add_cpp_namespace('internal')
    register_types_ns3_internal(nested_module)

def register_types_ns3_FatalImpl(module):
    """Register types in the ns3::FatalImpl namespace (none for this module)."""
    root_module = module.get_root()

def register_types_ns3_Hash(module):
    """Register hash-implementation types and hash-function-pointer aliases in ns3::Hash."""
    root_module = module.get_root()
    ## hash-function.h (module 'core'): ns3::Hash::Implementation [class]
    module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    typehandlers.add_type_alias('uint32_t ( * ) ( char const *, std::size_t const )', 'ns3::Hash::Hash32Function_ptr')
    typehandlers.add_type_alias('uint32_t ( * ) ( char const *, std::size_t const )*', 'ns3::Hash::Hash32Function_ptr*')
    typehandlers.add_type_alias('uint32_t ( * ) ( char const *, std::size_t const )&', 'ns3::Hash::Hash32Function_ptr&')
    typehandlers.add_type_alias('uint64_t ( * ) ( char const *, std::size_t const )', 'ns3::Hash::Hash64Function_ptr')
    typehandlers.add_type_alias('uint64_t ( * ) ( char const *, std::size_t const )*', 'ns3::Hash::Hash64Function_ptr*')
    typehandlers.add_type_alias('uint64_t ( * ) ( char const *, std::size_t const )&', 'ns3::Hash::Hash64Function_ptr&')
    ## Register a nested module for the namespace Function
    nested_module = module.add_cpp_namespace('Function')
    register_types_ns3_Hash_Function(nested_module)

def register_types_ns3_Hash_Function(module):
    """Register the concrete hash implementations in ns3::Hash::Function."""
    root_module = module.get_root()
    ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class]
    module.add_class('Fnv1a', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class]
    module.add_class('Hash32', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class]
    module.add_class('Hash64', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class]
    module.add_class('Murmur3', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])

def register_types_ns3_TracedValueCallback(module):
    """Register function-pointer aliases (value, pointer and reference forms) for each traced type."""
    root_module = module.get_root()
    typehandlers.add_type_alias('void ( * ) ( ns3::Time, ns3::Time )', 'ns3::TracedValueCallback::Time')
    typehandlers.add_type_alias('void ( * ) ( ns3::Time, ns3::Time )*', 'ns3::TracedValueCallback::Time*')
    typehandlers.add_type_alias('void ( * ) ( ns3::Time, ns3::Time )&', 'ns3::TracedValueCallback::Time&')
    typehandlers.add_type_alias('void ( * ) ( bool, bool )', 'ns3::TracedValueCallback::Bool')
    typehandlers.add_type_alias('void ( * ) ( bool, bool )*', 'ns3::TracedValueCallback::Bool*')
    typehandlers.add_type_alias('void ( * ) ( bool, bool )&', 'ns3::TracedValueCallback::Bool&')
    typehandlers.add_type_alias('void ( * ) ( int8_t, int8_t )', 'ns3::TracedValueCallback::Int8')
    typehandlers.add_type_alias('void ( * ) ( int8_t, int8_t )*', 'ns3::TracedValueCallback::Int8*')
    typehandlers.add_type_alias('void ( * ) ( int8_t, int8_t )&', 'ns3::TracedValueCallback::Int8&')
    typehandlers.add_type_alias('void ( * ) ( uint8_t, uint8_t )', 'ns3::TracedValueCallback::Uint8')
    typehandlers.add_type_alias('void ( * ) ( uint8_t, uint8_t )*', 'ns3::TracedValueCallback::Uint8*')
    typehandlers.add_type_alias('void ( * ) ( uint8_t, uint8_t )&', 'ns3::TracedValueCallback::Uint8&')
    typehandlers.add_type_alias('void ( * ) ( int16_t, int16_t )', 'ns3::TracedValueCallback::Int16')
    typehandlers.add_type_alias('void ( * ) ( int16_t, int16_t )*', 'ns3::TracedValueCallback::Int16*')
    typehandlers.add_type_alias('void ( * ) ( int16_t, int16_t )&', 'ns3::TracedValueCallback::Int16&')
    typehandlers.add_type_alias('void ( * ) ( uint16_t, uint16_t )', 'ns3::TracedValueCallback::Uint16')
    typehandlers.add_type_alias('void ( * ) ( uint16_t, uint16_t )*', 'ns3::TracedValueCallback::Uint16*')
    typehandlers.add_type_alias('void ( * ) ( uint16_t, uint16_t )&', 'ns3::TracedValueCallback::Uint16&')
    typehandlers.add_type_alias('void ( * ) ( int32_t, int32_t )', 'ns3::TracedValueCallback::Int32')
    typehandlers.add_type_alias('void ( * ) ( int32_t, int32_t )*', 'ns3::TracedValueCallback::Int32*')
    typehandlers.add_type_alias('void ( * ) ( int32_t, int32_t )&', 'ns3::TracedValueCallback::Int32&')
    typehandlers.add_type_alias('void ( * ) ( uint32_t, uint32_t )', 'ns3::TracedValueCallback::Uint32')
    typehandlers.add_type_alias('void ( * ) ( uint32_t, uint32_t )*', 'ns3::TracedValueCallback::Uint32*')
    typehandlers.add_type_alias('void ( * ) ( uint32_t, uint32_t )&', 'ns3::TracedValueCallback::Uint32&')
    typehandlers.add_type_alias('void ( * ) ( int64_t, int64_t )', 'ns3::TracedValueCallback::Int64')
    typehandlers.add_type_alias('void ( * ) ( int64_t, int64_t )*', 'ns3::TracedValueCallback::Int64*')
    typehandlers.add_type_alias('void ( * ) ( int64_t, int64_t )&', 'ns3::TracedValueCallback::Int64&')
    typehandlers.add_type_alias('void ( * ) ( uint64_t, uint64_t )', 'ns3::TracedValueCallback::Uint64')
    typehandlers.add_type_alias('void ( * ) ( uint64_t, uint64_t )*', 'ns3::TracedValueCallback::Uint64*')
    typehandlers.add_type_alias('void ( * ) ( uint64_t, uint64_t )&', 'ns3::TracedValueCallback::Uint64&')
    typehandlers.add_type_alias('void ( * ) ( double, double )', 'ns3::TracedValueCallback::Double')
    typehandlers.add_type_alias('void ( * ) ( double, double )*', 'ns3::TracedValueCallback::Double*')
    typehandlers.add_type_alias('void ( * ) ( double, double )&', 'ns3::TracedValueCallback::Double&')
    typehandlers.add_type_alias('void ( * ) ( )', 'ns3::TracedValueCallback::Void')
    typehandlers.add_type_alias('void ( * ) ( )*', 'ns3::TracedValueCallback::Void*')
    typehandlers.add_type_alias('void ( * ) ( )&', 'ns3::TracedValueCallback::Void&')

def register_types_ns3_internal(module):
    """Register types in the ns3::internal namespace (none for this module)."""
    root_module = module.get_root()

def register_methods(root_module):
    """Attach method wrappers to every class registered above (continues below)."""
    register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
    # Continuation of register_methods: core/stats utility classes.
    register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
    register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
    register_Ns3DataOutputCallback_methods(root_module, root_module['ns3::DataOutputCallback'])
    register_Ns3DefaultDeleter__Ns3AttributeAccessor_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeAccessor >'])
    register_Ns3DefaultDeleter__Ns3AttributeChecker_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeChecker >'])
    register_Ns3DefaultDeleter__Ns3AttributeValue_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeValue >'])
    register_Ns3DefaultDeleter__Ns3CallbackImplBase_methods(root_module, root_module['ns3::DefaultDeleter< ns3::CallbackImplBase >'])
    register_Ns3DefaultDeleter__Ns3EventImpl_methods(root_module, root_module['ns3::DefaultDeleter< ns3::EventImpl >'])
    register_Ns3DefaultDeleter__Ns3HashImplementation_methods(root_module, root_module['ns3::DefaultDeleter< ns3::Hash::Implementation >'])
    register_Ns3DefaultDeleter__Ns3SQLiteOutput_methods(root_module, root_module['ns3::DefaultDeleter< ns3::SQLiteOutput >'])
    register_Ns3DefaultDeleter__Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::DefaultDeleter< ns3::TraceSourceAccessor >'])
    register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
    register_Ns3FileHelper_methods(root_module, root_module['ns3::FileHelper'])
    register_Ns3Gnuplot_methods(root_module, root_module['ns3::Gnuplot'])
    register_Ns3GnuplotCollection_methods(root_module, root_module['ns3::GnuplotCollection'])
    register_Ns3GnuplotDataset_methods(root_module, root_module['ns3::GnuplotDataset'])
    register_Ns3GnuplotHelper_methods(root_module, root_module['ns3::GnuplotHelper'])
    register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
    register_Ns3Histogram_methods(root_module, root_module['ns3::Histogram'])
    register_Ns3LogComponent_methods(root_module, root_module['ns3::LogComponent'])
    register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
    register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
    register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
    register_Ns3ParameterLogger_methods(root_module, root_module['ns3::ParameterLogger'])
    register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
    register_Ns3StatisticalSummary_methods(root_module, root_module['ns3::StatisticalSummary'])
    register_Ns3Time_methods(root_module, root_module['ns3::Time'])
    register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit'])
    register_Ns3TracedValue__Bool_methods(root_module, root_module['ns3::TracedValue< bool >'])
    register_Ns3TracedValue__Double_methods(root_module, root_module['ns3::TracedValue< double >'])
    register_Ns3TracedValue__Unsigned_char_methods(root_module, root_module['ns3::TracedValue< unsigned char >'])
    register_Ns3TracedValue__Unsigned_int_methods(root_module, root_module['ns3::TracedValue< unsigned int >'])
    register_Ns3TracedValue__Unsigned_short_methods(root_module, root_module['ns3::TracedValue< unsigned short >'])
    register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
    register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
    register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
    register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
    register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
    register_Ns3Gnuplot2dDataset_methods(root_module, root_module['ns3::Gnuplot2dDataset'])
    register_Ns3Gnuplot2dFunction_methods(root_module, root_module['ns3::Gnuplot2dFunction'])
    # Continuation of register_methods: Object hierarchy, SimpleRefCount and
    # CallbackImpl template instantiations, probes and hash functions.
    register_Ns3Gnuplot3dDataset_methods(root_module, root_module['ns3::Gnuplot3dDataset'])
    register_Ns3Gnuplot3dFunction_methods(root_module, root_module['ns3::Gnuplot3dFunction'])
    register_Ns3Object_methods(root_module, root_module['ns3::Object'])
    register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
    register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    register_Ns3SimpleRefCount__Ns3SQLiteOutput_Ns3Empty_Ns3DefaultDeleter__lt__ns3SQLiteOutput__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::SQLiteOutput, ns3::empty, ns3::DefaultDeleter<ns3::SQLiteOutput> >'])
    register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
    register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
    register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
    register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
    register_Ns3BooleanChecker_methods(root_module, root_module['ns3::BooleanChecker'])
    register_Ns3BooleanValue_methods(root_module, root_module['ns3::BooleanValue'])
    register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
    register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
    register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
    register_Ns3DataCalculator_methods(root_module, root_module['ns3::DataCalculator'])
    register_Ns3DataCollectionObject_methods(root_module, root_module['ns3::DataCollectionObject'])
    register_Ns3DataCollector_methods(root_module, root_module['ns3::DataCollector'])
    register_Ns3DataOutputInterface_methods(root_module, root_module['ns3::DataOutputInterface'])
    register_Ns3DoubleValue_methods(root_module, root_module['ns3::DoubleValue'])
    register_Ns3EmptyAttributeAccessor_methods(root_module, root_module['ns3::EmptyAttributeAccessor'])
    register_Ns3EmptyAttributeChecker_methods(root_module, root_module['ns3::EmptyAttributeChecker'])
    register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
    register_Ns3EnumChecker_methods(root_module, root_module['ns3::EnumChecker'])
    register_Ns3EnumValue_methods(root_module, root_module['ns3::EnumValue'])
    register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
    register_Ns3FileAggregator_methods(root_module, root_module['ns3::FileAggregator'])
    register_Ns3GnuplotAggregator_methods(root_module, root_module['ns3::GnuplotAggregator'])
    register_Ns3IntegerValue_methods(root_module, root_module['ns3::IntegerValue'])
    register_Ns3MinMaxAvgTotalCalculator__Double_methods(root_module, root_module['ns3::MinMaxAvgTotalCalculator< double >'])
    register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
    register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
    register_Ns3OmnetDataOutput_methods(root_module, root_module['ns3::OmnetDataOutput'])
    register_Ns3Probe_methods(root_module, root_module['ns3::Probe'])
    register_Ns3SQLiteOutput_methods(root_module, root_module['ns3::SQLiteOutput'])
    register_Ns3SqliteDataOutput_methods(root_module, root_module['ns3::SqliteDataOutput'])
    register_Ns3TimeMinMaxAvgTotalCalculator_methods(root_module, root_module['ns3::TimeMinMaxAvgTotalCalculator'])
    register_Ns3TimeProbe_methods(root_module, root_module['ns3::TimeProbe'])
    register_Ns3TimeSeriesAdaptor_methods(root_module, root_module['ns3::TimeSeriesAdaptor'])
    register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
    register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
    register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
    register_Ns3Uinteger16Probe_methods(root_module, root_module['ns3::Uinteger16Probe'])
    register_Ns3Uinteger32Probe_methods(root_module, root_module['ns3::Uinteger32Probe'])
    register_Ns3Uinteger8Probe_methods(root_module, root_module['ns3::Uinteger8Probe'])
    register_Ns3UintegerValue_methods(root_module, root_module['ns3::UintegerValue'])
    register_Ns3BooleanProbe_methods(root_module, root_module['ns3::BooleanProbe'])
    register_Ns3CallbackImpl__Ns3ObjectBase___star___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Bool_Bool_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, bool, bool, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Double_Double_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, double, double, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Unsigned_char_Unsigned_char_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, unsigned char, unsigned char, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Unsigned_int_Unsigned_int_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, unsigned int, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Unsigned_short_Unsigned_short_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, unsigned short, unsigned short, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3DoubleProbe_methods(root_module, root_module['ns3::DoubleProbe'])
    register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
    register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
    register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
    register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
    register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
    return

def register_Ns3AttributeConstructionList_methods(root_module, cls):
    # Registers constructors and member functions of ns3::AttributeConstructionList.
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<const ns3::AttributeChecker> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
    cls.add_method('Add', 'void', [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::CIterator ns3::AttributeConstructionList::Begin() const [member function]
    cls.add_method('Begin', 'ns3::AttributeConstructionList::CIterator', [], is_const=True)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::CIterator ns3::AttributeConstructionList::End() const [member function]
    cls.add_method('End', 'ns3::AttributeConstructionList::CIterator', [], is_const=True)
    ## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
    cls.add_method('Find', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True)
    return

def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
    # Registers the nested AttributeConstructionList::Item record (plain data members).
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
    cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
    return

def register_Ns3CallbackBase_methods(root_module, cls):
    # Registers ns3::CallbackBase; note the Ptr<CallbackImplBase> constructor is protected.
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [constructor]
    cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
    cls.add_method('GetImpl', 'ns3::Ptr< ns3::CallbackImplBase >', [], is_const=True)
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')], visibility='protected')
    return

def register_Ns3DataOutputCallback_methods(root_module, cls):
    # Registers the abstract ns3::DataOutputCallback interface (pure virtual overloads below).
    ## data-output-interface.h (module 'stats'): ns3::DataOutputCallback::DataOutputCallback() [constructor]
    cls.add_constructor([])
    ## data-output-interface.h (module 'stats'): ns3::DataOutputCallback::DataOutputCallback(ns3::DataOutputCallback const & arg0) [constructor]
    cls.add_constructor([param('ns3::DataOutputCallback const &', 'arg0')])
    # OutputSingleton is overloaded once per value type (int, uint32_t, double,
    # std::string, ns3::Time); all overloads are pure virtual.
    ## data-output-interface.h (module 'stats'): void ns3::DataOutputCallback::OutputSingleton(std::string key, std::string variable, int val) [member function]
    cls.add_method('OutputSingleton', 'void', [param('std::string', 'key'), param('std::string', 'variable'), param('int', 'val')], is_pure_virtual=True, is_virtual=True)
    ## data-output-interface.h (module 'stats'): void ns3::DataOutputCallback::OutputSingleton(std::string key, std::string variable, uint32_t val) [member function]
    cls.add_method('OutputSingleton', 'void', [param('std::string', 'key'), param('std::string', 'variable'), param('uint32_t', 'val')], is_pure_virtual=True, is_virtual=True)
    ## data-output-interface.h (module 'stats'): void ns3::DataOutputCallback::OutputSingleton(std::string key, std::string variable, double val) [member function]
    cls.add_method('OutputSingleton', 'void', [param('std::string', 'key'), param('std::string', 'variable'), param('double', 'val')], is_pure_virtual=True, is_virtual=True)
    ## data-output-interface.h (module 'stats'): void ns3::DataOutputCallback::OutputSingleton(std::string key, std::string variable, std::string val) [member function]
    cls.add_method('OutputSingleton', 'void', [param('std::string', 'key'), param('std::string', 'variable'), param('std::string', 'val')], is_pure_virtual=True, is_virtual=True)
    ## data-output-interface.h (module 'stats'): void ns3::DataOutputCallback::OutputSingleton(std::string key, std::string variable, ns3::Time val) [member function]
    cls.add_method('OutputSingleton', 'void', [param('std::string', 'key'), param('std::string', 'variable'), param('ns3::Time', 'val')], is_pure_virtual=True, is_virtual=True)
    ## data-output-interface.h (module 'stats'): void ns3::DataOutputCallback::OutputStatistic(std::string key, std::string variable, ns3::StatisticalSummary const * statSum) [member function]
    cls.add_method('OutputStatistic', 'void', [param('std::string', 'key'), param('std::string', 'variable'), param('ns3::StatisticalSummary const *', 'statSum')], is_pure_virtual=True, is_virtual=True)
    return

def register_Ns3DefaultDeleter__Ns3AttributeAccessor_methods(root_module, cls):
    # DefaultDeleter<AttributeAccessor>: constructors plus the static Delete hook.
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeAccessor>::DefaultDeleter() [constructor]
    cls.add_constructor([])
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeAccessor>::DefaultDeleter(ns3::DefaultDeleter<ns3::AttributeAccessor> const & arg0) [constructor]
    cls.add_constructor([param('ns3::DefaultDeleter< ns3::AttributeAccessor > const &', 'arg0')])
    ## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::AttributeAccessor>::Delete(ns3::AttributeAccessor * object) [member function]
    cls.add_method('Delete', 'void', [param('ns3::AttributeAccessor *', 'object')], is_static=True)
    return

def register_Ns3DefaultDeleter__Ns3AttributeChecker_methods(root_module, cls):
    # DefaultDeleter<AttributeChecker>: same shape as the other deleter instantiations.
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeChecker>::DefaultDeleter() [constructor]
    cls.add_constructor([])
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeChecker>::DefaultDeleter(ns3::DefaultDeleter<ns3::AttributeChecker> const & arg0) [constructor]
    cls.add_constructor([param('ns3::DefaultDeleter< ns3::AttributeChecker > const &', 'arg0')])
    ## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::AttributeChecker>::Delete(ns3::AttributeChecker * object) [member function]
    cls.add_method('Delete', 'void', [param('ns3::AttributeChecker *', 'object')], is_static=True)
    return

def register_Ns3DefaultDeleter__Ns3AttributeValue_methods(root_module, cls):
    # DefaultDeleter<AttributeValue>.
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeValue>::DefaultDeleter() [constructor]
    cls.add_constructor([])
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeValue>::DefaultDeleter(ns3::DefaultDeleter<ns3::AttributeValue> const & arg0) [constructor]
    cls.add_constructor([param('ns3::DefaultDeleter< ns3::AttributeValue > const &', 'arg0')])
    ## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::AttributeValue>::Delete(ns3::AttributeValue * object) [member function]
    cls.add_method('Delete', 'void', [param('ns3::AttributeValue *', 'object')], is_static=True)
    return

def register_Ns3DefaultDeleter__Ns3CallbackImplBase_methods(root_module, cls):
    # DefaultDeleter<CallbackImplBase>.
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::CallbackImplBase>::DefaultDeleter() [constructor]
    cls.add_constructor([])
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::CallbackImplBase>::DefaultDeleter(ns3::DefaultDeleter<ns3::CallbackImplBase> const & arg0) [constructor]
    cls.add_constructor([param('ns3::DefaultDeleter< ns3::CallbackImplBase > const &', 'arg0')])
    ## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::CallbackImplBase>::Delete(ns3::CallbackImplBase * object) [member function]
    cls.add_method('Delete', 'void', [param('ns3::CallbackImplBase *', 'object')], is_static=True)
    return

def register_Ns3DefaultDeleter__Ns3EventImpl_methods(root_module, cls):
    # DefaultDeleter<EventImpl>.
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::EventImpl>::DefaultDeleter() [constructor]
    cls.add_constructor([])
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::EventImpl>::DefaultDeleter(ns3::DefaultDeleter<ns3::EventImpl> const & arg0) [constructor]
    cls.add_constructor([param('ns3::DefaultDeleter< ns3::EventImpl > const &', 'arg0')])
    ## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::EventImpl>::Delete(ns3::EventImpl * object) [member function]
    cls.add_method('Delete', 'void', [param('ns3::EventImpl *', 'object')], is_static=True)
    return

def register_Ns3DefaultDeleter__Ns3HashImplementation_methods(root_module, cls):
    # DefaultDeleter<Hash::Implementation>.
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::Hash::Implementation>::DefaultDeleter() [constructor]
    cls.add_constructor([])
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::Hash::Implementation>::DefaultDeleter(ns3::DefaultDeleter<ns3::Hash::Implementation> const & arg0) [constructor]
    cls.add_constructor([param('ns3::DefaultDeleter< ns3::Hash::Implementation > const &', 'arg0')])
    ## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::Hash::Implementation>::Delete(ns3::Hash::Implementation * object) [member function]
    cls.add_method('Delete', 'void', [param('ns3::Hash::Implementation *', 'object')], is_static=True)
    return

def register_Ns3DefaultDeleter__Ns3SQLiteOutput_methods(root_module, cls):
    # DefaultDeleter<SQLiteOutput>.
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::SQLiteOutput>::DefaultDeleter() [constructor]
    cls.add_constructor([])
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::SQLiteOutput>::DefaultDeleter(ns3::DefaultDeleter<ns3::SQLiteOutput> const & arg0) [constructor]
    cls.add_constructor([param('ns3::DefaultDeleter< ns3::SQLiteOutput > const &', 'arg0')])
    ## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::SQLiteOutput>::Delete(ns3::SQLiteOutput * object) [member function]
    cls.add_method('Delete', 'void', [param('ns3::SQLiteOutput *', 'object')], is_static=True)
    return

def register_Ns3DefaultDeleter__Ns3TraceSourceAccessor_methods(root_module, cls):
    # DefaultDeleter<TraceSourceAccessor>.
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::TraceSourceAccessor>::DefaultDeleter() [constructor]
    cls.add_constructor([])
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::TraceSourceAccessor>::DefaultDeleter(ns3::DefaultDeleter<ns3::TraceSourceAccessor> const & arg0) [constructor]
    cls.add_constructor([param('ns3::DefaultDeleter< ns3::TraceSourceAccessor > const &', 'arg0')])
    ## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::TraceSourceAccessor>::Delete(ns3::TraceSourceAccessor * object) [member function]
    cls.add_method('Delete', 'void', [param('ns3::TraceSourceAccessor *', 'object')], is_static=True)
    return

def register_Ns3EventId_methods(root_module, cls):
    # Registers ns3::EventId, including its ==/!=/< comparison operators.
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('<')
    ## event-id.h (module 'core'): ns3::EventId::EventId(ns3::EventId const & arg0) [constructor]
    cls.add_constructor([param('ns3::EventId const &', 'arg0')])
    ## event-id.h (module 'core'): ns3::EventId::EventId() [constructor]
    cls.add_constructor([])
    ## event-id.h (module 'core'): ns3::EventId::EventId(ns3::Ptr<ns3::EventImpl> const & impl, uint64_t ts, uint32_t context, uint32_t uid) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'), param('uint64_t', 'ts'), param('uint32_t', 'context'), param('uint32_t', 'uid')])
    ## event-id.h (module 'core'): void ns3::EventId::Cancel() [member function]
    cls.add_method('Cancel', 'void', [])
    ## event-id.h (module 'core'): uint32_t ns3::EventId::GetContext() const [member function]
    cls.add_method('GetContext', 'uint32_t', [], is_const=True)
    ## event-id.h (module 'core'): uint64_t ns3::EventId::GetTs() const [member function]
    cls.add_method('GetTs', 'uint64_t', [], is_const=True)
    ## event-id.h (module 'core'): uint32_t ns3::EventId::GetUid() const [member function]
    cls.add_method('GetUid', 'uint32_t', [], is_const=True)
    ## event-id.h (module 'core'): bool ns3::EventId::IsExpired() const [member function]
    cls.add_method('IsExpired', 'bool', [], is_const=True)
    ## event-id.h (module 'core'): bool ns3::EventId::IsRunning() const [member function]
    cls.add_method('IsRunning', 'bool', [], is_const=True)
    ## event-id.h (module 'core'): ns3::EventImpl * ns3::EventId::PeekEventImpl() const [member function]
    cls.add_method('PeekEventImpl', 'ns3::EventImpl *', [], is_const=True)
    ## event-id.h (module 'core'): void ns3::EventId::Remove() [member function]
    cls.add_method('Remove', 'void', [])
    return

def register_Ns3FileHelper_methods(root_module, cls):
    # Registers ns3::FileHelper (stats module file-output helper).
    ## file-helper.h (module 'stats'): ns3::FileHelper::FileHelper(ns3::FileHelper const & arg0) [constructor]
    cls.add_constructor([param('ns3::FileHelper const &', 'arg0')])
    ## file-helper.h (module 'stats'): ns3::FileHelper::FileHelper() [constructor]
    cls.add_constructor([])
    ## file-helper.h (module 'stats'): ns3::FileHelper::FileHelper(std::string const & outputFileNameWithoutExtension, ns3::FileAggregator::FileType fileType=::ns3::FileAggregator::FileType::SPACE_SEPARATED) [constructor]
    cls.add_constructor([param('std::string const &', 'outputFileNameWithoutExtension'), param('ns3::FileAggregator::FileType', 'fileType', default_value='::ns3::FileAggregator::FileType::SPACE_SEPARATED')])
    ## file-helper.h (module 'stats'): void ns3::FileHelper::AddAggregator(std::string const & aggregatorName, std::string const & outputFileName, bool onlyOneAggregator) [member function]
    cls.add_method('AddAggregator', 'void', [param('std::string const &', 'aggregatorName'), param('std::string const &', 'outputFileName'), param('bool', 'onlyOneAggregator')])
    ## file-helper.h (module 'stats'): void ns3::FileHelper::AddTimeSeriesAdaptor(std::string const & adaptorName) [member function]
    cls.add_method('AddTimeSeriesAdaptor', 'void', [param('std::string const &', 'adaptorName')])
    ## file-helper.h (module 'stats'): void ns3::FileHelper::ConfigureFile(std::string const & outputFileNameWithoutExtension, ns3::FileAggregator::FileType fileType=::ns3::FileAggregator::FileType::SPACE_SEPARATED) [member function]
    cls.add_method('ConfigureFile', 'void', [param('std::string const &', 'outputFileNameWithoutExtension'), param('ns3::FileAggregator::FileType', 'fileType', default_value='::ns3::FileAggregator::FileType::SPACE_SEPARATED')])
    ## file-helper.h (module 'stats'): ns3::Ptr<ns3::FileAggregator> ns3::FileHelper::GetAggregatorMultiple(std::string const & aggregatorName, std::string const & outputFileName) [member function]
    cls.add_method('GetAggregatorMultiple', 'ns3::Ptr< ns3::FileAggregator >', [param('std::string const &', 'aggregatorName'), param('std::string const &', 'outputFileName')])
    ## file-helper.h (module 'stats'): ns3::Ptr<ns3::FileAggregator> ns3::FileHelper::GetAggregatorSingle() [member function]
    cls.add_method('GetAggregatorSingle', 'ns3::Ptr< ns3::FileAggregator >', [])
    ## file-helper.h (module 'stats'): ns3::Ptr<ns3::Probe> ns3::FileHelper::GetProbe(std::string probeName) const [member function]
    cls.add_method('GetProbe', 'ns3::Ptr< ns3::Probe >', [param('std::string', 'probeName')], is_const=True)
    # SetNdFormat setters (1d..10d) all share the same single-string signature.
    ## file-helper.h (module 'stats'): void ns3::FileHelper::Set10dFormat(std::string const & format) [member function]
    cls.add_method('Set10dFormat', 'void', [param('std::string const &', 'format')])
    ## file-helper.h (module 'stats'): void ns3::FileHelper::Set1dFormat(std::string const & format) [member function]
    cls.add_method('Set1dFormat', 'void', [param('std::string const &', 'format')])
    ## file-helper.h (module 'stats'): void ns3::FileHelper::Set2dFormat(std::string const & format) [member function]
    cls.add_method('Set2dFormat', 'void', [param('std::string const &', 'format')])
    ## file-helper.h (module 'stats'): void ns3::FileHelper::Set3dFormat(std::string const & format) [member function]
    cls.add_method('Set3dFormat', 'void', [param('std::string const &', 'format')])
    ## file-helper.h (module 'stats'): void ns3::FileHelper::Set4dFormat(std::string const & format) [member function]
    cls.add_method('Set4dFormat', 'void', [param('std::string const &', 'format')])
    ## file-helper.h (module 'stats'): void ns3::FileHelper::Set5dFormat(std::string const & format) [member function]
    cls.add_method('Set5dFormat', 'void', [param('std::string const &', 'format')])
    ## file-helper.h (module 'stats'): void ns3::FileHelper::Set6dFormat(std::string const & format) [member function]
    cls.add_method('Set6dFormat', 'void', [param('std::string const &', 'format')])
    ## file-helper.h (module 'stats'): void ns3::FileHelper::Set7dFormat(std::string const & format) [member function]
    cls.add_method('Set7dFormat', 'void', [param('std::string const &', 'format')])
    ## file-helper.h (module 'stats'): void ns3::FileHelper::Set8dFormat(std::string const & format) [member function]
    cls.add_method('Set8dFormat', 'void', [param('std::string const &', 'format')])
    ## file-helper.h (module 'stats'): void ns3::FileHelper::Set9dFormat(std::string const & format) [member function]
    cls.add_method('Set9dFormat', 'void', [param('std::string const &', 'format')])
    ## file-helper.h (module 'stats'): void ns3::FileHelper::SetHeading(std::string const & heading) [member function]
    cls.add_method('SetHeading', 'void', [param('std::string const &', 'heading')])
    ## file-helper.h (module 'stats'): void ns3::FileHelper::WriteProbe(std::string const & typeId, std::string const & path, std::string const & probeTraceSource) [member function]
    cls.add_method('WriteProbe', 'void', [param('std::string const &', 'typeId'), param('std::string const &', 'path'), param('std::string const &', 'probeTraceSource')])
    return

def register_Ns3Gnuplot_methods(root_module, cls):
    # Registers ns3::Gnuplot (plot-file generator).
    ## gnuplot.h (module 'stats'): ns3::Gnuplot::Gnuplot(ns3::Gnuplot const & arg0) [constructor]
    cls.add_constructor([param('ns3::Gnuplot const &', 'arg0')])
    ## gnuplot.h (module 'stats'): ns3::Gnuplot::Gnuplot(std::string const & outputFilename="", std::string const & title="") [constructor]
    cls.add_constructor([param('std::string const &', 'outputFilename', default_value='""'), param('std::string const &', 'title', default_value='""')])
    ## gnuplot.h (module 'stats'): void ns3::Gnuplot::AddDataset(ns3::GnuplotDataset const & dataset) [member function]
    cls.add_method('AddDataset', 'void', [param('ns3::GnuplotDataset const &', 'dataset')])
    ## gnuplot.h (module 'stats'): void ns3::Gnuplot::AppendExtra(std::string const & extra) [member function]
    cls.add_method('AppendExtra', 'void', [param('std::string const &', 'extra')])
    ## gnuplot.h (module 'stats'): static std::string ns3::Gnuplot::DetectTerminal(std::string const & filename) [member function]
    cls.add_method('DetectTerminal', 'std::string', [param('std::string const &', 'filename')], is_static=True)
    ## gnuplot.h (module 'stats'): void ns3::Gnuplot::GenerateOutput(std::ostream & os) [member function]
    cls.add_method('GenerateOutput', 'void', [param('std::ostream &', 'os')])
    ## gnuplot.h (module 'stats'): void ns3::Gnuplot::GenerateOutput(std::ostream & osControl, std::ostream & osData, std::string dataFileName) [member function]
    cls.add_method('GenerateOutput', 'void', [param('std::ostream &', 'osControl'), param('std::ostream &', 'osData'), param('std::string', 'dataFileName')])
    ## gnuplot.h (module 'stats'): void ns3::Gnuplot::SetDataFileDatasetIndex(unsigned int index) [member function]
    cls.add_method('SetDataFileDatasetIndex', 'void', [param('unsigned int', 'index')])
    ## gnuplot.h (module 'stats'): void ns3::Gnuplot::SetExtra(std::string const & extra) [member function]
    cls.add_method('SetExtra', 'void', [param('std::string const &', 'extra')])
    ## gnuplot.h (module 'stats'): void ns3::Gnuplot::SetLegend(std::string const & xLegend, std::string const & yLegend) [member function]
    cls.add_method('SetLegend', 'void', [param('std::string const &', 'xLegend'), param('std::string const &', 'yLegend')])
    ## gnuplot.h (module 'stats'): void ns3::Gnuplot::SetOutputFilename(std::string const & outputFilename) [member function]
    cls.add_method('SetOutputFilename', 'void', [param('std::string const &', 'outputFilename')])
    ## gnuplot.h (module 'stats'): void ns3::Gnuplot::SetTerminal(std::string const & terminal) [member function]
    cls.add_method('SetTerminal', 'void', [param('std::string const &', 'terminal')])
    ## gnuplot.h (module 'stats'): void ns3::Gnuplot::SetTitle(std::string const & title) [member function]
    cls.add_method('SetTitle', 'void', [param('std::string const &', 'title')])
    return

def register_Ns3GnuplotCollection_methods(root_module, cls):
    # Registers ns3::GnuplotCollection (a set of plots sharing one output file).
    ## gnuplot.h (module 'stats'): ns3::GnuplotCollection::GnuplotCollection(ns3::GnuplotCollection const & arg0) [constructor]
    cls.add_constructor([param('ns3::GnuplotCollection const &', 'arg0')])
    ## gnuplot.h (module 'stats'): ns3::GnuplotCollection::GnuplotCollection(std::string const & outputFilename) [constructor]
    cls.add_constructor([param('std::string const &', 'outputFilename')])
    ## gnuplot.h (module 'stats'): void ns3::GnuplotCollection::AddPlot(ns3::Gnuplot const & plot) [member function]
    cls.add_method('AddPlot', 'void', [param('ns3::Gnuplot const &', 'plot')])
    ## gnuplot.h (module 'stats'): void ns3::GnuplotCollection::GenerateOutput(std::ostream & os) [member function]
    cls.add_method('GenerateOutput', 'void', [param('std::ostream &', 'os')])
    ## gnuplot.h (module 'stats'): void ns3::GnuplotCollection::GenerateOutput(std::ostream & osControl, std::ostream & osData, std::string dataFileName) [member function]
    cls.add_method('GenerateOutput', 'void', [param('std::ostream &', 'osControl'), param('std::ostream &', 'osData'), param('std::string', 'dataFileName')])
    ## gnuplot.h (module 'stats'): ns3::Gnuplot & ns3::GnuplotCollection::GetPlot(unsigned int id) [member function]
    cls.add_method('GetPlot', 'ns3::Gnuplot &', [param('unsigned int', 'id')])
    ## gnuplot.h (module 'stats'): void ns3::GnuplotCollection::SetTerminal(std::string const & terminal) [member function]
    cls.add_method('SetTerminal', 'void', [param('std::string const &', 'terminal')])
    return

def register_Ns3GnuplotDataset_methods(root_module, cls):
    # Registers the abstract ns3::GnuplotDataset base; the Data* constructor is protected.
    ## gnuplot.h (module 'stats'): ns3::GnuplotDataset::GnuplotDataset(ns3::GnuplotDataset const & original) [constructor]
    cls.add_constructor([param('ns3::GnuplotDataset const &', 'original')])
    ## gnuplot.h (module 'stats'): static void ns3::GnuplotDataset::SetDefaultExtra(std::string const & extra) [member function]
    cls.add_method('SetDefaultExtra', 'void', [param('std::string const &', 'extra')], is_static=True)
    ## gnuplot.h (module 'stats'): void ns3::GnuplotDataset::SetExtra(std::string const & extra) [member function]
    cls.add_method('SetExtra', 'void', [param('std::string const &', 'extra')])
    ## gnuplot.h (module 'stats'): void ns3::GnuplotDataset::SetTitle(std::string const & title) [member function]
    cls.add_method('SetTitle', 'void', [param('std::string const &', 'title')])
    ## gnuplot.h (module 'stats'): ns3::GnuplotDataset::GnuplotDataset(ns3::GnuplotDataset::Data * data) [constructor]
    cls.add_constructor([param('ns3::GnuplotDataset::Data *', 'data')], visibility='protected')
    return

def register_Ns3GnuplotHelper_methods(root_module, cls):
    # Registers ns3::GnuplotHelper (probe-to-plot convenience wrapper).
    ## gnuplot-helper.h (module 'stats'): ns3::GnuplotHelper::GnuplotHelper(ns3::GnuplotHelper const & arg0) [constructor]
    cls.add_constructor([param('ns3::GnuplotHelper const &', 'arg0')])
    ## gnuplot-helper.h (module 'stats'): ns3::GnuplotHelper::GnuplotHelper() [constructor]
    cls.add_constructor([])
    ## gnuplot-helper.h (module 'stats'): ns3::GnuplotHelper::GnuplotHelper(std::string const & outputFileNameWithoutExtension, std::string const & title, std::string const & xLegend, std::string const & yLegend, std::string const & terminalType="png") [constructor]
    cls.add_constructor([param('std::string const &', 'outputFileNameWithoutExtension'), param('std::string const &', 'title'), param('std::string const &', 'xLegend'), param('std::string const &', 'yLegend'), param('std::string const &', 'terminalType', default_value='"png"')])
    ## gnuplot-helper.h (module 'stats'): void ns3::GnuplotHelper::AddTimeSeriesAdaptor(std::string const & adaptorName) [member function]
    cls.add_method('AddTimeSeriesAdaptor', 'void', [param('std::string const &', 'adaptorName')])
    ## gnuplot-helper.h (module 'stats'): void ns3::GnuplotHelper::ConfigurePlot(std::string const & outputFileNameWithoutExtension, std::string const & title, std::string const & xLegend, std::string const & yLegend, std::string const & terminalType="png") [member function]
    cls.add_method('ConfigurePlot', 'void', [param('std::string const &', 'outputFileNameWithoutExtension'), param('std::string const &', 'title'), param('std::string const &',
'xLegend'), param('std::string const &', 'yLegend'), param('std::string const &', 'terminalType', default_value='"png"')]) ## gnuplot-helper.h (module 'stats'): ns3::Ptr<ns3::GnuplotAggregator> ns3::GnuplotHelper::GetAggregator() [member function] cls.add_method('GetAggregator', 'ns3::Ptr< ns3::GnuplotAggregator >', []) ## gnuplot-helper.h (module 'stats'): ns3::Ptr<ns3::Probe> ns3::GnuplotHelper::GetProbe(std::string probeName) const [member function] cls.add_method('GetProbe', 'ns3::Ptr< ns3::Probe >', [param('std::string', 'probeName')], is_const=True) ## gnuplot-helper.h (module 'stats'): void ns3::GnuplotHelper::PlotProbe(std::string const & typeId, std::string const & path, std::string const & probeTraceSource, std::string const & title, ns3::GnuplotAggregator::KeyLocation keyLocation=::ns3::GnuplotAggregator::KeyLocation::KEY_INSIDE) [member function] cls.add_method('PlotProbe', 'void', [param('std::string const &', 'typeId'), param('std::string const &', 'path'), param('std::string const &', 'probeTraceSource'), param('std::string const &', 'title'), param('ns3::GnuplotAggregator::KeyLocation', 'keyLocation', default_value='::ns3::GnuplotAggregator::KeyLocation::KEY_INSIDE')]) return def register_Ns3Hasher_methods(root_module, cls): ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [constructor] cls.add_constructor([param('ns3::Hasher const &', 'arg0')]) ## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor] cls.add_constructor([]) ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')]) ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, std::size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('std::size_t const', 'size')]) ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member 
function] cls.add_method('GetHash32', 'uint32_t', [param('std::string const', 's')]) ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, std::size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('std::size_t const', 'size')]) ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function] cls.add_method('GetHash64', 'uint64_t', [param('std::string const', 's')]) ## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function] cls.add_method('clear', 'ns3::Hasher &', []) return def register_Ns3Histogram_methods(root_module, cls): ## histogram.h (module 'stats'): ns3::Histogram::Histogram(ns3::Histogram const & arg0) [constructor] cls.add_constructor([param('ns3::Histogram const &', 'arg0')]) ## histogram.h (module 'stats'): ns3::Histogram::Histogram(double binWidth) [constructor] cls.add_constructor([param('double', 'binWidth')]) ## histogram.h (module 'stats'): ns3::Histogram::Histogram() [constructor] cls.add_constructor([]) ## histogram.h (module 'stats'): void ns3::Histogram::AddValue(double value) [member function] cls.add_method('AddValue', 'void', [param('double', 'value')]) ## histogram.h (module 'stats'): uint32_t ns3::Histogram::GetBinCount(uint32_t index) [member function] cls.add_method('GetBinCount', 'uint32_t', [param('uint32_t', 'index')]) ## histogram.h (module 'stats'): double ns3::Histogram::GetBinEnd(uint32_t index) [member function] cls.add_method('GetBinEnd', 'double', [param('uint32_t', 'index')]) ## histogram.h (module 'stats'): double ns3::Histogram::GetBinStart(uint32_t index) [member function] cls.add_method('GetBinStart', 'double', [param('uint32_t', 'index')]) ## histogram.h (module 'stats'): double ns3::Histogram::GetBinWidth(uint32_t index) const [member function] cls.add_method('GetBinWidth', 'double', [param('uint32_t', 'index')], is_const=True) ## histogram.h (module 'stats'): uint32_t 
ns3::Histogram::GetNBins() const [member function] cls.add_method('GetNBins', 'uint32_t', [], is_const=True) ## histogram.h (module 'stats'): void ns3::Histogram::SerializeToXmlStream(std::ostream & os, uint16_t indent, std::string elementName) const [member function] cls.add_method('SerializeToXmlStream', 'void', [param('std::ostream &', 'os'), param('uint16_t', 'indent'), param('std::string', 'elementName')], is_const=True) ## histogram.h (module 'stats'): void ns3::Histogram::SetDefaultBinWidth(double binWidth) [member function] cls.add_method('SetDefaultBinWidth', 'void', [param('double', 'binWidth')]) return def register_Ns3LogComponent_methods(root_module, cls): ## log.h (module 'core'): ns3::LogComponent::LogComponent(ns3::LogComponent const & arg0) [constructor] cls.add_constructor([param('ns3::LogComponent const &', 'arg0')]) ## log.h (module 'core'): ns3::LogComponent::LogComponent(std::string const & name, std::string const & file, ns3::LogLevel const mask=::ns3::LogLevel::LOG_NONE) [constructor] cls.add_constructor([param('std::string const &', 'name'), param('std::string const &', 'file'), param('ns3::LogLevel const', 'mask', default_value='::ns3::LogLevel::LOG_NONE')]) ## log.h (module 'core'): void ns3::LogComponent::Disable(ns3::LogLevel const level) [member function] cls.add_method('Disable', 'void', [param('ns3::LogLevel const', 'level')]) ## log.h (module 'core'): void ns3::LogComponent::Enable(ns3::LogLevel const level) [member function] cls.add_method('Enable', 'void', [param('ns3::LogLevel const', 'level')]) ## log.h (module 'core'): std::string ns3::LogComponent::File() const [member function] cls.add_method('File', 'std::string', [], is_const=True) ## log.h (module 'core'): static ns3::LogComponent::ComponentList * ns3::LogComponent::GetComponentList() [member function] cls.add_method('GetComponentList', 'ns3::LogComponent::ComponentList *', [], is_static=True) ## log.h (module 'core'): static std::string 
ns3::LogComponent::GetLevelLabel(ns3::LogLevel const level) [member function] cls.add_method('GetLevelLabel', 'std::string', [param('ns3::LogLevel const', 'level')], is_static=True) ## log.h (module 'core'): bool ns3::LogComponent::IsEnabled(ns3::LogLevel const level) const [member function] cls.add_method('IsEnabled', 'bool', [param('ns3::LogLevel const', 'level')], is_const=True) ## log.h (module 'core'): bool ns3::LogComponent::IsNoneEnabled() const [member function] cls.add_method('IsNoneEnabled', 'bool', [], is_const=True) ## log.h (module 'core'): char const * ns3::LogComponent::Name() const [member function] cls.add_method('Name', 'char const *', [], is_const=True) ## log.h (module 'core'): void ns3::LogComponent::SetMask(ns3::LogLevel const level) [member function] cls.add_method('SetMask', 'void', [param('ns3::LogLevel const', 'level')]) return def register_Ns3ObjectBase_methods(root_module, cls): ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor] cls.add_constructor([]) ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [constructor] cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')]) ## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function] cls.add_method('GetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')], is_const=True) ## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & value) const [member function] cls.add_method('GetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')], is_const=True) ## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_pure_virtual=True, is_virtual=True) ## object-base.h (module 'core'): static 
ns3::TypeId ns3::ObjectBase::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function] cls.add_method('SetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function] cls.add_method('SetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceConnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceConnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceDisconnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceDisconnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function] cls.add_method('ConstructSelf', 
'void', [param('ns3::AttributeConstructionList const &', 'attributes')], visibility='protected') ## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function] cls.add_method('NotifyConstructionCompleted', 'void', [], is_virtual=True, visibility='protected') return def register_Ns3ObjectDeleter_methods(root_module, cls): ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor] cls.add_constructor([]) ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [constructor] cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')]) ## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function] cls.add_method('Delete', 'void', [param('ns3::Object *', 'object')], is_static=True) return def register_Ns3ObjectFactory_methods(root_module, cls): cls.add_output_stream_operator() ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(ns3::ObjectFactory const & arg0) [constructor] cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')]) ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory() [constructor] cls.add_constructor([]) ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(std::string const & typeId) [constructor] cls.add_constructor([param('std::string const &', 'typeId')]) ## object-factory.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectFactory::Create() const [member function] cls.add_method('Create', 'ns3::Ptr< ns3::Object >', [], is_const=True) ## object-factory.h (module 'core'): ns3::TypeId ns3::ObjectFactory::GetTypeId() const [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True) ## object-factory.h (module 'core'): bool ns3::ObjectFactory::IsTypeIdSet() const [member function] cls.add_method('IsTypeIdSet', 'bool', [], is_const=True) ## object-factory.h (module 'core'): void ns3::ObjectFactory::Set() [member function] 
cls.add_method('Set', 'void', []) ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(ns3::TypeId tid) [member function] cls.add_method('SetTypeId', 'void', [param('ns3::TypeId', 'tid')]) ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(char const * tid) [member function] cls.add_method('SetTypeId', 'void', [param('char const *', 'tid')]) ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(std::string tid) [member function] cls.add_method('SetTypeId', 'void', [param('std::string', 'tid')]) return def register_Ns3ParameterLogger_methods(root_module, cls): ## log.h (module 'core'): ns3::ParameterLogger::ParameterLogger(ns3::ParameterLogger const & arg0) [constructor] cls.add_constructor([param('ns3::ParameterLogger const &', 'arg0')]) ## log.h (module 'core'): ns3::ParameterLogger::ParameterLogger(std::ostream & os) [constructor] cls.add_constructor([param('std::ostream &', 'os')]) return def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')]) return def register_Ns3Simulator_methods(root_module, cls): ## simulator.h (module 'core'): ns3::Simulator::Simulator(ns3::Simulator const & arg0) [constructor] cls.add_constructor([param('ns3::Simulator const &', 'arg0')]) ## simulator.h (module 'core'): static void ns3::Simulator::Cancel(ns3::EventId const & id) [member function] cls.add_method('Cancel', 'void', [param('ns3::EventId const &', 'id')], is_static=True) ## 
simulator.h (module 'core'): static void ns3::Simulator::Destroy() [member function] cls.add_method('Destroy', 'void', [], is_static=True) ## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetContext() [member function] cls.add_method('GetContext', 'uint32_t', [], is_static=True) ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetDelayLeft(ns3::EventId const & id) [member function] cls.add_method('GetDelayLeft', 'ns3::Time', [param('ns3::EventId const &', 'id')], is_static=True) ## simulator.h (module 'core'): static uint64_t ns3::Simulator::GetEventCount() [member function] cls.add_method('GetEventCount', 'uint64_t', [], is_static=True) ## simulator.h (module 'core'): static ns3::Ptr<ns3::SimulatorImpl> ns3::Simulator::GetImplementation() [member function] cls.add_method('GetImplementation', 'ns3::Ptr< ns3::SimulatorImpl >', [], is_static=True) ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetMaximumSimulationTime() [member function] cls.add_method('GetMaximumSimulationTime', 'ns3::Time', [], is_static=True) ## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetSystemId() [member function] cls.add_method('GetSystemId', 'uint32_t', [], is_static=True) ## simulator.h (module 'core'): static bool ns3::Simulator::IsExpired(ns3::EventId const & id) [member function] cls.add_method('IsExpired', 'bool', [param('ns3::EventId const &', 'id')], is_static=True) ## simulator.h (module 'core'): static bool ns3::Simulator::IsFinished() [member function] cls.add_method('IsFinished', 'bool', [], is_static=True) ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::Now() [member function] cls.add_method('Now', 'ns3::Time', [], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::Remove(ns3::EventId const & id) [member function] cls.add_method('Remove', 'void', [param('ns3::EventId const &', 'id')], is_static=True) ## simulator.h (module 'core'): static void 
ns3::Simulator::SetImplementation(ns3::Ptr<ns3::SimulatorImpl> impl) [member function] cls.add_method('SetImplementation', 'void', [param('ns3::Ptr< ns3::SimulatorImpl >', 'impl')], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::SetScheduler(ns3::ObjectFactory schedulerFactory) [member function] cls.add_method('SetScheduler', 'void', [param('ns3::ObjectFactory', 'schedulerFactory')], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::Stop() [member function] cls.add_method('Stop', 'void', [], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::Stop(ns3::Time const & delay) [member function] cls.add_method('Stop', 'void', [param('ns3::Time const &', 'delay')], is_static=True) return def register_Ns3StatisticalSummary_methods(root_module, cls): ## data-calculator.h (module 'stats'): ns3::StatisticalSummary::StatisticalSummary() [constructor] cls.add_constructor([]) ## data-calculator.h (module 'stats'): ns3::StatisticalSummary::StatisticalSummary(ns3::StatisticalSummary const & arg0) [constructor] cls.add_constructor([param('ns3::StatisticalSummary const &', 'arg0')]) ## data-calculator.h (module 'stats'): long int ns3::StatisticalSummary::getCount() const [member function] cls.add_method('getCount', 'long int', [], is_const=True, is_pure_virtual=True, is_virtual=True) ## data-calculator.h (module 'stats'): double ns3::StatisticalSummary::getMax() const [member function] cls.add_method('getMax', 'double', [], is_const=True, is_pure_virtual=True, is_virtual=True) ## data-calculator.h (module 'stats'): double ns3::StatisticalSummary::getMean() const [member function] cls.add_method('getMean', 'double', [], is_const=True, is_pure_virtual=True, is_virtual=True) ## data-calculator.h (module 'stats'): double ns3::StatisticalSummary::getMin() const [member function] cls.add_method('getMin', 'double', [], is_const=True, is_pure_virtual=True, is_virtual=True) ## data-calculator.h (module 'stats'): 
double ns3::StatisticalSummary::getSqrSum() const [member function] cls.add_method('getSqrSum', 'double', [], is_const=True, is_pure_virtual=True, is_virtual=True) ## data-calculator.h (module 'stats'): double ns3::StatisticalSummary::getStddev() const [member function] cls.add_method('getStddev', 'double', [], is_const=True, is_pure_virtual=True, is_virtual=True) ## data-calculator.h (module 'stats'): double ns3::StatisticalSummary::getSum() const [member function] cls.add_method('getSum', 'double', [], is_const=True, is_pure_virtual=True, is_virtual=True) ## data-calculator.h (module 'stats'): double ns3::StatisticalSummary::getVariance() const [member function] cls.add_method('getVariance', 'double', [], is_const=True, is_pure_virtual=True, is_virtual=True) return def register_Ns3Time_methods(root_module, cls): cls.add_binary_comparison_operator('==') cls.add_binary_comparison_operator('!=') cls.add_binary_comparison_operator('<=') cls.add_binary_comparison_operator('>=') cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('>') cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::int64x64_t const &', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::Time'], param('ns3::Time const &', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::int64x64_t const &', 'right')) cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', 'right')) cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', 'right')) cls.add_output_stream_operator() ## nstime.h (module 'core'): ns3::Time::Time() [constructor] cls.add_constructor([]) ## nstime.h 
(module 'core'): ns3::Time::Time(ns3::Time const & o) [constructor] cls.add_constructor([param('ns3::Time const &', 'o')]) ## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor] cls.add_constructor([param('double', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor] cls.add_constructor([param('int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor] cls.add_constructor([param('long int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor] cls.add_constructor([param('long long int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor] cls.add_constructor([param('unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor] cls.add_constructor([param('long unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor] cls.add_constructor([param('long long unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & v) [constructor] cls.add_constructor([param('ns3::int64x64_t const &', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor] cls.add_constructor([param('std::string const &', 's')]) ## nstime.h (module 'core'): ns3::TimeWithUnit ns3::Time::As(ns3::Time::Unit const unit=::ns3::Time::Unit::AUTO) const [member function] cls.add_method('As', 'ns3::TimeWithUnit', [param('ns3::Time::Unit const', 'unit', default_value='::ns3::Time::Unit::AUTO')], is_const=True) ## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function] cls.add_method('Compare', 'int', [param('ns3::Time const &', 'o')], is_const=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function] cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'value')], is_static=True) ## nstime.h (module 'core'): static ns3::Time 
ns3::Time::From(ns3::int64x64_t const & value, ns3::Time::Unit unit) [member function] cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'value'), param('ns3::Time::Unit', 'unit')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit unit) [member function] cls.add_method('FromDouble', 'ns3::Time', [param('double', 'value'), param('ns3::Time::Unit', 'unit')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit unit) [member function] cls.add_method('FromInteger', 'ns3::Time', [param('uint64_t', 'value'), param('ns3::Time::Unit', 'unit')], is_static=True) ## nstime.h (module 'core'): double ns3::Time::GetDays() const [member function] cls.add_method('GetDays', 'double', [], is_const=True) ## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function] cls.add_method('GetDouble', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function] cls.add_method('GetFemtoSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): double ns3::Time::GetHours() const [member function] cls.add_method('GetHours', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function] cls.add_method('GetInteger', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function] cls.add_method('GetMicroSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function] cls.add_method('GetMilliSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): double ns3::Time::GetMinutes() const [member function] cls.add_method('GetMinutes', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function] cls.add_method('GetNanoSeconds', 
'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function] cls.add_method('GetPicoSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function] cls.add_method('GetResolution', 'ns3::Time::Unit', [], is_static=True) ## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function] cls.add_method('GetSeconds', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function] cls.add_method('GetTimeStep', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): double ns3::Time::GetYears() const [member function] cls.add_method('GetYears', 'double', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function] cls.add_method('IsNegative', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function] cls.add_method('IsPositive', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function] cls.add_method('IsStrictlyNegative', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function] cls.add_method('IsStrictlyPositive', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function] cls.add_method('IsZero', 'bool', [], is_const=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::Max() [member function] cls.add_method('Max', 'ns3::Time', [], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::Min() [member function] cls.add_method('Min', 'ns3::Time', [], is_static=True) ## nstime.h (module 'core'): ns3::Time ns3::Time::RoundTo(ns3::Time::Unit unit) const [member function] cls.add_method('RoundTo', 'ns3::Time', [param('ns3::Time::Unit', 'unit')], is_const=True) ## nstime.h (module 'core'): static void 
ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function] cls.add_method('SetResolution', 'void', [param('ns3::Time::Unit', 'resolution')], is_static=True) ## nstime.h (module 'core'): static bool ns3::Time::StaticInit() [member function] cls.add_method('StaticInit', 'bool', [], is_static=True) ## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit unit) const [member function] cls.add_method('To', 'ns3::int64x64_t', [param('ns3::Time::Unit', 'unit')], is_const=True) ## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit unit) const [member function] cls.add_method('ToDouble', 'double', [param('ns3::Time::Unit', 'unit')], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit unit) const [member function] cls.add_method('ToInteger', 'int64_t', [param('ns3::Time::Unit', 'unit')], is_const=True) return def register_Ns3TimeWithUnit_methods(root_module, cls): cls.add_output_stream_operator() ## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::TimeWithUnit const & arg0) [constructor] cls.add_constructor([param('ns3::TimeWithUnit const &', 'arg0')]) ## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::Time const time, ns3::Time::Unit const unit) [constructor] cls.add_constructor([param('ns3::Time const', 'time'), param('ns3::Time::Unit const', 'unit')]) return def register_Ns3TracedValue__Bool_methods(root_module, cls): ## traced-value.h (module 'core'): ns3::TracedValue<bool>::TracedValue() [constructor] cls.add_constructor([]) ## traced-value.h (module 'core'): ns3::TracedValue<bool>::TracedValue(ns3::TracedValue<bool> const & o) [constructor] cls.add_constructor([param('ns3::TracedValue< bool > const &', 'o')]) ## traced-value.h (module 'core'): ns3::TracedValue<bool>::TracedValue(bool const & v) [constructor] cls.add_constructor([param('bool const &', 'v')]) ## traced-value.h (module 'core'): ns3::TracedValue<bool>::TracedValue(ns3::TracedValue<bool> const & 
other) [constructor] cls.add_constructor([param('ns3::TracedValue< bool > const &', 'other')]) ## traced-value.h (module 'core'): ns3::TracedValue<bool>::TracedValue(ns3::TracedValue<bool> const & other) [constructor] cls.add_constructor([param('ns3::TracedValue< bool > const &', 'other')]) ## traced-value.h (module 'core'): void ns3::TracedValue<bool>::Connect(ns3::CallbackBase const & cb, std::string path) [member function] cls.add_method('Connect', 'void', [param('ns3::CallbackBase const &', 'cb'), param('std::string', 'path')]) ## traced-value.h (module 'core'): void ns3::TracedValue<bool>::ConnectWithoutContext(ns3::CallbackBase const & cb) [member function] cls.add_method('ConnectWithoutContext', 'void', [param('ns3::CallbackBase const &', 'cb')]) ## traced-value.h (module 'core'): void ns3::TracedValue<bool>::Disconnect(ns3::CallbackBase const & cb, std::string path) [member function] cls.add_method('Disconnect', 'void', [param('ns3::CallbackBase const &', 'cb'), param('std::string', 'path')]) ## traced-value.h (module 'core'): void ns3::TracedValue<bool>::DisconnectWithoutContext(ns3::CallbackBase const & cb) [member function] cls.add_method('DisconnectWithoutContext', 'void', [param('ns3::CallbackBase const &', 'cb')]) ## traced-value.h (module 'core'): bool ns3::TracedValue<bool>::Get() const [member function] cls.add_method('Get', 'bool', [], is_const=True) ## traced-value.h (module 'core'): void ns3::TracedValue<bool>::Set(bool const & v) [member function] cls.add_method('Set', 'void', [param('bool const &', 'v')]) return def register_Ns3TracedValue__Double_methods(root_module, cls): ## traced-value.h (module 'core'): ns3::TracedValue<double>::TracedValue() [constructor] cls.add_constructor([]) ## traced-value.h (module 'core'): ns3::TracedValue<double>::TracedValue(ns3::TracedValue<double> const & o) [constructor] cls.add_constructor([param('ns3::TracedValue< double > const &', 'o')]) ## traced-value.h (module 'core'): 
ns3::TracedValue<double>::TracedValue(double const & v) [constructor] cls.add_constructor([param('double const &', 'v')]) ## traced-value.h (module 'core'): ns3::TracedValue<double>::TracedValue(ns3::TracedValue<double> const & other) [constructor] cls.add_constructor([param('ns3::TracedValue< double > const &', 'other')]) ## traced-value.h (module 'core'): ns3::TracedValue<double>::TracedValue(ns3::TracedValue<double> const & other) [constructor] cls.add_constructor([param('ns3::TracedValue< double > const &', 'other')]) ## traced-value.h (module 'core'): void ns3::TracedValue<double>::Connect(ns3::CallbackBase const & cb, std::string path) [member function] cls.add_method('Connect', 'void', [param('ns3::CallbackBase const &', 'cb'), param('std::string', 'path')]) ## traced-value.h (module 'core'): void ns3::TracedValue<double>::ConnectWithoutContext(ns3::CallbackBase const & cb) [member function] cls.add_method('ConnectWithoutContext', 'void', [param('ns3::CallbackBase const &', 'cb')]) ## traced-value.h (module 'core'): void ns3::TracedValue<double>::Disconnect(ns3::CallbackBase const & cb, std::string path) [member function] cls.add_method('Disconnect', 'void', [param('ns3::CallbackBase const &', 'cb'), param('std::string', 'path')]) ## traced-value.h (module 'core'): void ns3::TracedValue<double>::DisconnectWithoutContext(ns3::CallbackBase const & cb) [member function] cls.add_method('DisconnectWithoutContext', 'void', [param('ns3::CallbackBase const &', 'cb')]) ## traced-value.h (module 'core'): double ns3::TracedValue<double>::Get() const [member function] cls.add_method('Get', 'double', [], is_const=True) ## traced-value.h (module 'core'): void ns3::TracedValue<double>::Set(double const & v) [member function] cls.add_method('Set', 'void', [param('double const &', 'v')]) return def register_Ns3TracedValue__Unsigned_char_methods(root_module, cls): ## traced-value.h (module 'core'): ns3::TracedValue<unsigned char>::TracedValue() [constructor] 
cls.add_constructor([]) ## traced-value.h (module 'core'): ns3::TracedValue<unsigned char>::TracedValue(ns3::TracedValue<unsigned char> const & o) [constructor] cls.add_constructor([param('ns3::TracedValue< unsigned char > const &', 'o')]) ## traced-value.h (module 'core'): ns3::TracedValue<unsigned char>::TracedValue(unsigned char const & v) [constructor] cls.add_constructor([param('unsigned char const &', 'v')]) ## traced-value.h (module 'core'): ns3::TracedValue<unsigned char>::TracedValue(ns3::TracedValue<unsigned char> const & other) [constructor] cls.add_constructor([param('ns3::TracedValue< unsigned char > const &', 'other')]) ## traced-value.h (module 'core'): ns3::TracedValue<unsigned char>::TracedValue(ns3::TracedValue<unsigned char> const & other) [constructor] cls.add_constructor([param('ns3::TracedValue< unsigned char > const &', 'other')]) ## traced-value.h (module 'core'): void ns3::TracedValue<unsigned char>::Connect(ns3::CallbackBase const & cb, std::string path) [member function] cls.add_method('Connect', 'void', [param('ns3::CallbackBase const &', 'cb'), param('std::string', 'path')]) ## traced-value.h (module 'core'): void ns3::TracedValue<unsigned char>::ConnectWithoutContext(ns3::CallbackBase const & cb) [member function] cls.add_method('ConnectWithoutContext', 'void', [param('ns3::CallbackBase const &', 'cb')]) ## traced-value.h (module 'core'): void ns3::TracedValue<unsigned char>::Disconnect(ns3::CallbackBase const & cb, std::string path) [member function] cls.add_method('Disconnect', 'void', [param('ns3::CallbackBase const &', 'cb'), param('std::string', 'path')]) ## traced-value.h (module 'core'): void ns3::TracedValue<unsigned char>::DisconnectWithoutContext(ns3::CallbackBase const & cb) [member function] cls.add_method('DisconnectWithoutContext', 'void', [param('ns3::CallbackBase const &', 'cb')]) ## traced-value.h (module 'core'): unsigned char ns3::TracedValue<unsigned char>::Get() const [member function] cls.add_method('Get', 
'unsigned char', [], is_const=True) ## traced-value.h (module 'core'): void ns3::TracedValue<unsigned char>::Set(unsigned char const & v) [member function] cls.add_method('Set', 'void', [param('unsigned char const &', 'v')]) return def register_Ns3TracedValue__Unsigned_int_methods(root_module, cls): ## traced-value.h (module 'core'): ns3::TracedValue<unsigned int>::TracedValue() [constructor] cls.add_constructor([]) ## traced-value.h (module 'core'): ns3::TracedValue<unsigned int>::TracedValue(ns3::TracedValue<unsigned int> const & o) [constructor] cls.add_constructor([param('ns3::TracedValue< unsigned int > const &', 'o')]) ## traced-value.h (module 'core'): ns3::TracedValue<unsigned int>::TracedValue(unsigned int const & v) [constructor] cls.add_constructor([param('unsigned int const &', 'v')]) ## traced-value.h (module 'core'): ns3::TracedValue<unsigned int>::TracedValue(ns3::TracedValue<unsigned int> const & other) [constructor] cls.add_constructor([param('ns3::TracedValue< unsigned int > const &', 'other')]) ## traced-value.h (module 'core'): ns3::TracedValue<unsigned int>::TracedValue(ns3::TracedValue<unsigned int> const & other) [constructor] cls.add_constructor([param('ns3::TracedValue< unsigned int > const &', 'other')]) ## traced-value.h (module 'core'): void ns3::TracedValue<unsigned int>::Connect(ns3::CallbackBase const & cb, std::string path) [member function] cls.add_method('Connect', 'void', [param('ns3::CallbackBase const &', 'cb'), param('std::string', 'path')]) ## traced-value.h (module 'core'): void ns3::TracedValue<unsigned int>::ConnectWithoutContext(ns3::CallbackBase const & cb) [member function] cls.add_method('ConnectWithoutContext', 'void', [param('ns3::CallbackBase const &', 'cb')]) ## traced-value.h (module 'core'): void ns3::TracedValue<unsigned int>::Disconnect(ns3::CallbackBase const & cb, std::string path) [member function] cls.add_method('Disconnect', 'void', [param('ns3::CallbackBase const &', 'cb'), param('std::string', 'path')]) 
## traced-value.h (module 'core'): void ns3::TracedValue<unsigned int>::DisconnectWithoutContext(ns3::CallbackBase const & cb) [member function] cls.add_method('DisconnectWithoutContext', 'void', [param('ns3::CallbackBase const &', 'cb')]) ## traced-value.h (module 'core'): unsigned int ns3::TracedValue<unsigned int>::Get() const [member function] cls.add_method('Get', 'unsigned int', [], is_const=True) ## traced-value.h (module 'core'): void ns3::TracedValue<unsigned int>::Set(unsigned int const & v) [member function] cls.add_method('Set', 'void', [param('unsigned int const &', 'v')]) return def register_Ns3TracedValue__Unsigned_short_methods(root_module, cls): ## traced-value.h (module 'core'): ns3::TracedValue<unsigned short>::TracedValue() [constructor] cls.add_constructor([]) ## traced-value.h (module 'core'): ns3::TracedValue<unsigned short>::TracedValue(ns3::TracedValue<unsigned short> const & o) [constructor] cls.add_constructor([param('ns3::TracedValue< unsigned short > const &', 'o')]) ## traced-value.h (module 'core'): ns3::TracedValue<unsigned short>::TracedValue(short unsigned int const & v) [constructor] cls.add
codeparrot/github-code-clean
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# NOTE(review): Python-2-era Nova unit tests (mox3/six in use) for the
# v2.1 servers API controllers; module-level helpers below are installed
# as stubs over compute_api.API / db calls by the TestCase classes.

import base64
import collections
import copy
import datetime
import uuid

import fixtures
import iso8601
import mock
from mox3 import mox
from oslo_config import cfg
from oslo_policy import policy as oslo_policy
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from six.moves import range
import six.moves.urllib.parse as urlparse
import testtools
import webob

from nova.api.openstack import api_version_request
from nova.api.openstack import common
from nova.api.openstack import compute
from nova.api.openstack.compute import disk_config
from nova.api.openstack.compute import extension_info
from nova.api.openstack.compute import ips
from nova.api.openstack.compute import keypairs
from nova.api.openstack.compute.schemas import servers as servers_schema
from nova.api.openstack.compute import servers
from nova.api.openstack.compute import views
from nova.api.openstack import extensions
from nova.api.openstack import wsgi as os_wsgi
from nova import availability_zones
from nova.compute import api as compute_api
from nova.compute import flavors
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova.image import glance
from nova.network import manager
from nova.network.neutronv2 import api as neutron_api
from nova import objects
from nova.objects import instance as instance_obj
from nova import policy
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_network
from nova.tests.unit.image import fake
from nova.tests.unit import matchers
from nova import utils as nova_utils

CONF = cfg.CONF
CONF.import_opt('password_length', 'nova.utils')

# Canonical fake server UUID shared by nearly every test in this module.
FAKE_UUID = fakes.FAKE_UUID

# Maps fake UUIDs to the integer DB id used when stubbing instances.
INSTANCE_IDS = {FAKE_UUID: 1}

FIELDS = instance_obj.INSTANCE_DEFAULT_FIELDS


def fake_gen_uuid():
    """Return the canonical fake UUID instead of generating a new one."""
    return FAKE_UUID


def return_servers_empty(context, *args, **kwargs):
    """Stub for compute_api.API.get_all that returns no instances."""
    return objects.InstanceList(objects=[])


def instance_update_and_get_original(context, instance_uuid, values,
                                     columns_to_join=None,
                                     ):
    """Stub for db.instance_update_and_get_original.

    Builds a stub instance for *instance_uuid*, merges *values* into it,
    and returns the same dict twice as the (old, new) pair.
    """
    inst = fakes.stub_instance(INSTANCE_IDS.get(instance_uuid),
                               name=values.get('display_name'))
    inst = dict(inst, **values)
    return (inst, inst)


def instance_update(context, instance_uuid, values):
    """Stub for db.instance_update; returns the merged stub instance."""
    inst = fakes.stub_instance(INSTANCE_IDS.get(instance_uuid),
                               name=values.get('display_name'))
    inst = dict(inst, **values)
    return inst


def fake_compute_api(cls, req, id):
    """Trivial success stub for arbitrary compute API actions."""
    return True


def fake_start_stop_not_ready(self, context, instance):
    """Stub that simulates a start/stop call on a not-yet-ready instance."""
    raise exception.InstanceNotReady(instance_id=instance["uuid"])


def fake_start_stop_invalid_state(self, context, instance):
    """Stub that simulates a start/stop call in an invalid VM state."""
    raise exception.InstanceInvalidState(
        instance_uuid=instance['uuid'], attr='fake_attr',
        method='fake_method', state='fake_state')


def fake_instance_get_by_uuid_not_found(context, uuid,
                                        columns_to_join, use_slave=False):
    """Stub instance lookup that always reports the instance missing."""
    raise exception.InstanceNotFound(instance_id=uuid)


def fake_instance_get_all_with_locked(context, list_locked, **kwargs):
    """Build an InstanceList whose locked_by values come from *list_locked*.

    Each entry in *list_locked* becomes one stub instance; the literal
    string 'not_locked' maps to locked_by=None, anything else is used as
    the locked_by value directly.
    """
    obj_list = []
    s_id = 0
    for locked in list_locked:
        uuid = fakes.get_fake_uuid(locked)
        s_id = s_id + 1
        kwargs['locked_by'] = None if locked == 'not_locked' else locked
        server = fakes.stub_instance_obj(context, id=s_id, uuid=uuid, **kwargs)
        obj_list.append(server)
    # NOTE(review): this return statement continues in the next source
    # chunk with objects.InstanceList(objects=obj_list).
    return 
objects.InstanceList(objects=obj_list)


class MockSetAdminPassword(object):
    """Callable recording the (instance_id, password) it was invoked with."""

    def __init__(self):
        self.instance_id = None
        self.password = None

    def __call__(self, context, instance_id, password):
        self.instance_id = instance_id
        self.password = password


class Base64ValidationTest(test.TestCase):
    """Tests for ServersController._decode_base64 input validation.

    NOTE(review): the str/bytes handling here assumes Python 2 semantics
    (base64.b64encode called on native str) -- confirm before porting.
    """

    def setUp(self):
        super(Base64ValidationTest, self).setUp()
        ext_info = extension_info.LoadedExtensionInfo()
        self.controller = servers.ServersController(extension_info=ext_info)

    def test_decode_base64(self):
        # Round-trip: a valid base64 payload decodes to the original value.
        value = "A random string"
        result = self.controller._decode_base64(base64.b64encode(value))
        self.assertEqual(result, value)

    def test_decode_base64_binary(self):
        # Binary payloads (non-printable bytes) must round-trip as well.
        value = "\x00\x12\x75\x99"
        result = self.controller._decode_base64(base64.b64encode(value))
        self.assertEqual(result, value)

    def test_decode_base64_whitespace(self):
        # Embedded whitespace inside base64 input is tolerated by decode.
        value = "A random string"
        encoded = base64.b64encode(value)
        white = "\n \n%s\t%s\n" % (encoded[:2], encoded[2:])
        result = self.controller._decode_base64(white)
        self.assertEqual(result, value)

    def test_decode_base64_invalid(self):
        # Plain (non-base64) text must be rejected, signalled by None.
        invalid = "A random string"
        result = self.controller._decode_base64(invalid)
        self.assertIsNone(result)

    def test_decode_base64_illegal_bytes(self):
        # Characters outside the base64 alphabet must be rejected.
        value = "A random string"
        encoded = base64.b64encode(value)
        white = ">\x01%s*%s()" % (encoded[:2], encoded[2:])
        result = self.controller._decode_base64(white)
        self.assertIsNone(result)


class NeutronV2Subclass(neutron_api.API):
    """Used to ensure that API handles subclasses properly."""
    pass


class ControllerTest(test.TestCase):
    """Base TestCase wiring shared compute API and DB stubs for servers tests."""

    def setUp(self):
        super(ControllerTest, self).setUp()
        self.flags(verbose=True, use_ipv6=False)
        fakes.stub_out_rate_limiting(self.stubs)
        fakes.stub_out_key_pair_funcs(self.stubs)
        fake.stub_out_image_service(self.stubs)
        return_server = fakes.fake_compute_get()
        return_servers = fakes.fake_compute_get_all()
        # Server sort keys extension is enabled in v21 so sort data is passed
        # to the instance API and the sorted DB API is invoked
        # NOTE(review): this call continues in the next source chunk with
        # the lambda that forwards to return_servers.
        self.stubs.Set(compute_api.API, 'get_all',
lambda api, *a, **k: return_servers(*a, **k))
        self.stubs.Set(compute_api.API, 'get',
                       lambda api, *a, **k: return_server(*a, **k))
        self.stubs.Set(db, 'instance_update_and_get_original',
                       instance_update_and_get_original)
        ext_info = extension_info.LoadedExtensionInfo()
        self.controller = servers.ServersController(extension_info=ext_info)
        self.ips_controller = ips.IPsController()
        policy.reset()
        policy.init()
        fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)


class ServersControllerTest(ControllerTest):
    """Tests for the v2.1 ServersController (index/show/detail and filters)."""

    wsgi_api_version = os_wsgi.DEFAULT_API_VERSION

    def setUp(self):
        super(ServersControllerTest, self).setUp()
        CONF.set_override('host', 'localhost', group='glance')

    def req(self, url, use_admin_context=False):
        """Build a fake HTTP request pinned to this class's API version."""
        return fakes.HTTPRequest.blank(url,
                                       use_admin_context=use_admin_context,
                                       version=self.wsgi_api_version)

    def test_requested_networks_prefix(self):
        # A 'br-' prefixed uuid is passed through untouched.
        uuid = 'br-00000000-0000-0000-0000-000000000000'
        requested_networks = [{'uuid': uuid}]
        res = self.controller._get_requested_networks(requested_networks)
        self.assertIn((uuid, None), res.as_tuples())

    def test_requested_networks_neutronv2_enabled_with_port(self):
        # With neutron, a bare port id yields a (net, ip, port, pci) tuple
        # carrying only the port.
        self.flags(network_api_class='nova.network.neutronv2.api.API')
        port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
        requested_networks = [{'port': port}]
        res = self.controller._get_requested_networks(requested_networks)
        self.assertEqual([(None, None, port, None)], res.as_tuples())

    def test_requested_networks_neutronv2_enabled_with_network(self):
        # With neutron, a bare network uuid carries only the network.
        self.flags(network_api_class='nova.network.neutronv2.api.API')
        network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
        requested_networks = [{'uuid': network}]
        res = self.controller._get_requested_networks(requested_networks)
        self.assertEqual([(network, None, None, None)], res.as_tuples())

    def test_requested_networks_neutronv2_enabled_with_network_and_port(self):
        # NOTE(review): this test method continues in the next source chunk.
        self.flags(network_api_class='nova.network.neutronv2.api.API')
        network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
        port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
requested_networks = [{'uuid': network, 'port': port}] res = self.controller._get_requested_networks(requested_networks) self.assertEqual([(None, None, port, None)], res.as_tuples()) def test_requested_networks_with_duplicate_networks(self): # duplicate networks are allowed only for nova neutron v2.0 network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' requested_networks = [{'uuid': network}, {'uuid': network}] self.assertRaises( webob.exc.HTTPBadRequest, self.controller._get_requested_networks, requested_networks) def test_requested_networks_with_neutronv2_and_duplicate_networks(self): # duplicate networks are allowed only for nova neutron v2.0 self.flags(network_api_class='nova.network.neutronv2.api.API') network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' requested_networks = [{'uuid': network}, {'uuid': network}] res = self.controller._get_requested_networks(requested_networks) self.assertEqual([(network, None, None, None), (network, None, None, None)], res.as_tuples()) def test_requested_networks_neutronv2_enabled_conflict_on_fixed_ip(self): self.flags(network_api_class='nova.network.neutronv2.api.API') network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' addr = '10.0.0.1' requested_networks = [{'uuid': network, 'fixed_ip': addr, 'port': port}] self.assertRaises( webob.exc.HTTPBadRequest, self.controller._get_requested_networks, requested_networks) def test_requested_networks_neutronv2_disabled_with_port(self): port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' requested_networks = [{'port': port}] self.assertRaises( webob.exc.HTTPBadRequest, self.controller._get_requested_networks, requested_networks) def test_requested_networks_api_enabled_with_v2_subclass(self): self.flags(network_api_class='nova.network.neutronv2.api.API') network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' requested_networks = [{'uuid': network, 'port': port}] res = 
self.controller._get_requested_networks(requested_networks) self.assertEqual([(None, None, port, None)], res.as_tuples()) def test_requested_networks_neutronv2_subclass_with_port(self): cls = ('nova.tests.unit.api.openstack.compute.test_serversV21.' 'NeutronV2Subclass') self.flags(network_api_class=cls) port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' requested_networks = [{'port': port}] res = self.controller._get_requested_networks(requested_networks) self.assertEqual([(None, None, port, None)], res.as_tuples()) def test_get_server_by_uuid(self): req = self.req('/fake/servers/%s' % FAKE_UUID) res_dict = self.controller.show(req, FAKE_UUID) self.assertEqual(res_dict['server']['id'], FAKE_UUID) def test_get_server_joins_pci_devices(self): def fake_get(_self, *args, **kwargs): expected_attrs = kwargs['expected_attrs'] self.assertEqual(['flavor', 'info_cache', 'metadata', 'pci_devices'], expected_attrs) ctxt = context.RequestContext('fake', 'fake') return fake_instance.fake_instance_obj( ctxt, expected_attrs=expected_attrs) self.stubs.Set(compute_api.API, 'get', fake_get) req = self.req('/fake/servers/%s' % FAKE_UUID) self.controller.show(req, FAKE_UUID) def test_unique_host_id(self): """Create two servers with the same host and different project_ids and check that the host_id's are unique. 
""" def return_instance_with_host(context, *args, **kwargs): project_id = str(uuid.uuid4()) return fakes.stub_instance_obj(context, id=1, uuid=FAKE_UUID, project_id=project_id, host='fake_host') self.stubs.Set(compute_api.API, 'get', return_instance_with_host) req = self.req('/fake/servers/%s' % FAKE_UUID) with mock.patch.object(compute_api.API, 'get') as mock_get: mock_get.side_effect = return_instance_with_host server1 = self.controller.show(req, FAKE_UUID) server2 = self.controller.show(req, FAKE_UUID) self.assertNotEqual(server1['server']['hostId'], server2['server']['hostId']) def _get_server_data_dict(self, uuid, image_bookmark, flavor_bookmark, status="ACTIVE", progress=100): return { "server": { "id": uuid, "user_id": "fake_user", "tenant_id": "fake_project", "updated": "2010-11-11T11:00:00Z", "created": "2010-10-10T12:00:00Z", "progress": progress, "name": "server2", "status": status, "hostId": '', "image": { "id": "10", "links": [ { "rel": "bookmark", "href": image_bookmark, }, ], }, "flavor": { "id": "2", "links": [ { "rel": "bookmark", "href": flavor_bookmark, }, ], }, "addresses": { 'test1': [ {'version': 4, 'addr': '192.168.1.100', 'OS-EXT-IPS:type': 'fixed', 'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'}, {'version': 6, 'addr': '2001:db8:0:1::1', 'OS-EXT-IPS:type': 'fixed', 'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'} ] }, "metadata": { "seq": "2", }, "links": [ { "rel": "self", "href": "http://localhost/v2/fake/servers/%s" % uuid, }, { "rel": "bookmark", "href": "http://localhost/fake/servers/%s" % uuid, }, ], } } def test_get_server_by_id(self): self.flags(use_ipv6=True) image_bookmark = "http://localhost/fake/images/10" flavor_bookmark = "http://localhost/fake/flavors/2" uuid = FAKE_UUID req = self.req('/v2/fake/servers/%s' % uuid) res_dict = self.controller.show(req, uuid) expected_server = self._get_server_data_dict(uuid, image_bookmark, flavor_bookmark, status="BUILD", progress=0) expected_server['server']['name'] = 'server1' 
expected_server['server']['metadata']['seq'] = '1' self.assertThat(res_dict, matchers.DictMatches(expected_server)) def test_get_server_with_active_status_by_id(self): image_bookmark = "http://localhost/fake/images/10" flavor_bookmark = "http://localhost/fake/flavors/2" new_return_server = fakes.fake_compute_get( id=2, vm_state=vm_states.ACTIVE, progress=100) self.stubs.Set(compute_api.API, 'get', lambda api, *a, **k: new_return_server(*a, **k)) uuid = FAKE_UUID req = self.req('/fake/servers/%s' % uuid) res_dict = self.controller.show(req, uuid) expected_server = self._get_server_data_dict(uuid, image_bookmark, flavor_bookmark) self.assertThat(res_dict, matchers.DictMatches(expected_server)) def test_get_server_with_id_image_ref_by_id(self): image_ref = "10" image_bookmark = "http://localhost/fake/images/10" flavor_id = "1" flavor_bookmark = "http://localhost/fake/flavors/2" new_return_server = fakes.fake_compute_get( id=2, vm_state=vm_states.ACTIVE, image_ref=image_ref, flavor_id=flavor_id, progress=100) self.stubs.Set(compute_api.API, 'get', lambda api, *a, **k: new_return_server(*a, **k)) uuid = FAKE_UUID req = self.req('/fake/servers/%s' % uuid) res_dict = self.controller.show(req, uuid) expected_server = self._get_server_data_dict(uuid, image_bookmark, flavor_bookmark) self.assertThat(res_dict, matchers.DictMatches(expected_server)) def test_get_server_addresses_from_cache(self): pub0 = ('172.19.0.1', '172.19.0.2',) pub1 = ('1.2.3.4',) pub2 = ('b33f::fdee:ddff:fecc:bbaa',) priv0 = ('192.168.0.3', '192.168.0.4',) def _ip(ip): return {'address': ip, 'type': 'fixed'} nw_cache = [ {'address': 'aa:aa:aa:aa:aa:aa', 'id': 1, 'network': {'bridge': 'br0', 'id': 1, 'label': 'public', 'subnets': [{'cidr': '172.19.0.0/24', 'ips': [_ip(ip) for ip in pub0]}, {'cidr': '1.2.3.0/16', 'ips': [_ip(ip) for ip in pub1]}, {'cidr': 'b33f::/64', 'ips': [_ip(ip) for ip in pub2]}]}}, {'address': 'bb:bb:bb:bb:bb:bb', 'id': 2, 'network': {'bridge': 'br1', 'id': 2, 'label': 'private', 
'subnets': [{'cidr': '192.168.0.0/24', 'ips': [_ip(ip) for ip in priv0]}]}}] return_server = fakes.fake_compute_get(nw_cache=nw_cache) self.stubs.Set(compute_api.API, 'get', lambda api, *a, **k: return_server(*a, **k)) req = self.req('/fake/servers/%s/ips' % FAKE_UUID) res_dict = self.ips_controller.index(req, FAKE_UUID) expected = { 'addresses': { 'private': [ {'version': 4, 'addr': '192.168.0.3'}, {'version': 4, 'addr': '192.168.0.4'}, ], 'public': [ {'version': 4, 'addr': '172.19.0.1'}, {'version': 4, 'addr': '172.19.0.2'}, {'version': 4, 'addr': '1.2.3.4'}, {'version': 6, 'addr': 'b33f::fdee:ddff:fecc:bbaa'}, ], }, } self.assertThat(res_dict, matchers.DictMatches(expected)) # Make sure we kept the addresses in order self.assertIsInstance(res_dict['addresses'], collections.OrderedDict) labels = [vif['network']['label'] for vif in nw_cache] for index, label in enumerate(res_dict['addresses'].keys()): self.assertEqual(label, labels[index]) def test_get_server_addresses_nonexistent_network(self): url = '/v2/fake/servers/%s/ips/network_0' % FAKE_UUID req = self.req(url) self.assertRaises(webob.exc.HTTPNotFound, self.ips_controller.show, req, FAKE_UUID, 'network_0') def test_get_server_addresses_nonexistent_server(self): def fake_instance_get(*args, **kwargs): raise exception.InstanceNotFound(instance_id='fake') self.stubs.Set(compute_api.API, 'get', fake_instance_get) server_id = str(uuid.uuid4()) req = self.req('/fake/servers/%s/ips' % server_id) self.assertRaises(webob.exc.HTTPNotFound, self.ips_controller.index, req, server_id) def test_get_server_list_empty(self): self.stubs.Set(compute_api.API, 'get_all', return_servers_empty) req = self.req('/fake/servers') res_dict = self.controller.index(req) num_servers = len(res_dict['servers']) self.assertEqual(0, num_servers) def test_get_server_list_with_reservation_id(self): req = self.req('/fake/servers?reservation_id=foo') res_dict = self.controller.index(req) i = 0 for s in res_dict['servers']: 
self.assertEqual(s.get('name'), 'server%d' % (i + 1)) i += 1 def test_get_server_list_with_reservation_id_empty(self): req = self.req('/fake/servers/detail?' 'reservation_id=foo') res_dict = self.controller.detail(req) i = 0 for s in res_dict['servers']: self.assertEqual(s.get('name'), 'server%d' % (i + 1)) i += 1 def test_get_server_list_with_reservation_id_details(self): req = self.req('/fake/servers/detail?' 'reservation_id=foo') res_dict = self.controller.detail(req) i = 0 for s in res_dict['servers']: self.assertEqual(s.get('name'), 'server%d' % (i + 1)) i += 1 def test_get_server_list(self): req = self.req('/fake/servers') res_dict = self.controller.index(req) self.assertEqual(len(res_dict['servers']), 5) for i, s in enumerate(res_dict['servers']): self.assertEqual(s['id'], fakes.get_fake_uuid(i)) self.assertEqual(s['name'], 'server%d' % (i + 1)) self.assertIsNone(s.get('image', None)) expected_links = [ { "rel": "self", "href": "http://localhost/v2/fake/servers/%s" % s['id'], }, { "rel": "bookmark", "href": "http://localhost/fake/servers/%s" % s['id'], }, ] self.assertEqual(s['links'], expected_links) def test_get_servers_with_limit(self): req = self.req('/fake/servers?limit=3') res_dict = self.controller.index(req) servers = res_dict['servers'] self.assertEqual([s['id'] for s in servers], [fakes.get_fake_uuid(i) for i in range(len(servers))]) servers_links = res_dict['servers_links'] self.assertEqual(servers_links[0]['rel'], 'next') href_parts = urlparse.urlparse(servers_links[0]['href']) self.assertEqual('/v2/fake/servers', href_parts.path) params = urlparse.parse_qs(href_parts.query) expected_params = {'limit': ['3'], 'marker': [fakes.get_fake_uuid(2)]} self.assertThat(params, matchers.DictMatches(expected_params)) def test_get_servers_with_limit_bad_value(self): req = self.req('/fake/servers?limit=aaa') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, req) def test_get_server_details_empty(self): self.stubs.Set(compute_api.API, 
'get_all', return_servers_empty) req = self.req('/fake/servers/detail') res_dict = self.controller.detail(req) num_servers = len(res_dict['servers']) self.assertEqual(0, num_servers) def test_get_server_details_with_limit(self): req = self.req('/fake/servers/detail?limit=3') res = self.controller.detail(req) servers = res['servers'] self.assertEqual([s['id'] for s in servers], [fakes.get_fake_uuid(i) for i in range(len(servers))]) servers_links = res['servers_links'] self.assertEqual(servers_links[0]['rel'], 'next') href_parts = urlparse.urlparse(servers_links[0]['href']) self.assertEqual('/v2/fake/servers/detail', href_parts.path) params = urlparse.parse_qs(href_parts.query) expected = {'limit': ['3'], 'marker': [fakes.get_fake_uuid(2)]} self.assertThat(params, matchers.DictMatches(expected)) def test_get_server_details_with_limit_bad_value(self): req = self.req('/fake/servers/detail?limit=aaa') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.detail, req) def test_get_server_details_with_limit_and_other_params(self): req = self.req('/fake/servers/detail' '?limit=3&blah=2:t' '&sort_key=id1&sort_dir=asc') res = self.controller.detail(req) servers = res['servers'] self.assertEqual([s['id'] for s in servers], [fakes.get_fake_uuid(i) for i in range(len(servers))]) servers_links = res['servers_links'] self.assertEqual(servers_links[0]['rel'], 'next') href_parts = urlparse.urlparse(servers_links[0]['href']) self.assertEqual('/v2/fake/servers/detail', href_parts.path) params = urlparse.parse_qs(href_parts.query) expected = {'limit': ['3'], 'blah': ['2:t'], 'sort_key': ['id1'], 'sort_dir': ['asc'], 'marker': [fakes.get_fake_uuid(2)]} self.assertThat(params, matchers.DictMatches(expected)) def test_get_servers_with_too_big_limit(self): req = self.req('/fake/servers?limit=30') res_dict = self.controller.index(req) self.assertNotIn('servers_links', res_dict) def test_get_servers_with_bad_limit(self): req = self.req('/fake/servers?limit=asdf') 
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, req) def test_get_servers_with_marker(self): url = '/v2/fake/servers?marker=%s' % fakes.get_fake_uuid(2) req = self.req(url) servers = self.controller.index(req)['servers'] self.assertEqual([s['name'] for s in servers], ["server4", "server5"]) def test_get_servers_with_limit_and_marker(self): url = ('/v2/fake/servers?limit=2&marker=%s' % fakes.get_fake_uuid(1)) req = self.req(url) servers = self.controller.index(req)['servers'] self.assertEqual([s['name'] for s in servers], ['server3', 'server4']) def test_get_servers_with_bad_marker(self): req = self.req('/fake/servers?limit=2&marker=asdf') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, req) def test_get_servers_with_bad_option(self): server_uuid = str(uuid.uuid4()) def fake_get_all(compute_self, context, search_opts=None, limit=None, marker=None, want_objects=False, expected_attrs=None, sort_keys=None, sort_dirs=None): db_list = [fakes.stub_instance(100, uuid=server_uuid)] return instance_obj._make_instance_list( context, objects.InstanceList(), db_list, FIELDS) self.stubs.Set(compute_api.API, 'get_all', fake_get_all) req = self.req('/fake/servers?unknownoption=whee') servers = self.controller.index(req)['servers'] self.assertEqual(len(servers), 1) self.assertEqual(servers[0]['id'], server_uuid) def test_get_servers_allows_image(self): server_uuid = str(uuid.uuid4()) def fake_get_all(compute_self, context, search_opts=None, limit=None, marker=None, want_objects=False, expected_attrs=None, sort_keys=None, sort_dirs=None): self.assertIsNotNone(search_opts) self.assertIn('image', search_opts) self.assertEqual(search_opts['image'], '12345') db_list = [fakes.stub_instance(100, uuid=server_uuid)] return instance_obj._make_instance_list( context, objects.InstanceList(), db_list, FIELDS) self.stubs.Set(compute_api.API, 'get_all', fake_get_all) req = self.req('/fake/servers?image=12345') servers = self.controller.index(req)['servers'] 
self.assertEqual(len(servers), 1) self.assertEqual(servers[0]['id'], server_uuid) def test_tenant_id_filter_no_admin_context(self): def fake_get_all(context, search_opts=None, **kwargs): self.assertNotEqual(search_opts, None) self.assertEqual(search_opts['project_id'], 'fake') return [fakes.stub_instance_obj(100)] req = self.req('/fake/servers?tenant_id=newfake') with mock.patch.object(compute_api.API, 'get_all') as mock_get: mock_get.side_effect = fake_get_all servers = self.controller.index(req)['servers'] self.assertEqual(len(servers), 1) def test_all_tenants_param_normal(self): def fake_get_all(context, search_opts=None, **kwargs): self.assertNotIn('project_id', search_opts) return [fakes.stub_instance_obj(100)] req = self.req('/fake/servers?all_tenants', use_admin_context=True) with mock.patch.object(compute_api.API, 'get_all') as mock_get: mock_get.side_effect = fake_get_all servers = self.controller.index(req)['servers'] self.assertEqual(len(servers), 1) def test_all_tenants_param_one(self): def fake_get_all(api, context, search_opts=None, **kwargs): self.assertNotIn('project_id', search_opts) return [fakes.stub_instance_obj(100)] self.stubs.Set(compute_api.API, 'get_all', fake_get_all) req = self.req('/fake/servers?all_tenants=1', use_admin_context=True) servers = self.controller.index(req)['servers'] self.assertEqual(len(servers), 1) def test_all_tenants_param_zero(self): def fake_get_all(api, context, search_opts=None, **kwargs): self.assertNotIn('all_tenants', search_opts) return [fakes.stub_instance_obj(100)] self.stubs.Set(compute_api.API, 'get_all', fake_get_all) req = self.req('/fake/servers?all_tenants=0', use_admin_context=True) servers = self.controller.index(req)['servers'] self.assertEqual(len(servers), 1) def test_all_tenants_param_false(self): def fake_get_all(api, context, search_opts=None, **kwargs): self.assertNotIn('all_tenants', search_opts) return [fakes.stub_instance_obj(100)] self.stubs.Set(compute_api.API, 'get_all', fake_get_all) req 
= self.req('/fake/servers?all_tenants=false', use_admin_context=True) servers = self.controller.index(req)['servers'] self.assertEqual(len(servers), 1) def test_all_tenants_param_invalid(self): def fake_get_all(api, context, search_opts=None, **kwargs): self.assertNotIn('all_tenants', search_opts) return [fakes.stub_instance_obj(100)] self.stubs.Set(compute_api.API, 'get_all', fake_get_all) req = self.req('/fake/servers?all_tenants=xxx', use_admin_context=True) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, req) def test_admin_restricted_tenant(self): def fake_get_all(api, context, search_opts=None, **kwargs): self.assertIsNotNone(search_opts) self.assertEqual(search_opts['project_id'], 'fake') return [fakes.stub_instance_obj(100)] self.stubs.Set(compute_api.API, 'get_all', fake_get_all) req = self.req('/fake/servers', use_admin_context=True) servers = self.controller.index(req)['servers'] self.assertEqual(len(servers), 1) def test_all_tenants_pass_policy(self): def fake_get_all(api, context, search_opts=None, **kwargs): self.assertIsNotNone(search_opts) self.assertNotIn('project_id', search_opts) self.assertTrue(context.is_admin) return [fakes.stub_instance_obj(100)] self.stubs.Set(compute_api.API, 'get_all', fake_get_all) rules = { "os_compute_api:servers:index": "project_id:fake", "os_compute_api:servers:index:get_all_tenants": "project_id:fake" } policy.set_rules(oslo_policy.Rules.from_dict(rules)) req = self.req('/fake/servers?all_tenants=1') servers = self.controller.index(req)['servers'] self.assertEqual(len(servers), 1) def test_all_tenants_fail_policy(self): def fake_get_all(api, context, search_opts=None, **kwargs): self.assertIsNotNone(search_opts) return [fakes.stub_instance_obj(100)] rules = { "os_compute_api:servers:index:get_all_tenants": "project_id:non_fake", "os_compute_api:servers:get_all": "project_id:fake", } policy.set_rules(oslo_policy.Rules.from_dict(rules)) self.stubs.Set(compute_api.API, 'get_all', fake_get_all) req = 
self.req('/fake/servers?all_tenants=1') self.assertRaises(exception.PolicyNotAuthorized, self.controller.index, req) def test_get_servers_allows_flavor(self): server_uuid = str(uuid.uuid4()) def fake_get_all(compute_self, context, search_opts=None, limit=None, marker=None, want_objects=False, expected_attrs=None, sort_keys=None, sort_dirs=None): self.assertIsNotNone(search_opts) self.assertIn('flavor', search_opts) # flavor is an integer ID self.assertEqual(search_opts['flavor'], '12345') return objects.InstanceList( objects=[fakes.stub_instance_obj(100, uuid=server_uuid)]) self.stubs.Set(compute_api.API, 'get_all', fake_get_all) req = self.req('/fake/servers?flavor=12345') servers = self.controller.index(req)['servers'] self.assertEqual(len(servers), 1) self.assertEqual(servers[0]['id'], server_uuid) def test_get_servers_with_bad_flavor(self): req = self.req('/fake/servers?flavor=abcde') with mock.patch.object(compute_api.API, 'get_all') as mock_get: mock_get.return_value = objects.InstanceList(objects=[]) servers = self.controller.index(req)['servers'] self.assertEqual(len(servers), 0) def test_get_server_details_with_bad_flavor(self): req = self.req('/fake/servers?flavor=abcde') with mock.patch.object(compute_api.API, 'get_all') as mock_get: mock_get.return_value = objects.InstanceList(objects=[]) servers = self.controller.detail(req)['servers'] self.assertThat(servers, testtools.matchers.HasLength(0)) def test_get_servers_allows_status(self): server_uuid = str(uuid.uuid4()) def fake_get_all(compute_self, context, search_opts=None, limit=None, marker=None, want_objects=False, expected_attrs=None, sort_keys=None, sort_dirs=None): self.assertIsNotNone(search_opts) self.assertIn('vm_state', search_opts) self.assertEqual(search_opts['vm_state'], [vm_states.ACTIVE]) return objects.InstanceList( objects=[fakes.stub_instance_obj(100, uuid=server_uuid)]) self.stubs.Set(compute_api.API, 'get_all', fake_get_all) req = self.req('/fake/servers?status=active') servers = 
    def test_get_servers_allows_task_status(self):
        # ?status=reboot must expand to all three reboot task states.
        server_uuid = str(uuid.uuid4())
        task_state = task_states.REBOOTING

        def fake_get_all(compute_self, context, search_opts=None,
                         limit=None, marker=None, want_objects=False,
                         expected_attrs=None, sort_keys=None,
                         sort_dirs=None):
            self.assertIsNotNone(search_opts)
            self.assertIn('task_state', search_opts)
            self.assertEqual([task_states.REBOOT_PENDING,
                              task_states.REBOOT_STARTED,
                              task_states.REBOOTING],
                             search_opts['task_state'])
            return objects.InstanceList(
                objects=[fakes.stub_instance_obj(100,
                                                 uuid=server_uuid,
                                                 task_state=task_state)])

        self.stubs.Set(compute_api.API, 'get_all', fake_get_all)

        req = self.req('/fake/servers?status=reboot')
        servers = self.controller.index(req)['servers']

        self.assertEqual(len(servers), 1)
        self.assertEqual(servers[0]['id'], server_uuid)

    def test_get_servers_resize_status(self):
        # Test when resize status, it maps list of vm states.
        server_uuid = str(uuid.uuid4())

        def fake_get_all(compute_self, context, search_opts=None,
                         limit=None, marker=None, want_objects=False,
                         expected_attrs=None, sort_keys=None,
                         sort_dirs=None):
            self.assertIn('vm_state', search_opts)
            self.assertEqual(search_opts['vm_state'],
                             [vm_states.ACTIVE, vm_states.STOPPED])
            return objects.InstanceList(
                objects=[fakes.stub_instance_obj(100, uuid=server_uuid)])

        self.stubs.Set(compute_api.API, 'get_all', fake_get_all)

        req = self.req('/fake/servers?status=resize')

        servers = self.controller.detail(req)['servers']
        self.assertEqual(len(servers), 1)
        self.assertEqual(servers[0]['id'], server_uuid)

    def test_get_servers_invalid_status(self):
        # Test getting servers by invalid status.
        # An unknown status matches nothing rather than raising.
        req = self.req('/fake/servers?status=baloney',
                       use_admin_context=False)
        servers = self.controller.index(req)['servers']
        self.assertEqual(len(servers), 0)

    def test_get_servers_deleted_status_as_user(self):
        # Non-admin callers may not list deleted servers.
        req = self.req('/fake/servers?status=deleted',
                       use_admin_context=False)
        self.assertRaises(webob.exc.HTTPForbidden,
                          self.controller.detail, req)

    def test_get_servers_deleted_status_as_admin(self):
        server_uuid = str(uuid.uuid4())

        def fake_get_all(compute_self, context, search_opts=None,
                         limit=None, marker=None, want_objects=False,
                         expected_attrs=None, sort_keys=None,
                         sort_dirs=None):
            self.assertIn('vm_state', search_opts)
            self.assertEqual(search_opts['vm_state'], ['deleted'])
            return objects.InstanceList(
                objects=[fakes.stub_instance_obj(100, uuid=server_uuid)])

        self.stubs.Set(compute_api.API, 'get_all', fake_get_all)

        req = self.req('/fake/servers?status=deleted',
                       use_admin_context=True)

        servers = self.controller.detail(req)['servers']
        self.assertEqual(len(servers), 1)
        self.assertEqual(servers[0]['id'], server_uuid)

    @mock.patch.object(compute_api.API, 'get_all')
    def test_get_servers_deleted_filter_str_to_bool(self, mock_get_all):
        server_uuid = str(uuid.uuid4())
        db_list = objects.InstanceList(
            objects=[fakes.stub_instance_obj(100, uuid=server_uuid,
                                             vm_state='deleted')])
        mock_get_all.return_value = db_list

        req = self.req('/fake/servers?deleted=true',
                       use_admin_context=True)

        servers = self.controller.detail(req)['servers']
        self.assertEqual(1, len(servers))
        self.assertEqual(server_uuid, servers[0]['id'])

        # Assert that 'deleted' filter value is converted to boolean
        # while calling get_all() method.
        expected_search_opts = {'deleted': True, 'project_id': 'fake'}
        mock_get_all.assert_called_once_with(
            mock.ANY, search_opts=expected_search_opts, limit=mock.ANY,
            expected_attrs=['flavor', 'info_cache', 'metadata',
                            'pci_devices'],
            marker=mock.ANY, want_objects=mock.ANY,
            sort_keys=mock.ANY, sort_dirs=mock.ANY)

    @mock.patch.object(compute_api.API, 'get_all')
    def test_get_servers_deleted_filter_invalid_str(self, mock_get_all):
        server_uuid = str(uuid.uuid4())
        db_list = objects.InstanceList(
            objects=[fakes.stub_instance_obj(100, uuid=server_uuid)])
        mock_get_all.return_value = db_list

        req = fakes.HTTPRequest.blank('/fake/servers?deleted=abc',
                                      use_admin_context=True)

        servers = self.controller.detail(req)['servers']
        self.assertEqual(1, len(servers))
        self.assertEqual(server_uuid, servers[0]['id'])

        # Assert that invalid 'deleted' filter value is converted to boolean
        # False while calling get_all() method.
        expected_search_opts = {'deleted': False, 'project_id': 'fake'}
        mock_get_all.assert_called_once_with(
            mock.ANY, search_opts=expected_search_opts, limit=mock.ANY,
            expected_attrs=['flavor', 'info_cache', 'metadata',
                            'pci_devices'],
            marker=mock.ANY, want_objects=mock.ANY,
            sort_keys=mock.ANY, sort_dirs=mock.ANY)

    def test_get_servers_allows_name(self):
        server_uuid = str(uuid.uuid4())

        def fake_get_all(compute_self, context, search_opts=None,
                         limit=None, marker=None, want_objects=False,
                         expected_attrs=None, sort_keys=None,
                         sort_dirs=None):
            self.assertIsNotNone(search_opts)
            self.assertIn('name', search_opts)
            self.assertEqual(search_opts['name'], 'whee.*')
            self.assertEqual(['pci_devices'], expected_attrs)
            return objects.InstanceList(
                objects=[fakes.stub_instance_obj(100, uuid=server_uuid)])

        self.stubs.Set(compute_api.API, 'get_all', fake_get_all)

        req = self.req('/fake/servers?name=whee.*')
        servers = self.controller.index(req)['servers']

        self.assertEqual(len(servers), 1)
        self.assertEqual(servers[0]['id'], server_uuid)

    @mock.patch.object(compute_api.API, 'get_all')
    def test_get_servers_flavor_not_found(self, get_all_mock):
        # An unknown flavor in the filter yields an empty list, not an
        # error.
        get_all_mock.side_effect = exception.FlavorNotFound(flavor_id=1)

        req = fakes.HTTPRequest.blank(
            '/fake/servers?status=active&flavor=abc')
        servers = self.controller.index(req)['servers']
        self.assertEqual(0, len(servers))
    def test_get_servers_allows_changes_since(self):
        # The changes-since query string must be parsed into a
        # timezone-aware datetime before hitting compute_api.
        server_uuid = str(uuid.uuid4())

        def fake_get_all(compute_self, context, search_opts=None,
                         limit=None, marker=None, want_objects=False,
                         expected_attrs=None, sort_keys=None,
                         sort_dirs=None):
            self.assertIsNotNone(search_opts)
            self.assertIn('changes-since', search_opts)
            changes_since = datetime.datetime(2011, 1, 24, 17, 8, 1,
                                              tzinfo=iso8601.iso8601.UTC)
            self.assertEqual(search_opts['changes-since'], changes_since)
            self.assertNotIn('deleted', search_opts)
            return objects.InstanceList(
                objects=[fakes.stub_instance_obj(100, uuid=server_uuid)])

        self.stubs.Set(compute_api.API, 'get_all', fake_get_all)

        params = 'changes-since=2011-01-24T17:08:01Z'
        req = self.req('/fake/servers?%s' % params)
        servers = self.controller.index(req)['servers']

        self.assertEqual(len(servers), 1)
        self.assertEqual(servers[0]['id'], server_uuid)

    def test_get_servers_allows_changes_since_bad_value(self):
        # An unparseable changes-since value is a 400.
        params = 'changes-since=asdf'
        req = self.req('/fake/servers?%s' % params)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.index, req)

    def test_get_servers_admin_filters_as_user(self):
        """Test getting servers by admin-only or unknown options when
        context is not admin. Make sure the admin and unknown options
        are stripped before they get to compute_api.get_all()
        """
        server_uuid = str(uuid.uuid4())

        def fake_get_all(compute_self, context, search_opts=None,
                         limit=None, marker=None, want_objects=False,
                         expected_attrs=None, sort_keys=None,
                         sort_dirs=None):
            self.assertIsNotNone(search_opts)
            # Allowed by user
            self.assertIn('name', search_opts)
            self.assertIn('ip', search_opts)
            # OSAPI converts status to vm_state
            self.assertIn('vm_state', search_opts)
            # Allowed only by admins with admin API on
            self.assertNotIn('unknown_option', search_opts)
            return objects.InstanceList(
                objects=[fakes.stub_instance_obj(100, uuid=server_uuid)])

        self.stubs.Set(compute_api.API, 'get_all', fake_get_all)

        query_str = "name=foo&ip=10.*&status=active&unknown_option=meow"
        req = fakes.HTTPRequest.blank('/fake/servers?%s' % query_str)
        res = self.controller.index(req)

        servers = res['servers']
        self.assertEqual(len(servers), 1)
        self.assertEqual(servers[0]['id'], server_uuid)

    def test_get_servers_admin_options_as_admin(self):
        """Test getting servers by admin-only or unknown options when
        context is admin. All options should be passed
        """
        server_uuid = str(uuid.uuid4())

        def fake_get_all(compute_self, context, search_opts=None,
                         limit=None, marker=None, want_objects=False,
                         expected_attrs=None, sort_keys=None,
                         sort_dirs=None):
            self.assertIsNotNone(search_opts)
            # Allowed by user
            self.assertIn('name', search_opts)
            # OSAPI converts status to vm_state
            self.assertIn('vm_state', search_opts)
            # Allowed only by admins with admin API on
            self.assertIn('ip', search_opts)
            self.assertIn('unknown_option', search_opts)
            return objects.InstanceList(
                objects=[fakes.stub_instance_obj(100, uuid=server_uuid)])

        self.stubs.Set(compute_api.API, 'get_all', fake_get_all)

        query_str = "name=foo&ip=10.*&status=active&unknown_option=meow"
        req = self.req('/fake/servers?%s' % query_str,
                       use_admin_context=True)
        servers = self.controller.index(req)['servers']

        self.assertEqual(len(servers), 1)
        self.assertEqual(servers[0]['id'], server_uuid)

    def test_get_servers_allows_ip(self):
        """Test getting servers by ip."""
        server_uuid = str(uuid.uuid4())

        def fake_get_all(compute_self, context, search_opts=None,
                         limit=None, marker=None, want_objects=False,
                         expected_attrs=None, sort_keys=None,
                         sort_dirs=None):
            self.assertIsNotNone(search_opts)
            self.assertIn('ip', search_opts)
            # The filter value is passed through verbatim as a regex.
            self.assertEqual(search_opts['ip'], '10\..*')
            return objects.InstanceList(
                objects=[fakes.stub_instance_obj(100, uuid=server_uuid)])

        self.stubs.Set(compute_api.API, 'get_all', fake_get_all)

        req = self.req('/fake/servers?ip=10\..*')
        servers = self.controller.index(req)['servers']

        self.assertEqual(len(servers), 1)
        self.assertEqual(servers[0]['id'], server_uuid)

    def test_get_servers_admin_allows_ip6(self):
        """Test getting servers by ip6 with admin_api enabled and
        admin context
        """
        server_uuid = str(uuid.uuid4())

        def fake_get_all(compute_self, context, search_opts=None,
                         limit=None, marker=None, want_objects=False,
                         expected_attrs=None, sort_keys=None,
                         sort_dirs=None):
            self.assertIsNotNone(search_opts)
            self.assertIn('ip6', search_opts)
            self.assertEqual(search_opts['ip6'], 'ffff.*')
            return objects.InstanceList(
                objects=[fakes.stub_instance_obj(100, uuid=server_uuid)])

        self.stubs.Set(compute_api.API, 'get_all', fake_get_all)

        req = self.req('/fake/servers?ip6=ffff.*', use_admin_context=True)
        servers = self.controller.index(req)['servers']

        self.assertEqual(len(servers), 1)
        self.assertEqual(servers[0]['id'], server_uuid)

    def test_get_servers_allows_ip6_with_new_version(self):
        """Test getting servers by ip6 with new version requested
        and no admin context
        """
        server_uuid = str(uuid.uuid4())

        def fake_get_all(compute_self, context, search_opts=None,
                         limit=None, marker=None, want_objects=False,
                         expected_attrs=None, sort_keys=None,
                         sort_dirs=None):
            self.assertIsNotNone(search_opts)
            self.assertIn('ip6', search_opts)
            self.assertEqual(search_opts['ip6'], 'ffff.*')
            return objects.InstanceList(
                objects=[fakes.stub_instance_obj(100, uuid=server_uuid)])

        self.stubs.Set(compute_api.API, 'get_all', fake_get_all)

        req = self.req('/fake/servers?ip6=ffff.*')
        # Microversion 2.5 opened the ip6 filter to non-admin users.
        req.api_version_request = api_version_request.APIVersionRequest('2.5')
        servers = self.controller.index(req)['servers']

        self.assertEqual(len(servers), 1)
        self.assertEqual(servers[0]['id'], server_uuid)

    def test_get_all_server_details(self):
        # Every server in the default fake listing shares the same
        # flavor/image bookmark links.
        expected_flavor = {
            "id": "2",
            "links": [
                {
                    "rel": "bookmark",
                    "href": 'http://localhost/fake/flavors/2',
                },
            ],
        }
        expected_image = {
            "id": "10",
            "links": [
                {
                    "rel": "bookmark",
                    "href": 'http://localhost/fake/images/10',
                },
            ],
        }
        req = self.req('/fake/servers/detail')
        res_dict = self.controller.detail(req)

        for i, s in enumerate(res_dict['servers']):
            self.assertEqual(s['id'], fakes.get_fake_uuid(i))
            self.assertEqual(s['hostId'], '')
            self.assertEqual(s['name'], 'server%d' % (i + 1))
            self.assertEqual(s['image'], expected_image)
            self.assertEqual(s['flavor'], expected_flavor)
            self.assertEqual(s['status'], 'BUILD')
            self.assertEqual(s['metadata']['seq'], str(i + 1))

    def test_get_all_server_details_with_host(self):
        """We want to make sure that if two instances are on the same
        host, then they return the same hostId. If two instances are on
        different hosts, they should return different hostIds. In this
        test, there are 5 instances - 2 on one host and 3 on another.
        """
        def return_servers_with_host(*args, **kwargs):
            return objects.InstanceList(
                objects=[fakes.stub_instance_obj(
                    None, id=i + 1, user_id='fake', project_id='fake',
                    host=i % 2, uuid=fakes.get_fake_uuid(i))
                    for i in range(5)])

        self.stubs.Set(compute_api.API, 'get_all',
                       return_servers_with_host)

        req = self.req('/fake/servers/detail')
        res_dict = self.controller.detail(req)

        server_list = res_dict['servers']
        host_ids = [server_list[0]['hostId'], server_list[1]['hostId']]
        self.assertTrue(host_ids[0] and host_ids[1])
        self.assertNotEqual(host_ids[0], host_ids[1])

        for i, s in enumerate(server_list):
            self.assertEqual(s['id'], fakes.get_fake_uuid(i))
            self.assertEqual(s['hostId'], host_ids[i % 2])
            self.assertEqual(s['name'], 'server%d' % (i + 1))
    def test_get_servers_joins_pci_devices(self):
        # The index view must always request the pci_devices join.
        def fake_get_all(compute_self, context, search_opts=None,
                         limit=None, marker=None, want_objects=False,
                         expected_attrs=None, sort_keys=None,
                         sort_dirs=None):
            self.assertEqual(['pci_devices'], expected_attrs)
            return []

        self.stubs.Set(compute_api.API, 'get_all', fake_get_all)

        req = self.req('/fake/servers', use_admin_context=True)
        self.assertIn('servers', self.controller.index(req))


class ServersControllerTestV29(ServersControllerTest):
    # Re-runs the controller tests at microversion 2.9, which adds the
    # 'locked' attribute to server views.
    wsgi_api_version = '2.9'

    def _get_server_data_dict(self, uuid, image_bookmark, flavor_bookmark,
                              status="ACTIVE", progress=100):
        server_dict = super(ServersControllerTestV29,
                            self)._get_server_data_dict(uuid,
                                                        image_bookmark,
                                                        flavor_bookmark,
                                                        status, progress)
        # 2.9 adds 'locked' to the expected view; default False.
        server_dict['server']['locked'] = False
        return server_dict

    @mock.patch.object(compute_api.API, 'get')
    def _test_get_server_with_lock(self, locked_by, get_mock):
        # Show a server locked by `locked_by` ('admin'/'owner'/None) and
        # return the resulting view for the callers to assert on.
        image_bookmark = "http://localhost/fake/images/10"
        flavor_bookmark = "http://localhost/fake/flavors/2"
        uuid = FAKE_UUID
        get_mock.side_effect = fakes.fake_compute_get(
            id=2, locked_by=locked_by, uuid=uuid)

        req = self.req('/fake/servers/%s' % uuid)
        res_dict = self.controller.show(req, uuid)

        expected_server = self._get_server_data_dict(uuid,
                                                     image_bookmark,
                                                     flavor_bookmark,
                                                     status="BUILD",
                                                     progress=0)
        expected_server['server']['locked'] = True if locked_by else False
        self.assertThat(res_dict, matchers.DictMatches(expected_server))
        return res_dict

    def test_get_server_with_locked_by_admin(self):
        res_dict = self._test_get_server_with_lock('admin')
        self.assertTrue(res_dict['server']['locked'])

    def test_get_server_with_locked_by_owner(self):
        res_dict = self._test_get_server_with_lock('owner')
        self.assertTrue(res_dict['server']['locked'])

    def test_get_server_not_locked(self):
        res_dict = self._test_get_server_with_lock(None)
        self.assertFalse(res_dict['server']['locked'])

    @mock.patch.object(compute_api.API, 'get_all')
    def _test_list_server_detail_with_lock(self,
                                           s1_locked,
                                           s2_locked,
                                           get_all_mock):
        get_all_mock.return_value = fake_instance_get_all_with_locked(
            context, [s1_locked, s2_locked])

        req = self.req('/fake/servers/detail')
        servers_list = self.controller.detail(req)
        # Check that each returned server has the same 'locked' value
        # and 'id' as they were created.
        for locked in [s1_locked, s2_locked]:
            server = next(server for server in servers_list['servers']
                          if (server['id'] == fakes.get_fake_uuid(locked)))
            expected = False if locked == 'not_locked' else True
            self.assertEqual(expected, server['locked'])
for locked in [s1_locked, s2_locked]: server = next(server for server in servers_list['servers'] if (server['id'] == fakes.get_fake_uuid(locked))) expected = False if locked == 'not_locked' else True self.assertEqual(expected, server['locked']) def test_list_server_detail_with_locked_s1_admin_s2_owner(self): self._test_list_server_detail_with_lock('admin', 'owner') def test_list_server_detail_with_locked_s1_owner_s2_admin(self): self._test_list_server_detail_with_lock('owner', 'admin') def test_list_server_detail_with_locked_s1_admin_s2_admin(self): self._test_list_server_detail_with_lock('admin', 'admin') def test_list_server_detail_with_locked_s1_admin_s2_not_locked(self): self._test_list_server_detail_with_lock('admin', 'not_locked') def test_list_server_detail_with_locked_s1_s2_not_locked(self): self._test_list_server_detail_with_lock('not_locked', 'not_locked') @mock.patch.object(compute_api.API, 'get_all') def test_get_servers_remove_non_search_options(self, get_all_mock): req = fakes.HTTPRequestV21.blank('/servers' '?sort_key=id1&sort_dir=asc' '&sort_key=id2&sort_dir=desc' '&limit=1&marker=123', use_admin_context=True) self.controller.index(req) kwargs = get_all_mock.call_args[1] search_opts = kwargs['search_opts'] for key in ('sort_key', 'sort_dir', 'limit', 'marker'): self.assertNotIn(key, search_opts) class ServersControllerDeleteTest(ControllerTest): def setUp(self): super(ServersControllerDeleteTest, self).setUp() self.server_delete_called = False def fake_delete(api, context, instance): if instance.uuid == 'non-existent-uuid': raise exception.InstanceNotFound(instance_id=instance.uuid) self.server_delete_called = True self.stubs.Set(compute_api.API, 'delete', fake_delete) def _create_delete_request(self, uuid): fakes.stub_out_instance_quota(self.stubs, 0, 10) req = fakes.HTTPRequestV21.blank('/fake/servers/%s' % uuid) req.method = 'DELETE' return req def _delete_server_instance(self, uuid=FAKE_UUID): req = self._create_delete_request(uuid) fake_get = 
fakes.fake_compute_get(uuid=uuid, vm_state=vm_states.ACTIVE) self.stubs.Set(compute_api.API, 'get', lambda api, *a, **k: fake_get(*a, **k)) self.controller.delete(req, uuid) def test_delete_server_instance(self): self._delete_server_instance() self.assertTrue(self.server_delete_called) def test_delete_server_instance_not_found(self): self.assertRaises(webob.exc.HTTPNotFound, self._delete_server_instance, uuid='non-existent-uuid') def test_delete_server_instance_while_building(self): req = self._create_delete_request(FAKE_UUID) self.controller.delete(req, FAKE_UUID) self.assertTrue(self.server_delete_called) def test_delete_locked_server(self): req = self._create_delete_request(FAKE_UUID) self.stubs.Set(compute_api.API, 'soft_delete', fakes.fake_actions_to_locked_server) self.stubs.Set(compute_api.API, 'delete', fakes.fake_actions_to_locked_server) self.assertRaises(webob.exc.HTTPConflict, self.controller.delete, req, FAKE_UUID) def test_delete_server_instance_while_resize(self): req = self._create_delete_request(FAKE_UUID) fake_get = fakes.fake_compute_get(vm_state=vm_states.ACTIVE, task_state=task_states.RESIZE_PREP) self.stubs.Set(compute_api.API, 'get', lambda api, *a, **k: fake_get(*a, **k)) self.controller.delete(req, FAKE_UUID) def test_delete_server_instance_if_not_launched(self): self.flags(reclaim_instance_interval=3600) req = fakes.HTTPRequestV21.blank('/fake/servers/%s' % FAKE_UUID) req.method = 'DELETE' self.server_delete_called = False fake_get = fakes.fake_compute_get(launched_at=None) self.stubs.Set(compute_api.API, 'get', lambda api, *a, **k: fake_get(*a, **k)) def instance_destroy_mock(*args, **kwargs): self.server_delete_called = True deleted_at = timeutils.utcnow() return fake_instance.fake_db_instance(deleted_at=deleted_at) self.stubs.Set(db, 'instance_destroy', instance_destroy_mock) self.controller.delete(req, FAKE_UUID) # delete() should be called for instance which has never been active, # even if reclaim_instance_interval has been set. 
class ServersControllerRebuildInstanceTest(ControllerTest):
    # Fixture image referenced by the rebuild request bodies below.
    image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
    image_href = 'http://localhost/v2/fake/images/%s' % image_uuid

    def setUp(self):
        super(ServersControllerRebuildInstanceTest, self).setUp()

        def fake_get(ctrl, ctxt, uuid):
            if uuid == 'test_inst':
                raise webob.exc.HTTPNotFound(explanation='fakeout')
            return fakes.stub_instance_obj(None,
                                           vm_state=vm_states.ACTIVE)

        self.useFixture(
            fixtures.MonkeyPatch('nova.api.openstack.compute.servers.'
                                 'ServersController._get_instance',
                                 fake_get))

        fake_get = fakes.fake_compute_get(vm_state=vm_states.ACTIVE)
        self.stubs.Set(compute_api.API, 'get',
                       lambda api, *a, **k: fake_get(*a, **k))

        # Baseline rebuild request body; individual tests mutate it.
        self.body = {
            'rebuild': {
                'name': 'new_name',
                'imageRef': self.image_href,
                'metadata': {
                    'open': 'stack',
                },
            },
        }
        self.req = fakes.HTTPRequest.blank('/fake/servers/a/action')
        self.req.method = 'POST'
        self.req.headers["content-type"] = "application/json"

    def test_rebuild_instance_name_with_spaces_in_the_middle(self):
        # Interior whitespace in the name is allowed.
        self.body['rebuild']['name'] = 'abc def'
        self.req.body = jsonutils.dumps(self.body)
        self.controller._action_rebuild(self.req, FAKE_UUID,
                                        body=self.body)

    def test_rebuild_instance_name_with_leading_trailing_spaces(self):
        # Leading/trailing whitespace is rejected by schema validation.
        self.body['rebuild']['name'] = ' abc def '
        self.req.body = jsonutils.dumps(self.body)
        self.assertRaises(exception.ValidationError,
                          self.controller._action_rebuild,
                          self.req, FAKE_UUID, body=self.body)

    def test_rebuild_instance_name_with_leading_trailing_spaces_compat_mode(
            self):
        # In legacy v2 compat mode the name is stripped instead.
        self.body['rebuild']['name'] = ' abc def '
        self.req.body = jsonutils.dumps(self.body)
        self.req.set_legacy_v2()

        def fake_rebuild(*args, **kwargs):
            self.assertEqual('abc def', kwargs['display_name'])

        with mock.patch.object(compute_api.API, 'rebuild') as mock_rebuild:
            mock_rebuild.side_effect = fake_rebuild
            self.controller._action_rebuild(self.req, FAKE_UUID,
                                            body=self.body)

    def test_rebuild_instance_with_blank_metadata_key(self):
        self.body['rebuild']['metadata'][''] = 'world'
        self.req.body = jsonutils.dumps(self.body)
        self.assertRaises(exception.ValidationError,
                          self.controller._action_rebuild,
                          self.req, FAKE_UUID, body=self.body)

    def test_rebuild_instance_with_metadata_key_too_long(self):
        # 260 chars exceeds the 255-char metadata key limit.
        self.body['rebuild']['metadata'][('a' * 260)] = 'world'
        self.req.body = jsonutils.dumps(self.body)
        self.assertRaises(exception.ValidationError,
                          self.controller._action_rebuild,
                          self.req, FAKE_UUID, body=self.body)

    def test_rebuild_instance_with_metadata_value_too_long(self):
        self.body['rebuild']['metadata']['key1'] = ('a' * 260)
        self.req.body = jsonutils.dumps(self.body)
        self.assertRaises(exception.ValidationError,
                          self.controller._action_rebuild,
                          self.req, FAKE_UUID, body=self.body)

    def test_rebuild_instance_with_metadata_value_not_string(self):
        self.body['rebuild']['metadata']['key1'] = 1
        self.req.body = jsonutils.dumps(self.body)
        self.assertRaises(exception.ValidationError,
                          self.controller._action_rebuild,
                          self.req, FAKE_UUID, body=self.body)

    def test_rebuild_instance_fails_when_min_ram_too_small(self):
        # make min_ram larger than our instance ram size
        def fake_get_image(self, context, image_href, **kwargs):
            return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
                        name='public image', is_public=True,
                        status='active', properties={'key1': 'value1'},
                        min_ram="4096", min_disk="10")

        self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
        self.req.body = jsonutils.dumps(self.body)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller._action_rebuild,
                          self.req, FAKE_UUID, body=self.body)

    def test_rebuild_instance_fails_when_min_disk_too_small(self):
        # make min_disk larger than our instance disk size
        def fake_get_image(self, context, image_href, **kwargs):
            return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
                        name='public image', is_public=True,
                        status='active', properties={'key1': 'value1'},
                        min_ram="128", min_disk="100000")

        self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
        self.req.body = jsonutils.dumps(self.body)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller._action_rebuild,
                          self.req, FAKE_UUID, body=self.body)

    def test_rebuild_instance_image_too_large(self):
        # make image size larger than our instance disk size
        size = str(1000 * (1024 ** 3))

        def fake_get_image(self, context, image_href, **kwargs):
            return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
                        name='public image', is_public=True,
                        status='active', size=size)

        self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
        self.req.body = jsonutils.dumps(self.body)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller._action_rebuild,
                          self.req, FAKE_UUID, body=self.body)

    def test_rebuild_instance_name_all_blank(self):
        def fake_get_image(self, context, image_href, **kwargs):
            return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
                        name='public image', is_public=True,
                        status='active')

        self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
        self.body['rebuild']['name'] = ' '
        self.req.body = jsonutils.dumps(self.body)
        self.assertRaises(exception.ValidationError,
                          self.controller._action_rebuild,
                          self.req, FAKE_UUID, body=self.body)

    def test_rebuild_instance_with_deleted_image(self):
        # Only images in 'active' status may be used for rebuild.
        def fake_get_image(self, context, image_href, **kwargs):
            return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
                        name='public image', is_public=True,
                        status='DELETED')

        self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)

        self.req.body = jsonutils.dumps(self.body)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller._action_rebuild,
                          self.req, FAKE_UUID, body=self.body)

    def test_rebuild_instance_onset_file_limit_over_quota(self):
        # An OnsetFileLimitExceeded from compute is surfaced as 403.
        def fake_get_image(self, context, image_href, **kwargs):
            return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
                        name='public image', is_public=True,
                        status='active')

        with test.nested(
            mock.patch.object(fake._FakeImageService, 'show',
                              side_effect=fake_get_image),
            mock.patch.object(self.controller.compute_api, 'rebuild',
                              side_effect=exception.OnsetFileLimitExceeded)
        ) as (
            show_mock, rebuild_mock
        ):
            self.req.body = jsonutils.dumps(self.body)
            self.assertRaises(webob.exc.HTTPForbidden,
                              self.controller._action_rebuild,
                              self.req, FAKE_UUID, body=self.body)
side_effect=exception.OnsetFileLimitExceeded) ) as ( show_mock, rebuild_mock ): self.req.body = jsonutils.dumps(self.body) self.assertRaises(webob.exc.HTTPForbidden, self.controller._action_rebuild, self.req, FAKE_UUID, body=self.body) def test_rebuild_bad_personality(self): body = { "rebuild": { "imageRef": self.image_href, "personality": [{ "path": "/path/to/file", "contents": "INVALID b64", }] }, } self.assertRaises(exception.ValidationError, self.controller._action_rebuild, self.req, FAKE_UUID, body=body) def test_rebuild_personality(self): body = { "rebuild": { "imageRef": self.image_href, "personality": [{ "path": "/path/to/file", "contents": base64.b64encode("Test String"), }] }, } body = self.controller._action_rebuild(self.req, FAKE_UUID, body=body).obj self.assertNotIn('personality', body['server']) def test_start(self): self.mox.StubOutWithMock(compute_api.API, 'start') compute_api.API.start(mox.IgnoreArg(), mox.IgnoreArg()) self.mox.ReplayAll() req = fakes.HTTPRequestV21.blank('/fake/servers/%s/action' % FAKE_UUID) body = dict(start="") self.controller._start_server(req, FAKE_UUID, body) def test_start_policy_failed(self): rules = { "os_compute_api:servers:start": "project_id:non_fake" } policy.set_rules(oslo_policy.Rules.from_dict(rules)) req = fakes.HTTPRequestV21.blank('/fake/servers/%s/action' % FAKE_UUID) body = dict(start="") exc = self.assertRaises(exception.PolicyNotAuthorized, self.controller._start_server, req, FAKE_UUID, body) self.assertIn("os_compute_api:servers:start", exc.format_message()) def test_start_not_ready(self): self.stubs.Set(compute_api.API, 'start', fake_start_stop_not_ready) req = fakes.HTTPRequestV21.blank('/fake/servers/%s/action' % FAKE_UUID) body = dict(start="") self.assertRaises(webob.exc.HTTPConflict, self.controller._start_server, req, FAKE_UUID, body) def test_start_locked_server(self): self.stubs.Set(compute_api.API, 'start', fakes.fake_actions_to_locked_server) req = 
fakes.HTTPRequestV21.blank('/fake/servers/%s/action' % FAKE_UUID) body = dict(start="") self.assertRaises(webob.exc.HTTPConflict, self.controller._start_server, req, FAKE_UUID, body) def test_start_invalid(self): self.stubs.Set(compute_api.API, 'start', fake_start_stop_invalid_state) req = fakes.HTTPRequestV21.blank('/fake/servers/%s/action' % FAKE_UUID) body = dict(start="") self.assertRaises(webob.exc.HTTPConflict, self.controller._start_server, req, FAKE_UUID, body) def test_stop(self): self.mox.StubOutWithMock(compute_api.API, 'stop') compute_api.API.stop(mox.IgnoreArg(), mox.IgnoreArg()) self.mox.ReplayAll() req = fakes.HTTPRequestV21.blank('/fake/servers/%s/action' % FAKE_UUID) body = dict(stop="") self.controller._stop_server(req, FAKE_UUID, body) def test_stop_policy_failed(self): rules = { "os_compute_api:servers:stop": "project_id:non_fake" } policy.set_rules(oslo_policy.Rules.from_dict(rules)) req = fakes.HTTPRequestV21.blank('/fake/servers/%s/action' % FAKE_UUID) body = dict(stop='') exc = self.assertRaises(exception.PolicyNotAuthorized, self.controller._stop_server, req, FAKE_UUID, body) self.assertIn("os_compute_api:servers:stop", exc.format_message()) def test_stop_not_ready(self): self.stubs.Set(compute_api.API, 'stop', fake_start_stop_not_ready) req = fakes.HTTPRequestV21.blank('/fake/servers/%s/action' % FAKE_UUID) body = dict(stop="") self.assertRaises(webob.exc.HTTPConflict, self.controller._stop_server, req, FAKE_UUID, body) def test_stop_locked_server(self): self.stubs.Set(compute_api.API, 'stop', fakes.fake_actions_to_locked_server) req = fakes.HTTPRequestV21.blank('/fake/servers/%s/action' % FAKE_UUID) body = dict(stop="") self.assertRaises(webob.exc.HTTPConflict, self.controller._stop_server, req, FAKE_UUID, body) def test_stop_invalid_state(self): self.stubs.Set(compute_api.API, 'stop', fake_start_stop_invalid_state) req = fakes.HTTPRequestV21.blank('/fake/servers/%s/action' % FAKE_UUID) body = dict(start="") 
self.assertRaises(webob.exc.HTTPConflict, self.controller._stop_server, req, FAKE_UUID, body) def test_start_with_bogus_id(self): self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid_not_found) req = fakes.HTTPRequestV21.blank('/fake/servers/test_inst/action') body = dict(start="") self.assertRaises(webob.exc.HTTPNotFound, self.controller._start_server, req, 'test_inst', body) def test_stop_with_bogus_id(self): self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid_not_found) req = fakes.HTTPRequestV21.blank('/fake/servers/test_inst/action') body = dict(stop="") self.assertRaises(webob.exc.HTTPNotFound, self.controller._stop_server, req, 'test_inst', body) class ServersControllerUpdateTest(ControllerTest): def _get_request(self, body=None, options=None): if options: fake_get = fakes.fake_compute_get(**options) self.stubs.Set(compute_api.API, 'get', lambda api, *a, **k: fake_get(*a, **k)) req = fakes.HTTPRequestV21.blank('/fake/servers/%s' % FAKE_UUID) req.method = 'PUT' req.content_type = 'application/json' req.body = jsonutils.dumps(body) return req def test_update_server_all_attributes(self): body = {'server': { 'name': 'server_test', }} req = self._get_request(body, {'name': 'server_test'}) res_dict = self.controller.update(req, FAKE_UUID, body=body) self.assertEqual(res_dict['server']['id'], FAKE_UUID) self.assertEqual(res_dict['server']['name'], 'server_test') def test_update_server_name(self): body = {'server': {'name': 'server_test'}} req = self._get_request(body, {'name': 'server_test'}) res_dict = self.controller.update(req, FAKE_UUID, body=body) self.assertEqual(res_dict['server']['id'], FAKE_UUID) self.assertEqual(res_dict['server']['name'], 'server_test') def test_update_server_name_too_long(self): body = {'server': {'name': 'x' * 256}} req = self._get_request(body, {'name': 'server_test'}) self.assertRaises(exception.ValidationError, self.controller.update, req, FAKE_UUID, body=body) def 
test_update_server_name_all_blank_spaces(self): self.stubs.Set(db, 'instance_get', fakes.fake_instance_get(name='server_test')) req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID) req.method = 'PUT' req.content_type = 'application/json' body = {'server': {'name': ' ' * 64}} req.body = jsonutils.dumps(body) self.assertRaises(exception.ValidationError, self.controller.update, req, FAKE_UUID, body=body) def test_update_server_name_with_spaces_in_the_middle(self): self.stubs.Set(db, 'instance_get', fakes.fake_instance_get(name='server_test')) req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID) req.method = 'PUT' req.content_type = 'application/json' body = {'server': {'name': 'abc def'}} req.body = jsonutils.dumps(body) self.controller.update(req, FAKE_UUID, body=body) def test_update_server_name_with_leading_trailing_spaces(self): self.stubs.Set(db, 'instance_get', fakes.fake_instance_get(name='server_test')) req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID) req.method = 'PUT' req.content_type = 'application/json' body = {'server': {'name': ' abc def '}} req.body = jsonutils.dumps(body) self.assertRaises(exception.ValidationError, self.controller.update, req, FAKE_UUID, body=body) def test_update_server_name_with_leading_trailing_spaces_compat_mode(self): self.stubs.Set(db, 'instance_get', fakes.fake_instance_get(name='server_test')) req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID) req.method = 'PUT' req.content_type = 'application/json' body = {'server': {'name': ' abc def '}} req.body = jsonutils.dumps(body) req.set_legacy_v2() self.controller.update(req, FAKE_UUID, body=body) def test_update_server_admin_password_extra_arg(self): inst_dict = dict(name='server_test', admin_password='bacon') body = dict(server=inst_dict) req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID) req.method = 'PUT' req.content_type = "application/json" req.body = jsonutils.dumps(body) self.assertRaises(exception.ValidationError, 
                          self.controller.update, req, FAKE_UUID, body=body)

    def test_update_server_host_id(self):
        # host_id is read-only; attempts to set it fail schema validation.
        inst_dict = dict(host_id='123')
        body = dict(server=inst_dict)

        req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
        req.method = 'PUT'
        req.content_type = "application/json"
        req.body = jsonutils.dumps(body)
        self.assertRaises(exception.ValidationError,
                          self.controller.update, req, FAKE_UUID, body=body)

    def test_update_server_not_found(self):
        # Instance lookup failure maps to HTTP 404.
        def fake_get(*args, **kwargs):
            raise exception.InstanceNotFound(instance_id='fake')

        self.stubs.Set(compute_api.API, 'get', fake_get)
        body = {'server': {'name': 'server_test'}}
        req = self._get_request(body)
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
                          req, FAKE_UUID, body=body)

    def test_update_server_not_found_on_update(self):
        # Instance vanishes between fetch and the DB update; still a 404.
        def fake_update(*args, **kwargs):
            raise exception.InstanceNotFound(instance_id='fake')

        self.stubs.Set(db, 'instance_update_and_get_original', fake_update)
        body = {'server': {'name': 'server_test'}}
        req = self._get_request(body)
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
                          req, FAKE_UUID, body=body)

    def test_update_server_policy_fail(self):
        # An admin-only update rule rejects the non-admin fake context.
        rule = {'compute:update': 'role:admin'}
        policy.set_rules(oslo_policy.Rules.from_dict(rule))
        body = {'server': {'name': 'server_test'}}
        req = self._get_request(body, {'name': 'server_test'})
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.update, req, FAKE_UUID, body=body)


class ServerStatusTest(test.TestCase):
    """Verify the vm_state/task_state -> API "status" string mapping."""

    def setUp(self):
        super(ServerStatusTest, self).setUp()
        fakes.stub_out_nw_api(self.stubs)

        ext_info = extension_info.LoadedExtensionInfo()
        self.controller = servers.ServersController(extension_info=ext_info)

    def _get_with_state(self, vm_state, task_state=None):
        # Stub the DB instance with the given states, then GET it back
        # through the controller so the status mapping is exercised.
        self.stubs.Set(db, 'instance_get_by_uuid',
                fakes.fake_instance_get(vm_state=vm_state,
                                        task_state=task_state))

        request = fakes.HTTPRequestV21.blank('/fake/servers/%s' % FAKE_UUID)
        return self.controller.show(request, FAKE_UUID)

    def test_active(self):
        response = self._get_with_state(vm_states.ACTIVE)
        self.assertEqual(response['server']['status'], 'ACTIVE')

    def test_reboot(self):
        response = self._get_with_state(vm_states.ACTIVE,
                                        task_states.REBOOTING)
        self.assertEqual(response['server']['status'], 'REBOOT')

    def test_reboot_hard(self):
        response = self._get_with_state(vm_states.ACTIVE,
                                        task_states.REBOOTING_HARD)
        self.assertEqual(response['server']['status'], 'HARD_REBOOT')

    def test_reboot_resize_policy_fail(self):
        def fake_get_server(context, req, id):
            return fakes.stub_instance(id)

        self.stubs.Set(self.controller, '_get_server', fake_get_server)

        rule = {'compute:reboot': 'role:admin'}
        policy.set_rules(oslo_policy.Rules.from_dict(rule))
        req = fakes.HTTPRequestV21.blank('/fake/servers/1234/action')
        self.assertRaises(exception.PolicyNotAuthorized,
                self.controller._action_reboot, req, '1234',
                body={'reboot': {'type': 'HARD'}})

    def test_rebuild(self):
        response = self._get_with_state(vm_states.ACTIVE,
                                        task_states.REBUILDING)
        self.assertEqual(response['server']['status'], 'REBUILD')

    def test_rebuild_error(self):
        response = self._get_with_state(vm_states.ERROR)
        self.assertEqual(response['server']['status'], 'ERROR')

    def test_resize(self):
        response = self._get_with_state(vm_states.ACTIVE,
                                        task_states.RESIZE_PREP)
        self.assertEqual(response['server']['status'], 'RESIZE')

    def test_confirm_resize_policy_fail(self):
        def fake_get_server(context, req, id):
            return fakes.stub_instance(id)

        self.stubs.Set(self.controller, '_get_server', fake_get_server)

        rule = {'compute:confirm_resize': 'role:admin'}
        policy.set_rules(oslo_policy.Rules.from_dict(rule))
        req = fakes.HTTPRequestV21.blank('/fake/servers/1234/action')
        self.assertRaises(exception.PolicyNotAuthorized,
                self.controller._action_confirm_resize, req, '1234', {})

    def test_verify_resize(self):
        response = self._get_with_state(vm_states.RESIZED, None)
        self.assertEqual(response['server']['status'], 'VERIFY_RESIZE')

    def test_revert_resize(self):
        response = self._get_with_state(vm_states.RESIZED,
                                        task_states.RESIZE_REVERTING)
        self.assertEqual(response['server']['status'], 'REVERT_RESIZE')

    def test_revert_resize_policy_fail(self):
        def fake_get_server(context, req, id):
            return fakes.stub_instance(id)

        self.stubs.Set(self.controller, '_get_server', fake_get_server)

        rule = {'compute:revert_resize': 'role:admin'}
        policy.set_rules(oslo_policy.Rules.from_dict(rule))
        req = fakes.HTTPRequestV21.blank('/fake/servers/1234/action')
        self.assertRaises(exception.PolicyNotAuthorized,
                self.controller._action_revert_resize, req, '1234', {})

    def test_password_update(self):
        response = self._get_with_state(vm_states.ACTIVE,
                                        task_states.UPDATING_PASSWORD)
        self.assertEqual(response['server']['status'], 'PASSWORD')

    def test_stopped(self):
        response = self._get_with_state(vm_states.STOPPED)
        self.assertEqual(response['server']['status'], 'SHUTOFF')


class ServersControllerCreateTest(test.TestCase):

    image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
    flavor_ref = 'http://localhost/123/flavors/3'

    def setUp(self):
        """Shared implementation for tests below that create instance."""
        super(ServersControllerCreateTest, self).setUp()

        self.flags(verbose=True,
                   enable_instance_password=True)
        # Local caches standing in for the DB, keyed by id and by uuid.
        self.instance_cache_num = 0
        self.instance_cache_by_id = {}
        self.instance_cache_by_uuid = {}

        fakes.stub_out_nw_api(self.stubs)

        ext_info = extension_info.LoadedExtensionInfo()
        self.controller = servers.ServersController(extension_info=ext_info)

        def instance_create(context, inst):
            # DB-layer stub: fabricate a persisted instance record from the
            # create request and remember it in the caches above.
            inst_type = flavors.get_flavor_by_flavor_id(3)
            image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
            def_image_ref = 'http://localhost/fake/images/%s' % image_uuid
            self.instance_cache_num += 1
            instance = fake_instance.fake_db_instance(**{
                'id': self.instance_cache_num,
                'display_name': inst['display_name'] or 'test',
                'uuid': FAKE_UUID,
                'instance_type': inst_type,
                'image_ref': inst.get('image_ref', def_image_ref),
                'user_id': 'fake',
                'project_id': 'fake',
                'reservation_id': inst['reservation_id'],
                "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
                "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
                "config_drive": None,
                "progress": 0,
                "fixed_ips": [],
                "task_state": "",
                "vm_state": "",
                "root_device_name": inst.get('root_device_name', 'vda'),
            })

            self.instance_cache_by_id[instance['id']] = instance
            self.instance_cache_by_uuid[instance['uuid']] = instance
            return instance

        def instance_get(context, instance_id):
            """Stub for compute/api create() pulling in instance after
            scheduling
            """
            return self.instance_cache_by_id[instance_id]

        def instance_update(context, uuid, values):
            # DB-layer stub: apply updates to the cached instance in place.
            instance = self.instance_cache_by_uuid[uuid]
            instance.update(values)
            return instance

        def server_update_and_get_original(
                context, instance_uuid, params, columns_to_join=None):
            inst = self.instance_cache_by_uuid[instance_uuid]
            inst.update(params)
            # Real DB API returns (old, new); the cached dict plays both.
            return (inst, inst)

        def fake_method(*args, **kwargs):
            pass

        def project_get_networks(context, user_id):
            return dict(id='1', host='localhost')

        fakes.stub_out_rate_limiting(self.stubs)
        fakes.stub_out_key_pair_funcs(self.stubs)
        fake.stub_out_image_service(self.stubs)
        self.stubs.Set(uuid, 'uuid4', fake_gen_uuid)
        self.stubs.Set(db, 'project_get_networks', project_get_networks)
        self.stubs.Set(db, 'instance_create', instance_create)
        self.stubs.Set(db, 'instance_system_metadata_update', fake_method)
        self.stubs.Set(db, 'instance_get', instance_get)
        self.stubs.Set(db, 'instance_update', instance_update)
        self.stubs.Set(db, 'instance_update_and_get_original',
                server_update_and_get_original)
        self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip',
                fake_method)
        # Baseline request body; individual tests mutate it as needed.
        self.body = {
            'server': {
                'name': 'server_test',
                'imageRef': self.image_uuid,
                'flavorRef': self.flavor_ref,
                'metadata': {
                    'hello': 'world',
                    'open': 'stack',
                },
                'personality': [
                    {
                        "path": "/etc/banner.txt",
                        "contents": "MQ==",
                    },
                ],
            },
        }
        self.bdm = [{'delete_on_termination': 1,
                     'device_name': 123,
                     'volume_size': 1,
                     'volume_id': '11111111-1111-1111-1111-111111111111'}]
        self.req = fakes.HTTPRequest.blank('/fake/servers')
        self.req.method = 'POST'
        self.req.headers["content-type"] = "application/json"

    def _check_admin_password_len(self, server_dict):
        """utility function - check server_dict for admin_password length."""
        self.assertEqual(CONF.password_length,
                         len(server_dict["adminPass"]))

    def _check_admin_password_missing(self, server_dict):
        """utility function - check server_dict for admin_password absence."""
        self.assertNotIn("adminPass", server_dict)

    def _test_create_instance(self, flavor=2):
        # Create a server with the given flavor and verify the response.
        image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
        self.body['server']['imageRef'] = image_uuid
        self.body['server']['flavorRef'] = flavor
        self.req.body = jsonutils.dumps(self.body)
        server = self.controller.create(self.req,
                                        body=self.body).obj['server']
        self._check_admin_password_len(server)
        self.assertEqual(FAKE_UUID, server['id'])

    def test_create_instance_private_flavor(self):
        # Booting a non-public flavor as an ordinary user is a 400.
        values = {
            'name': 'fake_name',
            'memory_mb': 512,
            'vcpus': 1,
            'root_gb': 10,
            'ephemeral_gb': 10,
            'flavorid': '1324',
            'swap': 0,
            'rxtx_factor': 0.5,
            'vcpu_weight': 1,
            'disabled': False,
            'is_public': False,
        }
        db.flavor_create(context.get_admin_context(), values)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self._test_create_instance,
                          flavor=1324)

    def test_create_server_bad_image_href(self):
        image_href = 1
        self.body['server']['min_count'] = 1
        # NOTE(review): the trailing comma makes imageRef a tuple (1,),
        # which the schema rejects -- presumably the intended bad input.
        self.body['server']['imageRef'] = image_href,
        self.req.body = jsonutils.dumps(self.body)
        self.assertRaises(exception.ValidationError,
                          self.controller.create,
                          self.req, body=self.body)
    # TODO(cyeoh): bp-v3-api-unittests
    # This needs to be ported to the os-networks extension tests
    # def test_create_server_with_invalid_networks_parameter(self):
    #     self.ext_mgr.extensions = {'os-networks': 'fake'}
    #     image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
    #     flavor_ref = 'http://localhost/123/flavors/3'
    #     body = {
    #         'server': {
    #         'name': 'server_test',
    #         'imageRef': image_href,
    #         'flavorRef': flavor_ref,
    #         'networks': {'uuid': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'},
    #         }
    #     }
    #     req = fakes.HTTPRequest.blank('/fake/servers')
    #     req.method = 'POST'
    #     req.body = jsonutils.dumps(body)
    #     req.headers["content-type"] = "application/json"
    #     self.assertRaises(webob.exc.HTTPBadRequest,
    #                       self.controller.create,
    #                       req,
    #                       body)

    def test_create_server_with_deleted_image(self):
        # Get the fake image service so we can set the status to deleted
        (image_service, image_id) = glance.get_remote_image_service(
            context, '')
        image_service.update(context, self.image_uuid, {'status': 'DELETED'})
        self.addCleanup(image_service.update, context, self.image_uuid,
                        {'status': 'active'})

        self.body['server']['flavorRef'] = 2
        self.req.body = jsonutils.dumps(self.body)
        with testtools.ExpectedException(
                webob.exc.HTTPBadRequest,
                'Image 76fa36fc-c930-4bf3-8c8a-ea2a2420deb6 is not active.'):
            self.controller.create(self.req, body=self.body)

    def test_create_server_image_too_large(self):
        # Get the fake image service so we can update the size of the image
        (image_service, image_id) = glance.get_remote_image_service(
            context, self.image_uuid)

        image = image_service.show(context, image_id)

        orig_size = image['size']
        new_size = str(1000 * (1024 ** 3))
        image_service.update(context, self.image_uuid, {'size': new_size})

        self.addCleanup(image_service.update, context, self.image_uuid,
                        {'size': orig_size})

        self.body['server']['flavorRef'] = 2
        self.req.body = jsonutils.dumps(self.body)

        with testtools.ExpectedException(
                webob.exc.HTTPBadRequest,
                "Flavor's disk is too small for requested image."):
            self.controller.create(self.req, body=self.body)

    def test_create_instance_image_ref_is_bookmark(self):
        # A bookmark-style image href (no version segment) is accepted.
        image_href = 'http://localhost/fake/images/%s' % self.image_uuid
        self.body['server']['imageRef'] = image_href
        self.req.body = jsonutils.dumps(self.body)
        res = self.controller.create(self.req, body=self.body).obj

        server = res['server']
        self.assertEqual(FAKE_UUID, server['id'])

    def test_create_instance_image_ref_is_invalid(self):
        image_uuid = 'this_is_not_a_valid_uuid'
        image_href = 'http://localhost/fake/images/%s' % image_uuid
        flavor_ref = 'http://localhost/fake/flavors/3'
        self.body['server']['imageRef'] = image_href
        self.body['server']['flavorRef'] = flavor_ref
        self.req.body = jsonutils.dumps(self.body)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          self.req, body=self.body)

    def test_create_instance_no_key_pair(self):
        fakes.stub_out_key_pair_funcs(self.stubs, have_key_pair=False)
        self._test_create_instance()

    def _test_create_extra(self, params, no_image=False):
        # Merge ``params`` into the server body and issue the create.
        self.body['server']['flavorRef'] = 2
        if no_image:
            self.body['server'].pop('imageRef', None)
        self.body['server'].update(params)
        self.req.body = jsonutils.dumps(self.body)
        self.req.headers["content-type"] = "application/json"
        self.controller.create(self.req, body=self.body).obj['server']

    # TODO(cyeoh): bp-v3-api-unittests
    # This needs to be ported to the os-keypairs extension tests
    # def test_create_instance_with_keypairs_enabled(self):
    #     self.ext_mgr.extensions = {'os-keypairs': 'fake'}
    #     key_name = 'green'
    #
    #     params = {'key_name': key_name}
    #     old_create = compute_api.API.create
    #
    #     # NOTE(sdague): key pair goes back to the database,
    #     # so we need to stub it out for tests
    #     def key_pair_get(context, user_id, name):
    #         return {'public_key': 'FAKE_KEY',
    #                 'fingerprint': 'FAKE_FINGERPRINT',
    #                 'name': name}
    #
    #     def create(*args, **kwargs):
    #         self.assertEqual(kwargs['key_name'], key_name)
    #         return old_create(*args, **kwargs)
    #
    #     self.stubs.Set(db, 'key_pair_get', key_pair_get)
    #     self.stubs.Set(compute_api.API, 'create', create)
    #     self._test_create_extra(params)
    #
    # TODO(cyeoh): bp-v3-api-unittests
    # This needs to be ported to the os-networks extension tests
    # def test_create_instance_with_networks_enabled(self):
    #     self.ext_mgr.extensions = {'os-networks': 'fake'}
    #     net_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
    #     requested_networks = [{'uuid': net_uuid}]
    #     params = {'networks': requested_networks}
    #     old_create =
    # compute_api.API.create
    #     def create(*args, **kwargs):
    #         result = [('76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', None)]
    #         self.assertEqual(kwargs['requested_networks'], result)
    #         return old_create(*args, **kwargs)
    #     self.stubs.Set(compute_api.API, 'create', create)
    #     self._test_create_extra(params)

    def test_create_instance_with_port_with_no_fixed_ips(self):
        # PortRequiresFixedIP from the compute layer maps to HTTP 400.
        port_id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
        requested_networks = [{'port': port_id}]
        params = {'networks': requested_networks}

        def fake_create(*args, **kwargs):
            raise exception.PortRequiresFixedIP(port_id=port_id)

        self.stubs.Set(compute_api.API, 'create', fake_create)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self._test_create_extra, params)

    @mock.patch.object(compute_api.API, 'create')
    def test_create_instance_raise_user_data_too_large(self, mock_create):
        mock_create.side_effect = exception.InstanceUserDataTooLarge(
            maxsize=1, length=2)

        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create,
                          self.req, body=self.body)

    def test_create_instance_with_network_with_no_subnet(self):
        # NetworkRequiresSubnet from the compute layer maps to HTTP 400.
        network = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
        requested_networks = [{'uuid': network}]
        params = {'networks': requested_networks}

        def fake_create(*args, **kwargs):
            raise exception.NetworkRequiresSubnet(network_uuid=network)

        self.stubs.Set(compute_api.API, 'create', fake_create)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self._test_create_extra, params)

    def test_create_instance_with_non_unique_secgroup_name(self):
        # NoUniqueMatch from the compute layer maps to HTTP 409.
        network = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
        requested_networks = [{'uuid': network}]
        params = {'networks': requested_networks,
                  'security_groups': [{'name': 'dup'}, {'name': 'dup'}]}

        def fake_create(*args, **kwargs):
            raise exception.NoUniqueMatch("No Unique match found for ...")

        self.stubs.Set(compute_api.API, 'create', fake_create)
        self.assertRaises(webob.exc.HTTPConflict,
                          self._test_create_extra, params)

    def test_create_instance_secgroup_leading_trailing_spaces(self):
        # Padded security group names are rejected on the current API.
        network = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
        requested_networks = [{'uuid': network}]
        params = {'networks': requested_networks,
                  'security_groups': [{'name': ' sg '}]}

        self.assertRaises(exception.ValidationError,
                          self._test_create_extra, params)

    def test_create_instance_secgroup_leading_trailing_spaces_compat_mode(
            self):
        # ...but passed through unmodified in legacy v2 compat mode.
        network = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
        requested_networks = [{'uuid': network}]
        params = {'networks': requested_networks,
                  'security_groups': [{'name': ' sg '}]}

        def fake_create(*args, **kwargs):
            self.assertEqual([' sg '], kwargs['security_group'])
            return (objects.InstanceList(objects=[fakes.stub_instance_obj(
                self.req.environ['nova.context'])]), None)
        self.stubs.Set(compute_api.API, 'create', fake_create)

        self.req.set_legacy_v2()
        self._test_create_extra(params)

    def test_create_instance_with_networks_disabled_neutronv2(self):
        self.flags(network_api_class='nova.network.neutronv2.api.API')
        net_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
        requested_networks = [{'uuid': net_uuid}]
        params = {'networks': requested_networks}
        old_create = compute_api.API.create

        def create(*args, **kwargs):
            # With neutron, requested networks are forwarded as 4-tuples.
            result = [('76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', None,
                       None, None)]
            self.assertEqual(result,
                             kwargs['requested_networks'].as_tuples())
            return old_create(*args, **kwargs)

        self.stubs.Set(compute_api.API, 'create', create)
        self._test_create_extra(params)

    def test_create_instance_with_networks_disabled(self):
        net_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
        requested_networks = [{'uuid': net_uuid}]
        params = {'networks': requested_networks}
        old_create = compute_api.API.create

        def create(*args, **kwargs):
            # With nova-network and the extension disabled, the requested
            # networks are dropped before reaching the compute API.
            self.assertIsNone(kwargs['requested_networks'])
            return old_create(*args, **kwargs)

        self.stubs.Set(compute_api.API, 'create', create)
        self._test_create_extra(params)

    def test_create_instance_with_pass_disabled(self):
        # test with admin passwords disabled See lp bug 921814
        self.flags(enable_instance_password=False)

        # proper local hrefs must start with 'http://localhost/v2/'
        # NOTE(review): this flag is set twice; the second call is redundant.
        self.flags(enable_instance_password=False)
        image_href = 'http://localhost/v2/fake/images/%s' % self.image_uuid
        self.body['server']['imageRef'] = image_href
        self.req.body = jsonutils.dumps(self.body)
        res = self.controller.create(self.req, body=self.body).obj

        server = res['server']
        self._check_admin_password_missing(server)
        self.assertEqual(FAKE_UUID, server['id'])

    def test_create_instance_name_too_long(self):
        # proper local hrefs must start with 'http://localhost/v2/'
        image_href = 'http://localhost/v2/images/%s' % self.image_uuid
        self.body['server']['name'] = 'X' * 256
        self.body['server']['imageRef'] = image_href
        self.req.body = jsonutils.dumps(self.body)
        self.assertRaises(exception.ValidationError, self.controller.create,
                          self.req, body=self.body)

    def test_create_instance_name_with_spaces_in_the_middle(self):
        # proper local hrefs must start with 'http://localhost/v2/'
        image_href = 'http://localhost/v2/images/%s' % self.image_uuid
        self.body['server']['name'] = 'abc def'
        self.body['server']['imageRef'] = image_href
        self.req.body = jsonutils.dumps(self.body)
        self.controller.create(self.req, body=self.body)

    def test_create_instance_name_with_leading_trailing_spaces(self):
        # proper local hrefs must start with 'http://localhost/v2/'
        image_href = 'http://localhost/v2/images/%s' % self.image_uuid
        self.body['server']['name'] = ' abc def '
        self.body['server']['imageRef'] = image_href
        self.req.body = jsonutils.dumps(self.body)
        self.assertRaises(exception.ValidationError,
                          self.controller.create, self.req, body=self.body)

    def test_create_instance_name_with_leading_trailing_spaces_in_compat_mode(
            self):
        # proper local hrefs must start with 'http://localhost/v2/'
        image_href = 'http://localhost/v2/images/%s' % self.image_uuid
        self.body['server']['name'] = ' abc def '
        self.body['server']['imageRef'] = image_href
        self.req.body = jsonutils.dumps(self.body)
        self.req.set_legacy_v2()
        self.controller.create(self.req, body=self.body)

    def test_create_instance_name_all_blank_spaces(self):
        # proper local hrefs must start with 'http://localhost/v2/'
        image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
        image_href = 'http://localhost/v2/images/%s' % image_uuid
        flavor_ref = 'http://localhost/fake/flavors/3'
        body = {
            'server': {
                'name': ' ' * 64,
                'imageRef': image_href,
                'flavorRef': flavor_ref,
                'metadata': {
                    'hello': 'world',
                    'open': 'stack',
                },
            },
        }

        req = fakes.HTTPRequest.blank('/fake/servers')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        self.assertRaises(exception.ValidationError,
                          self.controller.create, req, body=body)

    def test_create_az_with_leading_trailing_spaces(self):
        # proper local hrefs must start with 'http://localhost/v2/'
        image_href = 'http://localhost/v2/images/%s' % self.image_uuid
        self.body['server']['imageRef'] = image_href
        self.body['server']['availability_zone'] = ' zone1 '
        self.req.body = jsonutils.dumps(self.body)
        self.assertRaises(exception.ValidationError,
                          self.controller.create, self.req, body=self.body)

    def test_create_az_with_leading_trailing_spaces_in_compat_mode(
            self):
        # proper local hrefs must start with 'http://localhost/v2/'
        image_href = 'http://localhost/v2/images/%s' % self.image_uuid
        self.body['server']['name'] = ' abc def '
        self.body['server']['imageRef'] = image_href
        # NOTE(review): 'availability_zones' (plural) differs from the
        # singular key used in the test above -- confirm this is intentional
        # (legacy v2 mode tolerates unknown keys, so the test may be
        # exercising a laxer path than its name suggests).
        self.body['server']['availability_zones'] = ' zone1 '
        self.req.body = jsonutils.dumps(self.body)
        self.req.set_legacy_v2()
        with mock.patch.object(availability_zones, 'get_availability_zones',
                               return_value=[' zone1 ']):
            self.controller.create(self.req, body=self.body)

    def test_create_instance(self):
        # proper local hrefs must start with 'http://localhost/v2/'
        image_href = 'http://localhost/v2/images/%s' % self.image_uuid
        self.body['server']['imageRef'] = image_href
        self.req.body = jsonutils.dumps(self.body)
        res = self.controller.create(self.req, body=self.body).obj

        server = res['server']
        self._check_admin_password_len(server)
        self.assertEqual(FAKE_UUID,
                         server['id'])

    def test_create_instance_extension_create_exception(self):
        # An unexpected error raised from an extension's server_create hook
        # surfaces as HTTP 500.
        def fake_keypair_server_create(self, server_dict, create_kwargs):
            raise KeyError

        self.stubs.Set(keypairs.Keypairs, 'server_create',
                       fake_keypair_server_create)
        # proper local hrefs must start with 'http://localhost/v2/'
        image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
        image_href = 'http://localhost/v2/images/%s' % image_uuid
        flavor_ref = 'http://localhost/123/flavors/3'
        body = {
            'server': {
                'name': 'server_test',
                'imageRef': image_href,
                'flavorRef': flavor_ref,
                'metadata': {
                    'hello': 'world',
                    'open': 'stack',
                },
            },
        }

        req = fakes.HTTPRequestV21.blank('/fake/servers')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        self.assertRaises(webob.exc.HTTPInternalServerError,
                          self.controller.create, req, body=body)

    def test_create_instance_pass_disabled(self):
        self.flags(enable_instance_password=False)
        # proper local hrefs must start with 'http://localhost/v2/'
        image_href = 'http://localhost/v2/images/%s' % self.image_uuid
        self.body['server']['imageRef'] = image_href
        self.req.body = jsonutils.dumps(self.body)
        res = self.controller.create(self.req, body=self.body).obj

        server = res['server']
        self._check_admin_password_missing(server)
        self.assertEqual(FAKE_UUID, server['id'])

    @mock.patch('nova.virt.hardware.numa_get_constraints')
    def _test_create_instance_numa_topology_wrong(self, exc,
                                                  numa_constraints_mock):
        # Any image-NUMA validation error must map to HTTP 400.
        numa_constraints_mock.side_effect = exc(**{'name': None,
                                                   'cpunum': 0,
                                                   'cpumax': 0,
                                                   'cpuset': None,
                                                   'memsize': 0,
                                                   'memtotal': 0})
        image_href = 'http://localhost/v2/images/%s' % self.image_uuid
        self.body['server']['imageRef'] = image_href
        self.req.body = jsonutils.dumps(self.body)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create, self.req, body=self.body)

    def test_create_instance_numa_topology_wrong(self):
        for exc in [exception.ImageNUMATopologyIncomplete,
                    exception.ImageNUMATopologyForbidden,
                    exception.ImageNUMATopologyAsymmetric,
                    exception.ImageNUMATopologyCPUOutOfRange,
                    exception.ImageNUMATopologyCPUDuplicates,
                    exception.ImageNUMATopologyCPUsUnassigned,
                    exception.ImageNUMATopologyMemoryOutOfRange]:
            self._test_create_instance_numa_topology_wrong(exc)

    def test_create_instance_too_much_metadata(self):
        # Exceeding the metadata quota is HTTP 403.
        self.flags(quota_metadata_items=1)
        image_href = 'http://localhost/v2/images/%s' % self.image_uuid
        self.body['server']['imageRef'] = image_href
        self.body['server']['metadata']['vote'] = 'fiddletown'
        self.req.body = jsonutils.dumps(self.body)
        self.assertRaises(webob.exc.HTTPForbidden,
                          self.controller.create, self.req, body=self.body)

    def test_create_instance_metadata_key_too_long(self):
        self.flags(quota_metadata_items=1)
        image_href = 'http://localhost/v2/images/%s' % self.image_uuid
        self.body['server']['imageRef'] = image_href
        self.body['server']['metadata'] = {('a' * 260): '12345'}

        self.req.body = jsonutils.dumps(self.body)
        self.assertRaises(exception.ValidationError,
                          self.controller.create, self.req, body=self.body)

    def test_create_instance_metadata_value_too_long(self):
        self.flags(quota_metadata_items=1)
        image_href = 'http://localhost/v2/images/%s' % self.image_uuid
        self.body['server']['imageRef'] = image_href
        self.body['server']['metadata'] = {'key1': ('a' * 260)}
        self.req.body = jsonutils.dumps(self.body)
        self.assertRaises(exception.ValidationError,
                          self.controller.create, self.req, body=self.body)

    def test_create_instance_metadata_key_blank(self):
        self.flags(quota_metadata_items=1)
        image_href = 'http://localhost/v2/images/%s' % self.image_uuid
        self.body['server']['imageRef'] = image_href
        self.body['server']['metadata'] = {'': 'abcd'}
        self.req.body = jsonutils.dumps(self.body)
        self.assertRaises(exception.ValidationError,
                          self.controller.create, self.req, body=self.body)

    def test_create_instance_metadata_not_dict(self):
        self.flags(quota_metadata_items=1)
        image_href = 'http://localhost/v2/images/%s' % self.image_uuid
        self.body['server']['imageRef'] = image_href
        self.body['server']['metadata'] = 'string'
        self.req.body = jsonutils.dumps(self.body)
        self.assertRaises(exception.ValidationError,
                          self.controller.create, self.req, body=self.body)

    def test_create_instance_metadata_key_not_string(self):
        self.flags(quota_metadata_items=1)
        image_href = 'http://localhost/v2/images/%s' % self.image_uuid
        self.body['server']['imageRef'] = image_href
        self.body['server']['metadata'] = {1: 'test'}
        self.req.body = jsonutils.dumps(self.body)
        self.assertRaises(exception.ValidationError,
                          self.controller.create, self.req, body=self.body)

    def test_create_instance_metadata_value_not_string(self):
        self.flags(quota_metadata_items=1)
        image_href = 'http://localhost/v2/images/%s' % self.image_uuid
        self.body['server']['imageRef'] = image_href
        self.body['server']['metadata'] = {'test': ['a', 'list']}
        self.req.body = jsonutils.dumps(self.body)
        self.assertRaises(exception.ValidationError,
                          self.controller.create, self.req, body=self.body)

    def test_create_user_data_malformed_bad_request(self):
        # user_data must be base64; 'u1234' is not.
        params = {'user_data': 'u1234'}
        self.assertRaises(exception.ValidationError,
                          self._test_create_extra, params)

    def test_create_instance_invalid_key_name(self):
        image_href = 'http://localhost/v2/images/2'
        self.body['server']['imageRef'] = image_href
        self.body['server']['key_name'] = 'nonexistentkey'
        self.req.body = jsonutils.dumps(self.body)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create, self.req, body=self.body)

    def test_create_instance_valid_key_name(self):
        self.body['server']['key_name'] = 'key'
        self.req.body = jsonutils.dumps(self.body)
        res = self.controller.create(self.req, body=self.body).obj

        self.assertEqual(FAKE_UUID, res["server"]["id"])
        self._check_admin_password_len(res["server"])

    def test_create_instance_invalid_flavor_href(self):
        image_href = 'http://localhost/v2/images/2'
        flavor_ref = 'http://localhost/v2/flavors/asdf'
        self.body['server']['imageRef'] = image_href
        self.body['server']['flavorRef'] = flavor_ref
        self.req.body = jsonutils.dumps(self.body)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create, self.req, body=self.body)

    def test_create_instance_invalid_flavor_id_int(self):
        image_href = 'http://localhost/v2/images/2'
        flavor_ref = -1
        self.body['server']['imageRef'] = image_href
        self.body['server']['flavorRef'] = flavor_ref
        self.req.body = jsonutils.dumps(self.body)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create, self.req, body=self.body)

    def test_create_instance_invalid_flavor_id_empty(self):
        flavor_ref = ""
        self.body['server']['flavorRef'] = flavor_ref
        self.req.body = jsonutils.dumps(self.body)
        self.assertRaises(exception.ValidationError,
                          self.controller.create, self.req, body=self.body)

    def test_create_instance_bad_flavor_href(self):
        # Flavor 17 does not exist in the fixtures.
        image_href = 'http://localhost/v2/images/2'
        flavor_ref = 'http://localhost/v2/flavors/17'
        self.body['server']['imageRef'] = image_href
        self.body['server']['flavorRef'] = flavor_ref
        self.req.body = jsonutils.dumps(self.body)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create, self.req, body=self.body)

    def test_create_instance_bad_href(self):
        image_href = 'asdf'
        self.body['server']['imageRef'] = image_href
        self.req.body = jsonutils.dumps(self.body)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create, self.req, body=self.body)

    def test_create_instance_local_href(self):
        self.req.body = jsonutils.dumps(self.body)
        res = self.controller.create(self.req, body=self.body).obj

        server = res['server']
        self.assertEqual(FAKE_UUID, server['id'])

    def test_create_instance_admin_password(self):
        # A caller-supplied adminPass is echoed back when passwords are on.
        self.body['server']['flavorRef'] = 3
        self.body['server']['adminPass'] = 'testpass'
        self.req.body = jsonutils.dumps(self.body)
        res = self.controller.create(self.req, body=self.body).obj

        server = res['server']
        self.assertEqual(server['adminPass'],
                         self.body['server']['adminPass'])

    def test_create_instance_admin_password_pass_disabled(self):
        self.flags(enable_instance_password=False)
        self.body['server']['flavorRef'] = 3
        self.body['server']['adminPass'] = 'testpass'
        self.req.body = jsonutils.dumps(self.body)
        res = self.controller.create(self.req, body=self.body).obj

        self.assertIn('server', res)
        self.assertIn('adminPass', self.body['server'])

    def test_create_instance_admin_password_empty(self):
        self.body['server']['flavorRef'] = 3
        self.body['server']['adminPass'] = ''
        self.req.body = jsonutils.dumps(self.body)

        # The fact that the action doesn't raise is enough validation
        self.controller.create(self.req, body=self.body)

    def test_create_location(self):
        # The create response carries a Location header pointing at the
        # new server's self href.
        selfhref = 'http://localhost/v2/fake/servers/%s' % FAKE_UUID
        self.req.body = jsonutils.dumps(self.body)
        robj = self.controller.create(self.req, body=self.body)

        self.assertEqual(robj['Location'], selfhref)

    def _do_test_create_instance_above_quota(self, resource, allowed, quota,
                                             expected_msg):
        # Force the given quota to be exceeded and verify the HTTP 403
        # explanation text.
        fakes.stub_out_instance_quota(self.stubs, allowed, quota, resource)
        self.body['server']['flavorRef'] = 3
        self.req.body = jsonutils.dumps(self.body)
        try:
            self.controller.create(self.req, body=self.body).obj['server']
            self.fail('expected quota to be exceeded')
        except webob.exc.HTTPForbidden as e:
            self.assertEqual(e.explanation, expected_msg)

    def test_create_instance_above_quota_instances(self):
        msg = ('Quota exceeded for instances: Requested 1, but'
               ' already used 10 of 10 instances')
        self._do_test_create_instance_above_quota('instances', 0, 10, msg)

    def test_create_instance_above_quota_ram(self):
        msg = ('Quota exceeded for ram: Requested 4096, but'
               ' already used 8192 of 10240 ram')
        self._do_test_create_instance_above_quota('ram', 2048, 10 * 1024,
                                                  msg)

    def test_create_instance_above_quota_cores(self):
        msg = ('Quota exceeded for cores: Requested 2, but'
               ' already used 9 of 10 cores')
        self._do_test_create_instance_above_quota('cores', 1, 10, msg)

    def test_create_instance_above_quota_server_group_members(self):
        ctxt = self.req.environ['nova.context']
        fake_group = objects.InstanceGroup(ctxt)
        fake_group.project_id = ctxt.project_id
        fake_group.user_id = ctxt.user_id
        fake_group.create()

        def fake_count(context, name, group, user_id):
            # Sanity-check the quota count call and report the group full.
            self.assertEqual(name, "server_group_members")
            self.assertEqual(group.uuid, fake_group.uuid)
            self.assertEqual(user_id,
                             self.req.environ['nova.context'].user_id)
            return 10

        def fake_limit_check(context, **kwargs):
            if 'server_group_members' in kwargs:
                raise exception.OverQuota(overs={})

        def fake_instance_destroy(context, uuid, constraint):
            return fakes.stub_instance(1)

        self.stubs.Set(fakes.QUOTAS, 'count', fake_count)
        self.stubs.Set(fakes.QUOTAS, 'limit_check', fake_limit_check)
        self.stubs.Set(db, 'instance_destroy', fake_instance_destroy)
        self.body['os:scheduler_hints'] = {'group': fake_group.uuid}
        self.req.body = jsonutils.dumps(self.body)
        expected_msg = "Quota exceeded, too many servers in group"

        try:
            self.controller.create(self.req, body=self.body).obj
            self.fail('expected quota to be exceeded')
        except webob.exc.HTTPForbidden as e:
            self.assertEqual(e.explanation, expected_msg)

    def test_create_instance_with_group_hint(self):
        ctxt = self.req.environ['nova.context']
        test_group = objects.InstanceGroup(ctxt)
        test_group.project_id = ctxt.project_id
        test_group.user_id = ctxt.user_id
        test_group.create()

        def fake_instance_destroy(context, uuid, constraint):
            return fakes.stub_instance(1)

        self.stubs.Set(db, 'instance_destroy', fake_instance_destroy)
        self.body['os:scheduler_hints'] = {'group': test_group.uuid}
        self.req.body = jsonutils.dumps(self.body)
        server = self.controller.create(self.req,
                                        body=self.body).obj['server']

        # The new server must have been added to the hinted group.
        test_group = objects.InstanceGroup.get_by_uuid(ctxt,
                                                       test_group.uuid)
        self.assertIn(server['id'], test_group.members)

    def test_create_instance_with_neutronv2_port_in_use(self):
        # PortInUse from the compute layer maps to HTTP 409.
        network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
        port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
        requested_networks = [{'uuid': network, 'port': port}]
        params = {'networks': requested_networks}

        def fake_create(*args, **kwargs):
            raise exception.PortInUse(port_id=port)
self.stubs.Set(compute_api.API, 'create', fake_create) self.assertRaises(webob.exc.HTTPConflict, self._test_create_extra, params) @mock.patch.object(compute_api.API, 'create') def test_create_instance_public_network_non_admin(self, mock_create): public_network_uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' params = {'networks': [{'uuid': public_network_uuid}]} self.req.body = jsonutils.dumps(self.body) mock_create.side_effect = exception.ExternalNetworkAttachForbidden( network_uuid=public_network_uuid) self.assertRaises(webob.exc.HTTPForbidden, self._test_create_extra, params) @mock.patch.object(compute_api.API, 'create') def test_create_multiple_instance_with_specified_ip_neutronv2(self, _api_mock): _api_mock.side_effect = exception.InvalidFixedIpAndMaxCountRequest( reason="") network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' address = '10.0.0.1' requested_networks = [{'uuid': network, 'fixed_ip': address, 'port': port}] params = {'networks': requested_networks} self.body['server']['max_count'] = 2 self.assertRaises(webob.exc.HTTPBadRequest, self._test_create_extra, params) def test_create_multiple_instance_with_neutronv2_port(self): network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' requested_networks = [{'uuid': network, 'port': port}] params = {'networks': requested_networks} self.body['server']['max_count'] = 2 def fake_create(*args, **kwargs): msg = ("Unable to launch multiple instances with" " a single configured port ID. 
Please launch your" " instance one by one with different ports.") raise exception.MultiplePortsNotApplicable(reason=msg) self.stubs.Set(compute_api.API, 'create', fake_create) self.assertRaises(webob.exc.HTTPBadRequest, self._test_create_extra, params) def test_create_instance_with_neutronv2_not_found_network(self): network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' requested_networks = [{'uuid': network}] params = {'networks': requested_networks} def fake_create(*args, **kwargs): raise exception.NetworkNotFound(network_id=network) self.stubs.Set(compute_api.API, 'create', fake_create) self.assertRaises(webob.exc.HTTPBadRequest, self._test_create_extra, params) def test_create_instance_with_neturonv2_network_duplicated(self): network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' requested_networks = [{'uuid': network}, {'uuid': network}] params = {'networks': requested_networks} def fake_create(*args, **kwargs): raise exception.NetworkDuplicated(network_id=network) self.stubs.Set(compute_api.API, 'create', fake_create) self.assertRaises(webob.exc.HTTPBadRequest, self._test_create_extra, params) def test_create_instance_with_neutronv2_port_not_found(self): network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee' requested_networks = [{'uuid': network, 'port': port}] params = {'networks': requested_networks} def fake_create(*args, **kwargs): raise exception.PortNotFound(port_id=port) self.stubs.Set(compute_api.API, 'create', fake_create) self.assertRaises(webob.exc.HTTPBadRequest, self._test_create_extra, params) @mock.patch.object(compute_api.API, 'create') def test_create_instance_with_network_ambiguous(self, mock_create): mock_create.side_effect = exception.NetworkAmbiguous() self.assertRaises(webob.exc.HTTPConflict, self._test_create_extra, {}) @mock.patch.object(compute_api.API, 'create', side_effect=exception.InstanceExists( name='instance-name')) def test_create_instance_raise_instance_exists(self, mock_create): 
self.assertRaises(webob.exc.HTTPConflict, self.controller.create, self.req, body=self.body) @mock.patch.object(compute_api.API, 'create', side_effect=exception.InvalidBDMEphemeralSize) def test_create_instance_raise_invalid_bdm_ephsize(self, mock_create): self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, self.req, body=self.body) @mock.patch.object(compute_api.API, 'create', side_effect=exception.InvalidBDMFormat(details='')) def test_create_instance_raise_invalid_bdm_format(self, mock_create): self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, self.req, body=self.body) @mock.patch.object(compute_api.API, 'create', side_effect=exception.InvalidBDMSwapSize) def test_create_instance_raise_invalid_bdm_swapsize(self, mock_create): self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, self.req, body=self.body) @mock.patch.object(compute_api.API, 'create', side_effect=exception.ImageBadRequest( image_id='dummy', response='dummy')) def test_create_instance_raise_image_bad_request(self, mock_create): self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, self.req, body=self.body) @mock.patch.object(compute_api.API, 'create', side_effect=exception.FixedIpNotFoundForAddress( address='dummy')) def test_create_instance_raise_fixed_ip_not_found_bad_request(self, mock_create): self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, self.req, body=self.body) @mock.patch.object(compute_api.API, 'create') def test_create_instance_invalid_personality(self, mock_create): codec = 'utf8' content = 'b25zLiINCg0KLVJpY2hhcmQgQ$$%QQmFjaA==' start_position = 19 end_position = 20 msg = 'invalid start byte' mock_create.side_effect = UnicodeDecodeError(codec, content, start_position, end_position, msg) self.body['server']['personality'] = [ { "path": "/etc/banner.txt", "contents": "b25zLiINCg0KLVJpY2hhcmQgQ$$%QQmFjaA==", }, ] self.req.body = jsonutils.dumps(self.body) self.assertRaises(webob.exc.HTTPBadRequest, 
self.controller.create, self.req, body=self.body) def test_create_instance_with_extra_personality_arg(self): self.body['server']['personality'] = [ { "path": "/etc/banner.txt", "contents": "b25zLiINCg0KLVJpY2hhcmQgQ$$%QQmFjaA==", "extra_arg": "extra value" }, ] self.assertRaises(exception.ValidationError, self.controller.create, self.req, body=self.body) class ServersControllerCreateTestWithMock(test.TestCase): image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6' flavor_ref = 'http://localhost/123/flavors/3' def setUp(self): """Shared implementation for tests below that create instance.""" super(ServersControllerCreateTestWithMock, self).setUp() self.flags(verbose=True, enable_instance_password=True) self.instance_cache_num = 0 self.instance_cache_by_id = {} self.instance_cache_by_uuid = {} ext_info = extension_info.LoadedExtensionInfo() self.controller = servers.ServersController(extension_info=ext_info) self.body = { 'server': { 'name': 'server_test', 'imageRef': self.image_uuid, 'flavorRef': self.flavor_ref, 'metadata': { 'hello': 'world', 'open': 'stack', }, }, } self.req = fakes.HTTPRequest.blank('/fake/servers') self.req.method = 'POST' self.req.headers["content-type"] = "application/json" def _test_create_extra(self, params, no_image=False): self.body['server']['flavorRef'] = 2 if no_image: self.body['server'].pop('imageRef', None) self.body['server'].update(params) self.req.body = jsonutils.dumps(self.body) self.req.headers["content-type"] = "application/json" self.controller.create(self.req, body=self.body).obj['server'] @mock.patch.object(compute_api.API, 'create') def test_create_instance_with_neutronv2_fixed_ip_already_in_use(self, create_mock): network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' address = '10.0.2.3' requested_networks = [{'uuid': network, 'fixed_ip': address}] params = {'networks': requested_networks} create_mock.side_effect = exception.FixedIpAlreadyInUse( address=address, instance_uuid=network) self.assertRaises(webob.exc.HTTPBadRequest, 
self._test_create_extra, params) self.assertEqual(1, len(create_mock.call_args_list)) @mock.patch.object(compute_api.API, 'create') def test_create_instance_with_neutronv2_invalid_fixed_ip(self, create_mock): self.flags(network_api_class='nova.network.neutronv2.api.API') network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' address = '999.0.2.3' requested_networks = [{'uuid': network, 'fixed_ip': address}] params = {'networks': requested_networks} self.assertRaises(exception.ValidationError, self._test_create_extra, params) self.assertFalse(create_mock.called) @mock.patch.object(compute_api.API, 'create', side_effect=exception.InvalidVolume(reason='error')) def test_create_instance_with_invalid_volume_error(self, create_mock): # Tests that InvalidVolume is translated to a 400 error. self.assertRaises(webob.exc.HTTPBadRequest, self._test_create_extra, {}) class ServersViewBuilderTest(test.TestCase): def setUp(self): super(ServersViewBuilderTest, self).setUp() CONF.set_override('host', 'localhost', group='glance') self.flags(use_ipv6=True) nw_cache_info = self._generate_nw_cache_info() db_inst = fakes.stub_instance( id=1, image_ref="5", uuid="deadbeef-feed-edee-beef-d0ea7beefedd", display_name="test_server", include_fake_metadata=False, nw_cache=nw_cache_info) privates = ['172.19.0.1'] publics = ['192.168.0.3'] public6s = ['b33f::fdee:ddff:fecc:bbaa'] def nw_info(*args, **kwargs): return [(None, {'label': 'public', 'ips': [dict(ip=ip) for ip in publics], 'ip6s': [dict(ip=ip) for ip in public6s]}), (None, {'label': 'private', 'ips': [dict(ip=ip) for ip in privates]})] fakes.stub_out_nw_api_get_instance_nw_info(self.stubs, nw_info) self.uuid = db_inst['uuid'] self.view_builder = views.servers.ViewBuilderV21() self.request = fakes.HTTPRequestV21.blank("/fake") self.request.context = context.RequestContext('fake', 'fake') self.instance = fake_instance.fake_instance_obj( self.request.context, expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS, **db_inst) self.self_link = 
"http://localhost/v2/fake/servers/%s" % self.uuid self.bookmark_link = "http://localhost/fake/servers/%s" % self.uuid def _generate_nw_cache_info(self): fixed_ipv4 = ('192.168.1.100', '192.168.2.100', '192.168.3.100') fixed_ipv6 = ('2001:db8:0:1::1',) def _ip(ip): return {'address': ip, 'type': 'fixed'} nw_cache = [ {'address': 'aa:aa:aa:aa:aa:aa', 'id': 1, 'network': {'bridge': 'br0', 'id': 1, 'label': 'test1', 'subnets': [{'cidr': '192.168.1.0/24', 'ips': [_ip(fixed_ipv4[0])]}, {'cidr': 'b33f::/64', 'ips': [_ip(fixed_ipv6[0])]}]}}, {'address': 'bb:bb:bb:bb:bb:bb', 'id': 2, 'network': {'bridge': 'br0', 'id': 1, 'label': 'test1', 'subnets': [{'cidr': '192.168.2.0/24', 'ips': [_ip(fixed_ipv4[1])]}]}}, {'address': 'cc:cc:cc:cc:cc:cc', 'id': 3, 'network': {'bridge': 'br0', 'id': 2, 'label': 'test2', 'subnets': [{'cidr': '192.168.3.0/24', 'ips': [_ip(fixed_ipv4[2])]}]}}] return nw_cache def test_get_flavor_valid_instance_type(self): flavor_bookmark = "http://localhost/fake/flavors/1" expected = {"id": "1", "links": [{"rel": "bookmark", "href": flavor_bookmark}]} result = self.view_builder._get_flavor(self.request, self.instance) self.assertEqual(result, expected) def test_build_server(self): expected_server = { "server": { "id": self.uuid, "name": "test_server", "links": [ { "rel": "self", "href": self.self_link, }, { "rel": "bookmark", "href": self.bookmark_link, }, ], } } output = self.view_builder.basic(self.request, self.instance) self.assertThat(output, matchers.DictMatches(expected_server)) def test_build_server_with_project_id(self): expected_server = { "server": { "id": self.uuid, "name": "test_server", "links": [ { "rel": "self", "href": self.self_link, }, { "rel": "bookmark", "href": self.bookmark_link, }, ], } } output = self.view_builder.basic(self.request, self.instance) self.assertThat(output, matchers.DictMatches(expected_server)) def test_build_server_detail(self): image_bookmark = "http://localhost/fake/images/5" flavor_bookmark = 
"http://localhost/fake/flavors/1" expected_server = { "server": { "id": self.uuid, "user_id": "fake_user", "tenant_id": "fake_project", "updated": "2010-11-11T11:00:00Z", "created": "2010-10-10T12:00:00Z", "progress": 0, "name": "test_server", "status": "BUILD", "hostId": '', "image": { "id": "5", "links": [ { "rel": "bookmark", "href": image_bookmark, }, ], }, "flavor": { "id": "1", "links": [ { "rel": "bookmark", "href": flavor_bookmark, }, ], }, "addresses": { 'test1': [ {'version': 4, 'addr': '192.168.1.100', 'OS-EXT-IPS:type': 'fixed', 'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'}, {'version': 6, 'addr': '2001:db8:0:1::1', 'OS-EXT-IPS:type': 'fixed', 'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'}, {'version': 4, 'addr': '192.168.2.100', 'OS-EXT-IPS:type': 'fixed', 'OS-EXT-IPS-MAC:mac_addr': 'bb:bb:bb:bb:bb:bb'} ], 'test2': [ {'version': 4, 'addr': '192.168.3.100', 'OS-EXT-IPS:type': 'fixed', 'OS-EXT-IPS-MAC:mac_addr': 'cc:cc:cc:cc:cc:cc'}, ] }, "metadata": {}, "links": [ { "rel": "self", "href": self.self_link, }, { "rel": "bookmark", "href": self.bookmark_link, }, ], } } output = self.view_builder.show(self.request, self.instance) self.assertThat(output, matchers.DictMatches(expected_server)) def test_build_server_detail_with_fault(self): self.instance['vm_state'] = vm_states.ERROR self.instance['fault'] = fake_instance.fake_fault_obj( self.request.context, self.uuid) image_bookmark = "http://localhost/fake/images/5" flavor_bookmark = "http://localhost/fake/flavors/1" expected_server = { "server": { "id": self.uuid, "user_id": "fake_user", "tenant_id": "fake_project", "updated": "2010-11-11T11:00:00Z", "created": "2010-10-10T12:00:00Z", "name": "test_server", "status": "ERROR", "hostId": '', "image": { "id": "5", "links": [ { "rel": "bookmark", "href": image_bookmark, }, ], }, "flavor": { "id": "1", "links": [ { "rel": "bookmark", "href": flavor_bookmark, }, ], }, "addresses": { 'test1': [ {'version': 4, 'addr': '192.168.1.100', 'OS-EXT-IPS:type': 'fixed', 
'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'}, {'version': 6, 'addr': '2001:db8:0:1::1', 'OS-EXT-IPS:type': 'fixed', 'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'}, {'version': 4, 'addr': '192.168.2.100', 'OS-EXT-IPS:type': 'fixed', 'OS-EXT-IPS-MAC:mac_addr': 'bb:bb:bb:bb:bb:bb'} ], 'test2': [ {'version': 4, 'addr': '192.168.3.100', 'OS-EXT-IPS:type': 'fixed', 'OS-EXT-IPS-MAC:mac_addr': 'cc:cc:cc:cc:cc:cc'}, ] }, "metadata": {}, "links": [ { "rel": "self", "href": self.self_link, }, { "rel": "bookmark", "href": self.bookmark_link, }, ], "fault": { "code": 404, "created": "2010-10-10T12:00:00Z", "message": "HTTPNotFound", "details": "Stock details for test", }, } } self.request.context = context.RequestContext('fake', 'fake') output = self.view_builder.show(self.request, self.instance) self.assertThat(output, matchers.DictMatches(expected_server)) def test_build_server_detail_with_fault_that_has_been_deleted(self): self.instance['deleted'] = 1 self.instance['vm_state'] = vm_states.ERROR fault = fake_instance.fake_fault_obj(self.request.context, self.uuid, code=500, message="No valid host was found") self.instance['fault'] = fault expected_fault = {"code": 500, "created": "2010-10-10T12:00:00Z", "message": "No valid host was found"} self.request.context = context.RequestContext('fake', 'fake') output = self.view_builder.show(self.request, self.instance) # Regardless of vm_state deleted servers sholud be DELETED self.assertEqual("DELETED", output['server']['status']) self.assertThat(output['server']['fault'], matchers.DictMatches(expected_fault)) def test_build_server_detail_with_fault_no_details_not_admin(self): self.instance['vm_state'] = vm_states.ERROR self.instance['fault'] = fake_instance.fake_fault_obj( self.request.context, self.uuid, code=500, message='Error') expected_fault = {"code": 500, "created": "2010-10-10T12:00:00Z", "message": "Error"} self.request.context = context.RequestContext('fake', 'fake') output = self.view_builder.show(self.request, 
self.instance) self.assertThat(output['server']['fault'], matchers.DictMatches(expected_fault)) def test_build_server_detail_with_fault_admin(self): self.instance['vm_state'] = vm_states.ERROR self.instance['fault'] = fake_instance.fake_fault_obj( self.request.context, self.uuid, code=500, message='Error') expected_fault = {"code": 500, "created": "2010-10-10T12:00:00Z", "message": "Error", 'details': 'Stock details for test'} self.request.environ['nova.context'].is_admin = True output = self.view_builder.show(self.request, self.instance) self.assertThat(output['server']['fault'], matchers.DictMatches(expected_fault)) def test_build_server_detail_with_fault_no_details_admin(self): self.instance['vm_state'] = vm_states.ERROR self.instance['fault'] = fake_instance.fake_fault_obj( self.request.context, self.uuid, code=500, message='Error', details='') expected_fault = {"code": 500, "created": "2010-10-10T12:00:00Z", "message": "Error"} self.request.environ['nova.context'].is_admin = True output = self.view_builder.show(self.request, self.instance) self.assertThat(output['server']['fault'], matchers.DictMatches(expected_fault)) def test_build_server_detail_with_fault_but_active(self): self.instance['vm_state'] = vm_states.ACTIVE self.instance['progress'] = 100 self.instance['fault'] = fake_instance.fake_fault_obj( self.request.context, self.uuid) output = self.view_builder.show(self.request, self.instance) self.assertNotIn('fault', output['server']) def test_build_server_detail_active_status(self): # set the power state of the instance to running self.instance['vm_state'] = vm_states.ACTIVE self.instance['progress'] = 100 image_bookmark = "http://localhost/fake/images/5" flavor_bookmark = "http://localhost/fake/flavors/1" expected_server = { "server": { "id": self.uuid, "user_id": "fake_user", "tenant_id": "fake_project", "updated": "2010-11-11T11:00:00Z", "created": "2010-10-10T12:00:00Z", "progress": 100, "name": "test_server", "status": "ACTIVE", "hostId": '', 
"image": { "id": "5", "links": [ { "rel":
codeparrot/github-code-clean
""" There are three types of functions implemented in SymPy: 1) defined functions (in the sense that they can be evaluated) like exp or sin; they have a name and a body: f = exp 2) undefined function which have a name but no body. Undefined functions can be defined using a Function class as follows: f = Function('f') (the result will be a Function instance) 3) anonymous function (or lambda function) which have a body (defined with dummy variables) but have no name: f = Lambda(x, exp(x)*x) f = Lambda((x, y), exp(x)*y) The fourth type of functions are composites, like (sin + cos)(x); these work in SymPy core, but are not yet part of SymPy. Examples ======== >>> import sympy >>> f = sympy.Function("f") >>> from sympy.abc import x >>> f(x) f(x) >>> print(sympy.srepr(f(x).func)) Function('f') >>> f(x).args (x,) """ from __future__ import print_function, division from .add import Add from .assumptions import ManagedProperties from .basic import Basic from .cache import cacheit from .compatibility import iterable, is_sequence, as_int, ordered from .decorators import _sympifyit from .expr import Expr, AtomicExpr from .numbers import Rational, Float from .operations import LatticeOp from .rules import Transform from .singleton import S from .sympify import sympify from sympy.core.containers import Tuple, Dict from sympy.core.logic import fuzzy_and from sympy.core.compatibility import string_types, with_metaclass, range from sympy.utilities import default_sort_key from sympy.utilities.misc import filldedent from sympy.utilities.iterables import uniq from sympy.core.evaluate import global_evaluate import sys import mpmath import mpmath.libmp as mlib import inspect import collections def _coeff_isneg(a): """Return True if the leading Number is negative. 
Examples ======== >>> from sympy.core.function import _coeff_isneg >>> from sympy import S, Symbol, oo, pi >>> _coeff_isneg(-3*pi) True >>> _coeff_isneg(S(3)) False >>> _coeff_isneg(-oo) True >>> _coeff_isneg(Symbol('n', negative=True)) # coeff is 1 False """ if a.is_Mul: a = a.args[0] return a.is_Number and a.is_negative class PoleError(Exception): pass class ArgumentIndexError(ValueError): def __str__(self): return ("Invalid operation with argument number %s for Function %s" % (self.args[1], self.args[0])) def _getnargs(cls): if hasattr(cls, 'eval'): if sys.version_info < (3, ): return _getnargs_old(cls.eval) else: return _getnargs_new(cls.eval) else: return None def _getnargs_old(eval_): evalargspec = inspect.getargspec(eval_) if evalargspec.varargs: return None else: evalargs = len(evalargspec.args) - 1 # subtract 1 for cls if evalargspec.defaults: # if there are default args then they are optional; the # fewest args will occur when all defaults are used and # the most when none are used (i.e. all args are given) return tuple(range( evalargs - len(evalargspec.defaults), evalargs + 1)) return evalargs def _getnargs_new(eval_): parameters = inspect.signature(eval_).parameters.items() if [p for n,p in parameters if p.kind == p.VAR_POSITIONAL]: return None else: p_or_k = [p for n,p in parameters if p.kind == p.POSITIONAL_OR_KEYWORD] num_no_default = len(list(filter(lambda p:p.default == p.empty, p_or_k))) num_with_default = len(list(filter(lambda p:p.default != p.empty, p_or_k))) if not num_with_default: return num_no_default return tuple(range(num_no_default, num_no_default+num_with_default+1)) class FunctionClass(ManagedProperties): """ Base class for function classes. FunctionClass is a subclass of type. Use Function('<function name>' [ , signature ]) to create undefined function classes. 
""" _new = type.__new__ def __init__(cls, *args, **kwargs): # honor kwarg value or class-defined value before using # the number of arguments in the eval function (if present) nargs = kwargs.pop('nargs', cls.__dict__.get('nargs', _getnargs(cls))) super(FunctionClass, cls).__init__(args, kwargs) # Canonicalize nargs here; change to set in nargs. if is_sequence(nargs): if not nargs: raise ValueError(filldedent(''' Incorrectly specified nargs as %s: if there are no arguments, it should be `nargs = 0`; if there are any number of arguments, it should be `nargs = None`''' % str(nargs))) nargs = tuple(ordered(set(nargs))) elif nargs is not None: nargs = (as_int(nargs),) cls._nargs = nargs @property def __signature__(self): """ Allow Python 3's inspect.signature to give a useful signature for Function subclasses. """ # Python 3 only, but backports (like the one in IPython) still might # call this. try: from inspect import signature except ImportError: return None # TODO: Look at nargs return signature(self.eval) @property def nargs(self): """Return a set of the allowed number of arguments for the function. 
Examples ======== >>> from sympy.core.function import Function >>> from sympy.abc import x, y >>> f = Function('f') If the function can take any number of arguments, the set of whole numbers is returned: >>> Function('f').nargs Naturals0() If the function was initialized to accept one or more arguments, a corresponding set will be returned: >>> Function('f', nargs=1).nargs {1} >>> Function('f', nargs=(2, 1)).nargs {1, 2} The undefined function, after application, also has the nargs attribute; the actual number of arguments is always available by checking the ``args`` attribute: >>> f = Function('f') >>> f(1).nargs Naturals0() >>> len(f(1).args) 1 """ from sympy.sets.sets import FiniteSet # XXX it would be nice to handle this in __init__ but there are import # problems with trying to import FiniteSet there return FiniteSet(*self._nargs) if self._nargs else S.Naturals0 def __repr__(cls): return cls.__name__ class Application(with_metaclass(FunctionClass, Basic)): """ Base class for applied functions. Instances of Application represent the result of applying an application of any type to any object. """ is_Function = True @cacheit def __new__(cls, *args, **options): from sympy.sets.fancysets import Naturals0 from sympy.sets.sets import FiniteSet args = list(map(sympify, args)) evaluate = options.pop('evaluate', global_evaluate[0]) # WildFunction (and anything else like it) may have nargs defined # and we throw that value away here options.pop('nargs', None) if options: raise ValueError("Unknown options: %s" % options) if evaluate: evaluated = cls.eval(*args) if evaluated is not None: return evaluated obj = super(Application, cls).__new__(cls, *args, **options) # make nargs uniform here try: # things passing through here: # - functions subclassed from Function (e.g. 
myfunc(1).nargs) # - functions like cos(1).nargs # - AppliedUndef with given nargs like Function('f', nargs=1)(1).nargs # Canonicalize nargs here if is_sequence(obj.nargs): nargs = tuple(ordered(set(obj.nargs))) elif obj.nargs is not None: nargs = (as_int(obj.nargs),) else: nargs = None except AttributeError: # things passing through here: # - WildFunction('f').nargs # - AppliedUndef with no nargs like Function('f')(1).nargs nargs = obj._nargs # note the underscore here # convert to FiniteSet obj.nargs = FiniteSet(*nargs) if nargs else Naturals0() return obj @classmethod def eval(cls, *args): """ Returns a canonical form of cls applied to arguments args. The eval() method is called when the class cls is about to be instantiated and it should return either some simplified instance (possible of some other class), or if the class cls should be unmodified, return None. Examples of eval() for the function "sign" --------------------------------------------- @classmethod def eval(cls, arg): if arg is S.NaN: return S.NaN if arg is S.Zero: return S.Zero if arg.is_positive: return S.One if arg.is_negative: return S.NegativeOne if isinstance(arg, Mul): coeff, terms = arg.as_coeff_Mul(rational=True) if coeff is not S.One: return cls(coeff) * cls(terms) """ return @property def func(self): return self.__class__ def _eval_subs(self, old, new): if (old.is_Function and new.is_Function and callable(old) and callable(new) and old == self.func and len(self.args) in new.nargs): return new(*self.args) class Function(Application, Expr): """Base class for applied mathematical functions. It also serves as a constructor for undefined function classes. 
Examples ======== First example shows how to use Function as a constructor for undefined function classes: >>> from sympy import Function, Symbol >>> x = Symbol('x') >>> f = Function('f') >>> g = Function('g')(x) >>> f f >>> f(x) f(x) >>> g g(x) >>> f(x).diff(x) Derivative(f(x), x) >>> g.diff(x) Derivative(g(x), x) In the following example Function is used as a base class for ``my_func`` that represents a mathematical function *my_func*. Suppose that it is well known, that *my_func(0)* is *1* and *my_func* at infinity goes to *0*, so we want those two simplifications to occur automatically. Suppose also that *my_func(x)* is real exactly when *x* is real. Here is an implementation that honours those requirements: >>> from sympy import Function, S, oo, I, sin >>> class my_func(Function): ... ... @classmethod ... def eval(cls, x): ... if x.is_Number: ... if x is S.Zero: ... return S.One ... elif x is S.Infinity: ... return S.Zero ... ... def _eval_is_real(self): ... return self.args[0].is_real ... >>> x = S('x') >>> my_func(0) + sin(0) 1 >>> my_func(oo) 0 >>> my_func(3.54).n() # Not yet implemented for my_func. my_func(3.54) >>> my_func(I).is_real False In order for ``my_func`` to become useful, several other methods would need to be implemented. See source code of some of the already implemented functions for more complete examples. Also, if the function can take more than one argument, then ``nargs`` must be defined, e.g. if ``my_func`` can take one or two arguments then, >>> class my_func(Function): ... nargs = (1, 2) ... >>> """ @property def _diff_wrt(self): """Allow derivatives wrt functions. 
    @cacheit
    def __new__(cls, *args, **options):
        # Handle calls like Function('f'): bare Function dispatches to the
        # UndefinedFunction metaclass, which builds a new subclass.
        if cls is Function:
            return UndefinedFunction(*args, **options)

        n = len(args)
        if n not in cls.nargs:
            # XXX: exception message must be in exactly this format to
            # make it work with NumPy's functions like vectorize(). See,
            # for example, https://github.com/numpy/numpy/issues/1697.
            # The ideal solution would be just to attach metadata to
            # the exception and change NumPy to take advantage of this.
            temp = ('%(name)s takes %(qual)s %(args)s '
                    'argument%(plural)s (%(given)s given)')
            raise TypeError(temp % {
                'name': cls,
                'qual': 'exactly' if len(cls.nargs) == 1 else 'at least',
                'args': min(cls.nargs),
                'plural': 's'*(min(cls.nargs) != 1),
                'given': n})

        evaluate = options.get('evaluate', global_evaluate[0])
        result = super(Function, cls).__new__(cls, *args, **options)
        if not evaluate or not isinstance(result, cls):
            return result

        # Auto-evalf only when *every* argument wants it (pr2 > 0, i.e. the
        # minimum requested precision is positive); evaluate at the highest
        # precision any argument carries (pr).
        pr = max(cls._should_evalf(a) for a in result.args)
        pr2 = min(cls._should_evalf(a) for a in result.args)
        if pr2 > 0:
            return result.evalf(mlib.libmpf.prec_to_dps(pr))
        return result

    @classmethod
    def _should_evalf(cls, arg):
        """
        Decide if the function should automatically evalf().

        By default (in this implementation), this happens if (and only if)
        the ARG is a floating point number (or an Add matching
        ``a + b*I`` with a Float in ``a`` or ``b``).

        This function is used by __new__.

        Returns the precision to evalf to, or -1 if it shouldn't evalf.
        """
        from sympy.core.symbol import Wild
        if arg.is_Float:
            return arg._prec
        if not arg.is_Add:
            return -1
        # Don't use as_real_imag() here, that's too much work
        a, b = Wild('a'), Wild('b')
        m = arg.match(a + b*S.ImaginaryUnit)
        if not m or not (m[a].is_Float or m[b].is_Float):
            return -1
        l = [m[i]._prec for i in m if m[i].is_Float]
        l.append(-1)
        return max(l)

    @classmethod
    def class_key(cls):
        # Sort key used when ordering expressions: well-known elementary
        # functions get fixed small indices so they sort deterministically;
        # anything else sorts after them (10000) unless it accepts any
        # number of arguments (nargs is Naturals0 -> index 0).
        from sympy.sets.fancysets import Naturals0
        funcs = {
            'exp': 10,
            'log': 11,
            'sin': 20,
            'cos': 21,
            'tan': 22,
            'cot': 23,
            'sinh': 30,
            'cosh': 31,
            'tanh': 32,
            'coth': 33,
            'conjugate': 40,
            're': 41,
            'im': 42,
            'arg': 43,
        }
        name = cls.__name__
        try:
            i = funcs[name]
        except KeyError:
            i = 0 if isinstance(cls.nargs, Naturals0) else 10000
        return 4, i, name

    @property
    def is_commutative(self):
        """
        Returns whether the function is commutative.
        """
        if all(getattr(t, 'is_commutative') for t in self.args):
            return True
        else:
            return False

    def _eval_evalf(self, prec):
        """Numerically evaluate this applied function via mpmath, or via a
        user-supplied ``_imp_`` implementation when no mpmath equivalent
        exists.  Returns None when evaluation is not possible."""
        # Lookup mpmath function based on name
        fname = self.func.__name__
        try:
            if not hasattr(mpmath, fname):
                from sympy.utilities.lambdify import MPMATH_TRANSLATIONS
                fname = MPMATH_TRANSLATIONS[fname]
            func = getattr(mpmath, fname)
        except (AttributeError, KeyError):
            try:
                return Float(self._imp_(*self.args), prec)
            except (AttributeError, TypeError, ValueError):
                return

        # Convert all args to mpf or mpc
        # Convert the arguments to *higher* precision than requested for the
        # final result.
        # XXX + 5 is a guess, it is similar to what is used in evalf.py. Should
        # we be more intelligent about it?
        try:
            args = [arg._to_mpmath(prec + 5) for arg in self.args]

            def bad(m):
                from mpmath import mpf, mpc
                # the precision of an mpf value is the last element
                # if that is 1 (and m[1] is not 1 which would indicate a
                # power of 2), then the eval failed; so check that none of
                # the arguments failed to compute to a finite precision.
                # Note: An mpc value has two parts, the re and imag tuple;
                # check each of those parts, too. Anything else is allowed to
                # pass
                if isinstance(m, mpf):
                    m = m._mpf_
                    return m[1] !=1 and m[-1] == 1
                elif isinstance(m, mpc):
                    m, n = m._mpc_
                    return m[1] !=1 and m[-1] == 1 and \
                        n[1] !=1 and n[-1] == 1
                else:
                    return False
            if any(bad(a) for a in args):
                raise ValueError  # one or more args failed to compute with significance
        except ValueError:
            return

        with mpmath.workprec(prec):
            v = func(*args)

        return Expr._from_mpmath(v, prec)

    def _eval_derivative(self, s):
        # f(x).diff(s) -> x.diff(s) * f.fdiff(1)(s)  (chain rule, summed
        # over every argument that depends on s)
        i = 0
        l = []
        for a in self.args:
            i += 1
            da = a.diff(s)
            if da is S.Zero:
                continue
            try:
                df = self.fdiff(i)
            except ArgumentIndexError:
                df = Function.fdiff(self, i)
            l.append(df * da)
        return Add(*l)

    def _eval_is_commutative(self):
        return fuzzy_and(a.is_commutative for a in self.args)

    def _eval_is_complex(self):
        return fuzzy_and(a.is_complex for a in self.args)

    def as_base_exp(self):
        """
        Returns the method as the 2-tuple (base, exponent).
        """
        return self, S.One

    def _eval_aseries(self, n, args0, x, logx):
        """
        Compute an asymptotic expansion around args0, in terms of self.args.
        This function is only used internally by _eval_nseries and should not
        be called directly; derived classes can overwrite this to implement
        asymptotic expansions.
        """
        from sympy.utilities.misc import filldedent
        raise PoleError(filldedent('''
            Asymptotic expansion of %s around %s is
            not implemented.''' % (type(self), args0)))

    def _eval_nseries(self, x, n, logx):
        """
        This function does compute series for multivariate functions,
        but the expansion is always in terms of *one* variable.

        Examples
        ========

        >>> from sympy import atan2
        >>> from sympy.abc import x, y
        >>> atan2(x, y).series(x, n=2)
        atan2(0, y) + x/y + O(x**2)
        >>> atan2(x, y).series(y, n=2)
        -y/x + atan2(x, 0) + O(y**2)

        This function also computes asymptotic expansions, if necessary
        and possible:

        >>> from sympy import loggamma
        >>> loggamma(1/x)._eval_nseries(x,0,None)
        -1/x - log(x)/x + log(x)/2 + O(1)

        """
        from sympy import Order
        from sympy.sets.sets import FiniteSet
        args = self.args
        args0 = [t.limit(x, 0) for t in args]
        if any(t.is_finite is False for t in args0):
            # An argument blows up at x = 0: fall back to an asymptotic
            # expansion, or the "expansion around the logarithmic term"
            # trick below.
            from sympy import oo, zoo, nan
            # XXX could use t.as_leading_term(x) here but it's a little
            # slower
            a = [t.compute_leading_term(x, logx=logx) for t in args]
            a0 = [t.limit(x, 0) for t in a]
            if any([t.has(oo, -oo, zoo, nan) for t in a0]):
                return self._eval_aseries(n, args0, x, logx)
            # Careful: the argument goes to oo, but only logarithmically so. We
            # are supposed to do a power series expansion "around the
            # logarithmic term". e.g.
            #      f(1+x+log(x))
            #     -> f(1+logx) + x*f'(1+logx) + O(x**2)
            # where 'logx' is given in the argument
            a = [t._eval_nseries(x, n, logx) for t in args]
            z = [r - r0 for (r, r0) in zip(a, a0)]
            p = [Dummy() for t in z]
            q = []
            v = None
            for ai, zi, pi in zip(a0, z, p):
                if zi.has(x):
                    if v is not None:
                        raise NotImplementedError
                    q.append(ai + pi)
                    v = pi
                else:
                    q.append(ai)
            e1 = self.func(*q)
            if v is None:
                return e1
            s = e1._eval_nseries(v, n, logx)
            o = s.getO()
            s = s.removeO()
            # NOTE(review): 'zi' here is the loop variable left over from
            # the for-loop above; it equals the z entry paired with v only
            # when the x-dependent argument is the last one — preserved
            # as-is, verify against upstream history.
            s = s.subs(v, zi).expand() + Order(o.expr.subs(v, zi), x)
            return s
        if (self.func.nargs is S.Naturals0
                or (self.func.nargs == FiniteSet(1) and args0[0])
                or any(c > 1 for c in self.func.nargs)):
            e = self
            e1 = e.expand()
            if e == e1:
                #for example when e = sin(x+1) or e = sin(cos(x))
                #let's try the general algorithm
                term = e.subs(x, S.Zero)
                if term.is_finite is False or term is S.NaN:
                    raise PoleError("Cannot expand %s around 0" % (self))
                series = term
                fact = S.One
                _x = Dummy('x')
                e = e.subs(x, _x)
                # Taylor expansion by repeated differentiation at 0.
                for i in range(n - 1):
                    i += 1
                    fact *= Rational(i)
                    e = e.diff(_x)
                    subs = e.subs(_x, S.Zero)
                    if subs is S.NaN:
                        # try to evaluate a limit if we have to
                        subs = e.limit(_x, S.Zero)
                    if subs.is_finite is False:
                        raise PoleError("Cannot expand %s around 0" % (self))
                    term = subs*(x**i)/fact
                    term = term.expand()
                    series += term
                return series + Order(x**n, x)
            return e1.nseries(x, n=n, logx=logx)
        arg = self.args[0]
        l = []
        g = None
        # try to predict a number of terms needed
        nterms = n + 2
        cf = Order(arg.as_leading_term(x), x).getn()
        if cf != 0:
            nterms = int(nterms / cf)
        for i in range(nterms):
            g = self.taylor_term(i, arg, g)
            g = g.nseries(x, n=n, logx=logx)
            l.append(g)
        return Add(*l) + Order(x**n, x)

    def fdiff(self, argindex=1):
        """
        Returns the first derivative of the function.
        """
        if not (1 <= argindex <= len(self.args)):
            raise ArgumentIndexError(self, argindex)

        if self.args[argindex - 1].is_Symbol:
            for i in range(len(self.args)):
                if i == argindex - 1:
                    continue
                # See issue 8510: the symbol may also occur inside another
                # argument, in which case a plain Derivative is ambiguous.
                if self.args[argindex - 1] in self.args[i].free_symbols:
                    break
            else:
                return Derivative(self, self.args[argindex - 1],
                                  evaluate=False)
        # See issue 4624 and issue 4719 and issue 5600: differentiate wrt a
        # fresh dummy, then express the result as a Subs at the original arg.
        arg_dummy = Dummy('xi_%i' % argindex)
        arg_dummy.dummy_index = hash(self.args[argindex - 1])
        new_args = [arg for arg in self.args]
        new_args[argindex-1] = arg_dummy
        return Subs(Derivative(self.func(*new_args), arg_dummy),
                    arg_dummy, self.args[argindex - 1])

    def _eval_as_leading_term(self, x):
        """Stub that should be overridden by new Functions to return
        the first non-zero term in a series if ever an x-dependent
        argument whose leading term vanishes as x -> 0 might be encountered.
        See, for example, cos._eval_as_leading_term.
        """
        from sympy import Order
        args = [a.as_leading_term(x) for a in self.args]
        o = Order(1, x)
        if any(x in a.free_symbols and o.contains(a) for a in args):
            # Whereas x and any finite number are contained in O(1, x),
            # expressions like 1/x are not. If any arg simplified to a
            # vanishing expression as x -> 0 (like x or x**2, but not
            # 3, 1/x, etc...) then the _eval_as_leading_term is needed
            # to supply the first non-zero term of the series,
            #
            # e.g. expression    leading term
            #      ----------    ------------
            #      cos(1/x)      cos(1/x)
            #      cos(cos(x))   cos(1)
            #      cos(x)        1 <- _eval_as_leading_term needed
            #      sin(x)        x <- _eval_as_leading_term needed
            #
            raise NotImplementedError(
                '%s has no _eval_as_leading_term routine' % self.func)
        else:
            return self.func(*args)

    def _sage_(self):
        # Convert to a Sage function by name; assumes Sage exposes a
        # function with the same name as this class.
        import sage.all as sage
        fname = self.func.__name__
        func = getattr(sage, fname)
        args = [arg._sage_() for arg in self.args]
        return func(*args)


class AppliedUndef(Function):
    """
    Base class for expressions resulting from the application of an undefined
    function.
    """

    def __new__(cls, *args, **options):
        args = list(map(sympify, args))
        obj = super(AppliedUndef, cls).__new__(cls, *args, **options)
        return obj

    def _eval_as_leading_term(self, x):
        # An undefined function is its own leading term.
        return self

    def _sage_(self):
        import sage.all as sage
        fname = str(self.func)
        args = [arg._sage_() for arg in self.args]
        func = sage.function(fname)(*args)
        return func


class UndefinedFunction(FunctionClass):
    """
    The (meta)class of undefined functions.
    """
    def __new__(mcl, name, bases=(AppliedUndef,), __dict__=None, **kwargs):
        __dict__ = __dict__ or {}
        __dict__.update(kwargs)
        __dict__['__module__'] = None  # For pickling
        ret = super(UndefinedFunction, mcl).__new__(mcl, name, bases, __dict__)
        return ret

    def __instancecheck__(cls, instance):
        return cls in type(instance).__mro__

# Two undefined functions compare equal when one is (a subclass of) the
# other's class and their sort keys (which include the name) match.
UndefinedFunction.__eq__ = lambda s, o: (isinstance(o, s.__class__) and
    (s.class_key() == o.class_key()))
class WildFunction(Function, AtomicExpr):
    """
    A WildFunction function matches any function (with its arguments).

    Examples
    ========

    >>> from sympy import WildFunction, Function, cos
    >>> from sympy.abc import x, y
    >>> F = WildFunction('F')
    >>> f = Function('f')
    >>> F.nargs
    Naturals0()
    >>> x.match(F)
    >>> F.match(F)
    {F_: F_}
    >>> f(x).match(F)
    {F_: f(x)}
    >>> cos(x).match(F)
    {F_: cos(x)}
    >>> f(x, y).match(F)
    {F_: f(x, y)}

    To match functions with a given number of arguments, set ``nargs`` to the
    desired value at instantiation:

    >>> F = WildFunction('F', nargs=2)
    >>> F.nargs
    {2}
    >>> f(x).match(F)
    >>> f(x, y).match(F)
    {F_: f(x, y)}

    To match functions with a range of arguments, set ``nargs`` to a tuple
    containing the desired number of arguments, e.g. if ``nargs = (1, 2)``
    then functions with 1 or 2 arguments will be matched.

    >>> F = WildFunction('F', nargs=(1, 2))
    >>> F.nargs
    {1, 2}
    >>> f(x).match(F)
    {F_: f(x)}
    >>> f(x, y).match(F)
    {F_: f(x, y)}
    >>> f(x, y, 1).match(F)

    """

    include = set()

    def __init__(cls, name, **assumptions):
        from sympy.sets.sets import Set, FiniteSet
        cls.name = name
        nargs = assumptions.pop('nargs', S.Naturals0)
        if not isinstance(nargs, Set):
            # Canonicalize nargs here.  See also FunctionClass.
            if is_sequence(nargs):
                nargs = tuple(ordered(set(nargs)))
            elif nargs is not None:
                nargs = (as_int(nargs),)
            nargs = FiniteSet(*nargs)
        cls.nargs = nargs

    def matches(self, expr, repl_dict={}, old=False):
        # NOTE(review): the mutable default argument is only safe because
        # repl_dict is copied before mutation below.
        if not isinstance(expr, (AppliedUndef, Function)):
            return None
        if len(expr.args) not in self.nargs:
            return None

        repl_dict = repl_dict.copy()
        repl_dict[self] = expr
        return repl_dict


class Derivative(Expr):
    """
    Carries out differentiation of the given expression with respect to symbols.

    expr must define ._eval_derivative(symbol) method that returns
    the differentiation result. This function only needs to consider the
    non-trivial case where expr contains symbol and it should call the diff()
    method internally (not _eval_derivative); Derivative should be the only
    one to call _eval_derivative.

    Simplification of high-order derivatives:

    Because there can be a significant amount of simplification that can be
    done when multiple differentiations are performed, results will be
    automatically simplified in a fairly conservative fashion unless the
    keyword ``simplify`` is set to False.

        >>> from sympy import sqrt, diff
        >>> from sympy.abc import x
        >>> e = sqrt((x + 1)**2 + x)
        >>> diff(e, x, 5, simplify=False).count_ops()
        136
        >>> diff(e, x, 5).count_ops()
        30

    Ordering of variables:

    If evaluate is set to True and the expression can not be evaluated, the
    list of differentiation symbols will be sorted, that is, the expression is
    assumed to have continuous derivatives up to the order asked. This sorting
    assumes that derivatives wrt Symbols commute, derivatives wrt non-Symbols
    commute, but Symbol and non-Symbol derivatives don't commute with each
    other.

    Derivative wrt non-Symbols:

    This class also allows derivatives wrt non-Symbols that have _diff_wrt
    set to True, such as Function and Derivative. When a derivative wrt a non-
    Symbol is attempted, the non-Symbol is temporarily converted to a Symbol
    while the differentiation is performed.

    Note that this may seem strange, that Derivative allows things like
    f(g(x)).diff(g(x)), or even f(cos(x)).diff(cos(x)).  The motivation for
    allowing this syntax is to make it easier to work with variational calculus
    (i.e., the Euler-Lagrange method).  The best way to understand this is that
    the action of derivative with respect to a non-Symbol is defined by the
    above description:  the object is substituted for a Symbol and the
    derivative is taken with respect to that.  This action is only allowed for
    objects for which this can be done unambiguously, for example Function and
    Derivative objects.  Note that this leads to what may appear to be
    mathematically inconsistent results.  For example::

        >>> from sympy import cos, sin, sqrt
        >>> from sympy.abc import x
        >>> (2*cos(x)).diff(cos(x))
        2
        >>> (2*sqrt(1 - sin(x)**2)).diff(cos(x))
        0

    This appears wrong because in fact 2*cos(x) and 2*sqrt(1 - sin(x)**2) are
    identically equal.  However this is the wrong way to think of this.  Think
    of it instead as if we have something like this::

        >>> from sympy.abc import c, s
        >>> def F(u):
        ...     return 2*u
        ...
        >>> def G(u):
        ...     return 2*sqrt(1 - u**2)
        ...
        >>> F(cos(x))
        2*cos(x)
        >>> G(sin(x))
        2*sqrt(-sin(x)**2 + 1)
        >>> F(c).diff(c)
        2
        >>> F(c).diff(c)
        2
        >>> G(s).diff(c)
        0
        >>> G(sin(x)).diff(cos(x))
        0

    Here, the Symbols c and s act just like the functions cos(x) and sin(x),
    respectively. Think of 2*cos(x) as f(c).subs(c, cos(x)) (or f(c) *at*
    c = cos(x)) and 2*sqrt(1 - sin(x)**2) as g(s).subs(s, sin(x)) (or g(s)
    *at* s = sin(x)), where f(u) == 2*u and g(u) == 2*sqrt(1 - u**2).  Here,
    we define the function first and evaluate it at the function, but we can
    actually unambiguously do this in reverse in SymPy, because
    expr.subs(Function, Symbol) is well-defined:  just structurally replace
    the function everywhere it appears in the expression.

    This is the same notational convenience used in the Euler-Lagrange method
    when one says F(t, f(t), f'(t)).diff(f(t)).  What is actually meant is
    that the expression in question is represented by some F(t, u, v) at
    u = f(t) and v = f'(t), and F(t, f(t), f'(t)).diff(f(t)) simply means
    F(t, u, v).diff(u) at u = f(t).

    We do not allow derivatives to be taken with respect to expressions where
    this is not so well defined.  For example, we do not allow expr.diff(x*y)
    because there are multiple ways of structurally defining where x*y appears
    in an expression, some of which may surprise the reader (for example, a
    very strict definition would have that (x*y*z).diff(x*y) == 0).

        >>> from sympy.abc import x, y, z
        >>> (x*y*z).diff(x*y)
        Traceback (most recent call last):
        ...
        ValueError: Can't differentiate wrt the variable: x*y, 1

    Note that this definition also fits in nicely with the definition of the
    chain rule.  Note how the chain rule in SymPy is defined using unevaluated
    Subs objects::

        >>> from sympy import symbols, Function
        >>> f, g = symbols('f g', cls=Function)
        >>> f(2*g(x)).diff(x)
        2*Derivative(g(x), x)*Subs(Derivative(f(_xi_1), _xi_1), (_xi_1,), (2*g(x),))
        >>> f(g(x)).diff(x)
        Derivative(g(x), x)*Subs(Derivative(f(_xi_1), _xi_1), (_xi_1,), (g(x),))

    Finally, note that, to be consistent with variational calculus, and to
    ensure that the definition of substituting a Function for a Symbol in an
    expression is well-defined, derivatives of functions are assumed to not be
    related to the function.  In other words, we have::

        >>> from sympy import diff
        >>> diff(f(x), x).diff(f(x))
        0

    The same is true for derivatives of different orders::

        >>> diff(f(x), x, 2).diff(diff(f(x), x, 1))
        0
        >>> diff(f(x), x, 1).diff(diff(f(x), x, 2))
        0

    Note, any class can allow derivatives to be taken with respect to itself.
    See the docstring of Expr._diff_wrt.

    Examples
    ========

    Some basic examples:

        >>> from sympy import Derivative, Symbol, Function
        >>> f = Function('f')
        >>> g = Function('g')
        >>> x = Symbol('x')
        >>> y = Symbol('y')

        >>> Derivative(x**2, x, evaluate=True)
        2*x
        >>> Derivative(Derivative(f(x,y), x), y)
        Derivative(f(x, y), x, y)
        >>> Derivative(f(x), x, 3)
        Derivative(f(x), x, x, x)
        >>> Derivative(f(x, y), y, x, evaluate=True)
        Derivative(f(x, y), x, y)

    Now some derivatives wrt functions:

        >>> Derivative(f(x)**2, f(x), evaluate=True)
        2*f(x)
        >>> Derivative(f(g(x)), x, evaluate=True)
        Derivative(g(x), x)*Subs(Derivative(f(_xi_1), _xi_1), (_xi_1,), (g(x),))

    """

    is_Derivative = True

    @property
    def _diff_wrt(self):
        """Allow derivatives wrt Derivatives if it contains a function.

        Examples
        ========

        >>> from sympy import Function, Symbol, Derivative
        >>> f = Function('f')
        >>> x = Symbol('x')
        >>> Derivative(f(x),x)._diff_wrt
        True
        >>> Derivative(x**2,x)._diff_wrt
        False
        """
        if self.expr.is_Function:
            return True
        else:
            return False

    def __new__(cls, expr, *variables, **assumptions):
        expr = sympify(expr)

        # There are no variables, we differentiate wrt all of the free symbols
        # in expr.
        if not variables:
            variables = expr.free_symbols
            if len(variables) != 1:
                if expr.is_number:
                    return S.Zero
                from sympy.utilities.misc import filldedent
                if len(variables) == 0:
                    raise ValueError(filldedent('''
                        Since there are no variables in the expression,
                        the variable(s) of differentiation must be supplied
                        to differentiate %s''' % expr))
                else:
                    raise ValueError(filldedent('''
                        Since there is more than one variable in the
                        expression, the variable(s) of differentiation
                        must be supplied to differentiate %s''' % expr))

        # Standardize the variables by sympifying them and making appending a
        # count of 1 if there is only one variable: diff(e,x)->diff(e,x,1).
        variables = list(sympify(variables))
        if not variables[-1].is_Integer or len(variables) == 1:
            variables.append(S.One)

        # Split the list of variables into a list of the variables we are diff
        # wrt, where each element of the list has the form (s, count) where
        # s is the entity to diff wrt and count is the order of the
        # derivative.
        variable_count = []
        all_zero = True
        i = 0
        while i < len(variables) - 1:  # process up to final Integer
            v, count = variables[i: i + 2]
            iwas = i
            if v._diff_wrt:
                # We need to test the more specific case of count being an
                # Integer first.
                if count.is_Integer:
                    count = int(count)
                    i += 2
                elif count._diff_wrt:
                    count = 1
                    i += 1

            if i == iwas:  # didn't get an update because of bad input
                from sympy.utilities.misc import filldedent
                last_digit = int(str(count)[-1])
                ordinal = 'st' if last_digit == 1 else 'nd' if last_digit == 2 else 'rd' if last_digit == 3 else 'th'
                raise ValueError(filldedent('''
                Can\'t calculate %s%s derivative wrt %s.''' % (count, ordinal, v)))

            if all_zero and not count == 0:
                all_zero = False

            if count:
                variable_count.append((v, count))

        # We make a special case for 0th derivative, because there is no
        # good way to unambiguously print this.
        if all_zero:
            return expr

        # Pop evaluate because it is not really an assumption and we will need
        # to track it carefully below.
        evaluate = assumptions.pop('evaluate', False)

        # Look for a quick exit if there are symbols that don't appear in
        # expression at all. Note, this cannot check non-symbols like
        # functions and Derivatives as those can be created by intermediate
        # derivatives.
        if evaluate and all(isinstance(sc[0], Symbol)
                for sc in variable_count):
            symbol_set = set(sc[0] for sc in variable_count)
            if symbol_set.difference(expr.free_symbols):
                return S.Zero

        # We make a generator so as to only generate a variable when necessary.
        # If a high order of derivative is requested and the expr becomes 0
        # after a few differentiations, then we won't need the other variables.
        variablegen = (v for v, count in variable_count for i in range(count))

        # If we can't compute the derivative of expr (but we wanted to) and
        # expr is itself not a Derivative, finish building an unevaluated
        # derivative class by calling Expr.__new__.
        if (not (hasattr(expr, '_eval_derivative') and evaluate) and
           (not isinstance(expr, Derivative))):
            variables = list(variablegen)
            # If we wanted to evaluate, we sort the variables into standard
            # order for later comparisons. This is too aggressive if evaluate
            # is False, so we don't do it in that case.
            if evaluate:
                #TODO: check if assumption of discontinuous derivatives exist
                variables = cls._sort_variables(variables)
            # Here we *don't* need to reinject evaluate into assumptions
            # because we are done with it and it is not an assumption that
            # Expr knows about.
            obj = Expr.__new__(cls, expr, *variables, **assumptions)
            return obj

        # Compute the derivative now by repeatedly calling the
        # _eval_derivative method of expr for each variable. When this method
        # returns None, the derivative couldn't be computed wrt that variable
        # and we save the variable for later.
        unhandled_variables = []

        # Once we encounter a non_symbol that is unhandled, we stop taking
        # derivatives entirely. This is because derivatives wrt functions
        # don't commute with derivatives wrt symbols and we can't safely
        # continue.
        unhandled_non_symbol = False
        nderivs = 0  # how many derivatives were performed
        for v in variablegen:
            is_symbol = v.is_symbol

            if unhandled_non_symbol:
                obj = None
            else:
                if not is_symbol:
                    # NOTE(review): 'i' here is left over from the while
                    # loop above (the generator's own 'i' does not leak in
                    # Python 3) — looks suspicious but preserved as-is.
                    new_v = Dummy('xi_%i' % i)
                    new_v.dummy_index = hash(v)
                    expr = expr.xreplace({v: new_v})
                    old_v = v
                    v = new_v
                obj = expr._eval_derivative(v)
                nderivs += 1
                if not is_symbol:
                    if obj is not None:
                        if not old_v.is_symbol and obj.is_Derivative:
                            # Derivative evaluated at a point that is not a
                            # symbol
                            obj = Subs(obj, v, old_v)
                        else:
                            obj = obj.xreplace({v: old_v})
                    v = old_v

            if obj is None:
                unhandled_variables.append(v)
                if not is_symbol:
                    unhandled_non_symbol = True
            elif obj is S.Zero:
                return S.Zero
            else:
                expr = obj

        if unhandled_variables:
            unhandled_variables = cls._sort_variables(unhandled_variables)
            expr = Expr.__new__(cls, expr, *unhandled_variables, **assumptions)
        else:
            # We got a Derivative at the end of it all, and we rebuild it by
            # sorting its variables.
            if isinstance(expr, Derivative):
                expr = cls(
                    expr.args[0], *cls._sort_variables(expr.args[1:])
                )

        if nderivs > 1 and assumptions.get('simplify', True):
            from sympy.core.exprtools import factor_terms
            from sympy.simplify.simplify import signsimp
            expr = factor_terms(signsimp(expr))
        return expr

    @classmethod
    def _sort_variables(cls, vars):
        """Sort variables, but disallow sorting of non-symbols.

        When taking derivatives, the following rules usually hold:

        * Derivative wrt different symbols commute.
        * Derivative wrt different non-symbols commute.
        * Derivatives wrt symbols and non-symbols don't commute.

        Examples
        ========

        >>> from sympy import Derivative, Function, symbols
        >>> vsort = Derivative._sort_variables
        >>> x, y, z = symbols('x y z')
        >>> f, g, h = symbols('f g h', cls=Function)

        >>> vsort((x,y,z))
        [x, y, z]

        >>> vsort((h(x),g(x),f(x)))
        [f(x), g(x), h(x)]

        >>> vsort((z,y,x,h(x),g(x),f(x)))
        [x, y, z, f(x), g(x), h(x)]

        >>> vsort((x,f(x),y,f(y)))
        [x, f(x), y, f(y)]

        >>> vsort((y,x,g(x),f(x),z,h(x),y,x))
        [x, y, f(x), g(x), z, h(x), x, y]

        >>> vsort((z,y,f(x),x,f(x),g(x)))
        [y, z, f(x), x, f(x), g(x)]

        >>> vsort((z,y,f(x),x,f(x),g(x),z,z,y,x))
        [y, z, f(x), x, f(x), g(x), x, y, z, z]
        """
        sorted_vars = []
        symbol_part = []
        non_symbol_part = []
        for v in vars:
            if not v.is_symbol:
                if len(symbol_part) > 0:
                    sorted_vars.extend(sorted(symbol_part,
                                              key=default_sort_key))
                    symbol_part = []
                non_symbol_part.append(v)
            else:
                if len(non_symbol_part) > 0:
                    sorted_vars.extend(sorted(non_symbol_part,
                                              key=default_sort_key))
                    non_symbol_part = []
                symbol_part.append(v)
        if len(non_symbol_part) > 0:
            sorted_vars.extend(sorted(non_symbol_part,
                                      key=default_sort_key))
        if len(symbol_part) > 0:
            sorted_vars.extend(sorted(symbol_part,
                                      key=default_sort_key))
        return sorted_vars

    def _eval_is_commutative(self):
        return self.expr.is_commutative

    def _eval_derivative(self, v):
        # If the variable s we are diff wrt is not in self.variables, we
        # assume that we might be able to take the derivative.
        if v not in self.variables:
            obj = self.expr.diff(v)
            if obj is S.Zero:
                return S.Zero
            if isinstance(obj, Derivative):
                return obj.func(obj.expr, *(self.variables + obj.variables))
            # The derivative wrt s could have simplified things such that the
            # derivative wrt things in self.variables can now be done. Thus,
            # we set evaluate=True to see if there are any other derivatives
            # that can be done. The most common case is when obj is a simple
            # number so that the derivative wrt anything else will vanish.
            return self.func(obj, *self.variables, evaluate=True)
        # In this case s was in self.variables so the derivative wrt s has
        # already been attempted and was not computed, either because it
        # couldn't be or evaluate=False originally.
        return self.func(self.expr, *(self.variables + (v, )), evaluate=False)

    def doit(self, **hints):
        expr = self.expr
        if hints.get('deep', True):
            expr = expr.doit(**hints)
        hints['evaluate'] = True
        return self.func(expr, *self.variables, **hints)

    @_sympifyit('z0', NotImplementedError)
    def doit_numerically(self, z0):
        """
        Evaluate the derivative at z numerically.

        When we can represent derivatives at a point, this should be folded
        into the normal evalf. For now, we need a special method.
        """
        import mpmath
        from sympy.core.expr import Expr
        if len(self.free_symbols) != 1 or len(self.variables) != 1:
            raise NotImplementedError('partials and higher order derivatives')
        z = list(self.free_symbols)[0]

        def eval(x):
            f0 = self.expr.subs(z, Expr._from_mpmath(x, prec=mpmath.mp.prec))
            f0 = f0.evalf(mlib.libmpf.prec_to_dps(mpmath.mp.prec))
            return f0._to_mpmath(mpmath.mp.prec)
        return Expr._from_mpmath(mpmath.diff(eval,
                                             z0._to_mpmath(mpmath.mp.prec)),
                                 mpmath.mp.prec)

    @property
    def expr(self):
        """The expression being differentiated."""
        return self._args[0]

    @property
    def variables(self):
        """The (flattened) differentiation variables."""
        return self._args[1:]

    @property
    def free_symbols(self):
        return self.expr.free_symbols

    def _eval_subs(self, old, new):
        if old in self.variables and not new._diff_wrt:
            # issue 4719
            return Subs(self, old, new)
        # If both are Derivatives with the same expr, check if old is
        # equivalent to self or if old is a subderivative of self.
        if old.is_Derivative and old.expr == self.expr:
            # Check if canonical order of variables is equal.
            old_vars = collections.Counter(old.variables)
            self_vars = collections.Counter(self.variables)
            if old_vars == self_vars:
                return new

            # collections.Counter doesn't have __le__
            def _subset(a, b):
                return all(a[i] <= b[i] for i in a)

            if _subset(old_vars, self_vars):
                return Derivative(new, *(self_vars - old_vars).elements())

        return Derivative(*(x._subs(old, new) for x in self.args))

    def _eval_lseries(self, x, logx):
        dx = self.variables
        for term in self.expr.lseries(x, logx=logx):
            yield self.func(term, *dx)

    def _eval_nseries(self, x, n, logx):
        arg = self.expr.nseries(x, n=n, logx=logx)
        o = arg.getO()
        dx = self.variables
        rv = [self.func(a, *dx) for a in Add.make_args(arg.removeO())]
        if o:
            rv.append(o/x)
        return Add(*rv)

    def _eval_as_leading_term(self, x):
        series_gen = self.expr.lseries(x)
        d = S.Zero
        for leading_term in series_gen:
            d = diff(leading_term, *self.variables)
            if d != 0:
                break
        return d

    def _sage_(self):
        import sage.all as sage
        args = [arg._sage_() for arg in self.args]
        return sage.derivative(*args)

    def as_finite_difference(self, points=1, x0=None, wrt=None):
        """ Expresses a Derivative instance as a finite difference.

        Parameters
        ==========
        points : sequence or coefficient, optional
            If sequence: discrete values (length >= order+1) of the
            independent variable used for generating the finite
            difference weights.
            If it is a coefficient, it will be used as the step-size
            for generating an equidistant sequence of length order+1
            centered around ``x0``. Default: 1 (step-size 1)
        x0 : number or Symbol, optional
            the value of the independent variable (``wrt``) at which the
            derivative is to be approximated. Default: same as ``wrt``.
        wrt : Symbol, optional
            "with respect to" the variable for which the (partial)
            derivative is to be approximated for. If not provided it
            is required that the derivative is ordinary. Default: ``None``.

        Examples
        ========
        >>> from sympy import symbols, Function, exp, sqrt, Symbol
        >>> x, h = symbols('x h')
        >>> f = Function('f')
        >>> f(x).diff(x).as_finite_difference()
        -f(x - 1/2) + f(x + 1/2)

        The default step size and number of points are 1 and ``order + 1``
        respectively. We can change the step size by passing a symbol
        as a parameter:

        >>> f(x).diff(x).as_finite_difference(h)
        -f(-h/2 + x)/h + f(h/2 + x)/h

        We can also specify the discretized values to be used in a sequence:

        >>> f(x).diff(x).as_finite_difference([x, x+h, x+2*h])
        -3*f(x)/(2*h) + 2*f(h + x)/h - f(2*h + x)/(2*h)

        The algorithm is not restricted to use equidistant spacing, nor
        do we need to make the approximation around ``x0``, but we can get
        an expression estimating the derivative at an offset:

        >>> e, sq2 = exp(1), sqrt(2)
        >>> xl = [x-h, x+h, x+e*h]
        >>> f(x).diff(x, 1).as_finite_difference(xl, x+h*sq2)  # doctest: +ELLIPSIS
        2*h*((h + sqrt(2)*h)/(2*h) - (-sqrt(2)*h + h)/(2*h))*f(E*h + x)/...

        Partial derivatives are also supported:

        >>> y = Symbol('y')
        >>> d2fdxdy=f(x,y).diff(x,y)
        >>> d2fdxdy.as_finite_difference(wrt=x)
        -Derivative(f(x - 1/2, y), y) + Derivative(f(x + 1/2, y), y)

        We can apply ``as_finite_difference`` to ``Derivative`` instances in
        compound expressions using ``replace``:

        >>> (1 + 42**f(x).diff(x)).replace(lambda arg: arg.is_Derivative,
        ...     lambda arg: arg.as_finite_difference())
        42**(-f(x - 1/2) + f(x + 1/2)) + 1

        See also
        ========

        sympy.calculus.finite_diff.apply_finite_diff
        sympy.calculus.finite_diff.differentiate_finite
        sympy.calculus.finite_diff.finite_diff_weights

        """
        from ..calculus.finite_diff import _as_finite_diff
        return _as_finite_diff(self, points, x0, wrt)
class Lambda(Expr):
    """
    Lambda(x, expr) represents a lambda function similar to Python's
    'lambda x: expr'. A function of several variables is written as
    Lambda((x, y, ...), expr).

    A simple example:

    >>> from sympy import Lambda
    >>> from sympy.abc import x
    >>> f = Lambda(x, x**2)
    >>> f(4)
    16

    For multivariate functions, use:

    >>> from sympy.abc import y, z, t
    >>> f2 = Lambda((x, y, z, t), x + y**z + t**z)
    >>> f2(1, 2, 3, 4)
    73

    A handy shortcut for lots of arguments:

    >>> p = x, y, z
    >>> f = Lambda(p, x + y*z)
    >>> f(*p)
    x + y*z

    """
    is_Function = True

    def __new__(cls, variables, expr):
        from sympy.sets.sets import FiniteSet
        # Accept a single symbol or any iterable of symbols as signature.
        if iterable(variables):
            sig = list(variables)
        else:
            sig = [variables]
        for var in sig:
            if not getattr(var, 'is_Symbol', False):
                raise TypeError('variable is not a symbol: %s' % var)
        # Lambda(x, x) is the identity function.
        if len(sig) == 1 and sig[0] == expr:
            return S.IdentityFunction

        obj = Expr.__new__(cls, Tuple(*sig), sympify(expr))
        obj.nargs = FiniteSet(len(sig))
        return obj

    @property
    def variables(self):
        """The variables used in the internal representation of the function"""
        return self._args[0]

    @property
    def expr(self):
        """The return value of the function"""
        return self._args[1]

    @property
    def free_symbols(self):
        # The signature's variables are bound, hence not free.
        return self.expr.free_symbols - set(self.variables)

    def __call__(self, *args):
        given = len(args)
        if given not in self.nargs:  # Lambda only ever has 1 value in nargs
            # XXX: exception message must be in exactly this format to
            # make it work with NumPy's functions like vectorize(). See,
            # for example, https://github.com/numpy/numpy/issues/1697.
            # The ideal solution would be just to attach metadata to
            # the exception and change NumPy to take advantage of this.

            ## XXX does this apply to Lambda? If not, remove this comment.
            expected = list(self.nargs)[0]
            temp = ('%(name)s takes exactly %(args)s '
                    'argument%(plural)s (%(given)s given)')
            raise TypeError(temp % {
                'name': self,
                'args': expected,
                'plural': 's'*(expected != 1),
                'given': given})
        return self.expr.xreplace(dict(zip(self.variables, args)))

    def __eq__(self, other):
        if not isinstance(other, Lambda):
            return False
        if self.nargs != other.nargs:
            return False
        # Compare bodies after renaming the other lambda's variables to
        # this lambda's variables (alpha-equivalence).
        renaming = dict(zip(other.args[0], self.args[0]))
        return self.args[1] == other.args[1].xreplace(renaming)

    def __ne__(self, other):
        return not(self == other)

    def __hash__(self):
        return super(Lambda, self).__hash__()

    def _hashable_content(self):
        return (self.expr.xreplace(self.canonical_variables),)

    @property
    def is_identity(self):
        """Return ``True`` if this ``Lambda`` is an identity function. """
        if len(self.args) == 2:
            return self.args[0] == self.args[1]
        else:
            return None
class Subs(Expr):
    """
    Represents unevaluated substitutions of an expression.

    ``Subs(expr, x, x0)`` receives 3 arguments: an expression, a variable or
    list of distinct variables and a point or list of evaluation points
    corresponding to those variables.

    ``Subs`` objects are generally useful to represent unevaluated derivatives
    calculated at a point.

    The variables may be expressions, but they are subjected to the
    limitations of subs(), so it is usually a good practice to use only
    symbols for variables, since in that case there can be no ambiguity.

    There's no automatic expansion - use the method .doit() to effect all
    possible substitutions of the object and also of objects inside the
    expression.

    When evaluating derivatives at a point that is not a symbol, a Subs object
    is returned. One is also able to calculate derivatives of Subs objects -
    in this case the expression is always expanded (for the unevaluated form,
    use Derivative()).

    A simple example:

    >>> from sympy import Subs, Function, sin
    >>> from sympy.abc import x, y, z
    >>> f = Function('f')
    >>> e = Subs(f(x).diff(x), x, y)
    >>> e.subs(y, 0)
    Subs(Derivative(f(x), x), (x,), (0,))
    >>> e.subs(f, sin).doit()
    cos(y)

    An example with several variables:

    >>> Subs(f(x)*sin(y) + z, (x, y), (0, 1))
    Subs(z + f(x)*sin(y), (x, y), (0, 1))
    >>> _.doit()
    z + f(0)*sin(1)

    """
    def __new__(cls, expr, variables, point, **assumptions):
        from sympy import Symbol
        if not is_sequence(variables, Tuple):
            variables = [variables]
        variables = list(sympify(variables))

        if list(uniq(variables)) != variables:
            repeated = [ v for v in set(variables) if variables.count(v) > 1 ]
            raise ValueError('cannot substitute expressions %s more than '
                             'once.' % repeated)

        point = Tuple(*(point if is_sequence(point, Tuple) else [point]))

        if len(point) != len(variables):
            raise ValueError('Number of point values must be the same as '
                             'the number of variables.')

        expr = sympify(expr)

        # use symbols with names equal to the point value (with prepended _)
        # to give a variable-independent expression
        pre = "_"
        pts = sorted(set(point), key=default_sort_key)
        from sympy.printing import StrPrinter

        class CustomStrPrinter(StrPrinter):
            # Render Dummies with their index appended so that distinct
            # Dummies that print alike still get distinct names.
            def _print_Dummy(self, expr):
                return str(expr) + str(expr.dummy_index)

        def mystr(expr, **settings):
            p = CustomStrPrinter(settings)
            return p.doprint(expr)

        # Keep prepending underscores until the generated symbol names do
        # not clash with any existing free symbol of expr.
        while 1:
            s_pts = {p: Symbol(pre + mystr(p)) for p in pts}
            reps = [(v, s_pts[p])
                for v, p in zip(variables, point)]
            # if any underscore-prepended symbol is already a free symbol
            # and is a variable with a different point value, then there
            # is a clash, e.g. _0 clashes in Subs(_0 + _1, (_0, _1), (1, 0))
            # because the new symbol that would be created is _1 but _1
            # is already mapped to 0 so __0 and __1 are used for the new
            # symbols
            if any(r in expr.free_symbols and
                   r in variables and
                   Symbol(pre + mystr(point[variables.index(r)])) != r
                   for _, r in reps):
                pre += "_"
                continue
            break

        obj = Expr.__new__(cls, expr, Tuple(*variables), point)
        # Canonical, variable-independent form used for equality/hash.
        obj._expr = expr.subs(reps)
        return obj

    def _eval_is_commutative(self):
        return self.expr.is_commutative

    def doit(self):
        """Perform the substitution and return the resulting expression."""
        return self.expr.doit().subs(list(zip(self.variables, self.point)))

    def evalf(self, prec=None, **options):
        return self.doit().evalf(prec, **options)

    n = evalf

    @property
    def variables(self):
        """The variables to be evaluated"""
        return self._args[1]

    @property
    def expr(self):
        """The expression on which the substitution operates"""
        return self._args[0]

    @property
    def point(self):
        """The values for which the variables are to be substituted"""
        return self._args[2]

    @property
    def free_symbols(self):
        return (self.expr.free_symbols - set(self.variables) |
            set(self.point.free_symbols))

    def _has(self, pattern):
        # A bound variable is not "contained" unless it also appears in
        # the point.
        if pattern in self.variables and pattern not in self.point:
            return False
        return super(Subs, self)._has(pattern)

    def __eq__(self, other):
        # NOTE(review): equality compares only the canonicalized _expr,
        # which encodes both variables and point values — confirm this is
        # intended to ignore everything else.
        if not isinstance(other, Subs):
            return False
        return self._expr == other._expr

    def __ne__(self, other):
        return not(self == other)

    def __hash__(self):
        return super(Subs, self).__hash__()

    def _hashable_content(self):
        return (self._expr.xreplace(self.canonical_variables),)

    def _eval_subs(self, old, new):
        if old in self.variables:
            if old in self.point:
                newpoint = tuple(new if i == old else i for i in self.point)
                return self.func(self.expr, self.variables, newpoint)
            return self

    def _eval_derivative(self, s):
        # Total derivative: derivative of the body plus the contributions
        # from each point value that depends on s (chain rule).
        if s not in self.free_symbols:
            return S.Zero
        return self.func(self.expr.diff(s), self.variables, self.point).doit() \
            + Add(*[ Subs(point.diff(s) * self.expr.diff(arg),
                    self.variables, self.point).doit() for arg,
                    point in zip(self.variables, self.point) ])

    def _eval_nseries(self, x, n, logx):
        if x in self.point:
            # x is the variable being substituted into
            apos = self.point.index(x)
            other = self.variables[apos]
            arg = self.expr.nseries(other, n=n, logx=logx)
            o = arg.getO()
            subs_args = [self.func(a, *self.args[1:]) for a in arg.removeO().args]
            return Add(*subs_args) + o.subs(other, x)
        arg = self.expr.nseries(x, n=n, logx=logx)
        o = arg.getO()
        subs_args = [self.func(a, *self.args[1:]) for a in arg.removeO().args]
        return Add(*subs_args) + o

    def _eval_as_leading_term(self, x):
        if x in self.point:
            ipos = self.point.index(x)
            xvar = self.variables[ipos]
            return self.expr.as_leading_term(xvar)
        if x in self.variables:
            # if `x` is a dummy variable, it means it won't exist after the
            # substitution has been performed:
            return self
        # The variable is independent of the substitution:
        return self.expr.as_leading_term(x)
You can use the same shortcuts for multiple variables as with Derivative.
    For example, diff(f(x), x, x, x) and diff(f(x), x, 3) both return the
    third derivative of f(x).

    You can pass evaluate=False to get an unevaluated Derivative class.  Note
    that if there are 0 symbols (such as diff(f(x), x, 0), then the result
    will be the function (the zeroth derivative), even if evaluate=False.

    Examples
    ========

    >>> from sympy import sin, cos, Function, diff
    >>> from sympy.abc import x, y
    >>> f = Function('f')

    >>> diff(sin(x), x)
    cos(x)
    >>> diff(f(x), x, x, x)
    Derivative(f(x), x, x, x)
    >>> diff(f(x), x, 3)
    Derivative(f(x), x, x, x)
    >>> diff(sin(x)*cos(y), x, 2, y, 2)
    sin(x)*cos(y)

    >>> type(diff(sin(x), x))
    cos
    >>> type(diff(sin(x), x, evaluate=False))
    <class 'sympy.core.function.Derivative'>
    >>> type(diff(sin(x), x, 0))
    sin
    >>> type(diff(sin(x), x, 0, evaluate=False))
    sin

    >>> diff(sin(x))
    cos(x)
    >>> diff(sin(x*y))
    Traceback (most recent call last):
    ...
    ValueError: specify differentiation variables to differentiate sin(x*y)

    Note that ``diff(sin(x))`` syntax is meant only for convenience
    in interactive sessions and should be avoided in library code.

    References
    ==========

    http://reference.wolfram.com/legacy/v5_2/Built-inFunctions/AlgebraicComputation/Calculus/D.html

    See Also
    ========

    Derivative
    sympy.geometry.util.idiff: computes the derivative implicitly

    """
    kwargs.setdefault('evaluate', True)
    # Let the object define its own differentiation if it provides
    # _eval_diff; otherwise fall back to the Derivative class.
    try:
        return f._eval_diff(*symbols, **kwargs)
    except AttributeError:
        pass
    return Derivative(f, *symbols, **kwargs)


def expand(e, deep=True, modulus=None, power_base=True, power_exp=True,
        mul=True, log=True, multinomial=True, basic=True, **hints):
    """
    Expand an expression using methods given as hints.

    Hints evaluated unless explicitly set to False are:  ``basic``, ``log``,
    ``multinomial``, ``mul``, ``power_base``, and ``power_exp``  The following
    hints are supported but not applied unless set to True:  ``complex``,
    ``func``, and ``trig``.
In addition, the following meta-hints are supported by some or all of the other hints: ``frac``, ``numer``, ``denom``, ``modulus``, and ``force``. ``deep`` is supported by all hints. Additionally, subclasses of Expr may define their own hints or meta-hints. The ``basic`` hint is used for any special rewriting of an object that should be done automatically (along with the other hints like ``mul``) when expand is called. This is a catch-all hint to handle any sort of expansion that may not be described by the existing hint names. To use this hint an object should override the ``_eval_expand_basic`` method. Objects may also define their own expand methods, which are not run by default. See the API section below. If ``deep`` is set to ``True`` (the default), things like arguments of functions are recursively expanded. Use ``deep=False`` to only expand on the top level. If the ``force`` hint is used, assumptions about variables will be ignored in making the expansion. Hints ===== These hints are run by default mul --- Distributes multiplication over addition: >>> from sympy import cos, exp, sin >>> from sympy.abc import x, y, z >>> (y*(x + z)).expand(mul=True) x*y + y*z multinomial ----------- Expand (x + y + ...)**n where n is a positive integer. >>> ((x + y + z)**2).expand(multinomial=True) x**2 + 2*x*y + 2*x*z + y**2 + 2*y*z + z**2 power_exp --------- Expand addition in exponents into multiplied bases. >>> exp(x + y).expand(power_exp=True) exp(x)*exp(y) >>> (2**(x + y)).expand(power_exp=True) 2**x*2**y power_base ---------- Split powers of multiplied bases. 
This only happens by default if assumptions allow, or if the ``force`` meta-hint is used: >>> ((x*y)**z).expand(power_base=True) (x*y)**z >>> ((x*y)**z).expand(power_base=True, force=True) x**z*y**z >>> ((2*y)**z).expand(power_base=True) 2**z*y**z Note that in some cases where this expansion always holds, SymPy performs it automatically: >>> (x*y)**2 x**2*y**2 log --- Pull out power of an argument as a coefficient and split logs products into sums of logs. Note that these only work if the arguments of the log function have the proper assumptions--the arguments must be positive and the exponents must be real--or else the ``force`` hint must be True: >>> from sympy import log, symbols >>> log(x**2*y).expand(log=True) log(x**2*y) >>> log(x**2*y).expand(log=True, force=True) 2*log(x) + log(y) >>> x, y = symbols('x,y', positive=True) >>> log(x**2*y).expand(log=True) 2*log(x) + log(y) basic ----- This hint is intended primarily as a way for custom subclasses to enable expansion by default. These hints are not run by default: complex ------- Split an expression into real and imaginary parts. >>> x, y = symbols('x,y') >>> (x + y).expand(complex=True) re(x) + re(y) + I*im(x) + I*im(y) >>> cos(x).expand(complex=True) -I*sin(re(x))*sinh(im(x)) + cos(re(x))*cosh(im(x)) Note that this is just a wrapper around ``as_real_imag()``. Most objects that wish to redefine ``_eval_expand_complex()`` should consider redefining ``as_real_imag()`` instead. func ---- Expand other functions. >>> from sympy import gamma >>> gamma(x + 1).expand(func=True) x*gamma(x) trig ---- Do trigonometric expansions. >>> cos(x + y).expand(trig=True) -sin(x)*sin(y) + cos(x)*cos(y) >>> sin(2*x).expand(trig=True) 2*sin(x)*cos(x) Note that the forms of ``sin(n*x)`` and ``cos(n*x)`` in terms of ``sin(x)`` and ``cos(x)`` are not unique, due to the identity `\sin^2(x) + \cos^2(x) = 1`. The current implementation uses the form obtained from Chebyshev polynomials, but this may change. 
    See `this MathWorld article
    <http://mathworld.wolfram.com/Multiple-AngleFormulas.html>`_ for more
    information.

    Notes
    =====

    - You can shut off unwanted methods::

        >>> (exp(x + y)*(x + y)).expand()
        x*exp(x)*exp(y) + y*exp(x)*exp(y)
        >>> (exp(x + y)*(x + y)).expand(power_exp=False)
        x*exp(x + y) + y*exp(x + y)
        >>> (exp(x + y)*(x + y)).expand(mul=False)
        (x + y)*exp(x)*exp(y)

    - Use deep=False to only expand on the top level::

        >>> exp(x + exp(x + y)).expand()
        exp(x)*exp(exp(x)*exp(y))
        >>> exp(x + exp(x + y)).expand(deep=False)
        exp(x)*exp(exp(x + y))

    - Hints are applied in an arbitrary, but consistent order (in the current
      implementation, they are applied in alphabetical order, except
      multinomial comes before mul, but this may change).  Because of this,
      some hints may prevent expansion by other hints if they are applied
      first.  For example, ``mul`` may distribute multiplications and prevent
      ``log`` and ``power_base`` from expanding them.  Also, if ``mul`` is
      applied before ``multinomial``, the expression might not be fully
      distributed.  The solution is to use the various ``expand_hint`` helper
      functions or to use ``hint=False`` to this function to finely control
      which hints are applied.  Here are some examples::

        >>> from sympy import expand, expand_mul, expand_power_base
        >>> x, y, z = symbols('x,y,z', positive=True)

        >>> expand(log(x*(y + z)))
        log(x) + log(y + z)

      Here, we see that ``log`` was applied before ``mul``.
To get the mul expanded form, either of the following will work:: >>> expand_mul(log(x*(y + z))) log(x*y + x*z) >>> expand(log(x*(y + z)), log=False) log(x*y + x*z) A similar thing can happen with the ``power_base`` hint:: >>> expand((x*(y + z))**x) (x*y + x*z)**x To get the ``power_base`` expanded form, either of the following will work:: >>> expand((x*(y + z))**x, mul=False) x**x*(y + z)**x >>> expand_power_base((x*(y + z))**x) x**x*(y + z)**x >>> expand((x + y)*y/x) y + y**2/x The parts of a rational expression can be targeted:: >>> expand((x + y)*y/x/(x + 1), frac=True) (x*y + y**2)/(x**2 + x) >>> expand((x + y)*y/x/(x + 1), numer=True) (x*y + y**2)/(x*(x + 1)) >>> expand((x + y)*y/x/(x + 1), denom=True) y*(x + y)/(x**2 + x) - The ``modulus`` meta-hint can be used to reduce the coefficients of an expression post-expansion:: >>> expand((3*x + 1)**2) 9*x**2 + 6*x + 1 >>> expand((3*x + 1)**2, modulus=5) 4*x**2 + x + 1 - Either ``expand()`` the function or ``.expand()`` the method can be used. Both are equivalent:: >>> expand((x + 1)**2) x**2 + 2*x + 1 >>> ((x + 1)**2).expand() x**2 + 2*x + 1 API === Objects can define their own expand hints by defining ``_eval_expand_hint()``. The function should take the form:: def _eval_expand_hint(self, **hints): # Only apply the method to the top-level expression ... See also the example below. Objects should define ``_eval_expand_hint()`` methods only if ``hint`` applies to that specific object. The generic ``_eval_expand_hint()`` method defined in Expr will handle the no-op case. Each hint should be responsible for expanding that hint only. Furthermore, the expansion should be applied to the top-level expression only. ``expand()`` takes care of the recursion that happens when ``deep=True``. You should only call ``_eval_expand_hint()`` methods directly if you are 100% sure that the object has the method, as otherwise you are liable to get unexpected ``AttributeError``s. 
Note, again, that you do not need to recursively apply the hint to args of your object: this is handled automatically by ``expand()``. ``_eval_expand_hint()`` should generally not be used at all outside of an ``_eval_expand_hint()`` method. If you want to apply a specific expansion from within another method, use the public ``expand()`` function, method, or ``expand_hint()`` functions. In order for expand to work, objects must be rebuildable by their args, i.e., ``obj.func(*obj.args) == obj`` must hold. Expand methods are passed ``**hints`` so that expand hints may use 'metahints'--hints that control how different expand methods are applied. For example, the ``force=True`` hint described above that causes ``expand(log=True)`` to ignore assumptions is such a metahint. The ``deep`` meta-hint is handled exclusively by ``expand()`` and is not passed to ``_eval_expand_hint()`` methods. Note that expansion hints should generally be methods that perform some kind of 'expansion'. For hints that simply rewrite an expression, use the .rewrite() API. Examples ======== >>> from sympy import Expr, sympify >>> class MyClass(Expr): ... def __new__(cls, *args): ... args = sympify(args) ... return Expr.__new__(cls, *args) ... ... def _eval_expand_double(self, **hints): ... ''' ... Doubles the args of MyClass. ... ... If there more than four args, doubling is not performed, ... unless force=True is also used (False by default). ... ''' ... force = hints.pop('force', False) ... if not force and len(self.args) > 4: ... return self ... return self.func(*(self.args + self.args)) ... 
>>> a = MyClass(1, 2, MyClass(3, 4))
    >>> a
    MyClass(1, 2, MyClass(3, 4))

    >>> a.expand(double=True)
    MyClass(1, 2, MyClass(3, 4, 3, 4), 1, 2, MyClass(3, 4, 3, 4))
    >>> a.expand(double=True, deep=False)
    MyClass(1, 2, MyClass(3, 4), 1, 2, MyClass(3, 4))

    >>> b = MyClass(1, 2, 3, 4, 5)
    >>> b.expand(double=True)
    MyClass(1, 2, 3, 4, 5)
    >>> b.expand(double=True, force=True)
    MyClass(1, 2, 3, 4, 5, 1, 2, 3, 4, 5)

    See Also
    ========

    expand_log, expand_mul, expand_multinomial, expand_complex, expand_trig,
    expand_power_base, expand_power_exp, expand_func, hyperexpand

    """
    # don't modify this; modify the Expr.expand method
    hints['power_base'] = power_base
    hints['power_exp'] = power_exp
    hints['mul'] = mul
    hints['log'] = log
    hints['multinomial'] = multinomial
    hints['basic'] = basic
    return sympify(e).expand(deep=deep, modulus=modulus, **hints)


# This is a special application of two hints

def _mexpand(expr, recursive=False):
    # expand multinomials and then expand products; this may not always
    # be sufficient to give a fully expanded expression (see
    # test_issue_8247_8354 in test_arit)
    if expr is None:
        return
    was = None
    # Iterate to a fixed point; a single pass when recursive is False.
    while was != expr:
        was, expr = expr, expand_mul(expand_multinomial(expr))
        if not recursive:
            break
    return expr


# These are simple wrappers around single hints.


def expand_mul(expr, deep=True):
    """
    Wrapper around expand that only uses the mul hint.  See the expand
    docstring for more information.

    Examples
    ========

    >>> from sympy import symbols, expand_mul, exp, log
    >>> x, y = symbols('x,y', positive=True)
    >>> expand_mul(exp(x+y)*(x+y)*log(x*y**2))
    x*exp(x + y)*log(x*y**2) + y*exp(x + y)*log(x*y**2)

    """
    return sympify(expr).expand(deep=deep, mul=True, power_exp=False,
        power_base=False, basic=False, multinomial=False, log=False)


def expand_multinomial(expr, deep=True):
    """
    Wrapper around expand that only uses the multinomial hint.  See the expand
    docstring for more information.
Examples
    ========

    >>> from sympy import symbols, expand_multinomial, exp
    >>> x, y = symbols('x y', positive=True)
    >>> expand_multinomial((x + exp(x + 1))**2)
    x**2 + 2*x*exp(x + 1) + exp(2*x + 2)

    """
    return sympify(expr).expand(deep=deep, mul=False, power_exp=False,
        power_base=False, basic=False, multinomial=True, log=False)


def expand_log(expr, deep=True, force=False):
    """
    Wrapper around expand that only uses the log hint.  See the expand
    docstring for more information.

    Examples
    ========

    >>> from sympy import symbols, expand_log, exp, log
    >>> x, y = symbols('x,y', positive=True)
    >>> expand_log(exp(x+y)*(x+y)*log(x*y**2))
    (x + y)*(log(x) + 2*log(y))*exp(x + y)

    """
    return sympify(expr).expand(deep=deep, log=True, mul=False,
        power_exp=False, power_base=False, multinomial=False,
        basic=False, force=force)


def expand_func(expr, deep=True):
    """
    Wrapper around expand that only uses the func hint.  See the expand
    docstring for more information.

    Examples
    ========

    >>> from sympy import expand_func, gamma
    >>> from sympy.abc import x
    >>> expand_func(gamma(x + 2))
    x*(x + 1)*gamma(x)

    """
    return sympify(expr).expand(deep=deep, func=True, basic=False,
        log=False, mul=False, power_exp=False, power_base=False,
        multinomial=False)


def expand_trig(expr, deep=True):
    """
    Wrapper around expand that only uses the trig hint.  See the expand
    docstring for more information.

    Examples
    ========

    >>> from sympy import expand_trig, sin
    >>> from sympy.abc import x, y
    >>> expand_trig(sin(x+y)*(x+y))
    (x + y)*(sin(x)*cos(y) + sin(y)*cos(x))

    """
    return sympify(expr).expand(deep=deep, trig=True, basic=False,
        log=False, mul=False, power_exp=False, power_base=False,
        multinomial=False)


def expand_complex(expr, deep=True):
    """
    Wrapper around expand that only uses the complex hint.  See the expand
    docstring for more information.
Examples
    ========

    >>> from sympy import expand_complex, exp, sqrt, I
    >>> from sympy.abc import z
    >>> expand_complex(exp(z))
    I*exp(re(z))*sin(im(z)) + exp(re(z))*cos(im(z))
    >>> expand_complex(sqrt(I))
    sqrt(2)/2 + sqrt(2)*I/2

    See Also
    ========

    Expr.as_real_imag

    """
    return sympify(expr).expand(deep=deep, complex=True, basic=False,
        log=False, mul=False, power_exp=False, power_base=False,
        multinomial=False)


def expand_power_base(expr, deep=True, force=False):
    """
    Wrapper around expand that only uses the power_base hint.

    See the expand docstring for more information.

    A wrapper to expand(power_base=True) which separates a power with a base
    that is a Mul into a product of powers, without performing any other
    expansions, provided that assumptions about the power's base and exponent
    allow.

    deep=False (default is True) will only apply to the top-level expression.

    force=True (default is False) will cause the expansion to ignore
    assumptions about the base and exponent. When False, the expansion will
    only happen if the base is non-negative or the exponent is an integer.

    >>> from sympy.abc import x, y, z
    >>> from sympy import expand_power_base, sin, cos, exp

    >>> (x*y)**2
    x**2*y**2

    >>> (2*x)**y
    (2*x)**y
    >>> expand_power_base(_)
    2**y*x**y

    >>> expand_power_base((x*y)**z)
    (x*y)**z
    >>> expand_power_base((x*y)**z, force=True)
    x**z*y**z
    >>> expand_power_base(sin((x*y)**z), deep=False)
    sin((x*y)**z)
    >>> expand_power_base(sin((x*y)**z), force=True)
    sin(x**z*y**z)

    >>> expand_power_base((2*sin(x))**y + (2*cos(x))**y)
    2**y*sin(x)**y + 2**y*cos(x)**y

    >>> expand_power_base((2*exp(y))**x)
    2**x*exp(y)**x

    >>> expand_power_base((2*cos(x))**y)
    2**y*cos(x)**y

    Notice that sums are left untouched.
If this is not the desired behavior, apply full ``expand()`` to the
    expression:

    >>> expand_power_base(((x+y)*z)**2)
    z**2*(x + y)**2
    >>> (((x+y)*z)**2).expand()
    x**2*z**2 + 2*x*y*z**2 + y**2*z**2

    >>> expand_power_base((2*y)**(1+z))
    2**(z + 1)*y**(z + 1)
    >>> ((2*y)**(1+z)).expand()
    2*2**z*y*y**z

    """
    return sympify(expr).expand(deep=deep, log=False, mul=False,
        power_exp=False, power_base=True, multinomial=False,
        basic=False, force=force)


def expand_power_exp(expr, deep=True):
    """
    Wrapper around expand that only uses the power_exp hint.

    See the expand docstring for more information.

    Examples
    ========

    >>> from sympy import expand_power_exp
    >>> from sympy.abc import x, y
    >>> expand_power_exp(x**(y + 2))
    x**2*x**y
    """
    return sympify(expr).expand(deep=deep, complex=False, basic=False,
        log=False, mul=False, power_exp=True, power_base=False,
        multinomial=False)


def count_ops(expr, visual=False):
    """
    Return a representation (integer or expression) of the operations in expr.

    If ``visual`` is ``False`` (default) then the sum of the coefficients of
    the visual expression will be returned.

    If ``visual`` is ``True`` then the number of each type of operation is
    shown with the core class types (or their virtual equivalent) multiplied
    by the number of times they occur.

    If expr is an iterable, the sum of the op counts of the
    items will be returned.

    Examples
    ========

    >>> from sympy.abc import a, b, x, y
    >>> from sympy import sin, count_ops

    Although there isn't a SUB object, minus signs are interpreted as
    either negations or subtractions:

    >>> (x - y).count_ops(visual=True)
    SUB
    >>> (-x).count_ops(visual=True)
    NEG

    Here, there are two Adds and a Pow:

    >>> (1 + a + b**2).count_ops(visual=True)
    2*ADD + POW

    In the following, an Add, Mul, Pow and two functions:

    >>> (sin(x)*x + sin(x)**2).count_ops(visual=True)
    ADD + MUL + POW + 2*SIN

    for a total of 5:

    >>> (sin(x)*x + sin(x)**2).count_ops(visual=False)
    5

    Note that "what you type" is not always what you get.
The expression 1/x/y is translated by sympy into 1/(x*y) so it gives a
    DIV and MUL rather than two DIVs:

    >>> (1/x/y).count_ops(visual=True)
    DIV + MUL

    The visual option can be used to demonstrate the difference in
    operations for expressions in different forms. Here, the Horner
    representation is compared with the expanded form of a polynomial:

    >>> eq=x*(1 + x*(2 + x*(3 + x)))
    >>> count_ops(eq.expand(), visual=True) - count_ops(eq, visual=True)
    -MUL + 3*POW

    The count_ops function also handles iterables:

    >>> count_ops([x, sin(x), None, True, x + 2], visual=False)
    2
    >>> count_ops([x, sin(x), None, True, x + 2], visual=True)
    ADD + SIN
    >>> count_ops({x: sin(x), x + 2: y + 1}, visual=True)
    2*ADD + SIN

    """
    from sympy import Integral, Symbol
    from sympy.simplify.radsimp import fraction
    from sympy.logic.boolalg import BooleanFunction

    expr = sympify(expr)
    if isinstance(expr, Expr):

        # Walk the expression tree iteratively, recording one marker
        # Symbol (NEG, DIV, SUB, ADD, MUL, POW, ...) per visible operation.
        ops = []
        args = [expr]
        NEG = Symbol('NEG')
        DIV = Symbol('DIV')
        SUB = Symbol('SUB')
        ADD = Symbol('ADD')
        while args:
            a = args.pop()

            # XXX: This is a hack to support non-Basic args
            if isinstance(a, string_types):
                continue

            if a.is_Rational:
                #-1/3 = NEG + DIV
                if a is not S.One:
                    if a.p < 0:
                        ops.append(NEG)
                    if a.q != 1:
                        ops.append(DIV)
                    continue
            elif a.is_Mul:
                if _coeff_isneg(a):
                    ops.append(NEG)
                    if a.args[0] is S.NegativeOne:
                        a = a.as_two_terms()[1]
                    else:
                        a = -a
                n, d = fraction(a)
                if n.is_Integer:
                    ops.append(DIV)
                    if n < 0:
                        ops.append(NEG)
                    args.append(d)
                    continue  # won't be -Mul but could be Add
                elif d is not S.One:
                    if not d.is_Integer:
                        args.append(d)
                    ops.append(DIV)
                    args.append(n)
                    continue  # could be -Mul
            elif a.is_Add:
                aargs = list(a.args)
                negs = 0
                for i, ai in enumerate(aargs):
                    if _coeff_isneg(ai):
                        negs += 1
                        args.append(-ai)
                        if i > 0:
                            ops.append(SUB)
                    else:
                        args.append(ai)
                        if i > 0:
                            ops.append(ADD)
                if negs == len(aargs):  # -x - y = NEG + SUB
                    ops.append(NEG)
                elif _coeff_isneg(aargs[0]):
                    # -x + y = SUB, but already recorded ADD; SUB - ADD
                    # cancels the extra ADD marker symbolically.
                    ops.append(SUB - ADD)
                continue
            if a.is_Pow and a.exp is S.NegativeOne:
                ops.append(DIV)
                args.append(a.base)  # won't be -Mul but could be Add
                continue
            if (a.is_Mul or
                    a.is_Pow or
                    a.is_Function or
                    isinstance(a, Derivative) or
                    isinstance(a, Integral)):

                o = Symbol(a.func.__name__.upper())
                # count the args
                if (a.is_Mul or isinstance(a, LatticeOp)):
                    ops.append(o*(len(a.args) - 1))
                else:
                    ops.append(o)
            if not a.is_Symbol:
                args.extend(a.args)

    elif type(expr) is dict:
        ops = [count_ops(k, visual=visual) +
               count_ops(v, visual=visual) for k, v in expr.items()]
    elif iterable(expr):
        ops = [count_ops(i, visual=visual) for i in expr]
    elif isinstance(expr, BooleanFunction):
        ops = []
        for arg in expr.args:
            ops.append(count_ops(arg, visual=True))
        o = Symbol(expr.func.__name__.upper())
        ops.append(o)
    elif not isinstance(expr, Basic):
        ops = []
    else:  # it's Basic not isinstance(expr, Expr):
        if not isinstance(expr, Basic):
            raise TypeError("Invalid type of expr")
        else:
            # Generic Basic: count one marker per node with args.
            ops = []
            args = [expr]
            while args:
                a = args.pop()

                # XXX: This is a hack to support non-Basic args
                if isinstance(a, string_types):
                    continue

                if a.args:
                    o = Symbol(a.func.__name__.upper())
                    if a.is_Boolean:
                        ops.append(o*(len(a.args)-1))
                    else:
                        ops.append(o)
                    args.extend(a.args)

    if not ops:
        if visual:
            return S.Zero
        return 0

    ops = Add(*ops)

    if visual:
        return ops

    if ops.is_Number:
        return int(ops)

    # Sum the coefficients of the marker symbols for the scalar count.
    return sum(int((a.args or [1])[0]) for a in Add.make_args(ops))


def nfloat(expr, n=15, exponent=False):
    """Make all Rationals in expr Floats except those in exponents
    (unless the exponents flag is set to True).
Examples
    ========

    >>> from sympy.core.function import nfloat
    >>> from sympy.abc import x, y
    >>> from sympy import cos, pi, sqrt
    >>> nfloat(x**4 + x/2 + cos(pi/3) + 1 + sqrt(y))
    x**4 + 0.5*x + sqrt(y) + 1.5
    >>> nfloat(x**4 + sqrt(y), exponent=True)
    x**4.0 + y**0.5

    """
    from sympy.core.power import Pow
    from sympy.polys.rootoftools import RootOf

    # Map nfloat over containers (dicts map over their values).
    if iterable(expr, exclude=string_types):
        if isinstance(expr, (dict, Dict)):
            return type(expr)([(k, nfloat(v, n, exponent)) for k, v in
                               list(expr.items())])
        return type(expr)([nfloat(a, n, exponent) for a in expr])
    rv = sympify(expr)

    if rv.is_Number:
        return Float(rv, n)
    elif rv.is_number:
        # evalf doesn't always set the precision
        rv = rv.n(n)
        if rv.is_Number:
            rv = Float(rv.n(n), n)
        else:
            pass  # pure_complex(rv) is likely True
        return rv

    # watch out for RootOf instances that don't like to have
    # their exponents replaced with Dummies and also sometimes have
    # problems with evaluating at low precision (issue 6393)
    rv = rv.xreplace({ro: ro.n(n) for ro in rv.atoms(RootOf)})

    if not exponent:
        # Shield exponents behind Dummies so .n() leaves them exact.
        reps = [(p, Pow(p.base, Dummy())) for p in rv.atoms(Pow)]
        rv = rv.xreplace(dict(reps))
    rv = rv.n(n)
    if not exponent:
        rv = rv.xreplace({d.exp: p.exp for p, d in reps})
    else:
        # Pow._eval_evalf special cases Integer exponents so if
        # exponent is suppose to be handled we have to do so here
        rv = rv.xreplace(Transform(
            lambda x: Pow(x.base, Float(x.exp, n)),
            lambda x: x.is_Pow and x.exp.is_Integer))

    # Recurse into Function arguments as a final pass.
    return rv.xreplace(Transform(
        lambda x: x.func(*nfloat(x.args, n, exponent)),
        lambda x: isinstance(x, Function)))


from sympy.core.symbol import Dummy, Symbol
codeparrot/github-code-clean
""" Logistic Regression """ # Author: Gael Varoquaux <gael.varoquaux@normalesup.org> # Fabian Pedregosa <f@bianp.net> # Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr> # Manoj Kumar <manojkumarsivaraj334@gmail.com> # Lars Buitinck # Simon Wu <s8wu@uwaterloo.ca> # Arthur Mensch <arthur.mensch@m4x.org import numbers import warnings import numpy as np from scipy import optimize from joblib import Parallel, effective_n_jobs from ._base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator from ._linear_loss import LinearModelLoss from ._sag import sag_solver from .._loss.loss import HalfBinomialLoss, HalfMultinomialLoss from ..preprocessing import LabelEncoder, LabelBinarizer from ..svm._base import _fit_liblinear from ..utils import check_array, check_consistent_length, compute_class_weight from ..utils import check_random_state from ..utils.extmath import softmax from ..utils.extmath import row_norms from ..utils.optimize import _newton_cg, _check_optimize_result from ..utils.validation import check_is_fitted, _check_sample_weight from ..utils.multiclass import check_classification_targets from ..utils.fixes import delayed from ..model_selection import check_cv from ..metrics import get_scorer _LOGISTIC_SOLVER_CONVERGENCE_MSG = ( "Please also refer to the documentation for alternative solver options:\n" " https://scikit-learn.org/stable/modules/linear_model.html" "#logistic-regression" ) def _check_solver(solver, penalty, dual): all_solvers = ["liblinear", "newton-cg", "lbfgs", "sag", "saga"] if solver not in all_solvers: raise ValueError( "Logistic Regression supports only solvers in %s, got %s." % (all_solvers, solver) ) all_penalties = ["l1", "l2", "elasticnet", "none"] if penalty not in all_penalties: raise ValueError( "Logistic Regression supports only penalties in %s, got %s." 
% (all_penalties, penalty) ) if solver not in ["liblinear", "saga"] and penalty not in ("l2", "none"): raise ValueError( "Solver %s supports only 'l2' or 'none' penalties, got %s penalty." % (solver, penalty) ) if solver != "liblinear" and dual: raise ValueError( "Solver %s supports only dual=False, got dual=%s" % (solver, dual) ) if penalty == "elasticnet" and solver != "saga": raise ValueError( "Only 'saga' solver supports elasticnet penalty, got solver={}.".format( solver ) ) if solver == "liblinear" and penalty == "none": raise ValueError("penalty='none' is not supported for the liblinear solver") return solver def _check_multi_class(multi_class, solver, n_classes): if multi_class == "auto": if solver == "liblinear": multi_class = "ovr" elif n_classes > 2: multi_class = "multinomial" else: multi_class = "ovr" if multi_class not in ("multinomial", "ovr"): raise ValueError( "multi_class should be 'multinomial', 'ovr' or 'auto'. Got %s." % multi_class ) if multi_class == "multinomial" and solver == "liblinear": raise ValueError("Solver %s does not support a multinomial backend." % solver) return multi_class def _logistic_regression_path( X, y, pos_class=None, Cs=10, fit_intercept=True, max_iter=100, tol=1e-4, verbose=0, solver="lbfgs", coef=None, class_weight=None, dual=False, penalty="l2", intercept_scaling=1.0, multi_class="auto", random_state=None, check_input=True, max_squared_sum=None, sample_weight=None, l1_ratio=None, n_threads=1, ): """Compute a Logistic Regression model for a list of regularization parameters. This is an implementation that uses the result of the previous model to speed up computations along the set of solutions, making it faster than sequentially calling LogisticRegression for the different parameters. Note that there will be no speedup with liblinear solver, since it does not handle warm-starting. Read more in the :ref:`User Guide <logistic_regression>`. 
Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Input data. y : array-like of shape (n_samples,) or (n_samples, n_targets) Input data, target values. pos_class : int, default=None The class with respect to which we perform a one-vs-all fit. If None, then it is assumed that the given problem is binary. Cs : int or array-like of shape (n_cs,), default=10 List of values for the regularization parameter or integer specifying the number of regularization parameters that should be used. In this case, the parameters will be chosen in a logarithmic scale between 1e-4 and 1e4. fit_intercept : bool, default=True Whether to fit an intercept for the model. In this case the shape of the returned array is (n_cs, n_features + 1). max_iter : int, default=100 Maximum number of iterations for the solver. tol : float, default=1e-4 Stopping criterion. For the newton-cg and lbfgs solvers, the iteration will stop when ``max{|g_i | i = 1, ..., n} <= tol`` where ``g_i`` is the i-th component of the gradient. verbose : int, default=0 For the liblinear and lbfgs solvers set verbose to any positive number for verbosity. solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'}, \ default='lbfgs' Numerical solver to use. coef : array-like of shape (n_features,), default=None Initialization value for coefficients of logistic regression. Useless for liblinear solver. class_weight : dict or 'balanced', default=None Weights associated with classes in the form ``{class_label: weight}``. If not given, all classes are supposed to have weight one. The "balanced" mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))``. Note that these weights will be multiplied with sample_weight (passed through the fit method) if sample_weight is specified. dual : bool, default=False Dual or primal formulation. 
Dual formulation is only implemented for l2 penalty with liblinear solver. Prefer dual=False when n_samples > n_features. penalty : {'l1', 'l2', 'elasticnet'}, default='l2' Used to specify the norm used in the penalization. The 'newton-cg', 'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is only supported by the 'saga' solver. intercept_scaling : float, default=1. Useful only when the solver 'liblinear' is used and self.fit_intercept is set to True. In this case, x becomes [x, self.intercept_scaling], i.e. a "synthetic" feature with constant value equal to intercept_scaling is appended to the instance vector. The intercept becomes ``intercept_scaling * synthetic_feature_weight``. Note! the synthetic feature weight is subject to l1/l2 regularization as all other features. To lessen the effect of regularization on synthetic feature weight (and therefore on the intercept) intercept_scaling has to be increased. multi_class : {'ovr', 'multinomial', 'auto'}, default='auto' If the option chosen is 'ovr', then a binary problem is fit for each label. For 'multinomial' the loss minimised is the multinomial loss fit across the entire probability distribution, *even when the data is binary*. 'multinomial' is unavailable when solver='liblinear'. 'auto' selects 'ovr' if the data is binary, or if solver='liblinear', and otherwise selects 'multinomial'. .. versionadded:: 0.18 Stochastic Average Gradient descent solver for 'multinomial' case. .. versionchanged:: 0.22 Default changed from 'ovr' to 'auto' in 0.22. random_state : int, RandomState instance, default=None Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the data. See :term:`Glossary <random_state>` for details. check_input : bool, default=True If False, the input arrays X and y will not be checked. max_squared_sum : float, default=None Maximum squared sum of X over samples. Used only in SAG solver. If None, it will be computed, going through all the samples. 
The value should be precomputed to speed up cross validation. sample_weight : array-like of shape(n_samples,), default=None Array of weights that are assigned to individual samples. If not provided, then each sample is given unit weight. l1_ratio : float, default=None The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a combination of L1 and L2. n_threads : int, default=1 Number of OpenMP threads to use. Returns ------- coefs : ndarray of shape (n_cs, n_features) or (n_cs, n_features + 1) List of coefficients for the Logistic Regression model. If fit_intercept is set to True then the second dimension will be n_features + 1, where the last item represents the intercept. For ``multiclass='multinomial'``, the shape is (n_classes, n_cs, n_features) or (n_classes, n_cs, n_features + 1). Cs : ndarray Grid of Cs used for cross-validation. n_iter : array of shape (n_cs,) Actual number of iteration for each Cs. Notes ----- You might get slightly different results with the solver liblinear than with the others since this uses LIBLINEAR which penalizes the intercept. .. versionchanged:: 0.19 The "copy" parameter was removed. """ if isinstance(Cs, numbers.Integral): Cs = np.logspace(-4, 4, Cs) solver = _check_solver(solver, penalty, dual) # Preprocessing. 
if check_input: X = check_array( X, accept_sparse="csr", dtype=np.float64, accept_large_sparse=solver not in ["liblinear", "sag", "saga"], ) y = check_array(y, ensure_2d=False, dtype=None) check_consistent_length(X, y) _, n_features = X.shape classes = np.unique(y) random_state = check_random_state(random_state) multi_class = _check_multi_class(multi_class, solver, len(classes)) if pos_class is None and multi_class != "multinomial": if classes.size > 2: raise ValueError("To fit OvR, use the pos_class argument") # np.unique(y) gives labels in sorted order. pos_class = classes[1] # If sample weights exist, convert them to array (support for lists) # and check length # Otherwise set them to 1 for all examples sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype, copy=True) # If class_weights is a dict (provided by the user), the weights # are assigned to the original labels. If it is "balanced", then # the class_weights are assigned after masking the labels with a OvR. le = LabelEncoder() if isinstance(class_weight, dict) or multi_class == "multinomial": class_weight_ = compute_class_weight(class_weight, classes=classes, y=y) sample_weight *= class_weight_[le.fit_transform(y)] # For doing a ovr, we need to mask the labels first. for the # multinomial case this is not necessary. if multi_class == "ovr": w0 = np.zeros(n_features + int(fit_intercept), dtype=X.dtype) mask = y == pos_class y_bin = np.ones(y.shape, dtype=X.dtype) if solver in ["lbfgs", "newton-cg"]: # HalfBinomialLoss, used for those solvers, represents y in [0, 1] instead # of in [-1, 1]. 
mask_classes = np.array([0, 1]) y_bin[~mask] = 0.0 else: mask_classes = np.array([-1, 1]) y_bin[~mask] = -1.0 # for compute_class_weight if class_weight == "balanced": class_weight_ = compute_class_weight( class_weight, classes=mask_classes, y=y_bin ) sample_weight *= class_weight_[le.fit_transform(y_bin)] else: if solver in ["sag", "saga", "lbfgs", "newton-cg"]: # SAG, lbfgs and newton-cg multinomial solvers need LabelEncoder, # not LabelBinarizer, i.e. y as a 1d-array of integers. # LabelEncoder also saves memory compared to LabelBinarizer, especially # when n_classes is large. le = LabelEncoder() Y_multi = le.fit_transform(y).astype(X.dtype, copy=False) else: # For liblinear solver, apply LabelBinarizer, i.e. y is one-hot encoded. lbin = LabelBinarizer() Y_multi = lbin.fit_transform(y) if Y_multi.shape[1] == 1: Y_multi = np.hstack([1 - Y_multi, Y_multi]) w0 = np.zeros( (classes.size, n_features + int(fit_intercept)), order="F", dtype=X.dtype ) if coef is not None: # it must work both giving the bias term and not if multi_class == "ovr": if coef.size not in (n_features, w0.size): raise ValueError( "Initialization coef is of shape %d, expected shape %d or %d" % (coef.size, n_features, w0.size) ) w0[: coef.size] = coef else: # For binary problems coef.shape[0] should be 1, otherwise it # should be classes.size. n_classes = classes.size if n_classes == 2: n_classes = 1 if coef.shape[0] != n_classes or coef.shape[1] not in ( n_features, n_features + 1, ): raise ValueError( "Initialization coef is of shape (%d, %d), expected " "shape (%d, %d) or (%d, %d)" % ( coef.shape[0], coef.shape[1], classes.size, n_features, classes.size, n_features + 1, ) ) if n_classes == 1: w0[0, : coef.shape[1]] = -coef w0[1, : coef.shape[1]] = coef else: w0[:, : coef.shape[1]] = coef if multi_class == "multinomial": if solver in ["lbfgs", "newton-cg"]: # scipy.optimize.minimize and newton-cg accept only ravelled parameters, # i.e. 1d-arrays. 
LinearModelLoss expects classes to be contiguous and # reconstructs the 2d-array via w0.reshape((n_classes, -1), order="F"). # As w0 is F-contiguous, ravel(order="F") also avoids a copy. w0 = w0.ravel(order="F") loss = LinearModelLoss( base_loss=HalfMultinomialLoss(n_classes=classes.size), fit_intercept=fit_intercept, ) target = Y_multi if solver in "lbfgs": func = loss.loss_gradient elif solver == "newton-cg": func = loss.loss grad = loss.gradient hess = loss.gradient_hessian_product # hess = [gradient, hessp] warm_start_sag = {"coef": w0.T} else: target = y_bin if solver == "lbfgs": loss = LinearModelLoss( base_loss=HalfBinomialLoss(), fit_intercept=fit_intercept ) func = loss.loss_gradient elif solver == "newton-cg": loss = LinearModelLoss( base_loss=HalfBinomialLoss(), fit_intercept=fit_intercept ) func = loss.loss grad = loss.gradient hess = loss.gradient_hessian_product # hess = [gradient, hessp] warm_start_sag = {"coef": np.expand_dims(w0, axis=1)} coefs = list() n_iter = np.zeros(len(Cs), dtype=np.int32) for i, C in enumerate(Cs): if solver == "lbfgs": l2_reg_strength = 1.0 / C iprint = [-1, 50, 1, 100, 101][ np.searchsorted(np.array([0, 1, 2, 3]), verbose) ] opt_res = optimize.minimize( func, w0, method="L-BFGS-B", jac=True, args=(X, target, sample_weight, l2_reg_strength, n_threads), options={"iprint": iprint, "gtol": tol, "maxiter": max_iter}, ) n_iter_i = _check_optimize_result( solver, opt_res, max_iter, extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG, ) w0, loss = opt_res.x, opt_res.fun elif solver == "newton-cg": l2_reg_strength = 1.0 / C args = (X, target, sample_weight, l2_reg_strength, n_threads) w0, n_iter_i = _newton_cg( hess, func, grad, w0, args=args, maxiter=max_iter, tol=tol ) elif solver == "liblinear": coef_, intercept_, n_iter_i, = _fit_liblinear( X, target, C, fit_intercept, intercept_scaling, None, penalty, dual, verbose, max_iter, tol, random_state, sample_weight=sample_weight, ) if fit_intercept: w0 = np.concatenate([coef_.ravel(), 
intercept_]) else: w0 = coef_.ravel() elif solver in ["sag", "saga"]: if multi_class == "multinomial": target = target.astype(X.dtype, copy=False) loss = "multinomial" else: loss = "log" # alpha is for L2-norm, beta is for L1-norm if penalty == "l1": alpha = 0.0 beta = 1.0 / C elif penalty == "l2": alpha = 1.0 / C beta = 0.0 else: # Elastic-Net penalty alpha = (1.0 / C) * (1 - l1_ratio) beta = (1.0 / C) * l1_ratio w0, n_iter_i, warm_start_sag = sag_solver( X, target, sample_weight, loss, alpha, beta, max_iter, tol, verbose, random_state, False, max_squared_sum, warm_start_sag, is_saga=(solver == "saga"), ) else: raise ValueError( "solver must be one of {'liblinear', 'lbfgs', " "'newton-cg', 'sag'}, got '%s' instead" % solver ) if multi_class == "multinomial": n_classes = max(2, classes.size) if solver in ["lbfgs", "newton-cg"]: multi_w0 = np.reshape(w0, (n_classes, -1), order="F") else: multi_w0 = w0 if n_classes == 2: multi_w0 = multi_w0[1][np.newaxis, :] coefs.append(multi_w0.copy()) else: coefs.append(w0.copy()) n_iter[i] = n_iter_i return np.array(coefs), np.array(Cs), n_iter # helper function for LogisticCV def _log_reg_scoring_path( X, y, train, test, pos_class=None, Cs=10, scoring=None, fit_intercept=False, max_iter=100, tol=1e-4, class_weight=None, verbose=0, solver="lbfgs", penalty="l2", dual=False, intercept_scaling=1.0, multi_class="auto", random_state=None, max_squared_sum=None, sample_weight=None, l1_ratio=None, ): """Computes scores across logistic_regression_path Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) or (n_samples, n_targets) Target labels. train : list of indices The indices of the train set. test : list of indices The indices of the test set. pos_class : int, default=None The class with respect to which we perform a one-vs-all fit. If None, then it is assumed that the given problem is binary. 
Cs : int or list of floats, default=10 Each of the values in Cs describes the inverse of regularization strength. If Cs is as an int, then a grid of Cs values are chosen in a logarithmic scale between 1e-4 and 1e4. If not provided, then a fixed set of values for Cs are used. scoring : callable, default=None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. For a list of scoring functions that can be used, look at :mod:`sklearn.metrics`. The default scoring option used is accuracy_score. fit_intercept : bool, default=False If False, then the bias term is set to zero. Else the last term of each coef_ gives us the intercept. max_iter : int, default=100 Maximum number of iterations for the solver. tol : float, default=1e-4 Tolerance for stopping criteria. class_weight : dict or 'balanced', default=None Weights associated with classes in the form ``{class_label: weight}``. If not given, all classes are supposed to have weight one. The "balanced" mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))`` Note that these weights will be multiplied with sample_weight (passed through the fit method) if sample_weight is specified. verbose : int, default=0 For the liblinear and lbfgs solvers set verbose to any positive number for verbosity. solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'}, \ default='lbfgs' Decides which solver to use. penalty : {'l1', 'l2', 'elasticnet'}, default='l2' Used to specify the norm used in the penalization. The 'newton-cg', 'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is only supported by the 'saga' solver. dual : bool, default=False Dual or primal formulation. Dual formulation is only implemented for l2 penalty with liblinear solver. Prefer dual=False when n_samples > n_features. intercept_scaling : float, default=1. 
Useful only when the solver 'liblinear' is used and self.fit_intercept is set to True. In this case, x becomes [x, self.intercept_scaling], i.e. a "synthetic" feature with constant value equals to intercept_scaling is appended to the instance vector. The intercept becomes intercept_scaling * synthetic feature weight Note! the synthetic feature weight is subject to l1/l2 regularization as all other features. To lessen the effect of regularization on synthetic feature weight (and therefore on the intercept) intercept_scaling has to be increased. multi_class : {'auto', 'ovr', 'multinomial'}, default='auto' If the option chosen is 'ovr', then a binary problem is fit for each label. For 'multinomial' the loss minimised is the multinomial loss fit across the entire probability distribution, *even when the data is binary*. 'multinomial' is unavailable when solver='liblinear'. random_state : int, RandomState instance, default=None Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the data. See :term:`Glossary <random_state>` for details. max_squared_sum : float, default=None Maximum squared sum of X over samples. Used only in SAG solver. If None, it will be computed, going through all the samples. The value should be precomputed to speed up cross validation. sample_weight : array-like of shape(n_samples,), default=None Array of weights that are assigned to individual samples. If not provided, then each sample is given unit weight. l1_ratio : float, default=None The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a combination of L1 and L2. Returns ------- coefs : ndarray of shape (n_cs, n_features) or (n_cs, n_features + 1) List of coefficients for the Logistic Regression model. 
If fit_intercept is set to True then the second dimension will be n_features + 1, where the last item represents the intercept. Cs : ndarray Grid of Cs used for cross-validation. scores : ndarray of shape (n_cs,) Scores obtained for each Cs. n_iter : ndarray of shape(n_cs,) Actual number of iteration for each Cs. """ X_train = X[train] X_test = X[test] y_train = y[train] y_test = y[test] if sample_weight is not None: sample_weight = _check_sample_weight(sample_weight, X) sample_weight = sample_weight[train] coefs, Cs, n_iter = _logistic_regression_path( X_train, y_train, Cs=Cs, l1_ratio=l1_ratio, fit_intercept=fit_intercept, solver=solver, max_iter=max_iter, class_weight=class_weight, pos_class=pos_class, multi_class=multi_class, tol=tol, verbose=verbose, dual=dual, penalty=penalty, intercept_scaling=intercept_scaling, random_state=random_state, check_input=False, max_squared_sum=max_squared_sum, sample_weight=sample_weight, ) log_reg = LogisticRegression(solver=solver, multi_class=multi_class) # The score method of Logistic Regression has a classes_ attribute. if multi_class == "ovr": log_reg.classes_ = np.array([-1, 1]) elif multi_class == "multinomial": log_reg.classes_ = np.unique(y_train) else: raise ValueError( "multi_class should be either multinomial or ovr, got %d" % multi_class ) if pos_class is not None: mask = y_test == pos_class y_test = np.ones(y_test.shape, dtype=np.float64) y_test[~mask] = -1.0 scores = list() scoring = get_scorer(scoring) for w in coefs: if multi_class == "ovr": w = w[np.newaxis, :] if fit_intercept: log_reg.coef_ = w[:, :-1] log_reg.intercept_ = w[:, -1] else: log_reg.coef_ = w log_reg.intercept_ = 0.0 if scoring is None: scores.append(log_reg.score(X_test, y_test)) else: scores.append(scoring(log_reg, X_test, y_test)) return coefs, Cs, np.array(scores), n_iter class LogisticRegression(LinearClassifierMixin, SparseCoefMixin, BaseEstimator): """ Logistic Regression (aka logit, MaxEnt) classifier. 
    In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
    scheme if the 'multi_class' option is set to 'ovr', and uses the
    cross-entropy loss if the 'multi_class' option is set to 'multinomial'.
    (Currently the 'multinomial' option is supported only by the 'lbfgs',
    'sag', 'saga' and 'newton-cg' solvers.)

    This class implements regularized logistic regression using the
    'liblinear' library, 'newton-cg', 'sag', 'saga' and 'lbfgs' solvers. **Note
    that regularization is applied by default**. It can handle both dense
    and sparse input. Use C-ordered arrays or CSR matrices containing 64-bit
    floats for optimal performance; any other input format will be converted
    (and copied).

    The 'newton-cg', 'sag', and 'lbfgs' solvers support only L2 regularization
    with primal formulation, or no regularization. The 'liblinear' solver
    supports both L1 and L2 regularization, with a dual formulation only for
    the L2 penalty. The Elastic-Net regularization is only supported by the
    'saga' solver.

    Read more in the :ref:`User Guide <logistic_regression>`.

    Parameters
    ----------
    penalty : {'l1', 'l2', 'elasticnet', 'none'}, default='l2'
        Specify the norm of the penalty:

        - `'none'`: no penalty is added;
        - `'l2'`: add a L2 penalty term and it is the default choice;
        - `'l1'`: add a L1 penalty term;
        - `'elasticnet'`: both L1 and L2 penalty terms are added.

        .. warning::
           Some penalties may not work with some solvers. See the parameter
           `solver` below, to know the compatibility between the penalty and
           solver.

        .. versionadded:: 0.19
           l1 penalty with SAGA solver (allowing 'multinomial' + L1)

    dual : bool, default=False
        Dual or primal formulation. Dual formulation is only implemented for
        l2 penalty with liblinear solver. Prefer dual=False when
        n_samples > n_features.

    tol : float, default=1e-4
        Tolerance for stopping criteria.

    C : float, default=1.0
        Inverse of regularization strength; must be a positive float.
        Like in support vector machines, smaller values specify stronger
        regularization.

    fit_intercept : bool, default=True
        Specifies if a constant (a.k.a. bias or intercept) should be
        added to the decision function.

    intercept_scaling : float, default=1
        Useful only when the solver 'liblinear' is used
        and self.fit_intercept is set to True. In this case, x becomes
        [x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equal to
        intercept_scaling is appended to the instance vector.
        The intercept becomes ``intercept_scaling * synthetic_feature_weight``.

        Note! the synthetic feature weight is subject to l1/l2 regularization
        as all other features.
        To lessen the effect of regularization on synthetic feature weight
        (and therefore on the intercept) intercept_scaling has to be increased.

    class_weight : dict or 'balanced', default=None
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one.

        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``.

        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.

        .. versionadded:: 0.17
           *class_weight='balanced'*

    random_state : int, RandomState instance, default=None
        Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the
        data. See :term:`Glossary <random_state>` for details.

    solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'}, \
            default='lbfgs'

        Algorithm to use in the optimization problem. Default is 'lbfgs'.
        To choose a solver, you might want to consider the following aspects:

            - For small datasets, 'liblinear' is a good choice, whereas 'sag'
              and 'saga' are faster for large ones;
            - For multiclass problems, only 'newton-cg', 'sag', 'saga' and
              'lbfgs' handle multinomial loss;
            - 'liblinear' is limited to one-versus-rest schemes.

        .. warning::
           The choice of the algorithm depends on the penalty chosen:
           Supported penalties by solver:

           - 'newton-cg'   -   ['l2', 'none']
           - 'lbfgs'       -   ['l2', 'none']
           - 'liblinear'   -   ['l1', 'l2']
           - 'sag'         -   ['l2', 'none']
           - 'saga'        -   ['elasticnet', 'l1', 'l2', 'none']

        .. note::
           'sag' and 'saga' fast convergence is only guaranteed on
           features with approximately the same scale. You can
           preprocess the data with a scaler from :mod:`sklearn.preprocessing`.

        .. seealso::
           Refer to the User Guide for more information regarding
           :class:`LogisticRegression` and more specifically the
           `Table <https://scikit-learn.org/dev/modules/linear_model.html#logistic-regression>`_
           summarizing solver/penalty supports.
           <!-- # noqa: E501 -->

        .. versionadded:: 0.17
           Stochastic Average Gradient descent solver.
        .. versionadded:: 0.19
           SAGA solver.
        .. versionchanged:: 0.22
            The default solver changed from 'liblinear' to 'lbfgs' in 0.22.

    max_iter : int, default=100
        Maximum number of iterations taken for the solvers to converge.

    multi_class : {'auto', 'ovr', 'multinomial'}, default='auto'
        If the option chosen is 'ovr', then a binary problem is fit for each
        label. For 'multinomial' the loss minimised is the multinomial loss fit
        across the entire probability distribution, *even when the data is
        binary*. 'multinomial' is unavailable when solver='liblinear'.
        'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
        and otherwise selects 'multinomial'.

        .. versionadded:: 0.18
           Stochastic Average Gradient descent solver for 'multinomial' case.
        .. versionchanged:: 0.22
            Default changed from 'ovr' to 'auto' in 0.22.

    verbose : int, default=0
        For the liblinear and lbfgs solvers set verbose to any positive
        number for verbosity.

    warm_start : bool, default=False
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
        Useless for liblinear solver. See :term:`the Glossary <warm_start>`.

        .. versionadded:: 0.17
           *warm_start* to support *lbfgs*, *newton-cg*, *sag*, *saga* solvers.

    n_jobs : int, default=None
        Number of CPU cores used when parallelizing over classes if
        multi_class='ovr'. This parameter is ignored when the ``solver`` is
        set to 'liblinear' regardless of whether 'multi_class' is specified or
        not. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`
        context. ``-1`` means using all processors.
        See :term:`Glossary <n_jobs>` for more details.

    l1_ratio : float, default=None
        The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
        used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
        to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
        to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a
        combination of L1 and L2.

    Attributes
    ----------

    classes_ : ndarray of shape (n_classes, )
        A list of class labels known to the classifier.

    coef_ : ndarray of shape (1, n_features) or (n_classes, n_features)
        Coefficient of the features in the decision function.

        `coef_` is of shape (1, n_features) when the given problem is binary.
        In particular, when `multi_class='multinomial'`, `coef_` corresponds
        to outcome 1 (True) and `-coef_` corresponds to outcome 0 (False).

    intercept_ : ndarray of shape (1,) or (n_classes,)
        Intercept (a.k.a. bias) added to the decision function.

        If `fit_intercept` is set to False, the intercept is set to zero.
        `intercept_` is of shape (1,) when the given problem is binary.
        In particular, when `multi_class='multinomial'`, `intercept_`
        corresponds to outcome 1 (True) and `-intercept_` corresponds to
        outcome 0 (False).

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    n_iter_ : ndarray of shape (n_classes,) or (1, )
        Actual number of iterations for all classes. If binary or multinomial,
        it returns only 1 element. For liblinear solver, only the maximum
        number of iteration across all classes is given.

        .. versionchanged:: 0.20

            In SciPy <= 1.0.0 the number of lbfgs iterations may exceed
            ``max_iter``. ``n_iter_`` will now report at most ``max_iter``.

    See Also
    --------
    SGDClassifier : Incrementally trained logistic regression (when given
        the parameter ``loss="log"``).
    LogisticRegressionCV : Logistic regression with built-in cross validation.

    Notes
    -----
    The underlying C implementation uses a random number generator to
    select features when fitting the model. It is thus not uncommon
    to have slightly different results for the same input data. If
    that happens, try with a smaller tol parameter.

    Predict output may not match that of standalone liblinear in certain
    cases. See :ref:`differences from liblinear <liblinear_differences>`
    in the narrative documentation.

    References
    ----------

    L-BFGS-B -- Software for Large-scale Bound-constrained Optimization
        Ciyou Zhu, Richard Byrd, Jorge Nocedal and Jose Luis Morales.
        http://users.iems.northwestern.edu/~nocedal/lbfgsb.html

    LIBLINEAR -- A Library for Large Linear Classification
        https://www.csie.ntu.edu.tw/~cjlin/liblinear/

    SAG -- Mark Schmidt, Nicolas Le Roux, and Francis Bach
        Minimizing Finite Sums with the Stochastic Average Gradient
        https://hal.inria.fr/hal-00860051/document

    SAGA -- Defazio, A., Bach F. & Lacoste-Julien S. (2014).
        :arxiv:`"SAGA: A Fast Incremental Gradient Method With Support
        for Non-Strongly Convex Composite Objectives" <1407.0202>`

    Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
        methods for logistic regression and maximum entropy models.
        Machine Learning 85(1-2):41-75.
        https://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.linear_model import LogisticRegression
    >>> X, y = load_iris(return_X_y=True)
    >>> clf = LogisticRegression(random_state=0).fit(X, y)
    >>> clf.predict(X[:2, :])
    array([0, 0])
    >>> clf.predict_proba(X[:2, :])
    array([[9.8...e-01, 1.8...e-02, 1.4...e-08],
           [9.7...e-01, 2.8...e-02, ...e-08]])
    >>> clf.score(X, y)
    0.97...
    """

    def __init__(
        self,
        penalty="l2",
        *,
        dual=False,
        tol=1e-4,
        C=1.0,
        fit_intercept=True,
        intercept_scaling=1,
        class_weight=None,
        random_state=None,
        solver="lbfgs",
        max_iter=100,
        multi_class="auto",
        verbose=0,
        warm_start=False,
        n_jobs=None,
        l1_ratio=None,
    ):
        # Store the constructor parameters unmodified; all validation is
        # deferred to fit(), per the scikit-learn estimator contract.
        self.penalty = penalty
        self.dual = dual
        self.tol = tol
        self.C = C
        self.fit_intercept = fit_intercept
        self.intercept_scaling = intercept_scaling
        self.class_weight = class_weight
        self.random_state = random_state
        self.solver = solver
        self.max_iter = max_iter
        self.multi_class = multi_class
        self.verbose = verbose
        self.warm_start = warm_start
        self.n_jobs = n_jobs
        self.l1_ratio = l1_ratio

    def fit(self, X, y, sample_weight=None):
        """
        Fit the model according to the given training data.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training vector, where `n_samples` is the number of samples and
            `n_features` is the number of features.

        y : array-like of shape (n_samples,)
            Target vector relative to X.

        sample_weight : array-like of shape (n_samples,) default=None
            Array of weights that are assigned to individual samples.
            If not provided, then each sample is given unit weight.

            .. versionadded:: 0.17
               *sample_weight* support to LogisticRegression.

        Returns
        -------
        self
            Fitted estimator.

        Notes
        -----
        The SAGA solver supports both float64 and float32 bit arrays.
        """
        solver = _check_solver(self.solver, self.penalty, self.dual)

        if not isinstance(self.C, numbers.Number) or self.C < 0:
            raise ValueError("Penalty term must be positive; got (C=%r)" % self.C)
        if self.penalty == "elasticnet":
            if (
                not isinstance(self.l1_ratio, numbers.Number)
                or self.l1_ratio < 0
                or self.l1_ratio > 1
            ):
                raise ValueError(
                    "l1_ratio must be between 0 and 1; got (l1_ratio=%r)"
                    % self.l1_ratio
                )
        elif self.l1_ratio is not None:
            warnings.warn(
                "l1_ratio parameter is only used when penalty is "
                "'elasticnet'. Got "
                "(penalty={})".format(self.penalty)
            )
        if self.penalty == "none":
            # No regularization is emulated by an infinite C (zero penalty
            # strength) with the l2 machinery.
            if self.C != 1.0:  # default values
                warnings.warn(
                    "Setting penalty='none' will ignore the C and l1_ratio parameters"
                )
                # Note that check for l1_ratio is done right above
            C_ = np.inf
            penalty = "l2"
        else:
            C_ = self.C
            penalty = self.penalty
        if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
            raise ValueError(
                "Maximum number of iteration must be positive; got (max_iter=%r)"
                % self.max_iter
            )
        if not isinstance(self.tol, numbers.Number) or self.tol < 0:
            raise ValueError(
                "Tolerance for stopping criteria must be positive; got (tol=%r)"
                % self.tol
            )

        if solver == "lbfgs":
            _dtype = np.float64
        else:
            _dtype = [np.float64, np.float32]

        X, y = self._validate_data(
            X,
            y,
            accept_sparse="csr",
            dtype=_dtype,
            order="C",
            accept_large_sparse=solver not in ["liblinear", "sag", "saga"],
        )
        check_classification_targets(y)
        self.classes_ = np.unique(y)

        multi_class = _check_multi_class(self.multi_class, solver, len(self.classes_))

        if solver == "liblinear":
            # liblinear handles the whole fit internally (including OvR), so
            # it returns early and skips the joblib path below.
            if effective_n_jobs(self.n_jobs) != 1:
                warnings.warn(
                    "'n_jobs' > 1 does not have any effect when"
                    " 'solver' is set to 'liblinear'. Got 'n_jobs'"
                    " = {}.".format(effective_n_jobs(self.n_jobs))
                )
            self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
                X,
                y,
                self.C,
                self.fit_intercept,
                self.intercept_scaling,
                self.class_weight,
                self.penalty,
                self.dual,
                self.verbose,
                self.max_iter,
                self.tol,
                self.random_state,
                sample_weight=sample_weight,
            )
            return self

        if solver in ["sag", "saga"]:
            max_squared_sum = row_norms(X, squared=True).max()
        else:
            max_squared_sum = None

        n_classes = len(self.classes_)
        classes_ = self.classes_
        if n_classes < 2:
            raise ValueError(
                "This solver needs samples of at least 2 classes"
                " in the data, but the data contains only one"
                " class: %r" % classes_[0]
            )

        if len(self.classes_) == 2:
            # Binary problems need a single OvR fit against the second class.
            n_classes = 1
            classes_ = classes_[1:]

        if self.warm_start:
            warm_start_coef = getattr(self, "coef_", None)
        else:
            warm_start_coef = None
        if warm_start_coef is not None and self.fit_intercept:
            warm_start_coef = np.append(
                warm_start_coef, self.intercept_[:, np.newaxis], axis=1
            )

        # Hack so that we iterate only once for the multinomial case.
        if multi_class == "multinomial":
            classes_ = [None]
            warm_start_coef = [warm_start_coef]
        if warm_start_coef is None:
            warm_start_coef = [None] * n_classes

        path_func = delayed(_logistic_regression_path)

        # The SAG solver releases the GIL so it's more efficient to use
        # threads for this solver.
        if solver in ["sag", "saga"]:
            prefer = "threads"
        else:
            prefer = "processes"

        # TODO: Refactor this to avoid joblib parallelism entirely when doing binary
        # and multinomial multiclass classification and use joblib only for the
        # one-vs-rest multiclass case.
        if (
            solver in ["lbfgs", "newton-cg"]
            and len(classes_) == 1
            and effective_n_jobs(self.n_jobs) == 1
        ):
            # In the future, we would like n_threads = _openmp_effective_n_threads()
            # For the time being, we just do
            n_threads = 1
        else:
            n_threads = 1

        fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, prefer=prefer)(
            path_func(
                X,
                y,
                pos_class=class_,
                Cs=[C_],
                l1_ratio=self.l1_ratio,
                fit_intercept=self.fit_intercept,
                tol=self.tol,
                verbose=self.verbose,
                solver=solver,
                multi_class=multi_class,
                max_iter=self.max_iter,
                class_weight=self.class_weight,
                check_input=False,
                random_state=self.random_state,
                coef=warm_start_coef_,
                penalty=penalty,
                max_squared_sum=max_squared_sum,
                sample_weight=sample_weight,
                n_threads=n_threads,
            )
            for class_, warm_start_coef_ in zip(classes_, warm_start_coef)
        )

        fold_coefs_, _, n_iter_ = zip(*fold_coefs_)
        self.n_iter_ = np.asarray(n_iter_, dtype=np.int32)[:, 0]

        n_features = X.shape[1]
        if multi_class == "multinomial":
            self.coef_ = fold_coefs_[0][0]
        else:
            self.coef_ = np.asarray(fold_coefs_)
            self.coef_ = self.coef_.reshape(
                n_classes, n_features + int(self.fit_intercept)
            )

        if self.fit_intercept:
            # The intercept was fit as the trailing column of each coef row;
            # split it out into intercept_.
            self.intercept_ = self.coef_[:, -1]
            self.coef_ = self.coef_[:, :-1]
        else:
            self.intercept_ = np.zeros(n_classes)

        return self

    def predict_proba(self, X):
        """
        Probability estimates.

        The returned estimates for all classes are ordered by the
        label of classes.

        For a multi_class problem, if multi_class is set to be "multinomial"
        the softmax function is used to find the predicted probability of
        each class.
        Else use a one-vs-rest approach, i.e. calculate the probability
        of each class assuming it to be positive using the logistic function
        and normalize these values across all the classes.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Vector to be scored, where `n_samples` is the number of samples and
            `n_features` is the number of features.

        Returns
        -------
        T : array-like of shape (n_samples, n_classes)
            Returns the probability of the sample for each class in the model,
            where classes are ordered as they are in ``self.classes_``.
        """
        check_is_fitted(self)

        ovr = self.multi_class in ["ovr", "warn"] or (
            self.multi_class == "auto"
            and (self.classes_.size <= 2 or self.solver == "liblinear")
        )
        if ovr:
            return super()._predict_proba_lr(X)
        else:
            decision = self.decision_function(X)
            if decision.ndim == 1:
                # Workaround for multi_class="multinomial" and binary outcomes
                # which requires softmax prediction with only a 1D decision.
                decision_2d = np.c_[-decision, decision]
            else:
                decision_2d = decision
            return softmax(decision_2d, copy=False)

    def predict_log_proba(self, X):
        """
        Predict logarithm of probability estimates.

        The returned estimates for all classes are ordered by the
        label of classes.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Vector to be scored, where `n_samples` is the number of samples and
            `n_features` is the number of features.

        Returns
        -------
        T : array-like of shape (n_samples, n_classes)
            Returns the log-probability of the sample for each class in the
            model, where classes are ordered as they are in ``self.classes_``.
        """
        return np.log(self.predict_proba(X))


class LogisticRegressionCV(LogisticRegression, LinearClassifierMixin, BaseEstimator):
    """Logistic Regression CV (aka logit, MaxEnt) classifier.

    See glossary entry for :term:`cross-validation estimator`.

    This class implements logistic regression using liblinear, newton-cg, sag
    or lbfgs optimizer. The newton-cg, sag and lbfgs solvers support only L2
    regularization with primal formulation. The liblinear solver supports both
    L1 and L2 regularization, with a dual formulation only for the L2 penalty.
    Elastic-Net penalty is only supported by the saga solver.
For the grid of `Cs` values and `l1_ratios` values, the best hyperparameter is selected by the cross-validator :class:`~sklearn.model_selection.StratifiedKFold`, but it can be changed using the :term:`cv` parameter. The 'newton-cg', 'sag', 'saga' and 'lbfgs' solvers can warm-start the coefficients (see :term:`Glossary<warm_start>`). Read more in the :ref:`User Guide <logistic_regression>`. Parameters ---------- Cs : int or list of floats, default=10 Each of the values in Cs describes the inverse of regularization strength. If Cs is as an int, then a grid of Cs values are chosen in a logarithmic scale between 1e-4 and 1e4. Like in support vector machines, smaller values specify stronger regularization. fit_intercept : bool, default=True Specifies if a constant (a.k.a. bias or intercept) should be added to the decision function. cv : int or cross-validation generator, default=None The default cross-validation generator used is Stratified K-Folds. If an integer is provided, then it is the number of folds used. See the module :mod:`sklearn.model_selection` module for the list of possible cross-validation objects. .. versionchanged:: 0.22 ``cv`` default value if None changed from 3-fold to 5-fold. dual : bool, default=False Dual or primal formulation. Dual formulation is only implemented for l2 penalty with liblinear solver. Prefer dual=False when n_samples > n_features. penalty : {'l1', 'l2', 'elasticnet'}, default='l2' Specify the norm of the penalty: - `'l2'`: add a L2 penalty term (used by default); - `'l1'`: add a L1 penalty term; - `'elasticnet'`: both L1 and L2 penalty terms are added. .. warning:: Some penalties may not work with some solvers. See the parameter `solver` below, to know the compatibility between the penalty and solver. scoring : str or callable, default=None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. 
For a list of scoring functions that can be used, look at :mod:`sklearn.metrics`. The default scoring option used is 'accuracy'. solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'}, \ default='lbfgs' Algorithm to use in the optimization problem. Default is 'lbfgs'. To choose a solver, you might want to consider the following aspects: - For small datasets, 'liblinear' is a good choice, whereas 'sag' and 'saga' are faster for large ones; - For multiclass problems, only 'newton-cg', 'sag', 'saga' and 'lbfgs' handle multinomial loss; - 'liblinear' might be slower in :class:`LogisticRegressionCV` because it does not handle warm-starting. 'liblinear' is limited to one-versus-rest schemes. .. warning:: The choice of the algorithm depends on the penalty chosen: - 'newton-cg' - ['l2'] - 'lbfgs' - ['l2'] - 'liblinear' - ['l1', 'l2'] - 'sag' - ['l2'] - 'saga' - ['elasticnet', 'l1', 'l2'] .. note:: 'sag' and 'saga' fast convergence is only guaranteed on features with approximately the same scale. You can preprocess the data with a scaler from :mod:`sklearn.preprocessing`. .. versionadded:: 0.17 Stochastic Average Gradient descent solver. .. versionadded:: 0.19 SAGA solver. tol : float, default=1e-4 Tolerance for stopping criteria. max_iter : int, default=100 Maximum number of iterations of the optimization algorithm. class_weight : dict or 'balanced', default=None Weights associated with classes in the form ``{class_label: weight}``. If not given, all classes are supposed to have weight one. The "balanced" mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))``. Note that these weights will be multiplied with sample_weight (passed through the fit method) if sample_weight is specified. .. versionadded:: 0.17 class_weight == 'balanced' n_jobs : int, default=None Number of CPU cores used during the cross-validation loop. 
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. verbose : int, default=0 For the 'liblinear', 'sag' and 'lbfgs' solvers set verbose to any positive number for verbosity. refit : bool, default=True If set to True, the scores are averaged across all folds, and the coefs and the C that corresponds to the best score is taken, and a final refit is done using these parameters. Otherwise the coefs, intercepts and C that correspond to the best scores across folds are averaged. intercept_scaling : float, default=1 Useful only when the solver 'liblinear' is used and self.fit_intercept is set to True. In this case, x becomes [x, self.intercept_scaling], i.e. a "synthetic" feature with constant value equal to intercept_scaling is appended to the instance vector. The intercept becomes ``intercept_scaling * synthetic_feature_weight``. Note! the synthetic feature weight is subject to l1/l2 regularization as all other features. To lessen the effect of regularization on synthetic feature weight (and therefore on the intercept) intercept_scaling has to be increased. multi_class : {'auto, 'ovr', 'multinomial'}, default='auto' If the option chosen is 'ovr', then a binary problem is fit for each label. For 'multinomial' the loss minimised is the multinomial loss fit across the entire probability distribution, *even when the data is binary*. 'multinomial' is unavailable when solver='liblinear'. 'auto' selects 'ovr' if the data is binary, or if solver='liblinear', and otherwise selects 'multinomial'. .. versionadded:: 0.18 Stochastic Average Gradient descent solver for 'multinomial' case. .. versionchanged:: 0.22 Default changed from 'ovr' to 'auto' in 0.22. random_state : int, RandomState instance, default=None Used when `solver='sag'`, 'saga' or 'liblinear' to shuffle the data. Note that this only applies to the solver and not the cross-validation generator. 
See :term:`Glossary <random_state>` for details. l1_ratios : list of float, default=None The list of Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only used if ``penalty='elasticnet'``. A value of 0 is equivalent to using ``penalty='l2'``, while 1 is equivalent to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a combination of L1 and L2. Attributes ---------- classes_ : ndarray of shape (n_classes, ) A list of class labels known to the classifier. coef_ : ndarray of shape (1, n_features) or (n_classes, n_features) Coefficient of the features in the decision function. `coef_` is of shape (1, n_features) when the given problem is binary. intercept_ : ndarray of shape (1,) or (n_classes,) Intercept (a.k.a. bias) added to the decision function. If `fit_intercept` is set to False, the intercept is set to zero. `intercept_` is of shape(1,) when the problem is binary. Cs_ : ndarray of shape (n_cs) Array of C i.e. inverse of regularization parameter values used for cross-validation. l1_ratios_ : ndarray of shape (n_l1_ratios) Array of l1_ratios used for cross-validation. If no l1_ratio is used (i.e. penalty is not 'elasticnet'), this is set to ``[None]`` coefs_paths_ : ndarray of shape (n_folds, n_cs, n_features) or \ (n_folds, n_cs, n_features + 1) dict with classes as the keys, and the path of coefficients obtained during cross-validating across each fold and then across each Cs after doing an OvR for the corresponding class as values. If the 'multi_class' option is set to 'multinomial', then the coefs_paths are the coefficients corresponding to each class. Each dict value has shape ``(n_folds, n_cs, n_features)`` or ``(n_folds, n_cs, n_features + 1)`` depending on whether the intercept is fit or not. If ``penalty='elasticnet'``, the shape is ``(n_folds, n_cs, n_l1_ratios_, n_features)`` or ``(n_folds, n_cs, n_l1_ratios_, n_features + 1)``. 
scores_ : dict dict with classes as the keys, and the values as the grid of scores obtained during cross-validating each fold, after doing an OvR for the corresponding class. If the 'multi_class' option given is 'multinomial' then the same scores are repeated across all classes, since this is the multinomial class. Each dict value has shape ``(n_folds, n_cs`` or ``(n_folds, n_cs, n_l1_ratios)`` if ``penalty='elasticnet'``. C_ : ndarray of shape (n_classes,) or (n_classes - 1,) Array of C that maps to the best scores across every class. If refit is set to False, then for each class, the best C is the average of the C's that correspond to the best scores for each fold. `C_` is of shape(n_classes,) when the problem is binary. l1_ratio_ : ndarray of shape (n_classes,) or (n_classes - 1,) Array of l1_ratio that maps to the best scores across every class. If refit is set to False, then for each class, the best l1_ratio is the average of the l1_ratio's that correspond to the best scores for each fold. `l1_ratio_` is of shape(n_classes,) when the problem is binary. n_iter_ : ndarray of shape (n_classes, n_folds, n_cs) or (1, n_folds, n_cs) Actual number of iterations for all classes, folds and Cs. In the binary or multinomial cases, the first dimension is equal to 1. If ``penalty='elasticnet'``, the shape is ``(n_classes, n_folds, n_cs, n_l1_ratios)`` or ``(1, n_folds, n_cs, n_l1_ratios)``. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- LogisticRegression : Logistic regression without tuning the hyperparameter `C`. 
Examples -------- >>> from sklearn.datasets import load_iris >>> from sklearn.linear_model import LogisticRegressionCV >>> X, y = load_iris(return_X_y=True) >>> clf = LogisticRegressionCV(cv=5, random_state=0).fit(X, y) >>> clf.predict(X[:2, :]) array([0, 0]) >>> clf.predict_proba(X[:2, :]).shape (2, 3) >>> clf.score(X, y) 0.98... """ def __init__( self, *, Cs=10, fit_intercept=True, cv=None, dual=False, penalty="l2", scoring=None, solver="lbfgs", tol=1e-4, max_iter=100, class_weight=None, n_jobs=None, verbose=0, refit=True, intercept_scaling=1.0, multi_class="auto", random_state=None, l1_ratios=None, ): self.Cs = Cs self.fit_intercept = fit_intercept self.cv = cv self.dual = dual self.penalty = penalty self.scoring = scoring self.tol = tol self.max_iter = max_iter self.class_weight = class_weight self.n_jobs = n_jobs self.verbose = verbose self.solver = solver self.refit = refit self.intercept_scaling = intercept_scaling self.multi_class = multi_class self.random_state = random_state self.l1_ratios = l1_ratios def fit(self, X, y, sample_weight=None): """Fit the model according to the given training data. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vector, where `n_samples` is the number of samples and `n_features` is the number of features. y : array-like of shape (n_samples,) Target vector relative to X. sample_weight : array-like of shape (n_samples,) default=None Array of weights that are assigned to individual samples. If not provided, then each sample is given unit weight. Returns ------- self : object Fitted LogisticRegressionCV estimator. 
""" solver = _check_solver(self.solver, self.penalty, self.dual) if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0: raise ValueError( "Maximum number of iteration must be positive; got (max_iter=%r)" % self.max_iter ) if not isinstance(self.tol, numbers.Number) or self.tol < 0: raise ValueError( "Tolerance for stopping criteria must be positive; got (tol=%r)" % self.tol ) if self.penalty == "elasticnet": if ( self.l1_ratios is None or len(self.l1_ratios) == 0 or any( ( not isinstance(l1_ratio, numbers.Number) or l1_ratio < 0 or l1_ratio > 1 ) for l1_ratio in self.l1_ratios ) ): raise ValueError( "l1_ratios must be a list of numbers between " "0 and 1; got (l1_ratios=%r)" % self.l1_ratios ) l1_ratios_ = self.l1_ratios else: if self.l1_ratios is not None: warnings.warn( "l1_ratios parameter is only used when penalty " "is 'elasticnet'. Got (penalty={})".format(self.penalty) ) l1_ratios_ = [None] if self.penalty == "none": raise ValueError( "penalty='none' is not useful and not supported by " "LogisticRegressionCV." 
) X, y = self._validate_data( X, y, accept_sparse="csr", dtype=np.float64, order="C", accept_large_sparse=solver not in ["liblinear", "sag", "saga"], ) check_classification_targets(y) class_weight = self.class_weight # Encode for string labels label_encoder = LabelEncoder().fit(y) y = label_encoder.transform(y) if isinstance(class_weight, dict): class_weight = { label_encoder.transform([cls])[0]: v for cls, v in class_weight.items() } # The original class labels classes = self.classes_ = label_encoder.classes_ encoded_labels = label_encoder.transform(label_encoder.classes_) multi_class = _check_multi_class(self.multi_class, solver, len(classes)) if solver in ["sag", "saga"]: max_squared_sum = row_norms(X, squared=True).max() else: max_squared_sum = None # init cross-validation generator cv = check_cv(self.cv, y, classifier=True) folds = list(cv.split(X, y)) # Use the label encoded classes n_classes = len(encoded_labels) if n_classes < 2: raise ValueError( "This solver needs samples of at least 2 classes" " in the data, but the data contains only one" " class: %r" % classes[0] ) if n_classes == 2: # OvR in case of binary problems is as good as fitting # the higher label n_classes = 1 encoded_labels = encoded_labels[1:] classes = classes[1:] # We need this hack to iterate only once over labels, in the case of # multi_class = multinomial, without changing the value of the labels. if multi_class == "multinomial": iter_encoded_labels = iter_classes = [None] else: iter_encoded_labels = encoded_labels iter_classes = classes # compute the class weights for the entire dataset y if class_weight == "balanced": class_weight = compute_class_weight( class_weight, classes=np.arange(len(self.classes_)), y=y ) class_weight = dict(enumerate(class_weight)) path_func = delayed(_log_reg_scoring_path) # The SAG solver releases the GIL so it's more efficient to use # threads for this solver. 
if self.solver in ["sag", "saga"]: prefer = "threads" else: prefer = "processes" fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, prefer=prefer)( path_func( X, y, train, test, pos_class=label, Cs=self.Cs, fit_intercept=self.fit_intercept, penalty=self.penalty, dual=self.dual, solver=solver, tol=self.tol, max_iter=self.max_iter, verbose=self.verbose, class_weight=class_weight, scoring=self.scoring, multi_class=multi_class, intercept_scaling=self.intercept_scaling, random_state=self.random_state, max_squared_sum=max_squared_sum, sample_weight=sample_weight, l1_ratio=l1_ratio, ) for label in iter_encoded_labels for train, test in folds for l1_ratio in l1_ratios_ ) # _log_reg_scoring_path will output different shapes depending on the # multi_class param, so we need to reshape the outputs accordingly. # Cs is of shape (n_classes . n_folds . n_l1_ratios, n_Cs) and all the # rows are equal, so we just take the first one. # After reshaping, # - scores is of shape (n_classes, n_folds, n_Cs . n_l1_ratios) # - coefs_paths is of shape # (n_classes, n_folds, n_Cs . n_l1_ratios, n_features) # - n_iter is of shape # (n_classes, n_folds, n_Cs . n_l1_ratios) or # (1, n_folds, n_Cs . 
n_l1_ratios) coefs_paths, Cs, scores, n_iter_ = zip(*fold_coefs_) self.Cs_ = Cs[0] if multi_class == "multinomial": coefs_paths = np.reshape( coefs_paths, (len(folds), len(l1_ratios_) * len(self.Cs_), n_classes, -1), ) # equiv to coefs_paths = np.moveaxis(coefs_paths, (0, 1, 2, 3), # (1, 2, 0, 3)) coefs_paths = np.swapaxes(coefs_paths, 0, 1) coefs_paths = np.swapaxes(coefs_paths, 0, 2) self.n_iter_ = np.reshape( n_iter_, (1, len(folds), len(self.Cs_) * len(l1_ratios_)) ) # repeat same scores across all classes scores = np.tile(scores, (n_classes, 1, 1)) else: coefs_paths = np.reshape( coefs_paths, (n_classes, len(folds), len(self.Cs_) * len(l1_ratios_), -1), ) self.n_iter_ = np.reshape( n_iter_, (n_classes, len(folds), len(self.Cs_) * len(l1_ratios_)) ) scores = np.reshape(scores, (n_classes, len(folds), -1)) self.scores_ = dict(zip(classes, scores)) self.coefs_paths_ = dict(zip(classes, coefs_paths)) self.C_ = list() self.l1_ratio_ = list() self.coef_ = np.empty((n_classes, X.shape[1])) self.intercept_ = np.zeros(n_classes) for index, (cls, encoded_label) in enumerate( zip(iter_classes, iter_encoded_labels) ): if multi_class == "ovr": scores = self.scores_[cls] coefs_paths = self.coefs_paths_[cls] else: # For multinomial, all scores are the same across classes scores = scores[0] # coefs_paths will keep its original shape because # logistic_regression_path expects it this way if self.refit: # best_index is between 0 and (n_Cs . 
n_l1_ratios - 1) # for example, with n_cs=2 and n_l1_ratios=3 # the layout of scores is # [c1, c2, c1, c2, c1, c2] # l1_1 , l1_2 , l1_3 best_index = scores.sum(axis=0).argmax() best_index_C = best_index % len(self.Cs_) C_ = self.Cs_[best_index_C] self.C_.append(C_) best_index_l1 = best_index // len(self.Cs_) l1_ratio_ = l1_ratios_[best_index_l1] self.l1_ratio_.append(l1_ratio_) if multi_class == "multinomial": coef_init = np.mean(coefs_paths[:, :, best_index, :], axis=1) else: coef_init = np.mean(coefs_paths[:, best_index, :], axis=0) # Note that y is label encoded and hence pos_class must be # the encoded label / None (for 'multinomial') w, _, _ = _logistic_regression_path( X, y, pos_class=encoded_label, Cs=[C_], solver=solver, fit_intercept=self.fit_intercept, coef=coef_init, max_iter=self.max_iter, tol=self.tol, penalty=self.penalty, class_weight=class_weight, multi_class=multi_class, verbose=max(0, self.verbose - 1), random_state=self.random_state, check_input=False, max_squared_sum=max_squared_sum, sample_weight=sample_weight, l1_ratio=l1_ratio_, ) w = w[0] else: # Take the best scores across every fold and the average of # all coefficients corresponding to the best scores. 
best_indices = np.argmax(scores, axis=1) if multi_class == "ovr": w = np.mean( [coefs_paths[i, best_indices[i], :] for i in range(len(folds))], axis=0, ) else: w = np.mean( [ coefs_paths[:, i, best_indices[i], :] for i in range(len(folds)) ], axis=0, ) best_indices_C = best_indices % len(self.Cs_) self.C_.append(np.mean(self.Cs_[best_indices_C])) if self.penalty == "elasticnet": best_indices_l1 = best_indices // len(self.Cs_) self.l1_ratio_.append(np.mean(l1_ratios_[best_indices_l1])) else: self.l1_ratio_.append(None) if multi_class == "multinomial": self.C_ = np.tile(self.C_, n_classes) self.l1_ratio_ = np.tile(self.l1_ratio_, n_classes) self.coef_ = w[:, : X.shape[1]] if self.fit_intercept: self.intercept_ = w[:, -1] else: self.coef_[index] = w[: X.shape[1]] if self.fit_intercept: self.intercept_[index] = w[-1] self.C_ = np.asarray(self.C_) self.l1_ratio_ = np.asarray(self.l1_ratio_) self.l1_ratios_ = np.asarray(l1_ratios_) # if elasticnet was used, add the l1_ratios dimension to some # attributes if self.l1_ratios is not None: # with n_cs=2 and n_l1_ratios=3 # the layout of scores is # [c1, c2, c1, c2, c1, c2] # l1_1 , l1_2 , l1_3 # To get a 2d array with the following layout # l1_1, l1_2, l1_3 # c1 [[ . , . , . ], # c2 [ . , . , . ]] # We need to first reshape and then transpose. 
# The same goes for the other arrays for cls, coefs_path in self.coefs_paths_.items(): self.coefs_paths_[cls] = coefs_path.reshape( (len(folds), self.l1_ratios_.size, self.Cs_.size, -1) ) self.coefs_paths_[cls] = np.transpose( self.coefs_paths_[cls], (0, 2, 1, 3) ) for cls, score in self.scores_.items(): self.scores_[cls] = score.reshape( (len(folds), self.l1_ratios_.size, self.Cs_.size) ) self.scores_[cls] = np.transpose(self.scores_[cls], (0, 2, 1)) self.n_iter_ = self.n_iter_.reshape( (-1, len(folds), self.l1_ratios_.size, self.Cs_.size) ) self.n_iter_ = np.transpose(self.n_iter_, (0, 1, 3, 2)) return self def score(self, X, y, sample_weight=None): """Score using the `scoring` option on the given test data and labels. Parameters ---------- X : array-like of shape (n_samples, n_features) Test samples. y : array-like of shape (n_samples,) True labels for X. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- score : float Score of self.predict(X) wrt. y. """ scoring = self.scoring or "accuracy" scoring = get_scorer(scoring) return scoring(self, X, y, sample_weight=sample_weight) def _more_tags(self): return { "_xfail_checks": { "check_sample_weights_invariance": ( "zero sample_weight is not equivalent to removing samples" ), } }
codeparrot/github-code-clean
# orm/relationships.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Heuristics related to join conditions as used in :func:`.relationship`. Provides the :class:`.JoinCondition` object, which encapsulates SQL annotation and aliasing behavior focused on the `primaryjoin` and `secondaryjoin` aspects of :func:`.relationship`. """ from __future__ import absolute_import from .. import sql, util, exc as sa_exc, schema, log import weakref from .util import CascadeOptions, _orm_annotate, _orm_deannotate from . import dependency from . import attributes from ..sql.util import ( ClauseAdapter, join_condition, _shallow_annotate, visit_binary_product, _deep_deannotate, selectables_overlap, adapt_criterion_to_null ) from ..sql import operators, expression, visitors from .interfaces import (MANYTOMANY, MANYTOONE, ONETOMANY, StrategizedProperty, PropComparator) from ..inspection import inspect from . import mapper as mapperlib import collections def remote(expr): """Annotate a portion of a primaryjoin expression with a 'remote' annotation. See the section :ref:`relationship_custom_foreign` for a description of use. .. versionadded:: 0.8 .. seealso:: :ref:`relationship_custom_foreign` :func:`.foreign` """ return _annotate_columns(expression._clause_element_as_expr(expr), {"remote": True}) def foreign(expr): """Annotate a portion of a primaryjoin expression with a 'foreign' annotation. See the section :ref:`relationship_custom_foreign` for a description of use. .. versionadded:: 0.8 .. 
seealso:: :ref:`relationship_custom_foreign` :func:`.remote` """ return _annotate_columns(expression._clause_element_as_expr(expr), {"foreign": True}) @log.class_logger @util.langhelpers.dependency_for("sqlalchemy.orm.properties") class RelationshipProperty(StrategizedProperty): """Describes an object property that holds a single item or list of items that correspond to a related database table. Public constructor is the :func:`.orm.relationship` function. See also: :ref:`relationship_config_toplevel` """ strategy_wildcard_key = 'relationship' _dependency_processor = None def __init__(self, argument, secondary=None, primaryjoin=None, secondaryjoin=None, foreign_keys=None, uselist=None, order_by=False, backref=None, back_populates=None, post_update=False, cascade=False, extension=None, viewonly=False, lazy=True, collection_class=None, passive_deletes=False, passive_updates=True, remote_side=None, enable_typechecks=True, join_depth=None, comparator_factory=None, single_parent=False, innerjoin=False, distinct_target_key=None, doc=None, active_history=False, cascade_backrefs=True, load_on_pending=False, bake_queries=True, strategy_class=None, _local_remote_pairs=None, query_class=None, info=None): """Provide a relationship between two mapped classes. This corresponds to a parent-child or associative table relationship. The constructed class is an instance of :class:`.RelationshipProperty`. A typical :func:`.relationship`, used in a classical mapping:: mapper(Parent, properties={ 'children': relationship(Child) }) Some arguments accepted by :func:`.relationship` optionally accept a callable function, which when called produces the desired value. The callable is invoked by the parent :class:`.Mapper` at "mapper initialization" time, which happens only when mappers are first used, and is assumed to be after all mappings have been constructed. 
This can be used to resolve order-of-declaration and other dependency issues, such as if ``Child`` is declared below ``Parent`` in the same file:: mapper(Parent, properties={ "children":relationship(lambda: Child, order_by=lambda: Child.id) }) When using the :ref:`declarative_toplevel` extension, the Declarative initializer allows string arguments to be passed to :func:`.relationship`. These string arguments are converted into callables that evaluate the string as Python code, using the Declarative class-registry as a namespace. This allows the lookup of related classes to be automatic via their string name, and removes the need to import related classes at all into the local module space:: from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class Parent(Base): __tablename__ = 'parent' id = Column(Integer, primary_key=True) children = relationship("Child", order_by="Child.id") .. seealso:: :ref:`relationship_config_toplevel` - Full introductory and reference documentation for :func:`.relationship`. :ref:`orm_tutorial_relationship` - ORM tutorial introduction. :param argument: a mapped class, or actual :class:`.Mapper` instance, representing the target of the relationship. :paramref:`~.relationship.argument` may also be passed as a callable function which is evaluated at mapper initialization time, and may be passed as a Python-evaluable string when using Declarative. .. seealso:: :ref:`declarative_configuring_relationships` - further detail on relationship configuration when using Declarative. :param secondary: for a many-to-many relationship, specifies the intermediary table, and is typically an instance of :class:`.Table`. In less common circumstances, the argument may also be specified as an :class:`.Alias` construct, or even a :class:`.Join` construct. :paramref:`~.relationship.secondary` may also be passed as a callable function which is evaluated at mapper initialization time. 
When using Declarative, it may also be a string argument noting the name of a :class:`.Table` that is present in the :class:`.MetaData` collection associated with the parent-mapped :class:`.Table`. The :paramref:`~.relationship.secondary` keyword argument is typically applied in the case where the intermediary :class:`.Table` is not otherwise expressed in any direct class mapping. If the "secondary" table is also explicitly mapped elsewhere (e.g. as in :ref:`association_pattern`), one should consider applying the :paramref:`~.relationship.viewonly` flag so that this :func:`.relationship` is not used for persistence operations which may conflict with those of the association object pattern. .. seealso:: :ref:`relationships_many_to_many` - Reference example of "many to many". :ref:`orm_tutorial_many_to_many` - ORM tutorial introduction to many-to-many relationships. :ref:`self_referential_many_to_many` - Specifics on using many-to-many in a self-referential case. :ref:`declarative_many_to_many` - Additional options when using Declarative. :ref:`association_pattern` - an alternative to :paramref:`~.relationship.secondary` when composing association table relationships, allowing additional attributes to be specified on the association table. :ref:`composite_secondary_join` - a lesser-used pattern which in some cases can enable complex :func:`.relationship` SQL conditions to be used. .. versionadded:: 0.9.2 :paramref:`~.relationship.secondary` works more effectively when referring to a :class:`.Join` instance. :param active_history=False: When ``True``, indicates that the "previous" value for a many-to-one reference should be loaded when replaced, if not already loaded. Normally, history tracking logic for simple many-to-ones only needs to be aware of the "new" value in order to perform a flush. This flag is available for applications that make use of :func:`.attributes.get_history` which also need to know the "previous" value of the attribute. 
:param backref: indicates the string name of a property to be placed on the related mapper's class that will handle this relationship in the other direction. The other property will be created automatically when the mappers are configured. Can also be passed as a :func:`.backref` object to control the configuration of the new relationship. .. seealso:: :ref:`relationships_backref` - Introductory documentation and examples. :paramref:`~.relationship.back_populates` - alternative form of backref specification. :func:`.backref` - allows control over :func:`.relationship` configuration when using :paramref:`~.relationship.backref`. :param back_populates: Takes a string name and has the same meaning as :paramref:`~.relationship.backref`, except the complementing property is **not** created automatically, and instead must be configured explicitly on the other mapper. The complementing property should also indicate :paramref:`~.relationship.back_populates` to this relationship to ensure proper functioning. .. seealso:: :ref:`relationships_backref` - Introductory documentation and examples. :paramref:`~.relationship.backref` - alternative form of backref specification. :param bake_queries: Use the :class:`.BakedQuery` cache to cache queries used in lazy loads. True by default, as this typically improves performance significantly. Set to False to reduce ORM memory use, or if unresolved stability issues are observed with the baked query cache system. .. versionadded:: 1.0.0 :param cascade: a comma-separated list of cascade rules which determines how Session operations should be "cascaded" from parent to child. This defaults to ``False``, which means the default cascade should be used - this default cascade is ``"save-update, merge"``. The available cascades are ``save-update``, ``merge``, ``expunge``, ``delete``, ``delete-orphan``, and ``refresh-expire``. 
An additional option, ``all`` indicates shorthand for ``"save-update, merge, refresh-expire, expunge, delete"``, and is often used as in ``"all, delete-orphan"`` to indicate that related objects should follow along with the parent object in all cases, and be deleted when de-associated. .. seealso:: :ref:`unitofwork_cascades` - Full detail on each of the available cascade options. :ref:`tutorial_delete_cascade` - Tutorial example describing a delete cascade. :param cascade_backrefs=True: a boolean value indicating if the ``save-update`` cascade should operate along an assignment event intercepted by a backref. When set to ``False``, the attribute managed by this relationship will not cascade an incoming transient object into the session of a persistent parent, if the event is received via backref. .. seealso:: :ref:`backref_cascade` - Full discussion and examples on how the :paramref:`~.relationship.cascade_backrefs` option is used. :param collection_class: a class or callable that returns a new list-holding object. will be used in place of a plain list for storing elements. .. seealso:: :ref:`custom_collections` - Introductory documentation and examples. :param comparator_factory: a class which extends :class:`.RelationshipProperty.Comparator` which provides custom SQL clause generation for comparison operations. .. seealso:: :class:`.PropComparator` - some detail on redefining comparators at this level. :ref:`custom_comparators` - Brief intro to this feature. :param distinct_target_key=None: Indicate if a "subquery" eager load should apply the DISTINCT keyword to the innermost SELECT statement. When left as ``None``, the DISTINCT keyword will be applied in those cases when the target columns do not comprise the full primary key of the target table. When set to ``True``, the DISTINCT keyword is applied to the innermost SELECT unconditionally. 
It may be desirable to set this flag to False when the DISTINCT is reducing performance of the innermost subquery beyond that of what duplicate innermost rows may be causing. .. versionadded:: 0.8.3 - :paramref:`~.relationship.distinct_target_key` allows the subquery eager loader to apply a DISTINCT modifier to the innermost SELECT. .. versionchanged:: 0.9.0 - :paramref:`~.relationship.distinct_target_key` now defaults to ``None``, so that the feature enables itself automatically for those cases where the innermost query targets a non-unique key. .. seealso:: :ref:`loading_toplevel` - includes an introduction to subquery eager loading. :param doc: docstring which will be applied to the resulting descriptor. :param extension: an :class:`.AttributeExtension` instance, or list of extensions, which will be prepended to the list of attribute listeners for the resulting descriptor placed on the class. .. deprecated:: 0.7 Please see :class:`.AttributeEvents`. :param foreign_keys: a list of columns which are to be used as "foreign key" columns, or columns which refer to the value in a remote column, within the context of this :func:`.relationship` object's :paramref:`~.relationship.primaryjoin` condition. That is, if the :paramref:`~.relationship.primaryjoin` condition of this :func:`.relationship` is ``a.id == b.a_id``, and the values in ``b.a_id`` are required to be present in ``a.id``, then the "foreign key" column of this :func:`.relationship` is ``b.a_id``. In normal cases, the :paramref:`~.relationship.foreign_keys` parameter is **not required.** :func:`.relationship` will automatically determine which columns in the :paramref:`~.relationship.primaryjoin` conditition are to be considered "foreign key" columns based on those :class:`.Column` objects that specify :class:`.ForeignKey`, or are otherwise listed as referencing columns in a :class:`.ForeignKeyConstraint` construct. :paramref:`~.relationship.foreign_keys` is only needed when: 1. 
There is more than one way to construct a join from the local table to the remote table, as there are multiple foreign key references present. Setting ``foreign_keys`` will limit the :func:`.relationship` to consider just those columns specified here as "foreign". .. versionchanged:: 0.8 A multiple-foreign key join ambiguity can be resolved by setting the :paramref:`~.relationship.foreign_keys` parameter alone, without the need to explicitly set :paramref:`~.relationship.primaryjoin` as well. 2. The :class:`.Table` being mapped does not actually have :class:`.ForeignKey` or :class:`.ForeignKeyConstraint` constructs present, often because the table was reflected from a database that does not support foreign key reflection (MySQL MyISAM). 3. The :paramref:`~.relationship.primaryjoin` argument is used to construct a non-standard join condition, which makes use of columns or expressions that do not normally refer to their "parent" column, such as a join condition expressed by a complex comparison using a SQL function. The :func:`.relationship` construct will raise informative error messages that suggest the use of the :paramref:`~.relationship.foreign_keys` parameter when presented with an ambiguous condition. In typical cases, if :func:`.relationship` doesn't raise any exceptions, the :paramref:`~.relationship.foreign_keys` parameter is usually not needed. :paramref:`~.relationship.foreign_keys` may also be passed as a callable function which is evaluated at mapper initialization time, and may be passed as a Python-evaluable string when using Declarative. .. seealso:: :ref:`relationship_foreign_keys` :ref:`relationship_custom_foreign` :func:`.foreign` - allows direct annotation of the "foreign" columns within a :paramref:`~.relationship.primaryjoin` condition. .. 
versionadded:: 0.8

             The :func:`.foreign` annotation can also be applied
             directly to the :paramref:`~.relationship.primaryjoin`
             expression, which is an alternate, more specific system of
             describing which columns in a particular
             :paramref:`~.relationship.primaryjoin` should be considered
             "foreign".

        :param info: Optional data dictionary which will be populated into the
            :attr:`.MapperProperty.info` attribute of this object.

            .. versionadded:: 0.8

        :param innerjoin=False:
          when ``True``, joined eager loads will use an inner join to join
          against related tables instead of an outer join. The purpose
          of this option is generally one of performance, as inner joins
          generally perform better than outer joins.

          This flag can be set to ``True`` when the relationship references an
          object via many-to-one using local foreign keys that are not
          nullable, or when the reference is one-to-one or a collection that
          is guaranteed to have one or at least one entry.

          The option supports the same "nested" and "unnested" options as
          that of :paramref:`.joinedload.innerjoin`. See that flag
          for details on nested / unnested behaviors.

          .. seealso::

            :paramref:`.joinedload.innerjoin` - the option as specified by
            loader option, including detail on nesting behavior.

            :ref:`what_kind_of_loading` - Discussion of some details of
            various loader options.

        :param join_depth:
          when non-``None``, an integer value indicating how many levels
          deep "eager" loaders should join on a self-referring or cyclical
          relationship. The number counts how many times the same Mapper
          shall be present in the loading condition along a particular join
          branch. When left at its default of ``None``, eager loaders will
          stop chaining when they encounter the same target mapper which
          is already higher up in the chain. This option applies both to
          joined- and subquery- eager loaders.

          .. seealso::

            :ref:`self_referential_eager_loading` - Introductory
            documentation and examples.

        :param lazy='select': specifies
          how the related items should be loaded.
Default value is ``select``. Values include: * ``select`` - items should be loaded lazily when the property is first accessed, using a separate SELECT statement, or identity map fetch for simple many-to-one references. * ``immediate`` - items should be loaded as the parents are loaded, using a separate SELECT statement, or identity map fetch for simple many-to-one references. * ``joined`` - items should be loaded "eagerly" in the same query as that of the parent, using a JOIN or LEFT OUTER JOIN. Whether the join is "outer" or not is determined by the :paramref:`~.relationship.innerjoin` parameter. * ``subquery`` - items should be loaded "eagerly" as the parents are loaded, using one additional SQL statement, which issues a JOIN to a subquery of the original statement, for each collection requested. * ``noload`` - no loading should occur at any time. This is to support "write-only" attributes, or attributes which are populated in some manner specific to the application. * ``dynamic`` - the attribute will return a pre-configured :class:`.Query` object for all read operations, onto which further filtering operations can be applied before iterating the results. See the section :ref:`dynamic_relationship` for more details. * True - a synonym for 'select' * False - a synonym for 'joined' * None - a synonym for 'noload' .. seealso:: :doc:`/orm/loading_relationships` - Full documentation on relationship loader configuration. :ref:`dynamic_relationship` - detail on the ``dynamic`` option. :param load_on_pending=False: Indicates loading behavior for transient or pending parent objects. When set to ``True``, causes the lazy-loader to issue a query for a parent object that is not persistent, meaning it has never been flushed. This may take effect for a pending object when autoflush is disabled, or for a transient object that has been "attached" to a :class:`.Session` but is not part of its pending collection. 
The :paramref:`~.relationship.load_on_pending` flag does not improve
          behavior when the ORM is used normally - object references should be
          constructed at the object level, not at the foreign key level, so
          that they are present in an ordinary way before a flush proceeds.
          This flag is not intended for general use.

          .. seealso::

              :meth:`.Session.enable_relationship_loading` - this method
              establishes "load on pending" behavior for the whole object, and
              also allows loading on objects that remain transient or
              detached.

        :param order_by:
          indicates the ordering that should be applied when loading these
          items. :paramref:`~.relationship.order_by` is expected to refer to
          one of the :class:`.Column` objects to which the target class is
          mapped, or the attribute itself bound to the target class which
          refers to the column.

          :paramref:`~.relationship.order_by` may also be passed as a callable
          function which is evaluated at mapper initialization time, and may
          be passed as a Python-evaluable string when using Declarative.

        :param passive_deletes=False:
          Indicates loading behavior during delete operations.

          A value of True indicates that unloaded child items should not
          be loaded during a delete operation on the parent. Normally,
          when a parent item is deleted, all child items are loaded so
          that they can either be marked as deleted, or have their
          foreign key to the parent set to NULL.

          Marking this flag as True usually implies an ON DELETE
          <CASCADE|SET NULL> rule is in place which will handle
          updating/deleting child rows on the database side.

          Additionally, setting the flag to the string value 'all' will
          disable the "nulling out" of the child foreign keys, when there
          is no delete or delete-orphan cascade enabled. This is
          typically used when a triggering or error raise scenario is in
          place on the database side. Note that the foreign key
          attributes on in-session child objects will not be changed
          after a flush occurs so this is a very special use-case
          setting.

          ..
seealso:: :ref:`passive_deletes` - Introductory documentation and examples. :param passive_updates=True: Indicates the persistence behavior to take when a referenced primary key value changes in place, indicating that the referencing foreign key columns will also need their value changed. When True, it is assumed that ``ON UPDATE CASCADE`` is configured on the foreign key in the database, and that the database will handle propagation of an UPDATE from a source column to dependent rows. When False, the SQLAlchemy :func:`.relationship` construct will attempt to emit its own UPDATE statements to modify related targets. However note that SQLAlchemy **cannot** emit an UPDATE for more than one level of cascade. Also, setting this flag to False is not compatible in the case where the database is in fact enforcing referential integrity, unless those constraints are explicitly "deferred", if the target backend supports it. It is highly advised that an application which is employing mutable primary keys keeps ``passive_updates`` set to True, and instead uses the referential integrity features of the database itself in order to handle the change efficiently and fully. .. seealso:: :ref:`passive_updates` - Introductory documentation and examples. :paramref:`.mapper.passive_updates` - a similar flag which takes effect for joined-table inheritance mappings. :param post_update: this indicates that the relationship should be handled by a second UPDATE statement after an INSERT or before a DELETE. Currently, it also will issue an UPDATE after the instance was UPDATEd as well, although this technically should be improved. This flag is used to handle saving bi-directional dependencies between two individual rows (i.e. each row references the other), where it would otherwise be impossible to INSERT or DELETE both rows fully since one row exists before the other. 
Use this flag when a particular mapping arrangement will incur two rows that are dependent on each other, such as a table that has a one-to-many relationship to a set of child rows, and also has a column that references a single child row within that list (i.e. both tables contain a foreign key to each other). If a flush operation returns an error that a "cyclical dependency" was detected, this is a cue that you might want to use :paramref:`~.relationship.post_update` to "break" the cycle. .. seealso:: :ref:`post_update` - Introductory documentation and examples. :param primaryjoin: a SQL expression that will be used as the primary join of this child object against the parent object, or in a many-to-many relationship the join of the primary object to the association table. By default, this value is computed based on the foreign key relationships of the parent and child tables (or association table). :paramref:`~.relationship.primaryjoin` may also be passed as a callable function which is evaluated at mapper initialization time, and may be passed as a Python-evaluable string when using Declarative. .. seealso:: :ref:`relationship_primaryjoin` :param remote_side: used for self-referential relationships, indicates the column or list of columns that form the "remote side" of the relationship. :paramref:`.relationship.remote_side` may also be passed as a callable function which is evaluated at mapper initialization time, and may be passed as a Python-evaluable string when using Declarative. .. versionchanged:: 0.8 The :func:`.remote` annotation can also be applied directly to the ``primaryjoin`` expression, which is an alternate, more specific system of describing which columns in a particular ``primaryjoin`` should be considered "remote". .. seealso:: :ref:`self_referential` - in-depth explanation of how :paramref:`~.relationship.remote_side` is used to configure self-referential relationships. 
:func:`.remote` - an annotation function that accomplishes the same purpose as :paramref:`~.relationship.remote_side`, typically when a custom :paramref:`~.relationship.primaryjoin` condition is used. :param query_class: a :class:`.Query` subclass that will be used as the base of the "appender query" returned by a "dynamic" relationship, that is, a relationship that specifies ``lazy="dynamic"`` or was otherwise constructed using the :func:`.orm.dynamic_loader` function. .. seealso:: :ref:`dynamic_relationship` - Introduction to "dynamic" relationship loaders. :param secondaryjoin: a SQL expression that will be used as the join of an association table to the child object. By default, this value is computed based on the foreign key relationships of the association and child tables. :paramref:`~.relationship.secondaryjoin` may also be passed as a callable function which is evaluated at mapper initialization time, and may be passed as a Python-evaluable string when using Declarative. .. seealso:: :ref:`relationship_primaryjoin` :param single_parent: when True, installs a validator which will prevent objects from being associated with more than one parent at a time. This is used for many-to-one or many-to-many relationships that should be treated either as one-to-one or one-to-many. Its usage is optional, except for :func:`.relationship` constructs which are many-to-one or many-to-many and also specify the ``delete-orphan`` cascade option. The :func:`.relationship` construct itself will raise an error instructing when this option is required. .. seealso:: :ref:`unitofwork_cascades` - includes detail on when the :paramref:`~.relationship.single_parent` flag may be appropriate. :param uselist: a boolean that indicates if this property should be loaded as a list or a scalar. 
In most cases, this value is determined automatically by :func:`.relationship` at mapper configuration time, based on the type and direction of the relationship - one to many forms a list, many to one forms a scalar, many to many is a list. If a scalar is desired where normally a list would be present, such as a bi-directional one-to-one relationship, set :paramref:`~.relationship.uselist` to False. The :paramref:`~.relationship.uselist` flag is also available on an existing :func:`.relationship` construct as a read-only attribute, which can be used to determine if this :func:`.relationship` deals with collections or scalar attributes:: >>> User.addresses.property.uselist True .. seealso:: :ref:`relationships_one_to_one` - Introduction to the "one to one" relationship pattern, which is typically when the :paramref:`~.relationship.uselist` flag is needed. :param viewonly=False: when set to True, the relationship is used only for loading objects, and not for any persistence operation. A :func:`.relationship` which specifies :paramref:`~.relationship.viewonly` can work with a wider range of SQL operations within the :paramref:`~.relationship.primaryjoin` condition, including operations that feature the use of a variety of comparison operators as well as SQL functions such as :func:`~.sql.expression.cast`. The :paramref:`~.relationship.viewonly` flag is also of general use when defining any kind of :func:`~.relationship` that doesn't represent the full set of related objects, to prevent modifications of the collection from resulting in persistence operations. 
""" super(RelationshipProperty, self).__init__() self.uselist = uselist self.argument = argument self.secondary = secondary self.primaryjoin = primaryjoin self.secondaryjoin = secondaryjoin self.post_update = post_update self.direction = None self.viewonly = viewonly self.lazy = lazy self.single_parent = single_parent self._user_defined_foreign_keys = foreign_keys self.collection_class = collection_class self.passive_deletes = passive_deletes self.cascade_backrefs = cascade_backrefs self.passive_updates = passive_updates self.remote_side = remote_side self.enable_typechecks = enable_typechecks self.query_class = query_class self.innerjoin = innerjoin self.distinct_target_key = distinct_target_key self.doc = doc self.active_history = active_history self.join_depth = join_depth self.local_remote_pairs = _local_remote_pairs self.extension = extension self.bake_queries = bake_queries self.load_on_pending = load_on_pending self.comparator_factory = comparator_factory or \ RelationshipProperty.Comparator self.comparator = self.comparator_factory(self, None) util.set_creation_order(self) if info is not None: self.info = info if strategy_class: self.strategy_class = strategy_class else: self.strategy_class = self._strategy_lookup(("lazy", self.lazy)) self._reverse_property = set() self.cascade = cascade if cascade is not False \ else "save-update, merge" self.order_by = order_by self.back_populates = back_populates if self.back_populates: if backref: raise sa_exc.ArgumentError( "backref and back_populates keyword arguments " "are mutually exclusive") self.backref = None else: self.backref = backref def instrument_class(self, mapper): attributes.register_descriptor( mapper.class_, self.key, comparator=self.comparator_factory(self, mapper), parententity=mapper, doc=self.doc, ) class Comparator(PropComparator): """Produce boolean, comparison, and other operators for :class:`.RelationshipProperty` attributes. 
See the documentation for :class:`.PropComparator` for a brief overview of ORM level operator definition. See also: :class:`.PropComparator` :class:`.ColumnProperty.Comparator` :class:`.ColumnOperators` :ref:`types_operators` :attr:`.TypeEngine.comparator_factory` """ _of_type = None def __init__( self, prop, parentmapper, adapt_to_entity=None, of_type=None): """Construction of :class:`.RelationshipProperty.Comparator` is internal to the ORM's attribute mechanics. """ self.prop = prop self._parententity = parentmapper self._adapt_to_entity = adapt_to_entity if of_type: self._of_type = of_type def adapt_to_entity(self, adapt_to_entity): return self.__class__(self.property, self._parententity, adapt_to_entity=adapt_to_entity, of_type=self._of_type) @util.memoized_property def mapper(self): """The target :class:`.Mapper` referred to by this :class:`.RelationshipProperty.Comparator`. This is the "target" or "remote" side of the :func:`.relationship`. """ return self.property.mapper @util.memoized_property def _parententity(self): return self.property.parent def _source_selectable(self): if self._adapt_to_entity: return self._adapt_to_entity.selectable else: return self.property.parent._with_polymorphic_selectable def __clause_element__(self): adapt_from = self._source_selectable() if self._of_type: of_type = inspect(self._of_type).mapper else: of_type = None pj, sj, source, dest, \ secondary, target_adapter = self.property._create_joins( source_selectable=adapt_from, source_polymorphic=True, of_type=of_type) if sj is not None: return pj & sj else: return pj def of_type(self, cls): """Produce a construct that represents a particular 'subtype' of attribute for the parent class. Currently this is usable in conjunction with :meth:`.Query.join` and :meth:`.Query.outerjoin`. 
""" return RelationshipProperty.Comparator( self.property, self._parententity, adapt_to_entity=self._adapt_to_entity, of_type=cls) def in_(self, other): """Produce an IN clause - this is not implemented for :func:`~.orm.relationship`-based attributes at this time. """ raise NotImplementedError('in_() not yet supported for ' 'relationships. For a simple ' 'many-to-one, use in_() against ' 'the set of foreign key values.') __hash__ = None def __eq__(self, other): """Implement the ``==`` operator. In a many-to-one context, such as:: MyClass.some_prop == <some object> this will typically produce a clause such as:: mytable.related_id == <some id> Where ``<some id>`` is the primary key of the given object. The ``==`` operator provides partial functionality for non- many-to-one comparisons: * Comparisons against collections are not supported. Use :meth:`~.RelationshipProperty.Comparator.contains`. * Compared to a scalar one-to-many, will produce a clause that compares the target columns in the parent to the given target. * Compared to a scalar many-to-many, an alias of the association table will be rendered as well, forming a natural join that is part of the main body of the query. This will not work for queries that go beyond simple AND conjunctions of comparisons, such as those which use OR. Use explicit joins, outerjoins, or :meth:`~.RelationshipProperty.Comparator.has` for more comprehensive non-many-to-one scalar membership tests. * Comparisons against ``None`` given in a one-to-many or many-to-many context produce a NOT EXISTS clause. 
""" if isinstance(other, (util.NoneType, expression.Null)): if self.property.direction in [ONETOMANY, MANYTOMANY]: return ~self._criterion_exists() else: return _orm_annotate(self.property._optimized_compare( None, adapt_source=self.adapter)) elif self.property.uselist: raise sa_exc.InvalidRequestError( "Can't compare a collection to an object or collection; " "use contains() to test for membership.") else: return _orm_annotate( self.property._optimized_compare( other, adapt_source=self.adapter)) def _criterion_exists(self, criterion=None, **kwargs): if getattr(self, '_of_type', None): info = inspect(self._of_type) target_mapper, to_selectable, is_aliased_class = \ info.mapper, info.selectable, info.is_aliased_class if self.property._is_self_referential and not \ is_aliased_class: to_selectable = to_selectable.alias() single_crit = target_mapper._single_table_criterion if single_crit is not None: if criterion is not None: criterion = single_crit & criterion else: criterion = single_crit else: is_aliased_class = False to_selectable = None if self.adapter: source_selectable = self._source_selectable() else: source_selectable = None pj, sj, source, dest, secondary, target_adapter = \ self.property._create_joins( dest_polymorphic=True, dest_selectable=to_selectable, source_selectable=source_selectable) for k in kwargs: crit = getattr(self.property.mapper.class_, k) == kwargs[k] if criterion is None: criterion = crit else: criterion = criterion & crit # annotate the *local* side of the join condition, in the case # of pj + sj this is the full primaryjoin, in the case of just # pj its the local side of the primaryjoin. if sj is not None: j = _orm_annotate(pj) & sj else: j = _orm_annotate(pj, exclude=self.property.remote_side) if criterion is not None and target_adapter and not \ is_aliased_class: # limit this adapter to annotated only? criterion = target_adapter.traverse(criterion) # only have the "joined left side" of what we # return be subject to Query adaption. 
The right # side of it is used for an exists() subquery and # should not correlate or otherwise reach out # to anything in the enclosing query. if criterion is not None: criterion = criterion._annotate( {'no_replacement_traverse': True}) crit = j & sql.True_._ifnone(criterion) ex = sql.exists([1], crit, from_obj=dest).correlate_except(dest) if secondary is not None: ex = ex.correlate_except(secondary) return ex def any(self, criterion=None, **kwargs): """Produce an expression that tests a collection against particular criterion, using EXISTS. An expression like:: session.query(MyClass).filter( MyClass.somereference.any(SomeRelated.x==2) ) Will produce a query like:: SELECT * FROM my_table WHERE EXISTS (SELECT 1 FROM related WHERE related.my_id=my_table.id AND related.x=2) Because :meth:`~.RelationshipProperty.Comparator.any` uses a correlated subquery, its performance is not nearly as good when compared against large target tables as that of using a join. :meth:`~.RelationshipProperty.Comparator.any` is particularly useful for testing for empty collections:: session.query(MyClass).filter( ~MyClass.somereference.any() ) will produce:: SELECT * FROM my_table WHERE NOT EXISTS (SELECT 1 FROM related WHERE related.my_id=my_table.id) :meth:`~.RelationshipProperty.Comparator.any` is only valid for collections, i.e. a :func:`.relationship` that has ``uselist=True``. For scalar references, use :meth:`~.RelationshipProperty.Comparator.has`. """ if not self.property.uselist: raise sa_exc.InvalidRequestError( "'any()' not implemented for scalar " "attributes. Use has()." ) return self._criterion_exists(criterion, **kwargs) def has(self, criterion=None, **kwargs): """Produce an expression that tests a scalar reference against particular criterion, using EXISTS. 
An expression like:: session.query(MyClass).filter( MyClass.somereference.has(SomeRelated.x==2) ) Will produce a query like:: SELECT * FROM my_table WHERE EXISTS (SELECT 1 FROM related WHERE related.id==my_table.related_id AND related.x=2) Because :meth:`~.RelationshipProperty.Comparator.has` uses a correlated subquery, its performance is not nearly as good when compared against large target tables as that of using a join. :meth:`~.RelationshipProperty.Comparator.has` is only valid for scalar references, i.e. a :func:`.relationship` that has ``uselist=False``. For collection references, use :meth:`~.RelationshipProperty.Comparator.any`. """ if self.property.uselist: raise sa_exc.InvalidRequestError( "'has()' not implemented for collections. " "Use any().") return self._criterion_exists(criterion, **kwargs) def contains(self, other, **kwargs): """Return a simple expression that tests a collection for containment of a particular item. :meth:`~.RelationshipProperty.Comparator.contains` is only valid for a collection, i.e. a :func:`~.orm.relationship` that implements one-to-many or many-to-many with ``uselist=True``. When used in a simple one-to-many context, an expression like:: MyClass.contains(other) Produces a clause like:: mytable.id == <some id> Where ``<some id>`` is the value of the foreign key attribute on ``other`` which refers to the primary key of its parent object. From this it follows that :meth:`~.RelationshipProperty.Comparator.contains` is very useful when used with simple one-to-many operations. For many-to-many operations, the behavior of :meth:`~.RelationshipProperty.Comparator.contains` has more caveats. 
The association table will be rendered in the statement, producing an "implicit" join, that is, includes multiple tables in the FROM clause which are equated in the WHERE clause:: query(MyClass).filter(MyClass.contains(other)) Produces a query like:: SELECT * FROM my_table, my_association_table AS my_association_table_1 WHERE my_table.id = my_association_table_1.parent_id AND my_association_table_1.child_id = <some id> Where ``<some id>`` would be the primary key of ``other``. From the above, it is clear that :meth:`~.RelationshipProperty.Comparator.contains` will **not** work with many-to-many collections when used in queries that move beyond simple AND conjunctions, such as multiple :meth:`~.RelationshipProperty.Comparator.contains` expressions joined by OR. In such cases subqueries or explicit "outer joins" will need to be used instead. See :meth:`~.RelationshipProperty.Comparator.any` for a less-performant alternative using EXISTS, or refer to :meth:`.Query.outerjoin` as well as :ref:`ormtutorial_joins` for more details on constructing outer joins. """ if not self.property.uselist: raise sa_exc.InvalidRequestError( "'contains' not implemented for scalar " "attributes. 
Use ==") clause = self.property._optimized_compare( other, adapt_source=self.adapter) if self.property.secondaryjoin is not None: clause.negation_clause = \ self.__negated_contains_or_equals(other) return clause def __negated_contains_or_equals(self, other): if self.property.direction == MANYTOONE: state = attributes.instance_state(other) def state_bindparam(x, state, col): dict_ = state.dict return sql.bindparam( x, unique=True, callable_=self.property._get_attr_w_warn_on_none( col, self.property.mapper._get_state_attr_by_column, state, dict_, col, passive=attributes.PASSIVE_OFF ) ) def adapt(col): if self.adapter: return self.adapter(col) else: return col if self.property._use_get: return sql.and_(*[ sql.or_( adapt(x) != state_bindparam(adapt(x), state, y), adapt(x) == None) for (x, y) in self.property.local_remote_pairs]) criterion = sql.and_(*[ x == y for (x, y) in zip( self.property.mapper.primary_key, self.property.mapper.primary_key_from_instance(other) ) ]) return ~self._criterion_exists(criterion) def __ne__(self, other): """Implement the ``!=`` operator. In a many-to-one context, such as:: MyClass.some_prop != <some object> This will typically produce a clause such as:: mytable.related_id != <some id> Where ``<some id>`` is the primary key of the given object. The ``!=`` operator provides partial functionality for non- many-to-one comparisons: * Comparisons against collections are not supported. Use :meth:`~.RelationshipProperty.Comparator.contains` in conjunction with :func:`~.expression.not_`. * Compared to a scalar one-to-many, will produce a clause that compares the target columns in the parent to the given target. * Compared to a scalar many-to-many, an alias of the association table will be rendered as well, forming a natural join that is part of the main body of the query. This will not work for queries that go beyond simple AND conjunctions of comparisons, such as those which use OR. 
              Use explicit joins, outerjoins, or
              :meth:`~.RelationshipProperty.Comparator.has` in
              conjunction with :func:`~.expression.not_` for
              more comprehensive non-many-to-one scalar
              membership tests.

            * Comparisons against ``None`` given in a one-to-many
              or many-to-many context produce an EXISTS clause.

            """
            if isinstance(other, (util.NoneType, expression.Null)):
                if self.property.direction == MANYTOONE:
                    return _orm_annotate(~self.property._optimized_compare(
                        None, adapt_source=self.adapter))

                else:
                    return self._criterion_exists()
            elif self.property.uselist:
                raise sa_exc.InvalidRequestError(
                    "Can't compare a collection"
                    " to an object or collection; use "
                    "contains() to test for membership.")
            else:
                return _orm_annotate(self.__negated_contains_or_equals(other))

        @util.memoized_property
        def property(self):
            # ensure all mappers are configured before handing out
            # the MapperProperty; comparators may be accessed before
            # configure_mappers() has otherwise run.
            if mapperlib.Mapper._new_mappers:
                mapperlib.Mapper._configure_all()
            return self.prop

    def _with_parent(self, instance, alias_secondary=True):
        """Produce a filtering criterion relating this relationship's
        target to the given parent *instance* (used by with_parent())."""
        assert instance is not None
        return self._optimized_compare(
            instance, value_is_parent=True, alias_secondary=alias_secondary)

    def _optimized_compare(self, state, value_is_parent=False,
                           adapt_source=None,
                           alias_secondary=True):
        """Produce a comparison clause for this relationship against a
        particular object, using the lazy loader's pre-built "lazywhere"
        criterion with bind parameters resolved from the object's state.

        :param state: object instance (or None for an IS NULL-style clause)
        :param value_is_parent: if True, compare in the parent->child
         direction rather than the reverse
        :param adapt_source: optional callable to adapt the resulting clause
        :param alias_secondary: if True, alias the secondary table in the
         resulting criterion
        """
        if state is not None:
            state = attributes.instance_state(state)

        reverse_direction = not value_is_parent

        if state is None:
            return self._lazy_none_clause(
                reverse_direction,
                adapt_source=adapt_source)

        if not reverse_direction:
            criterion, bind_to_col = \
                self._lazy_strategy._lazywhere, \
                self._lazy_strategy._bind_to_col
        else:
            criterion, bind_to_col = \
                self._lazy_strategy._rev_lazywhere, \
                self._lazy_strategy._rev_bind_to_col

        if reverse_direction:
            mapper = self.mapper
        else:
            mapper = self.parent

        dict_ = attributes.instance_dict(state.obj())

        def visit_bindparam(bindparam):
            # defer value resolution: the bind param pulls its value
            # from the instance's attribute at execution time
            if bindparam._identifying_key in bind_to_col:
                bindparam.callable = self._get_attr_w_warn_on_none(
                    bind_to_col[bindparam._identifying_key],
                    mapper._get_state_attr_by_column,
                    state, dict_,
                    bind_to_col[bindparam._identifying_key],
                    passive=attributes.PASSIVE_OFF)

        if self.secondary is not None and alias_secondary:
            criterion = ClauseAdapter(
                self.secondary.alias()).\
                traverse(criterion)

        criterion = visitors.cloned_traverse(
            criterion, {}, {'bindparam': visit_bindparam})

        if adapt_source:
            criterion = adapt_source(criterion)
        return criterion

    def _get_attr_w_warn_on_none(self, column, fn, *arg, **kw):
        """Return a no-argument callable invoking fn(*arg, **kw), warning
        if the value it produces is None (comparison against None won't
        render as IS NULL here)."""
        def _go():
            value = fn(*arg, **kw)
            if value is None:
                util.warn(
                    "Got None for value of column %s; this is unsupported "
                    "for a relationship comparison and will not "
                    "currently produce an IS comparison "
                    "(but may in a future release)" % column)
            return value
        return _go

    def _lazy_none_clause(self, reverse_direction=False, adapt_source=None):
        """Produce the criterion used when comparing this relationship
        against None: the lazy loader's criterion with the bound columns
        replaced by IS NULL comparisons."""
        if not reverse_direction:
            criterion, bind_to_col = \
                self._lazy_strategy._lazywhere, \
                self._lazy_strategy._bind_to_col
        else:
            criterion, bind_to_col = \
                self._lazy_strategy._rev_lazywhere, \
                self._lazy_strategy._rev_bind_to_col

        criterion = adapt_criterion_to_null(criterion, bind_to_col)

        if adapt_source:
            criterion = adapt_source(criterion)
        return criterion

    def __str__(self):
        return str(self.parent.class_.__name__) + "." + self.key

    def merge(self,
              session,
              source_state,
              source_dict,
              dest_state,
              dest_dict,
              load, _recursive):
        """Merge the value of this attribute from *source* onto *dest*
        as part of Session.merge(), recursing into related objects
        when the "merge" cascade is enabled."""

        if load:
            # "load" pass: guard against infinite recursion through
            # bidirectional relationships
            for r in self._reverse_property:
                if (source_state, r) in _recursive:
                    return

        if "merge" not in self._cascade:
            return

        if self.key not in source_dict:
            return

        if self.uselist:
            instances = source_state.get_impl(self.key).\
                get(source_state, source_dict)
            if hasattr(instances, '_sa_adapter'):
                # convert collections to adapters to get a true iterator
                instances = instances._sa_adapter

            if load:
                # for a full merge, pre-load the destination collection,
                # so that individual _merge of each item pulls from identity
                # map for those already present.
                # also assumes CollectionAttributeImpl behavior of loading
                # "old" list in any case
                dest_state.get_impl(self.key).get(dest_state, dest_dict)

            dest_list = []
            for current in instances:
                current_state = attributes.instance_state(current)
                current_dict = attributes.instance_dict(current)
                _recursive[(current_state, self)] = True
                obj = session._merge(current_state, current_dict,
                                     load=load, _recursive=_recursive)
                if obj is not None:
                    dest_list.append(obj)

            if not load:
                # non-load pass: populate the collection without
                # firing attribute events
                coll = attributes.init_state_collection(dest_state,
                                                        dest_dict, self.key)
                for c in dest_list:
                    coll.append_without_event(c)
            else:
                dest_state.get_impl(self.key)._set_iterable(
                    dest_state, dest_dict, dest_list)
        else:
            current = source_dict[self.key]
            if current is not None:
                current_state = attributes.instance_state(current)
                current_dict = attributes.instance_dict(current)
                _recursive[(current_state, self)] = True
                obj = session._merge(current_state, current_dict,
                                     load=load, _recursive=_recursive)
            else:
                obj = None

            if not load:
                dest_dict[self.key] = obj
            else:
                dest_state.get_impl(self.key).set(dest_state,
                                                  dest_dict, obj, None)

    def _value_as_iterable(self, state, dict_, key,
                           passive=attributes.PASSIVE_OFF):
        """Return a list of tuples (state, obj) for the given
        key.

        returns an empty list if the value is None/empty/PASSIVE_NO_RESULT
        """

        impl = state.manager[key].impl
        x = impl.get(state, dict_, passive=passive)
        if x is attributes.PASSIVE_NO_RESULT or x is None:
            return []
        elif hasattr(impl, 'get_collection'):
            # collection-valued attribute: expand into (state, obj) pairs
            return [
                (attributes.instance_state(o), o) for o in
                impl.get_collection(state, dict_, x, passive=passive)
            ]
        else:
            return [(attributes.instance_state(x), x)]

    def cascade_iterator(self, type_, state, dict_,
                         visited_states, halt_on=None):
        """Iterate the related objects reachable from *state* for the
        given cascade operation *type_*, yielding
        (instance, mapper, state, dict) tuples and tracking visited
        states to avoid cycles."""
        # assert type_ in self._cascade

        # only actively lazy load on the 'delete' cascade
        if type_ != 'delete' or self.passive_deletes:
            passive = attributes.PASSIVE_NO_INITIALIZE
        else:
            passive = attributes.PASSIVE_OFF

        if type_ == 'save-update':
            tuples = state.manager[self.key].impl.\
                get_all_pending(state, dict_)

        else:
            tuples = self._value_as_iterable(state, dict_, self.key,
                                             passive=passive)

        skip_pending = type_ == 'refresh-expire' and 'delete-orphan' \
            not in self._cascade

        for instance_state, c in tuples:
            if instance_state in visited_states:
                continue

            if c is None:
                # would like to emit a warning here, but
                # would not be consistent with collection.append(None)
                # current behavior of silently skipping.
                # see [ticket:2229]
                continue

            instance_dict = attributes.instance_dict(c)

            if halt_on and halt_on(instance_state):
                continue

            if skip_pending and not instance_state.key:
                continue

            instance_mapper = instance_state.manager.mapper

            if not instance_mapper.isa(self.mapper.class_manager.mapper):
                raise AssertionError("Attribute '%s' on class '%s' "
                                     "doesn't handle objects "
                                     "of type '%s'" % (
                                         self.key,
                                         self.parent.class_,
                                         c.__class__
                                     ))

            visited_states.add(instance_state)

            yield c, instance_mapper, instance_state, instance_dict

    def _add_reverse_property(self, key):
        """Link this relationship with its reverse (back-populating)
        relationship *key* on the target mapper, validating that the
        pair is a sane bidirectional configuration."""
        other = self.mapper.get_property(key, _configure_mappers=False)
        self._reverse_property.add(other)
        other._reverse_property.add(self)

        if not other.mapper.common_parent(self.parent):
            raise sa_exc.ArgumentError(
                'reverse_property %r on '
                'relationship %s references relationship %s, which '
                'does not reference mapper %s' %
                (key, self, other, self.parent))

        if self.direction in (ONETOMANY, MANYTOONE) and self.direction \
                == other.direction:
            raise sa_exc.ArgumentError(
                '%s and back-reference %s are '
                'both of the same direction %r.  Did you mean to '
                'set remote_side on the many-to-one side ?' %
                (other, self, self.direction))

    @util.memoized_property
    def mapper(self):
        """Return the targeted :class:`.Mapper` for this
        :class:`.RelationshipProperty`.

        This is a lazy-initializing static attribute.

        """
        if util.callable(self.argument) and \
                not isinstance(self.argument, (type, mapperlib.Mapper)):
            argument = self.argument()
        else:
            argument = self.argument

        if isinstance(argument, type):
            mapper_ = mapperlib.class_mapper(argument, configure=False)
        elif isinstance(self.argument, mapperlib.Mapper):
            # NOTE(review): this tests self.argument rather than the
            # resolved ``argument`` — appears longstanding; confirm before
            # changing.
            mapper_ = argument
        else:
            raise sa_exc.ArgumentError(
                "relationship '%s' expects "
                "a class or a mapper argument (received: %s)"
                % (self.key, type(argument)))
        return mapper_

    @util.memoized_property
    @util.deprecated("0.7", "Use .target")
    def table(self):
        """Return the selectable linked to this
        :class:`.RelationshipProperty` object's target
        :class:`.Mapper`.
        """
        return self.target

    def do_init(self):
        # full post-configuration of the relationship: resolve arguments,
        # build join conditions, validate cascades, wire up backrefs.
        self._check_conflicts()
        self._process_dependent_arguments()
        self._setup_join_conditions()
        self._check_cascade_settings(self._cascade)
        self._post_init()
        self._generate_backref()
        self._join_condition._warn_for_conflicting_sync_targets()
        super(RelationshipProperty, self).do_init()
        self._lazy_strategy = self._get_strategy((("lazy", "select"),))

    def _process_dependent_arguments(self):
        """Convert incoming configuration arguments to their
        proper form.

        Callables are resolved, ORM annotations removed.

        """
        # accept callables for other attributes which may require
        # deferred initialization.  This technique is used
        # by declarative "string configs" and some recipes.
        for attr in (
                'order_by', 'primaryjoin', 'secondaryjoin',
                'secondary', '_user_defined_foreign_keys', 'remote_side',
        ):
            attr_value = getattr(self, attr)
            if util.callable(attr_value):
                setattr(self, attr, attr_value())

        # remove "annotations" which are present if mapped class
        # descriptors are used to create the join expression.
        for attr in 'primaryjoin', 'secondaryjoin':
            val = getattr(self, attr)
            if val is not None:
                setattr(self, attr, _orm_deannotate(
                    expression._only_column_elements(val, attr))
                )

        # ensure expressions in self.order_by, foreign_keys,
        # remote_side are all columns, not strings.
        if self.order_by is not False and self.order_by is not None:
            self.order_by = [
                expression._only_column_elements(x, "order_by")
                for x in util.to_list(self.order_by)]

        self._user_defined_foreign_keys = \
            util.column_set(
                expression._only_column_elements(x, "foreign_keys")
                for x in util.to_column_set(
                    self._user_defined_foreign_keys
                ))

        self.remote_side = \
            util.column_set(
                expression._only_column_elements(x, "remote_side")
                for x in util.to_column_set(self.remote_side))

        self.target = self.mapper.mapped_table

    def _setup_join_conditions(self):
        """Build the JoinCondition for this relationship and copy its
        computed join/direction/column collections onto self."""
        self._join_condition = jc = JoinCondition(
            parent_selectable=self.parent.mapped_table,
            child_selectable=self.mapper.mapped_table,
            parent_local_selectable=self.parent.local_table,
            child_local_selectable=self.mapper.local_table,
            primaryjoin=self.primaryjoin,
            secondary=self.secondary,
            secondaryjoin=self.secondaryjoin,
            parent_equivalents=self.parent._equivalent_columns,
            child_equivalents=self.mapper._equivalent_columns,
            consider_as_foreign_keys=self._user_defined_foreign_keys,
            local_remote_pairs=self.local_remote_pairs,
            remote_side=self.remote_side,
            self_referential=self._is_self_referential,
            prop=self,
            support_sync=not self.viewonly,
            can_be_synced_fn=self._columns_are_mapped
        )
        self.primaryjoin = jc.deannotated_primaryjoin
        self.secondaryjoin = jc.deannotated_secondaryjoin
        self.direction = jc.direction
        self.local_remote_pairs = jc.local_remote_pairs
        self.remote_side = jc.remote_columns
        self.local_columns = jc.local_columns
        self.synchronize_pairs = jc.synchronize_pairs
        self._calculated_foreign_keys = jc.foreign_key_columns
        self.secondary_synchronize_pairs = jc.secondary_synchronize_pairs

    def _check_conflicts(self):
        """Test that this relationship is legal, warn about
        inheritance conflicts."""

        if self.parent.non_primary and not mapperlib.class_mapper(
                self.parent.class_,
                configure=False).has_property(self.key):
            raise sa_exc.ArgumentError(
                "Attempting to assign a new "
                "relationship '%s' to a non-primary mapper on "
                "class '%s'.  New relationships can only be added "
                "to the primary mapper, i.e. the very first mapper "
                "created for class '%s' " %
                (self.key, self.parent.class_.__name__,
                 self.parent.class_.__name__))

        # check for conflicting relationship() on superclass
        if not self.parent.concrete:
            for inheriting in self.parent.iterate_to_root():
                if inheriting is not self.parent \
                        and inheriting.has_property(self.key):
                    util.warn("Warning: relationship '%s' on mapper "
                              "'%s' supersedes the same relationship "
                              "on inherited mapper '%s'; this can "
                              "cause dependency issues during flush"
                              % (self.key, self.parent, inheriting))

    def _get_cascade(self):
        """Return the current cascade setting for this
        :class:`.RelationshipProperty`.
        """
        return self._cascade

    def _set_cascade(self, cascade):
        cascade = CascadeOptions(cascade)
        if 'mapper' in self.__dict__:
            # mapper already resolved; validate the new setting and
            # propagate to the dependency processor
            self._check_cascade_settings(cascade)
        self._cascade = cascade

        if self._dependency_processor:
            self._dependency_processor.cascade = cascade

    cascade = property(_get_cascade, _set_cascade)

    def _check_cascade_settings(self, cascade):
        """Validate the cascade settings against this relationship's
        direction and flags, raising or warning on misconfiguration."""
        if cascade.delete_orphan and not self.single_parent \
            and (self.direction is MANYTOMANY or self.direction
                 is MANYTOONE):
            raise sa_exc.ArgumentError(
                'On %s, delete-orphan cascade is not supported '
                'on a many-to-many or many-to-one relationship '
                'when single_parent is not set.   Set '
                'single_parent=True on the relationship().'
                % self)
        if self.direction is MANYTOONE and self.passive_deletes:
            util.warn("On %s, 'passive_deletes' is normally configured "
                      "on one-to-many, one-to-one, many-to-many "
                      "relationships only."
                      % self)

        if self.passive_deletes == 'all' and \
                ("delete" in cascade or
                 "delete-orphan" in cascade):
            raise sa_exc.ArgumentError(
                "On %s, can't set passive_deletes='all' in conjunction "
                "with 'delete' or 'delete-orphan' cascade" % self)

        if cascade.delete_orphan:
            self.mapper.primary_mapper()._delete_orphans.append(
                (self.key, self.parent.class_)
            )

    def _columns_are_mapped(self, *cols):
        """Return True if all columns in the given collection are
        mapped by the tables referenced by this :class:`.Relationship`.

        """
        for c in cols:
            if self.secondary is not None \
                    and self.secondary.c.contains_column(c):
                continue
            if not self.parent.mapped_table.c.contains_column(c) and \
                    not self.target.c.contains_column(c):
                return False
        return True

    def _generate_backref(self):
        """Interpret the 'backref' instruction to create a
        :func:`.relationship` complementary to this one."""

        if self.parent.non_primary:
            return
        if self.backref is not None and not self.back_populates:
            # backref may be a plain string key, or a (key, kwargs) tuple
            # produced by the backref() function
            if isinstance(self.backref, util.string_types):
                backref_key, kwargs = self.backref, {}
            else:
                backref_key, kwargs = self.backref
            mapper = self.mapper.primary_mapper()

            check = set(mapper.iterate_to_root()).\
                union(mapper.self_and_descendants)
            for m in check:
                if m.has_property(backref_key):
                    raise sa_exc.ArgumentError(
                        "Error creating backref "
                        "'%s' on relationship '%s': property of that "
                        "name exists on mapper '%s'" %
                        (backref_key, self, m))

            # determine primaryjoin/secondaryjoin for the
            # backref.  Use the one we had, so that
            # a custom join doesn't have to be specified in
            # both directions.
            if self.secondary is not None:
                # for many to many, just switch primaryjoin/
                # secondaryjoin.   use the annotated
                # pj/sj on the _join_condition.
                pj = kwargs.pop(
                    'primaryjoin',
                    self._join_condition.secondaryjoin_minus_local)
                sj = kwargs.pop(
                    'secondaryjoin',
                    self._join_condition.primaryjoin_minus_local)
            else:
                pj = kwargs.pop(
                    'primaryjoin',
                    self._join_condition.primaryjoin_reverse_remote)
                sj = kwargs.pop('secondaryjoin', None)
                if sj:
                    raise sa_exc.InvalidRequestError(
                        "Can't assign 'secondaryjoin' on a backref "
                        "against a non-secondary relationship."
                    )

            foreign_keys = kwargs.pop('foreign_keys',
                                      self._user_defined_foreign_keys)
            parent = self.parent.primary_mapper()
            kwargs.setdefault('viewonly', self.viewonly)
            kwargs.setdefault('post_update', self.post_update)
            kwargs.setdefault('passive_updates', self.passive_updates)
            self.back_populates = backref_key
            relationship = RelationshipProperty(
                parent, self.secondary,
                pj, sj,
                foreign_keys=foreign_keys,
                back_populates=self.key,
                **kwargs)
            mapper._configure_property(backref_key, relationship)

        if self.back_populates:
            self._add_reverse_property(self.back_populates)

    def _post_init(self):
        """Finalize uselist based on direction and set up the
        dependency processor for flush, unless viewonly."""
        if self.uselist is None:
            self.uselist = self.direction is not MANYTOONE
        if not self.viewonly:
            self._dependency_processor = \
                dependency.DependencyProcessor.from_relationship(self)

    @util.memoized_property
    def _use_get(self):
        """memoize the 'use_get' attribute of this RelationshipLoader's
        lazyloader."""

        strategy = self._lazy_strategy
        return strategy.use_get

    @util.memoized_property
    def _is_self_referential(self):
        return self.mapper.common_parent(self.parent)

    def _create_joins(self, source_polymorphic=False,
                      source_selectable=None, dest_polymorphic=False,
                      dest_selectable=None, of_type=None):
        """Produce the join clauses and selectables used to render a
        join along this relationship, applying polymorphic selectables
        and aliasing as needed.

        Returns a tuple (primaryjoin, secondaryjoin, source_selectable,
        dest_selectable, secondary, target_adapter).
        """
        if source_selectable is None:
            if source_polymorphic and self.parent.with_polymorphic:
                source_selectable = self.parent._with_polymorphic_selectable

        aliased = False
        if dest_selectable is None:
            if dest_polymorphic and self.mapper.with_polymorphic:
                dest_selectable = self.mapper._with_polymorphic_selectable
                aliased = True
            else:
                dest_selectable = self.mapper.mapped_table

            if self._is_self_referential and source_selectable is None:
                # self-referential join needs an alias on one side
                dest_selectable = dest_selectable.alias()
                aliased = True
        else:
            aliased = True

        dest_mapper = of_type or self.mapper

        single_crit = dest_mapper._single_table_criterion
        aliased = aliased or (source_selectable is not None)

        primaryjoin, secondaryjoin, secondary, target_adapter, dest_selectable = \
            self._join_condition.join_targets(
                source_selectable, dest_selectable, aliased, single_crit
            )
        if source_selectable is None:
            source_selectable = self.parent.local_table
        if dest_selectable is None:
            dest_selectable = self.mapper.local_table
        return (primaryjoin, secondaryjoin, source_selectable,
                dest_selectable, secondary, target_adapter)


def _annotate_columns(element, annotations):
    """Deep-copy *element*, applying the given annotations dict to every
    ColumnClause found within it."""
    def clone(elem):
        if isinstance(elem, expression.ColumnClause):
            elem = elem._annotate(annotations.copy())
        elem._copy_internals(clone=clone)
        return elem

    if element is not None:
        element = clone(element)
    return element


class JoinCondition(object):
    def __init__(self,
                 parent_selectable,
                 child_selectable,
                 parent_local_selectable,
                 child_local_selectable,
                 primaryjoin=None,
                 secondary=None,
                 secondaryjoin=None,
                 parent_equivalents=None,
                 child_equivalents=None,
                 consider_as_foreign_keys=None,
                 local_remote_pairs=None,
                 remote_side=None,
                 self_referential=False,
                 prop=None,
                 support_sync=True,
                 can_be_synced_fn=lambda *c: True
                 ):
        """Analyze the join condition between parent and child
        selectables, determining joins, foreign/remote/local column
        annotations, sync pairs and relationship direction."""
        self.parent_selectable = parent_selectable
        self.parent_local_selectable = parent_local_selectable
        self.child_selectable = child_selectable
        self.child_local_selectable = child_local_selectable
        self.parent_equivalents = parent_equivalents
        self.child_equivalents = child_equivalents
        self.primaryjoin = primaryjoin
        self.secondaryjoin = secondaryjoin
        self.secondary = secondary
        self.consider_as_foreign_keys = consider_as_foreign_keys
        self._local_remote_pairs = local_remote_pairs
        self._remote_side = remote_side
        self.prop = prop
        self.self_referential = self_referential
        self.support_sync = support_sync
        self.can_be_synced_fn = can_be_synced_fn
        self._determine_joins()
        self._annotate_fks()
        self._annotate_remote()
        self._annotate_local()
        self._setup_pairs()
        self._check_foreign_cols(self.primaryjoin, True)
        if self.secondaryjoin is not None:
            self._check_foreign_cols(self.secondaryjoin, False)
        self._determine_direction()
        self._check_remote_side()
        self._log_joins()

    def _log_joins(self):
        """Emit the computed join conditions and column collections to
        the owning property's logger (no-op when no prop is present)."""
        if self.prop is None:
            return
        log = self.prop.logger
        log.info('%s setup primary join %s', self.prop,
                 self.primaryjoin)
        log.info('%s setup secondary join %s', self.prop,
                 self.secondaryjoin)
        log.info('%s synchronize pairs [%s]', self.prop,
                 ','.join('(%s => %s)' % (l, r) for (l, r) in
                          self.synchronize_pairs))
        log.info('%s secondary synchronize pairs [%s]', self.prop,
                 ','.join('(%s => %s)' % (l, r) for (l, r) in
                          self.secondary_synchronize_pairs or []))
        log.info('%s local/remote pairs [%s]', self.prop,
                 ','.join('(%s / %s)' % (l, r) for (l, r) in
                          self.local_remote_pairs))
        log.info('%s remote columns [%s]', self.prop,
                 ','.join('%s' % col for col in self.remote_columns)
                 )
        log.info('%s local columns [%s]', self.prop,
                 ','.join('%s' % col for col in self.local_columns)
                 )
        log.info('%s relationship direction %s', self.prop,
                 self.direction)

    def _determine_joins(self):
        """Determine the 'primaryjoin' and 'secondaryjoin' attributes,
        if not passed to the constructor already.

        This is based on analysis of the foreign key relationships
        between the parent and target mapped selectables.

        """
        if self.secondaryjoin is not None and self.secondary is None:
            raise sa_exc.ArgumentError(
                "Property %s specified with secondary "
                "join condition but "
                "no secondary argument" % self.prop)

        # find a join between the given mapper's mapped table and
        # the given table. will try the mapper's local table first
        # for more specificity, then if not found will try the more
        # general mapped table, which in the case of inheritance is
        # a join.
        try:
            consider_as_foreign_keys = self.consider_as_foreign_keys or None
            if self.secondary is not None:
                if self.secondaryjoin is None:
                    self.secondaryjoin = \
                        join_condition(
                            self.child_selectable,
                            self.secondary,
                            a_subset=self.child_local_selectable,
                            consider_as_foreign_keys=consider_as_foreign_keys
                        )
                if self.primaryjoin is None:
                    self.primaryjoin = \
                        join_condition(
                            self.parent_selectable,
                            self.secondary,
                            a_subset=self.parent_local_selectable,
                            consider_as_foreign_keys=consider_as_foreign_keys
                        )
            else:
                if self.primaryjoin is None:
                    self.primaryjoin = \
                        join_condition(
                            self.parent_selectable,
                            self.child_selectable,
                            a_subset=self.parent_local_selectable,
                            consider_as_foreign_keys=consider_as_foreign_keys
                        )
        except sa_exc.NoForeignKeysError:
            if self.secondary is not None:
                raise sa_exc.NoForeignKeysError(
                    "Could not determine join "
                    "condition between parent/child tables on "
                    "relationship %s - there are no foreign keys "
                    "linking these tables via secondary table '%s'.  "
                    "Ensure that referencing columns are associated "
                    "with a ForeignKey or ForeignKeyConstraint, or "
                    "specify 'primaryjoin' and 'secondaryjoin' "
                    "expressions." % (self.prop, self.secondary))
            else:
                raise sa_exc.NoForeignKeysError(
                    "Could not determine join "
                    "condition between parent/child tables on "
                    "relationship %s - there are no foreign keys "
                    "linking these tables.  "
                    "Ensure that referencing columns are associated "
                    "with a ForeignKey or ForeignKeyConstraint, or "
                    "specify a 'primaryjoin' expression." % self.prop)
        except sa_exc.AmbiguousForeignKeysError:
            if self.secondary is not None:
                raise sa_exc.AmbiguousForeignKeysError(
                    "Could not determine join "
                    "condition between parent/child tables on "
                    "relationship %s - there are multiple foreign key "
                    "paths linking the tables via secondary table '%s'.  "
                    "Specify the 'foreign_keys' "
                    "argument, providing a list of those columns which "
                    "should be counted as containing a foreign key "
                    "reference from the secondary table to each of the "
                    "parent and child tables."
                    % (self.prop, self.secondary))
            else:
                raise sa_exc.AmbiguousForeignKeysError(
                    "Could not determine join "
                    "condition between parent/child tables on "
                    "relationship %s - there are multiple foreign key "
                    "paths linking the tables.  Specify the "
                    "'foreign_keys' argument, providing a list of those "
                    "columns which should be counted as containing a "
                    "foreign key reference to the parent table."
                    % self.prop)

    @property
    def primaryjoin_minus_local(self):
        return _deep_deannotate(self.primaryjoin, values=("local", "remote"))

    @property
    def secondaryjoin_minus_local(self):
        return _deep_deannotate(self.secondaryjoin,
                                values=("local", "remote"))

    @util.memoized_property
    def primaryjoin_reverse_remote(self):
        """Return the primaryjoin condition suitable for the
        "reverse" direction.

        If the primaryjoin was delivered here with pre-existing
        "remote" annotations, the local/remote annotations
        are reversed.  Otherwise, the local/remote annotations
        are removed.

        """
        if self._has_remote_annotations:
            def replace(element):
                if "remote" in element._annotations:
                    v = element._annotations.copy()
                    del v['remote']
                    v['local'] = True
                    return element._with_annotations(v)
                elif "local" in element._annotations:
                    v = element._annotations.copy()
                    del v['local']
                    v['remote'] = True
                    return element._with_annotations(v)
            return visitors.replacement_traverse(
                self.primaryjoin, {}, replace)
        else:
            if self._has_foreign_annotations:
                # TODO: coverage
                return _deep_deannotate(self.primaryjoin,
                                        values=("local", "remote"))
            else:
                return _deep_deannotate(self.primaryjoin)

    def _has_annotation(self, clause, annotation):
        """Return True if any column in *clause* carries the given
        annotation key."""
        for col in visitors.iterate(clause, {}):
            if annotation in col._annotations:
                return True
        else:
            # for/else: runs when the loop completes without break
            return False

    @util.memoized_property
    def _has_foreign_annotations(self):
        return self._has_annotation(self.primaryjoin, "foreign")

    @util.memoized_property
    def _has_remote_annotations(self):
        return self._has_annotation(self.primaryjoin, "remote")

    def _annotate_fks(self):
        """Annotate the primaryjoin and secondaryjoin structures
        with 'foreign' annotations marking columns
        considered as foreign.

        """
        if self._has_foreign_annotations:
            return

        if self.consider_as_foreign_keys:
            self._annotate_from_fk_list()
        else:
            self._annotate_present_fks()

    def _annotate_from_fk_list(self):
        """Mark as 'foreign' exactly those columns the user listed in
        consider_as_foreign_keys."""
        def check_fk(col):
            if col in self.consider_as_foreign_keys:
                return col._annotate({"foreign": True})
        self.primaryjoin = visitors.replacement_traverse(
            self.primaryjoin,
            {},
            check_fk
        )
        if self.secondaryjoin is not None:
            self.secondaryjoin = visitors.replacement_traverse(
                self.secondaryjoin,
                {},
                check_fk
            )

    def _annotate_present_fks(self):
        """Mark as 'foreign' the columns implied by actual ForeignKey
        references (or by membership in the secondary table) within the
        join conditions."""
        if self.secondary is not None:
            secondarycols = util.column_set(self.secondary.c)
        else:
            secondarycols = set()

        def is_foreign(a, b):
            if isinstance(a, schema.Column) and \
                    isinstance(b, schema.Column):
                if a.references(b):
                    return a
                elif b.references(a):
                    return b

            if secondarycols:
                if a in secondarycols and b not in secondarycols:
                    return a
                elif b in secondarycols and a not in secondarycols:
                    return b

        def visit_binary(binary):
            if not isinstance(binary.left, sql.ColumnElement) or \
                    not isinstance(binary.right, sql.ColumnElement):
                return

            if "foreign" not in binary.left._annotations and \
                    "foreign" not in binary.right._annotations:
                col = is_foreign(binary.left, binary.right)
                if col is not None:
                    if col.compare(binary.left):
                        binary.left = binary.left._annotate(
                            {"foreign": True})
                    elif col.compare(binary.right):
                        binary.right = binary.right._annotate(
                            {"foreign": True})

        self.primaryjoin = visitors.cloned_traverse(
            self.primaryjoin,
            {},
            {"binary": visit_binary}
        )
        if self.secondaryjoin is not None:
            self.secondaryjoin = visitors.cloned_traverse(
                self.secondaryjoin,
                {},
                {"binary": visit_binary}
            )

    def _refers_to_parent_table(self):
        """Return True if the join condition contains column
        comparisons where both columns are in both tables.

        """
        pt = self.parent_selectable
        mt = self.child_selectable
        result = [False]

        def visit_binary(binary):
            c, f = binary.left, binary.right
            if (
                isinstance(c, expression.ColumnClause) and
                isinstance(f, expression.ColumnClause) and
                pt.is_derived_from(c.table) and
                pt.is_derived_from(f.table) and
                mt.is_derived_from(c.table) and
                mt.is_derived_from(f.table)
            ):
                result[0] = True
        visitors.traverse(
            self.primaryjoin,
            {},
            {"binary": visit_binary}
        )
        return result[0]

    def _tables_overlap(self):
        """Return True if parent/child tables have some overlap."""

        return selectables_overlap(
            self.parent_selectable, self.child_selectable)

    def _annotate_remote(self):
        """Annotate the primaryjoin and secondaryjoin
        structures with 'remote' annotations marking columns
        considered as part of the 'remote' side.

        """
        if self._has_remote_annotations:
            return

        # dispatch on configuration: explicit secondary table, explicit
        # remote_side/pairs, self-referential, overlapping tables, or
        # fully distinct tables.
        if self.secondary is not None:
            self._annotate_remote_secondary()
        elif self._local_remote_pairs or self._remote_side:
            self._annotate_remote_from_args()
        elif self._refers_to_parent_table():
            self._annotate_selfref(lambda col: "foreign" in col._annotations,
                                   False)
        elif self._tables_overlap():
            self._annotate_remote_with_overlap()
        else:
            self._annotate_remote_distinct_selectables()

    def _annotate_remote_secondary(self):
        """annotate 'remote' in primaryjoin, secondaryjoin
        when 'secondary' is present.

        """
        def repl(element):
            if self.secondary.c.contains_column(element):
                return element._annotate({"remote": True})
        self.primaryjoin = visitors.replacement_traverse(
            self.primaryjoin, {}, repl)
        self.secondaryjoin = visitors.replacement_traverse(
            self.secondaryjoin, {}, repl)

    def _annotate_selfref(self, fn, remote_side_given):
        """annotate 'remote' in primaryjoin, secondaryjoin
        when the relationship is detected as self-referential.

        """
        def visit_binary(binary):
            equated = binary.left.compare(binary.right)
            if isinstance(binary.left, expression.ColumnClause) and \
                    isinstance(binary.right, expression.ColumnClause):
                # assume one to many - FKs are "remote"
                if fn(binary.left):
                    binary.left = binary.left._annotate({"remote": True})
                if fn(binary.right) and not equated:
                    binary.right = binary.right._annotate(
                        {"remote": True})
            elif not remote_side_given:
                self._warn_non_column_elements()

        self.primaryjoin = visitors.cloned_traverse(
            self.primaryjoin, {},
            {"binary": visit_binary})

    def _annotate_remote_from_args(self):
        """annotate 'remote' in primaryjoin, secondaryjoin
        when the 'remote_side' or '_local_remote_pairs'
        arguments are used.

        """
        if self._local_remote_pairs:
            if self._remote_side:
                raise sa_exc.ArgumentError(
                    "remote_side argument is redundant "
                    "against more detailed _local_remote_side "
                    "argument.")

            remote_side = [r for (l, r) in self._local_remote_pairs]
        else:
            remote_side = self._remote_side

        if self._refers_to_parent_table():
            self._annotate_selfref(lambda col: col in remote_side, True)
        else:
            def repl(element):
                if element in remote_side:
                    return element._annotate({"remote": True})
            self.primaryjoin = visitors.replacement_traverse(
                self.primaryjoin, {}, repl)

    def _annotate_remote_with_overlap(self):
        """annotate 'remote' in primaryjoin, secondaryjoin
        when the parent/child tables have some set of
        tables in common, though is not a fully self-referential
        relationship.

        """
        def visit_binary(binary):
            binary.left, binary.right = proc_left_right(binary.left,
                                                        binary.right)
            binary.right, binary.left = proc_left_right(binary.right,
                                                        binary.left)

        check_entities = self.prop is not None and \
            self.prop.mapper is not self.prop.parent

        def proc_left_right(left, right):
            if isinstance(left, expression.ColumnClause) and \
                    isinstance(right, expression.ColumnClause):
                if self.child_selectable.c.contains_column(right) and \
                        self.parent_selectable.c.contains_column(left):
                    right = right._annotate({"remote": True})
            elif check_entities and \
                    right._annotations.get('parentmapper') is self.prop.mapper:
                right = right._annotate({"remote": True})
            elif check_entities and \
                    left._annotations.get('parentmapper') is self.prop.mapper:
                left = left._annotate({"remote": True})
            else:
                self._warn_non_column_elements()

            return left, right

        self.primaryjoin = visitors.cloned_traverse(
            self.primaryjoin, {},
            {"binary": visit_binary})

    def _annotate_remote_distinct_selectables(self):
        """annotate 'remote' in primaryjoin, secondaryjoin
        when the parent/child tables are entirely
        separate.

        """
        def repl(element):
            if self.child_selectable.c.contains_column(element) and \
                    (not self.parent_local_selectable.c.
                        contains_column(element) or
                        self.child_local_selectable.c.
                        contains_column(element)):
                return element._annotate({"remote": True})
        self.primaryjoin = visitors.replacement_traverse(
            self.primaryjoin, {}, repl)

    def _warn_non_column_elements(self):
        util.warn(
            "Non-simple column elements in primary "
            "join condition for property %s - consider using "
            "remote() annotations to mark the remote side."
            % self.prop
        )

    def _annotate_local(self):
        """Annotate the primaryjoin and secondaryjoin
        structures with 'local' annotations.

        This annotates all column elements found
        simultaneously in the parent table
        and the join condition that don't have a
        'remote' annotation set up from
        _annotate_remote() or user-defined.
""" if self._has_annotation(self.primaryjoin, "local"): return if self._local_remote_pairs: local_side = util.column_set([l for (l, r) in self._local_remote_pairs]) else: local_side = util.column_set(self.parent_selectable.c) def locals_(elem): if "remote" not in elem._annotations and \ elem in local_side: return elem._annotate({"local": True}) self.primaryjoin = visitors.replacement_traverse( self.primaryjoin, {}, locals_ ) def _check_remote_side(self): if not self.local_remote_pairs: raise sa_exc.ArgumentError( 'Relationship %s could ' 'not determine any unambiguous local/remote column ' 'pairs based on join condition and remote_side ' 'arguments. ' 'Consider using the remote() annotation to ' 'accurately mark those elements of the join ' 'condition that are on the remote side of ' 'the relationship.' % (self.prop, )) def _check_foreign_cols(self, join_condition, primary): """Check the foreign key columns collected and emit error messages.""" can_sync = False foreign_cols = self._gather_columns_with_annotation( join_condition, "foreign") has_foreign = bool(foreign_cols) if primary: can_sync = bool(self.synchronize_pairs) else: can_sync = bool(self.secondary_synchronize_pairs) if self.support_sync and can_sync or \ (not self.support_sync and has_foreign): return # from here below is just determining the best error message # to report. Check for a join condition using any operator # (not just ==), perhaps they need to turn on "viewonly=True". if self.support_sync and has_foreign and not can_sync: err = "Could not locate any simple equality expressions "\ "involving locally mapped foreign key columns for "\ "%s join condition "\ "'%s' on relationship %s." % ( primary and 'primary' or 'secondary', join_condition, self.prop ) err += \ " Ensure that referencing columns are associated "\ "with a ForeignKey or ForeignKeyConstraint, or are "\ "annotated in the join condition with the foreign() "\ "annotation. 
To allow comparison operators other than "\ "'==', the relationship can be marked as viewonly=True." raise sa_exc.ArgumentError(err) else: err = "Could not locate any relevant foreign key columns "\ "for %s join condition '%s' on relationship %s." % ( primary and 'primary' or 'secondary', join_condition, self.prop ) err += \ ' Ensure that referencing columns are associated '\ 'with a ForeignKey or ForeignKeyConstraint, or are '\ 'annotated in the join condition with the foreign() '\ 'annotation.' raise sa_exc.ArgumentError(err) def _determine_direction(self): """Determine if this relationship is one to many, many to one, many to many. """ if self.secondaryjoin is not None: self.direction = MANYTOMANY else: parentcols = util.column_set(self.parent_selectable.c) targetcols = util.column_set(self.child_selectable.c) # fk collection which suggests ONETOMANY. onetomany_fk = targetcols.intersection( self.foreign_key_columns) # fk collection which suggests MANYTOONE. manytoone_fk = parentcols.intersection( self.foreign_key_columns) if onetomany_fk and manytoone_fk: # fks on both sides. test for overlap of local/remote # with foreign key. # we will gather columns directly from their annotations # without deannotating, so that we can distinguish on a column # that refers to itself. # 1. columns that are both remote and FK suggest # onetomany. onetomany_local = self._gather_columns_with_annotation( self.primaryjoin, "remote", "foreign") # 2. columns that are FK but are not remote (e.g. local) # suggest manytoone. manytoone_local = set([c for c in self._gather_columns_with_annotation( self.primaryjoin, "foreign") if "remote" not in c._annotations]) # 3. if both collections are present, remove columns that # refer to themselves. 
This is for the case of # and_(Me.id == Me.remote_id, Me.version == Me.version) if onetomany_local and manytoone_local: self_equated = self.remote_columns.intersection( self.local_columns ) onetomany_local = onetomany_local.difference(self_equated) manytoone_local = manytoone_local.difference(self_equated) # at this point, if only one or the other collection is # present, we know the direction, otherwise it's still # ambiguous. if onetomany_local and not manytoone_local: self.direction = ONETOMANY elif manytoone_local and not onetomany_local: self.direction = MANYTOONE else: raise sa_exc.ArgumentError( "Can't determine relationship" " direction for relationship '%s' - foreign " "key columns within the join condition are present " "in both the parent and the child's mapped tables. " "Ensure that only those columns referring " "to a parent column are marked as foreign, " "either via the foreign() annotation or " "via the foreign_keys argument." % self.prop) elif onetomany_fk: self.direction = ONETOMANY elif manytoone_fk: self.direction = MANYTOONE else: raise sa_exc.ArgumentError( "Can't determine relationship " "direction for relationship '%s' - foreign " "key columns are present in neither the parent " "nor the child's mapped tables" % self.prop) def _deannotate_pairs(self, collection): """provide deannotation for the various lists of pairs, so that using them in hashes doesn't incur high-overhead __eq__() comparisons against original columns mapped. 
""" return [(x._deannotate(), y._deannotate()) for x, y in collection] def _setup_pairs(self): sync_pairs = [] lrp = util.OrderedSet([]) secondary_sync_pairs = [] def go(joincond, collection): def visit_binary(binary, left, right): if "remote" in right._annotations and \ "remote" not in left._annotations and \ self.can_be_synced_fn(left): lrp.add((left, right)) elif "remote" in left._annotations and \ "remote" not in right._annotations and \ self.can_be_synced_fn(right): lrp.add((right, left)) if binary.operator is operators.eq and \ self.can_be_synced_fn(left, right): if "foreign" in right._annotations: collection.append((left, right)) elif "foreign" in left._annotations: collection.append((right, left)) visit_binary_product(visit_binary, joincond) for joincond, collection in [ (self.primaryjoin, sync_pairs), (self.secondaryjoin, secondary_sync_pairs) ]: if joincond is None: continue go(joincond, collection) self.local_remote_pairs = self._deannotate_pairs(lrp) self.synchronize_pairs = self._deannotate_pairs(sync_pairs) self.secondary_synchronize_pairs = \ self._deannotate_pairs(secondary_sync_pairs) _track_overlapping_sync_targets = weakref.WeakKeyDictionary() def _warn_for_conflicting_sync_targets(self): if not self.support_sync: return # we would like to detect if we are synchronizing any column # pairs in conflict with another relationship that wishes to sync # an entirely different column to the same target. This is a # very rare edge case so we will try to minimize the memory/overhead # impact of this check for from_, to_ in [ (from_, to_) for (from_, to_) in self.synchronize_pairs ] + [ (from_, to_) for (from_, to_) in self.secondary_synchronize_pairs ]: # save ourselves a ton of memory and overhead by only # considering columns that are subject to a overlapping # FK constraints at the core level. 
This condition can arise # if multiple relationships overlap foreign() directly, but # we're going to assume it's typically a ForeignKeyConstraint- # level configuration that benefits from this warning. if len(to_.foreign_keys) < 2: continue if to_ not in self._track_overlapping_sync_targets: self._track_overlapping_sync_targets[to_] = \ weakref.WeakKeyDictionary({self.prop: from_}) else: other_props = [] prop_to_from = self._track_overlapping_sync_targets[to_] for pr, fr_ in prop_to_from.items(): if pr.mapper in mapperlib._mapper_registry and \ fr_ is not from_ and \ pr not in self.prop._reverse_property: other_props.append((pr, fr_)) if other_props: util.warn( "relationship '%s' will copy column %s to column %s, " "which conflicts with relationship(s): %s. " "Consider applying " "viewonly=True to read-only relationships, or provide " "a primaryjoin condition marking writable columns " "with the foreign() annotation." % ( self.prop, from_, to_, ", ".join( "'%s' (copies %s to %s)" % (pr, fr_, to_) for (pr, fr_) in other_props) ) ) self._track_overlapping_sync_targets[to_][self.prop] = from_ @util.memoized_property def remote_columns(self): return self._gather_join_annotations("remote") @util.memoized_property def local_columns(self): return self._gather_join_annotations("local") @util.memoized_property def foreign_key_columns(self): return self._gather_join_annotations("foreign") @util.memoized_property def deannotated_primaryjoin(self): return _deep_deannotate(self.primaryjoin) @util.memoized_property def deannotated_secondaryjoin(self): if self.secondaryjoin is not None: return _deep_deannotate(self.secondaryjoin) else: return None def _gather_join_annotations(self, annotation): s = set( self._gather_columns_with_annotation( self.primaryjoin, annotation) ) if self.secondaryjoin is not None: s.update( self._gather_columns_with_annotation( self.secondaryjoin, annotation) ) return set([x._deannotate() for x in s]) def _gather_columns_with_annotation(self, clause, 
    def join_targets(self, source_selectable,
                     dest_selectable,
                     aliased,
                     single_crit=None):
        """Given a source and destination selectable, create a
        join between them.

        This takes into account aliasing the join clause
        to reference the appropriate corresponding columns
        in the target objects, as well as the extra child
        criterion, equivalent column sets, etc.

        Returns a 5-tuple ``(primaryjoin, secondaryjoin, secondary,
        target_adapter, dest_selectable)``; ``target_adapter`` is None
        when ``aliased`` is false.
        """
        # place a barrier on the destination such that
        # replacement traversals won't ever dig into it.
        # its internal structure remains fixed
        # regardless of context.
        dest_selectable = _shallow_annotate(
            dest_selectable,
            {'no_replacement_traverse': True})

        primaryjoin, secondaryjoin, secondary = self.primaryjoin, \
            self.secondaryjoin, self.secondary

        # adjust the join condition for single table inheritance,
        # in the case that the join is to a subclass
        # this is analogous to the
        # "_adjust_for_single_table_inheritance()" method in Query.
        if single_crit is not None:
            if secondaryjoin is not None:
                secondaryjoin = secondaryjoin & single_crit
            else:
                primaryjoin = primaryjoin & single_crit

        if aliased:
            if secondary is not None:
                # many-to-many: alias the association table and adapt
                # both join conditions to reference the alias
                secondary = secondary.alias(flat=True)
                primary_aliasizer = ClauseAdapter(secondary)
                secondary_aliasizer = \
                    ClauseAdapter(dest_selectable,
                                  equivalents=self.child_equivalents).\
                    chain(primary_aliasizer)
                if source_selectable is not None:
                    primary_aliasizer = \
                        ClauseAdapter(secondary).\
                        chain(ClauseAdapter(
                            source_selectable,
                            equivalents=self.parent_equivalents))
                secondaryjoin = \
                    secondary_aliasizer.traverse(secondaryjoin)
            else:
                # plain join: adapt toward the destination, excluding
                # columns annotated "local" (they belong to the source
                # side); optionally chain adaptation toward the source,
                # excluding "remote" columns for the symmetric reason
                primary_aliasizer = ClauseAdapter(
                    dest_selectable,
                    exclude_fn=_ColInAnnotations("local"),
                    equivalents=self.child_equivalents)
                if source_selectable is not None:
                    primary_aliasizer.chain(
                        ClauseAdapter(
                            source_selectable,
                            exclude_fn=_ColInAnnotations("remote"),
                            equivalents=self.parent_equivalents))
                secondary_aliasizer = None

            primaryjoin = primary_aliasizer.traverse(primaryjoin)
            target_adapter = secondary_aliasizer or primary_aliasizer
            # the returned adapter is used without the exclusion rules
            target_adapter.exclude_fn = None
        else:
            target_adapter = None
        return primaryjoin, secondaryjoin, secondary, \
            target_adapter, dest_selectable
self.primaryjoin if self.secondaryjoin is None or not reverse_direction: lazywhere = visitors.replacement_traverse( lazywhere, {}, col_to_bind) if self.secondaryjoin is not None: secondaryjoin = self.secondaryjoin if reverse_direction: secondaryjoin = visitors.replacement_traverse( secondaryjoin, {}, col_to_bind) lazywhere = sql.and_(lazywhere, secondaryjoin) bind_to_col = dict((binds[col].key, col) for col in binds) # this is probably not necessary lazywhere = _deep_deannotate(lazywhere) return lazywhere, bind_to_col, equated_columns class _ColInAnnotations(object): """Seralizable equivalent to: lambda c: "name" in c._annotations """ def __init__(self, name): self.name = name def __call__(self, c): return self.name in c._annotations
codeparrot/github-code-clean
#!/usr/bin/env python # File created on 05 Jun 2011 from __future__ import division __author__ = "Greg Caporaso" __copyright__ = "Copyright 2011, The QIIME project" __credits__ = ["Greg Caporaso", "Jose Antonio Navas Molina", "Daniel McDonald"] __license__ = "GPL" __version__ = "1.9.1-dev" __maintainer__ = "Greg Caporaso" __email__ = "gregcaporaso@gmail.com" import numpy as np from unittest import TestCase, main from tempfile import mkdtemp, NamedTemporaryFile from shutil import rmtree from qiime.split_libraries_fastq import ( process_fastq_single_end_read_file, quality_filter_sequence, bad_chars_from_threshold, get_illumina_qual_chars, quality_filter_sequence, FastqParseError, check_header_match_pre180, check_header_match_180_or_later, correct_barcode, process_fastq_single_end_read_file_no_barcode, extract_reads_from_interleaved ) from qiime.golay import decode_golay_12 import skbio.parse.sequences from skbio.parse.sequences.fastq import ascii_to_phred64, ascii_to_phred33 class FakeFile(object): def __init__(self, d=""): self.s = d def write(self, s): self.s += s def close(self): pass class SplitLibrariesFastqTests(TestCase): """ """ def setUp(self): self.fastq1 = fastq1.split('\n') self.barcode_fastq1 = barcode_fastq1.split('\n') self.fastq2 = fastq2.split('\n') self.barcode_fastq2 = barcode_fastq2.split('\n') self.fastq1_expected_no_qual_unassigned = fastq1_expected_no_qual_unassigned self.fastq1_expected_default = fastq1_expected_default self.fastq2_expected_default = fastq2_expected_default self.fastq1_expected_single_barcode = fastq1_expected_single_barcode self.barcode_map1 = barcode_map1 # vars for test_create_forward_and_reverse_fp self.temp_dir_path = mkdtemp() self.create_forward_and_reverse = NamedTemporaryFile( prefix='create_forward_and_reverse_fp_', suffix='.fastq', dir=self.temp_dir_path, delete=False) self.create_forward_and_reverse_fp = self.create_forward_and_reverse.name self.create_forward_and_reverse.write(forward_reads) 
    def tearDown(self):
        """Remove all temp files"""
        # the NamedTemporaryFile created in setUp lives under this
        # directory, so removing the tree cleans up everything
        rmtree(self.temp_dir_path)

    def test_correct_barcode_exact_match(self):
        """correct_barcode functions as expected w exact match"""
        barcode = "GGAGACAAGGGA"
        barcode_to_sample_id = {
            "GGAGACAAGGGA": "s1",
            "ACACCTGGTGAT": "s2"}

        # exact hit with no correction function: zero errors, not
        # flagged as golay-corrected, sample id resolved
        correction_fn = None
        actual = correct_barcode(barcode, barcode_to_sample_id,
                                 correction_fn)
        expected = (0, barcode, False, 's1')
        self.assertEqual(actual, expected)

        # an exact match behaves the same even when a correction
        # function is supplied
        correction_fn = decode_golay_12
        actual = correct_barcode(barcode, barcode_to_sample_id,
                                 correction_fn)
        expected = (0, barcode, False, 's1')
        self.assertEqual(actual, expected)

    def test_correct_barcode_no_error_correction(self):
        """correct_barcode functions as expected w no error correction"""
        barcode = "GGAGACAAGGGT"
        barcode_to_sample_id = {
            "GGAGACAAGGGA": "s1",
            "ACACCTGGTGAT": "s2"}
        correction_fn = None

        # unknown barcode without correction: sample id is None
        actual = correct_barcode(barcode, barcode_to_sample_id,
                                 correction_fn)
        expected = (0, barcode, False, None)
        self.assertEqual(actual, expected)

        # barcode contains N
        barcode = "CCAGTGTANGCA"
        actual = correct_barcode(barcode, barcode_to_sample_id,
                                 correction_fn)
        expected = (0, "CCAGTGTANGCA", False, None)
        self.assertEqual(actual, expected)

    def test_correct_barcode_golay_correction(self):
        """correct_barcode functions as expected w golay correction"""
        barcode = "GGAGACAAGGGT"
        barcode_to_sample_id = {
            "GGAGACAAGGGA": "s1",
            "ACACCTGGTGAT": "s2"}
        correction_fn = decode_golay_12

        # one error, corrected to a known barcode:
        # (num_errors, corrected_barcode, corrected_flag, sample_id)
        actual = correct_barcode(barcode, barcode_to_sample_id,
                                 correction_fn)
        expected = (1, "GGAGACAAGGGA", True, "s1")
        self.assertEqual(actual, expected)

        barcode = "ACACCTGGTGAC"
        actual = correct_barcode(barcode, barcode_to_sample_id,
                                 correction_fn)
        expected = (1, "ACACCTGGTGAT", True, "s2")
        self.assertEqual(actual, expected)

        # valid code, but not in barcode_to_sample_id map
        barcode = "CCAGTGTATGCA"
        actual = correct_barcode(barcode, barcode_to_sample_id,
                                 correction_fn)
        expected = (0, "CCAGTGTATGCA", True, None)
        self.assertEqual(actual, expected)

        # invalid code, corrected not in barcode_to_sample_id map
        barcode = "CCTGTGTATGCA"
        actual = correct_barcode(barcode, barcode_to_sample_id,
                                 correction_fn)
        expected = (1, "CCAGTGTATGCA", True, None)
        self.assertEqual(actual, expected)

    def test_process_fastq_single_end_read_file_invalid_phred_offset(self):
        # passing phred_offset that isn't 33 or 64 raises error
        with self.assertRaises(ValueError):
            list(process_fastq_single_end_read_file(
                self.fastq1,
                self.barcode_fastq1,
                self.barcode_map1,
                store_unassigned=True,
                max_bad_run_length=1000,
                phred_quality_threshold=None,
                min_per_read_length_fraction=0.,
                rev_comp=False,
                rev_comp_barcode=False,
                seq_max_N=1000,
                start_seq_id=0,
                filter_bad_illumina_qual_digit=False,
                phred_offset=42))

        # passing wrong phred_offset for illumina1.8+ data raises error
        with self.assertRaises(skbio.parse.sequences.FastqParseError):
            list(process_fastq_single_end_read_file(
                self.fastq2,
                self.barcode_fastq2,
                self.barcode_map1,
                min_per_read_length_fraction=0.45,
                phred_offset=64))
list(actual) expected = self.fastq1_expected_default self.assertEqual(len(actual), len(expected)) for i in range(len(expected)): np.testing.assert_equal(actual[i], expected[i]) def test_process_fastq_single_end_read_file_no_barcode(self): """process_fastq_single_end_read_file functions as expected for non-barcoded lane """ actual = process_fastq_single_end_read_file_no_barcode( self.fastq1, 's1', min_per_read_length_fraction=0.45) actual = list(actual) expected = self.fastq1_expected_single_barcode self.assertEqual(len(actual), len(expected)) for i in range(len(expected)): np.testing.assert_equal(actual[i], expected[i]) def test_process_fastq_single_end_read_file_w_defaults_v180(self): """process_fastq_single_end_read_file functions as expected w default filters on casava 180 data """ # test autodetection of phred_offset (phred_offset=None) and # phred_offset=33 gives same results for phred_offset in None, 33: actual = process_fastq_single_end_read_file( self.fastq2, self.barcode_fastq2, self.barcode_map1, min_per_read_length_fraction=0.45, phred_offset=phred_offset) actual = list(actual) expected = self.fastq2_expected_default self.assertEqual(len(actual), len(expected)) for i in range(len(expected)): np.testing.assert_equal(actual[i], expected[i]) def test_process_fastq_single_end_read_file_handles_log(self): """ process_fastq_single_end_read_file generates log when expected """ log = FakeFile() list(process_fastq_single_end_read_file(self.fastq1, self.barcode_fastq1, self.barcode_map1, min_per_read_length_fraction=0.45, log_f=log)) self.assertTrue(log.s.startswith("Quality filter results")) def test_process_fastq_single_end_read_file_handles_histogram(self): """ process_fastq_single_end_read_file generates histogram when expected """ histogram = FakeFile() list(process_fastq_single_end_read_file(self.fastq1, self.barcode_fastq1, self.barcode_map1, min_per_read_length_fraction=0.45, histogram_f=histogram)) self.assertTrue(histogram.s.startswith("Length")) def 
test_check_header_match_pre180(self): """check_header_match_pre180 functions as expected with varied input """ # match w illumina qual string self.assertTrue(check_header_match_pre180("@990:2:4:11272:5533#1/1", "@990:2:4:11272:5533#1/2")) self.assertTrue(check_header_match_pre180("@990:2:4:11272:5533#1/1", "@990:2:4:11272:5533#1/3")) # qual string differs (this is acceptable) self.assertTrue(check_header_match_pre180("@990:2:4:11272:5533#1/1", "@990:2:4:11272:5533#0/3")) # match wo illumina qual string self.assertTrue(check_header_match_pre180("@990:2:4:11272:5533/1", "@990:2:4:11272:5533/2")) self.assertTrue(check_header_match_pre180("@990:2:4:11272:5533/1", "@990:2:4:11272:5533/3")) # mismatch w illumina qual string self.assertFalse(check_header_match_pre180("@990:2:4:11272:5533#1/1", "@990:2:4:11272:5532#1/2")) self.assertFalse(check_header_match_pre180("@990:2:4:11272:5533#1/1", "@890:2:4:11272:5533#1/2")) # mismatch wo illumina qual string self.assertFalse(check_header_match_pre180("@990:2:4:11272:5533/1", "@990:2:4:11272:5532/2")) self.assertFalse(check_header_match_pre180("@990:2:4:11272:5533/1", "@890:2:4:11272:5533/2")) def test_check_header_match_180_or_later(self): """check_header_match_180_or_later functions as expected with varied input """ # identical self.assertTrue(check_header_match_180_or_later( "M00176:17:000000000-A0CNA:1:1:15487:1773 1:N:0:0", "M00176:17:000000000-A0CNA:1:1:15487:1773 1:N:0:0")) # identical except read number self.assertTrue(check_header_match_180_or_later( "M00176:17:000000000-A0CNA:1:1:15487:1773 1:N:0:0", "M00176:17:000000000-A0CNA:1:1:15487:1773 2:N:0:0")) # identical except read number self.assertTrue(check_header_match_180_or_later( "M00176:17:000000000-A0CNA:1:1:15487:1773 1:N:0:0", "M00176:17:000000000-A0CNA:1:1:15487:1773 3:N:0:0")) # different reads self.assertFalse(check_header_match_180_or_later( "M00176:17:000000000-A0CNA:1:1:15487:1773 1:N:0:0", "M00176:17:000000000-A0CNA:1:1:16427:1774 1:N:0:0")) def 
test_process_fastq_single_end_read_file_toggle_store_unassigned(self): """process_fastq_single_end_read_file handles store_unassigned """ fastq_f = [ "@990:2:4:11272:5533#1/1", "GCACACACCGCCCGTCACACCACGAGAGTCGGCAACACCCGAAGTCGGTGAGGTAACCCCGAAAGGGGAGCCAGCC", "+", "bbbbbbbbbbbbbbbbbbbbbbbbbY``\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`"] barcode_fastq_f = [ "@990:2:4:11272:5533#1/2", "GAAAAAAAAAAT", "+", "bbbbbbbbbbbb"] barcode_to_sample_id = {'AAAAAAAAAAAA': 's1'} # empty results when store_unassigned=False actual = process_fastq_single_end_read_file(fastq_f, barcode_fastq_f, barcode_to_sample_id, store_unassigned=False, max_bad_run_length=0, phred_quality_threshold=2, min_per_read_length_fraction=0.75, rev_comp=False, rev_comp_barcode=False, seq_max_N=0, start_seq_id=0) actual = list(actual) expected = [] self.assertEqual(actual, expected) # non-empty results when store_unassigned=True actual = process_fastq_single_end_read_file(fastq_f, barcode_fastq_f, barcode_to_sample_id, store_unassigned=True, max_bad_run_length=0, phred_quality_threshold=2, min_per_read_length_fraction=0.75, rev_comp=False, rev_comp_barcode=False, seq_max_N=0, start_seq_id=0) actual = list(actual) expected = [( 'Unassigned_0 990:2:4:11272:5533#1/1 orig_bc=GAAAAAAAAAAT new_bc=GAAAAAAAAAAT bc_diffs=0', "GCACACACCGCCCGTCACACCACGAGAGTCGGCAACACCCGAAGTCGGTGAGGTAACCCCGAAAGGGGAGCCAGCC", np.array([34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 25, 32, 32, 28, 32, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 32, 34, 34, 34, 34, 33, 34, 32, 33, 32, 31, 27, 34, 33, 31, 33, 33, 29, 34, 30, 31, 34, 9, 23, 20, 20, 17, 30, 25, 18, 30, 21, 32], dtype=np.int8) , 0)] np.testing.assert_equal(actual, expected) def test_process_fastq_single_end_read_file_toggle_thirteen_base_barcodes( self): """process_fastq_single_end_read_file handles thriteen base reads of tweleve base barcodes """ fastq_f = [ "@990:2:4:11272:5533#1/1", 
"GCACACACCGCCCGTCACACCACGAGAGTCGGCAACACCCGAAGTCGGTGAGGTAACCCCGAAAGGGGAGCCAGCC", "+", "bbbbbbbbbbbbbbbbbbbbbbbbbY``\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`"] barcode_fastq_f = [ "@990:2:4:11272:5533#1/2", "AAAAAAAAAAAAT", "+", "bbbbbbbbbbbbb"] barcode_to_sample_id = {'AAAAAAAAAAAA': 's1', 'TAAAAAAAAAAA': 's2'} # rev_comp = False actual = process_fastq_single_end_read_file(fastq_f, barcode_fastq_f, barcode_to_sample_id, store_unassigned=False, max_bad_run_length=0, phred_quality_threshold=2, min_per_read_length_fraction=0.75, rev_comp=False, rev_comp_barcode=False, seq_max_N=0, start_seq_id=0) actual = list(actual) expected = [( 's1_0 990:2:4:11272:5533#1/1 orig_bc=AAAAAAAAAAAA new_bc=AAAAAAAAAAAA bc_diffs=0', "GCACACACCGCCCGTCACACCACGAGAGTCGGCAACACCCGAAGTCGGTGAGGTAACCCCGAAAGGGGAGCCAGCC", np.array([34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 25, 32, 32, 28, 32, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 32, 34, 34, 34, 34, 33, 34, 32, 33, 32, 31, 27, 34, 33, 31, 33, 33, 29, 34, 30, 31, 34, 9, 23, 20, 20, 17, 30, 25, 18, 30, 21, 32], dtype=np.int8), 0)] np.testing.assert_equal(actual, expected) def test_process_fastq_single_end_read_file_toggle_rev_comp(self): """process_fastq_single_end_read_file handles rev_comp """ fastq_f = [ "@990:2:4:11272:5533#1/1", "GCACACACCGCCCGTCACACCACGAGAGTCGGCAACACCCGAAGTCGGTGAGGTAACCCCGAAAGGGGAGCCAGCC", "+", "bbbbbbbbbbbbbbbbbbbbbbbbbY``\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`"] barcode_fastq_f = [ "@990:2:4:11272:5533#1/2", "AAAAAAAAAAAA", "+", "bbbbbbbbbbbb"] barcode_to_sample_id = {'AAAAAAAAAAAA': 's1'} # rev_comp = False actual = process_fastq_single_end_read_file(fastq_f, barcode_fastq_f, barcode_to_sample_id, store_unassigned=False, max_bad_run_length=0, phred_quality_threshold=2, min_per_read_length_fraction=0.75, rev_comp=False, rev_comp_barcode=False, seq_max_N=0, start_seq_id=0) actual = list(actual) expected = [( 's1_0 990:2:4:11272:5533#1/1 
orig_bc=AAAAAAAAAAAA new_bc=AAAAAAAAAAAA bc_diffs=0', "GCACACACCGCCCGTCACACCACGAGAGTCGGCAACACCCGAAGTCGGTGAGGTAACCCCGAAAGGGGAGCCAGCC", np.array([34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 25, 32, 32, 28, 32, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 32, 34, 34, 34, 34, 33, 34, 32, 33, 32, 31, 27, 34, 33, 31, 33, 33, 29, 34, 30, 31, 34, 9, 23, 20, 20, 17, 30, 25, 18, 30, 21, 32], dtype=np.int8), 0)] np.testing.assert_equal(actual, expected) # rev_comp = True actual = process_fastq_single_end_read_file(fastq_f, barcode_fastq_f, barcode_to_sample_id, store_unassigned=False, max_bad_run_length=0, phred_quality_threshold=2, min_per_read_length_fraction=0.75, rev_comp=True, rev_comp_barcode=False, seq_max_N=0, start_seq_id=0) actual = list(actual) expected = [( 's1_0 990:2:4:11272:5533#1/1 orig_bc=AAAAAAAAAAAA new_bc=AAAAAAAAAAAA bc_diffs=0', "GGCTGGCTCCCCTTTCGGGGTTACCTCACCGACTTCGGGTGTTGCCGACTCTCGTGGTGTGACGGGCGGTGTGTGC", ascii_to_phred64("`U^RY^QTTWIb_^b]aa_ab[_`a`babbbb`bbbbbbbbbbbbb`\``Ybbbbbbbbbbbbbbbbbbbbbbbbb"), 0)] np.testing.assert_equal(actual, expected) def test_process_fastq_single_end_read_file_error_on_header_mismatch(self): """ValueError on barcode/read header mismatch """ fastq_f = [ "@990:2:4:11272:5533#1/1", "GCACACACCGCCCGTCACACCACGAGAGTCGGCAACACCCGAAGTCGGTGAGGTAACCCCGAAAGGGGAGCCAGCC", "+", "bbbbbbbbbbbbbbbbbbbbbbbbbY``\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`"] barcode_fastq_f = [ "@990:2:4:11272:5532#1/2", "TTTTTTTTTTTT", "+", "bbbbbbbbbbbb"] barcode_to_sample_id = {'AAAAAAAAAAAA': 's1'} actual = process_fastq_single_end_read_file( fastq_f, barcode_fastq_f, barcode_to_sample_id, store_unassigned=False, max_bad_run_length=0, phred_quality_threshold=2, min_per_read_length_fraction=0.75, rev_comp=False, rev_comp_barcode=False, seq_max_N=0, start_seq_id=0) self.assertRaises(FastqParseError, list, actual) def test_process_fastq_single_end_read_file_toggle_rev_comp_barcode(self): 
"""process_fastq_single_end_read_file handles rev_comp_barcode """ fastq_f = [ "@990:2:4:11272:5533#1/1", "GCACACACCGCCCGTCACACCACGAGAGTCGGCAACACCCGAAGTCGGTGAGGTAACCCCGAAAGGGGAGCCAGCC", "+", "bbbbbbbbbbbbbbbbbbbbbbbbbY``\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`"] barcode_fastq_f = [ "@990:2:4:11272:5533#1/2", "TTTTTTTTTTTT", "+", "bbbbbbbbbbbb"] barcode_to_sample_id = {'AAAAAAAAAAAA': 's1'} # empty results when rev_comp_barcode=False actual = process_fastq_single_end_read_file(fastq_f, barcode_fastq_f, barcode_to_sample_id, store_unassigned=False, max_bad_run_length=0, phred_quality_threshold=2, min_per_read_length_fraction=0.75, rev_comp=False, rev_comp_barcode=False, seq_max_N=0, start_seq_id=0) actual = list(actual) expected = [] self.assertEqual(actual, expected) # non-empty results when rev_comp_barcode=True actual = process_fastq_single_end_read_file(fastq_f, barcode_fastq_f, barcode_to_sample_id, store_unassigned=False, max_bad_run_length=0, phred_quality_threshold=2, min_per_read_length_fraction=0.75, rev_comp=False, rev_comp_barcode=True, seq_max_N=0, start_seq_id=0) actual = list(actual) expected = [( 's1_0 990:2:4:11272:5533#1/1 orig_bc=AAAAAAAAAAAA new_bc=AAAAAAAAAAAA bc_diffs=0', "GCACACACCGCCCGTCACACCACGAGAGTCGGCAACACCCGAAGTCGGTGAGGTAACCCCGAAAGGGGAGCCAGCC", np.array([34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 25, 32, 32, 28, 32, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 32, 34, 34, 34, 34, 33, 34, 32, 33, 32, 31, 27, 34, 33, 31, 33, 33, 29, 34, 30, 31, 34, 9, 23, 20, 20, 17, 30, 25, 18, 30, 21, 32], dtype=np.int8), 0)] np.testing.assert_equal(actual, expected) # forward orientation no longer matches when rev_comp_barcode=True barcode_to_sample_id = {'TTTTTTTTTTTT': 's1'} actual = process_fastq_single_end_read_file(fastq_f, barcode_fastq_f, barcode_to_sample_id, store_unassigned=False, max_bad_run_length=0, phred_quality_threshold=2, min_per_read_length_fraction=0.75, 
rev_comp=False, rev_comp_barcode=True, seq_max_N=0, start_seq_id=0) actual = list(actual) expected = [] self.assertEqual(actual, expected) def test_process_fastq_single_end_read_file_w_golay_correction(self): """process_fastq_single_end_read_file handles golay correction """ fastq_f = [ "@990:2:4:11272:5533#1/1", "GCACACACCGCCCGTCACACCACGAGAGTCGGCAACACCCGAAGTCGGTGAGGTAACCCCGAAAGGGGAGCCAGCC", "+", "bbbbbbbbbbbbbbbbbbbbbbbbbY``\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`"] barcode_fastq_f = [ "@990:2:4:11272:5533#1/2", "ACAGACCACTCT", "+", "bbbbbbbbbbbb"] barcode_to_sample_id = {'ACAGACCACTCA': 's1'} # empty result with single barcode error and golay correction actual = process_fastq_single_end_read_file(fastq_f, barcode_fastq_f, barcode_to_sample_id, store_unassigned=False, max_bad_run_length=0, phred_quality_threshold=2, min_per_read_length_fraction=0.75, rev_comp=False, rev_comp_barcode=False, seq_max_N=0, start_seq_id=0, barcode_correction_fn=decode_golay_12, max_barcode_errors=1.5) actual = list(actual) expected = [( 's1_0 990:2:4:11272:5533#1/1 orig_bc=ACAGACCACTCT new_bc=ACAGACCACTCA bc_diffs=1', "GCACACACCGCCCGTCACACCACGAGAGTCGGCAACACCCGAAGTCGGTGAGGTAACCCCGAAAGGGGAGCCAGCC", np.array([34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 25, 32, 32, 28, 32, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 32, 34, 34, 34, 34, 33, 34, 32, 33, 32, 31, 27, 34, 33, 31, 33, 33, 29, 34, 30, 31, 34, 9, 23, 20, 20, 17, 30, 25, 18, 30, 21, 32], dtype=np.int8), 0)] np.testing.assert_equal(actual, expected) # empty result with adjusted max_barcode_errors actual = process_fastq_single_end_read_file(fastq_f, barcode_fastq_f, barcode_to_sample_id, store_unassigned=False, max_bad_run_length=0, phred_quality_threshold=2, min_per_read_length_fraction=0.75, rev_comp=False, rev_comp_barcode=False, seq_max_N=0, start_seq_id=0, barcode_correction_fn=decode_golay_12, max_barcode_errors=0.9) actual = list(actual) expected = [] 
self.assertEqual(actual, expected) def test_bad_chars_from_threshold(self): """bad_chars_from_threshold selects correct chars as bad """ exp1 = [ '\t', '\n', '\r', ' ', '!', '"', '#', '$', '%', '&', "'", '(', ')', '*', '+', ',', '-', '.', '/', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '<', '=', '>', '?', '@', 'A', 'B'] exp2 = ['\t', '\n', '\r', ' ', '!', '"', '#', '$', '%', '&', "'", '(', ')', '*', '+', ',', '-', '.', '/', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '<', '=', '>', '?', '@', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '[', '\\', ']', '^', '_', '`', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '{', '|', '}', '~'] exp3 = [ '\t', '\n', '\r', ' ', '!', '"', '#', '$', '%', '&', "'", '(', ')', '*', '+', ',', '-', '.', '/', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '<', '=', '>', '?', '@'] self.assertEqual(bad_chars_from_threshold('B'), {}.fromkeys(exp1)) self.assertEqual(bad_chars_from_threshold(''), {}) self.assertEqual(bad_chars_from_threshold('~'), {}.fromkeys(exp2)) self.assertEqual(bad_chars_from_threshold('@'), {}.fromkeys(exp3)) def test_quality_filter_sequence_pass(self): """quality_filter_sequence functions as expected for good read """ header = "990:2:4:11271:5323#1/1" sequence = \ "GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC" quality = \ "bbbbbbbbbbbbbbbbbbbbbbbbbY``\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`" actual = quality_filter_sequence(header, sequence, ascii_to_phred64(quality), max_bad_run_length=0, phred_quality_threshold=2, min_per_read_length=75, seq_max_N=0, filter_bad_illumina_qual_digit=True) np.testing.assert_equal(actual, (0, "GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC", 
ascii_to_phred64("bbbbbbbbbbbbbbbbbbbbbbbbbY``\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`"))) def test_quality_filter_illumina_qual(self): """quality_filter_sequence functions as expected with bad illumina qual digit """ # header with no qual data passes header = "990:2:4:11271:5323/1" sequence = \ "GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC" quality = \ "bbbbbbbbbbbbbbbbbbbbbbbbbY``\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`" actual = quality_filter_sequence(header, sequence, ascii_to_phred64(quality), max_bad_run_length=0, phred_quality_threshold=2, min_per_read_length=0.75, seq_max_N=0, filter_bad_illumina_qual_digit=True) np.testing.assert_equal(actual, (0, "GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC", ascii_to_phred64("bbbbbbbbbbbbbbbbbbbbbbbbbY``\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`"))) # header with no qual data passes header = "990:2:4:11271:5323/0" sequence = \ "GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC" quality = \ "bbbbbbbbbbbbbbbbbbbbbbbbbY``\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`" actual = quality_filter_sequence(header, sequence, ascii_to_phred64(quality), max_bad_run_length=0, phred_quality_threshold=2, min_per_read_length=75, seq_max_N=0, filter_bad_illumina_qual_digit=True) np.testing.assert_equal(actual, (0, "GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC", ascii_to_phred64("bbbbbbbbbbbbbbbbbbbbbbbbbY``\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`"))) # header with no qual data passes (old barcode in header format) header = "HWI-6X_9267:1:1:4:1699#ACCACCC/1" sequence = \ "GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC" quality = \ "bbbbbbbbbbbbbbbbbbbbbbbbbY``\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`" actual = quality_filter_sequence(header, sequence, ascii_to_phred64(quality), max_bad_run_length=0, phred_quality_threshold=2, 
min_per_read_length=75, seq_max_N=0, filter_bad_illumina_qual_digit=True) np.testing.assert_equal(actual, (0, "GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC", ascii_to_phred64("bbbbbbbbbbbbbbbbbbbbbbbbbY``\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`"))) # bad qual fails filter header = "@HWI-ST753_50:6:1101:1138:1965#0/1" sequence = \ "GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC" quality = \ "bbbbbbbbbbbbbbbbbbbbbbbbbY``\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`" actual = quality_filter_sequence(header, sequence, ascii_to_phred64(quality), max_bad_run_length=0, phred_quality_threshold=2, min_per_read_length=75, seq_max_N=0, filter_bad_illumina_qual_digit=True) np.testing.assert_equal(actual, (3, "GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC", ascii_to_phred64("bbbbbbbbbbbbbbbbbbbbbbbbbY``\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`"))) # bad qual passes filter if filter turned off header = "@HWI-ST753_50:6:1101:1138:1965#0/1" sequence = \ "GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC" quality = \ "bbbbbbbbbbbbbbbbbbbbbbbbbY``\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`" actual = quality_filter_sequence(header, sequence, ascii_to_phred64(quality), max_bad_run_length=0, phred_quality_threshold=2, min_per_read_length=75, seq_max_N=0, filter_bad_illumina_qual_digit=False) np.testing.assert_equal(actual, (0, "GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC", ascii_to_phred64("bbbbbbbbbbbbbbbbbbbbbbbbbY``\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`"))) # good qual passes filter header = "@HWI-ST753_50:6:1101:1138:1965#1/1" sequence = \ "GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC" quality = \ "bbbbbbbbbbbbbbbbbbbbbbbbbY``\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`" actual = quality_filter_sequence(header, sequence, ascii_to_phred64(quality), 
                                         max_bad_run_length=0,
                                         phred_quality_threshold=2,
                                         min_per_read_length=75,
                                         seq_max_N=0,
                                         filter_bad_illumina_qual_digit=True)
        np.testing.assert_equal(
            actual,
            (0,
             "GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC",
             ascii_to_phred64("bbbbbbbbbbbbbbbbbbbbbbbbbY``\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`")))

    def test_quality_filter_sequence_fail_w_B(self):
        """quality_filter_sequence handles bad qual score as expected

        The quality string here carries a 'B' (low quality) at position 18.
        Based on the assertions below, the read is truncated where the
        quality drops and result code 1 means the surviving read is shorter
        than min_per_read_length; raising max_bad_run_length, lowering
        phred_quality_threshold, or lowering min_per_read_length each
        rescues the read (result code 0).
        """
        # early 'B' in sequence causes truncation and too short of a read
        header = "990:2:4:11271:5323#1/1"
        sequence = \
            "GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC"
        quality = \
            "bbbbbbbbbbbbbbbbbbBbbbbbbY``\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`"
        actual = quality_filter_sequence(header,
                                         sequence,
                                         ascii_to_phred64(quality),
                                         max_bad_run_length=0,
                                         phred_quality_threshold=2,
                                         min_per_read_length=75,
                                         seq_max_N=0,
                                         filter_bad_illumina_qual_digit=True)
        # read truncated to the 18 bases preceding the 'B'
        np.testing.assert_equal(
            actual,
            (1,
             "GCACTCACCGCCCGTCAC",
             ascii_to_phred64("bbbbbbbbbbbbbbbbbb")))

        # increasing max_bad_run_length rescues read
        header = "990:2:4:11271:5323#1/1"
        sequence = \
            "GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC"
        quality = \
            "bbbbbbbbbbbbbbbbbbBbbbbbbY``\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`"
        actual = quality_filter_sequence(header,
                                         sequence,
                                         ascii_to_phred64(quality),
                                         max_bad_run_length=1,
                                         phred_quality_threshold=2,
                                         min_per_read_length=75,
                                         seq_max_N=0,
                                         filter_bad_illumina_qual_digit=True)
        np.testing.assert_equal(
            actual,
            (0,
             "GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC",
             ascii_to_phred64("bbbbbbbbbbbbbbbbbbBbbbbbbY``\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`")))

        # changing threshold rescues read
        header = "990:2:4:11271:5323#1/1"
        sequence = \
            "GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC"
        quality = \
            "bbbbbbbbbbbbbbbbbbBbbbbbbY``\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`"
        actual = quality_filter_sequence(header,
                                         sequence,
                                         ascii_to_phred64(quality),
                                         max_bad_run_length=0,
                                         phred_quality_threshold=1,
                                         min_per_read_length=75,
                                         seq_max_N=0,
                                         filter_bad_illumina_qual_digit=True)
        np.testing.assert_equal(
            actual,
            (0,
             "GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC",
             ascii_to_phred64("bbbbbbbbbbbbbbbbbbBbbbbbbY``\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`")))

        # changing min_per_read_length_fraction rescues read
        # (truncated 18-mer now passes because min_per_read_length=5)
        header = "990:2:4:11271:5323#1/1"
        sequence = \
            "GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC"
        quality = \
            "bbbbbbbbbbbbbbbbbbBbbbbbbY``\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`"
        actual = quality_filter_sequence(header,
                                         sequence,
                                         ascii_to_phred64(quality),
                                         max_bad_run_length=0,
                                         phred_quality_threshold=2,
                                         min_per_read_length=5,
                                         seq_max_N=0,
                                         filter_bad_illumina_qual_digit=True)
        np.testing.assert_equal(
            actual,
            (0,
             "GCACTCACCGCCCGTCAC",
             ascii_to_phred64("bbbbbbbbbbbbbbbbbb")))

    def test_quality_filter_sequence_fail_w_N(self):
        """quality_filter_sequence handles N as expected

        Based on the assertions below: result code 2 means the read contains
        more than seq_max_N ambiguous 'N' bases.  Raising seq_max_N, or a
        quality-based truncation that drops the N, rescues the read.
        """
        # 'N' in sequence causes failure
        header = "990:2:4:11271:5323#1/1"
        sequence = \
            "GCACTCACCGCCCGTCACACCACGAAAGTNGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC"
        quality = \
            "bbbbbbbbbbbbbbbbbbbbbbbbbY``\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`"
        actual = quality_filter_sequence(header,
                                         sequence,
                                         ascii_to_phred64(quality),
                                         max_bad_run_length=0,
                                         phred_quality_threshold=2,
                                         min_per_read_length=75,
                                         seq_max_N=0,
                                         filter_bad_illumina_qual_digit=True)
        expected = (2,
                    "GCACTCACCGCCCGTCACACCACGAAAGTNGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC",
                    ascii_to_phred64("bbbbbbbbbbbbbbbbbbbbbbbbbY``\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`"))
        np.testing.assert_equal(actual, expected)

        # increasing max N rescues sequence
        header = "990:2:4:11271:5323#1/1"
        sequence = \
            "GCACTCACCGCCCGTCACACCACGAAAGTNGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC"
        quality = \
"bbbbbbbbbbbbbbbbbbbbbbbbbY``\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`" actual = quality_filter_sequence(header, sequence, ascii_to_phred64(quality), max_bad_run_length=0, phred_quality_threshold=2, min_per_read_length=75, seq_max_N=1, filter_bad_illumina_qual_digit=True) expected = (0, "GCACTCACCGCCCGTCACACCACGAAAGTNGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC", ascii_to_phred64("bbbbbbbbbbbbbbbbbbbbbbbbbY``\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`")) np.testing.assert_equal(actual, expected) # truncation of N rescues sequence (sequence is truncated when # the quality hits B, and the truncated sequence is above the # length threshold and no longer contains an N) header = "990:2:4:11271:5323#1/1" sequence = \ "GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTN" quality = \ "bbbbbbbbbbbbbbbbbbbbbbbbbY``\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^B`" actual = quality_filter_sequence(header, sequence, ascii_to_phred64(quality), max_bad_run_length=0, phred_quality_threshold=2, min_per_read_length=50, seq_max_N=0, filter_bad_illumina_qual_digit=True) expected = (0, "GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTG", ascii_to_phred64("bbbbbbbbbbbbbbbbbbbbbbbbbY``\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^")) np.testing.assert_equal(actual, expected) def test_create_forward_and_reverse_fp(self): """ perform different tests for extract_reads_from_interleaved """ # regular processing extract_reads_from_interleaved( self.create_forward_and_reverse_fp, '1:N:0', '2:N:0', self.temp_dir_path) forward = open(self.temp_dir_path + '/forward_reads.fastq', 'U').read() reverse = open(self.temp_dir_path + '/reverse_reads.fastq', 'U').read() self.assertEqual(forward, forward_reads) self.assertEqual(reverse, reverse_reads) # should raise an error due to no matching id with self.assertRaises(ValueError): extract_reads_from_interleaved( self.create_forward_and_reverse_fp, '1N', '2N', self.temp_dir_path) barcode_map1 = 
{'AAAAAAAAAAAA': 's1', 'AAAAAAAAAAAC': 's2', 'AAAAAAAAAAAG': 's3', 'AAAAAAAAAAAT': 's4', } fastq1 = """@990:2:4:11271:5323#1/1 GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC + bbbbbbbbbbbbbbbbbbbbbbbbbY``\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U` @990:2:4:11271:5323#1/1 GGTTACCTTGTTACGACTTCACCCCAATCATCGGCCCCACCTTAGACAGCTGACTCCTAAAAGGTTATCTCACCGG + bbcbbbbbbbbbbbbbbbbbbbbbbbbbb_bbbbbbbbaba_b^bY_`aa^bPb`bbbbHYGYZTbb^_ab[^baT @990:2:4:11272:9538#1/1 GCACACACCGCCCGTCACACCATCCGAGTTGGAGGTACCCGAAGCCGGTAGTCTAACCGCAAGGAGGACGCTGTCG + b_bbbbbbbbbbbbbbbbbbbbbbbbbbabaa^a`[bbbb`bbbbTbbabb]b][_a`a]acaaacbaca_a^`aa @990:2:4:11272:9538#1/1 GGCTACCTTGTTACGACTTCACCCTCCTCACTAAACGTACCTTCGACAGCGTCCTCCTTGCGGTTAGACTACCGGC + bb^bbbbbbbbbbbbbbbbbbbbbbbabbbb``bbb`__bbbbbbIWRXX`R``\`\Y\^__ba^a[Saaa_]O]O @990:2:4:11272:7447#1/1 GCACACACCGCCCGTCACACCATCCGAGTTGGGGGTACCCGAAGCCGGCAGTCTAACCGCAAGGAGGACGCTGTCG + b`bbbbbbbbbbbbbbb`^bbbbbYbbbbb\___`_bbab^aaaU^\`BBBBBBBBBBBBBBBBBBBBBBBBBBBB @990:2:4:11272:7447#1/1 GGATACCTTGTTACGACTTCACCCTCCTCACTCATCGTACCCTCGACAGCGTCCTCCTTGCTGTTAGACTTCCGGC + b`bbbbbbbbbbbbbbb`^bbbbbYbbbbb\___`_bbab^aaaU^\`BBBBBBBBBBBBBBBBBBBBBBBBBBBB @990:2:4:11272:19991#1/1 GCACTCACCGCCCGTCACGCCACGGAAGCCGGCTGCACCTGAAGCCGGTGGGGCAACCGGCTGTCCCTTTTAGCGG + bbbbbbbbbbbbbbbbbbbbbXbbb_bbbabbb`aZ[U]\OTYXV`TbBBBBBBBBBBBBBBBBBBBBBBBBBBBB @990:2:4:11272:19991#1/1 GGCTACCTTGTTACGACTTCGCCCCAGTCACCGACCACACCCTCGACGGCTGCCTCCGGCTGGCCCTTTCCACCCA + bbbbbbbbbbbbbbbbbbbba`bbbbbbbbbb`abb_aacbbbbb]___]\[\^^[aOcBBBBBBBBBBBBBBBBB @990:2:4:11272:4315#1/1 GTACTCACCGCCCGTCACGCCATGGGAGTTGGGCTTACCTGAAGCCCGCGAGCTAACCGGAAAGGGGGGGATGTGG + bbbb_bbbbbbbbbb```Q```BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB @990:2:4:11272:4315#1/1 GGCTACCTTGTTACGACTTCACCCCCGTCGCTCGGCGTACCTTCGACCGCTGCCTCCTTTTGGTTATATCTCCGGG + ``Q``````_``````````K]]aBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB @990:2:4:11272:5533#1/1 
GCACACACCGCCCGTCACACCACGAGAGTCGGCAACACCCGAAGTCGGTGAGGTAACCCCGAAAGGGGAGCCAGCC + ``Q``````_``````````K]]aBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB @990:2:4:11272:5533#0/1 GGATACCTTGTTACGACTTCACCCCAATCATCGACCCCACCTTCGGCGGCTGGCTCCCCTTTCGGGGGTACCTCAC + bbbbbbbbbbbbbbbbbbbbbXbbb_bbbabbb`aZ[U]\OTYXV`TbBBBBBBBBBBBBBBBBBBBBBBBBBBBB """ barcode_fastq1 = """@990:2:4:11271:5323#1/2 AAAAAAAAAAAA + bbbbbbbbbbbb @990:2:4:11271:5323#1/2 AAAAAAAAAAAC + bbcbbbbbbbbb @990:2:4:11272:9538#1/2 AAAAAAAAAAAA + b_bbbbbbbbbb @990:2:4:11272:9538#1/2 AAAAAAAAAAAT + bb^bbbbbbbbb @990:2:4:11272:7447#1/2 AAAAAAAAAAAA + b`bbbbbbbbbb @990:2:4:11272:7447#1/2 AAAAAAAAAAAA + b`bbbbbbbbbb @990:2:4:11272:19991#1/2 AAAAAAAAAAAC + bbbbbbbbbbbb @990:2:4:11272:19991#1/2 AAAAAAAAAAAC + bbbbbbbbbbbb @990:2:4:11272:4315#1/2 AAAAAAAAAAAT + bbbb_bbbbbbb @990:2:4:11272:4315#1/2 AAAAAAAAAAAT + ``Q``````_`` @990:2:4:11272:5533#1/2 GAAAAAAAAAAT + ``Q``````_`` @990:2:4:11272:5533#0/2 AAAAAAAAAAAT + bbbbbbbbbbbb """ fastq2 = """@M00176:17:000000000-A0CNA:1:1:15487:1773 1:N:0:0 GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC + CCCCCCCCCCAAAAAAAAAAAAAAA:AA=ACCCCCCCCCCCCCACCCCBCABA@<CB@BB>C?@C*8552?:3?6A @M00176:17:000000000-A0CNA:1:1:17088:1773 1:N:0:0 GGTTACCTTGTTACGACTTCACCCCAATCATCGGCCCCACCTTAGACAGCTGACTCCTAAAAGGTTATCTCACCGG + CCDCCCCCCCCCCCCCCCCCCCCCCCCCC@CCCCCCCCBCB@C?C:@ABB?C1CACCCC):(:;5CC?@BC<?CB5 @M00176:17:000000000-A0CNA:1:1:16738:1773 1:N:0:0 GCACACACCGCCCGTCACACCATCCGAGTTGGAGGTACCCGAAGCCGGTAGTCTAACCGCAAGGAGGACGCTGTCG + C@CCCCCCCCCCCCCCCCCCCCCCCCCCBCBB?BA<CCCCACCCC5CCBCC>C><@BAB>BDBBBDCBDB@B?ABB @M00176:17:000000000-A0CNA:1:1:12561:1773 1:N:0:0 GGCTACCTTGTTACGACTTCACCCTCCTCACTAAACGTACCTTCGACAGCGTCCTCCTTGCGGTTAGACTACCGGC + CC?CCCAAAACCCCCCCCCCCCCCCCBCCCCAACCCA@@CCCCCC*8399A3AA=A=:=?@@CB?B<4BBB@>0>0 @M00176:17:000000000-A0CNA:1:1:14596:1773 1:N:0:0 GCACACACCGCCCGTCACACCATCCGAGTTGGGGGTACCCGAAGCCGGCAGTCTAACCGCAAGGAGGACGCTGTCG + 
CACCCCCCCCCCCCCCCA?CCCCC:CCCCC=@@@A@CCBC?BBB6?=A############################ @M00176:17:000000000-A0CNA:1:1:12515:1774 1:N:0:0 GGATACCTTGTTACGACTTCACCCTCCTCACTCATCGTACCCTCGACAGCGTCCTCCTTGCTGTTAGACTTCCGGC + CACCCCCCCCCCCCCCCA?CCCCC:CCCCC=@@@A@CCBC?BBB6?=A############################ @M00176:17:000000000-A0CNA:1:1:17491:1774 1:N:0:0 GCACTCACCGCCCGTCACGCCACGGAAGCCGGCTGCACCTGAAGCCGGTGGGGCAACCGGCTGTCCCTTTTAGCGG + CCCCCCCCCCCCCCCCCCCCC9CCC@CCCBCCCAB;<6>=05:97A5C############################ @M00176:17:000000000-A0CNA:1:1:16427:1774 1:N:0:0 GGCTACCTTGTTACGACTTCGCCCCAGTCACCGACCACACCCTCGACGGCTGCCTCCGGCTGGCCCTTTCCACCCA + CCCCCCCCCCCCCCCCCCCCBACCCCCCCCCCABCC@BBDCCCCC>@@@>=<=??<B0D################# @M00176:17:000000000-A0CNA:1:1:13372:1775 1:N:0:0 GTACTCACCGCCCGTCACGCCATGGGAGTTGGGCTTACCTGAAGCCCGCGAGCTAACCGGAAAGGGGGGGATGTGG + CCCC@CCCCCCCCCCAAA2AAA###################################################### @M00176:17:000000000-A0CNA:1:1:14806:1775 1:N:0:0 GGCTACCTTGTTACGACTTCACCCCCGTCGCTCGGCGTACCTTCGACCGCTGCCTCCTTTTGGTTATATCTCCGGG + AA2AAAAAA@AA####AAAA,>>B#################################################### @M00176:17:000000000-A0CNA:1:1:13533:1775 1:N:0:0 GCACACACCGCCCGTCACACCACGAGAGTCGGCAACACCCGAAGTCGGTGAGGTAACCCCGAAAGGGGAGCCAGCC + AA2AAAAAA@AAAAAAAAAA,>>B#################################################### @M00176:17:000000000-A0CNA:1:1:18209:1775 1:N:0:0 GGATACCTTGTTACGACTTCACCCCAATCATCGACCCCACCTTCGGCGGCTGGCTCCCCTTTCGGGGGTACCTCAC + CCCCCCCCCCCCCCCCCCCCC9CCC@CCCBCCCAB;<6>=05:97A5C############################ """ barcode_fastq2 = """@M00176:17:000000000-A0CNA:1:1:15487:1773 2:N:0:0 AAAAAAAAAAAA + AAAAAAAAAAAA @M00176:17:000000000-A0CNA:1:1:17088:1773 2:N:0:0 AAAAAAAAAAAC + AABAAAAAAAAA @M00176:17:000000000-A0CNA:1:1:16738:1773 2:N:0:0 AAAAAAAAAAAA + A>AAAAAAAAAA @M00176:17:000000000-A0CNA:1:1:12561:1773 2:N:0:0 AAAAAAAAAAAT + AA:AAAAAAAAA @M00176:17:000000000-A0CNA:1:1:14596:1773 2:N:0:0 AAAAAAAAAAAA + A?AAAAAAAAAA @M00176:17:000000000-A0CNA:1:1:12515:1774 2:N:0:0 AAAAAAAAAAAA + 
A?AAAAAAAAAA @M00176:17:000000000-A0CNA:1:1:17491:1774 2:N:0:0 AAAAAAAAAAAC + AAAAAAAAAAAA @M00176:17:000000000-A0CNA:1:1:16427:1774 2:N:0:0 AAAAAAAAAAAC + AAAAAAAAAAAA @M00176:17:000000000-A0CNA:1:1:13372:1775 2:N:0:0 AAAAAAAAAAAT + AAAA>AAAAAAA @M00176:17:000000000-A0CNA:1:1:14806:1775 2:N:0:0 AAAAAAAAAAAT + >>1>>>>>>;>> @M00176:17:000000000-A0CNA:1:1:13533:1775 2:N:0:0 GAAAAAAAAAAT + >>1>>>>>>;>> @M00176:17:000000000-A0CNA:1:1:18209:1775 2:N:0:0 AAAAAAAAAAAT + AAAAAAAAAAAA """ fastq1_expected_no_qual_unassigned = [ ("s1_0 990:2:4:11271:5323#1/1 orig_bc=AAAAAAAAAAAA new_bc=AAAAAAAAAAAA bc_diffs=0", "GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC", ascii_to_phred64("bbbbbbbbbbbbbbbbbbbbbbbbbY``\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`"), 0), ("s2_1 990:2:4:11271:5323#1/1 orig_bc=AAAAAAAAAAAC new_bc=AAAAAAAAAAAC bc_diffs=0", "GGTTACCTTGTTACGACTTCACCCCAATCATCGGCCCCACCTTAGACAGCTGACTCCTAAAAGGTTATCTCACCGG", ascii_to_phred64("bbcbbbbbbbbbbbbbbbbbbbbbbbbbb_bbbbbbbbaba_b^bY_`aa^bPb`bbbbHYGYZTbb^_ab[^baT"), 1), ("s1_2 990:2:4:11272:9538#1/1 orig_bc=AAAAAAAAAAAA new_bc=AAAAAAAAAAAA bc_diffs=0", "GCACACACCGCCCGTCACACCATCCGAGTTGGAGGTACCCGAAGCCGGTAGTCTAACCGCAAGGAGGACGCTGTCG", ascii_to_phred64("b_bbbbbbbbbbbbbbbbbbbbbbbbbbabaa^a`[bbbb`bbbbTbbabb]b][_a`a]acaaacbaca_a^`aa"), 2), ("s4_3 990:2:4:11272:9538#1/1 orig_bc=AAAAAAAAAAAT new_bc=AAAAAAAAAAAT bc_diffs=0", "GGCTACCTTGTTACGACTTCACCCTCCTCACTAAACGTACCTTCGACAGCGTCCTCCTTGCGGTTAGACTACCGGC", ascii_to_phred64("bb^bbbbbbbbbbbbbbbbbbbbbbbabbbb``bbb`__bbbbbbIWRXX`R``\`\Y\^__ba^a[Saaa_]O]O"), 3), ("s1_4 990:2:4:11272:7447#1/1 orig_bc=AAAAAAAAAAAA new_bc=AAAAAAAAAAAA bc_diffs=0", "GCACACACCGCCCGTCACACCATCCGAGTTGGGGGTACCCGAAGCCGGCAGTCTAACCGCAAGGAGGACGCTGTCG", ascii_to_phred64("b`bbbbbbbbbbbbbbb`^bbbbbYbbbbb\___`_bbab^aaaU^\`BBBBBBBBBBBBBBBBBBBBBBBBBBBB"), 4), ("s1_5 990:2:4:11272:7447#1/1 orig_bc=AAAAAAAAAAAA new_bc=AAAAAAAAAAAA bc_diffs=0", 
"GGATACCTTGTTACGACTTCACCCTCCTCACTCATCGTACCCTCGACAGCGTCCTCCTTGCTGTTAGACTTCCGGC", ascii_to_phred64("b`bbbbbbbbbbbbbbb`^bbbbbYbbbbb\___`_bbab^aaaU^\`BBBBBBBBBBBBBBBBBBBBBBBBBBBB"), 5), ("s2_6 990:2:4:11272:19991#1/1 orig_bc=AAAAAAAAAAAC new_bc=AAAAAAAAAAAC bc_diffs=0", "GCACTCACCGCCCGTCACGCCACGGAAGCCGGCTGCACCTGAAGCCGGTGGGGCAACCGGCTGTCCCTTTTAGCGG", ascii_to_phred64("bbbbbbbbbbbbbbbbbbbbbXbbb_bbbabbb`aZ[U]\OTYXV`TbBBBBBBBBBBBBBBBBBBBBBBBBBBBB"), 6), ("s2_7 990:2:4:11272:19991#1/1 orig_bc=AAAAAAAAAAAC new_bc=AAAAAAAAAAAC bc_diffs=0", "GGCTACCTTGTTACGACTTCGCCCCAGTCACCGACCACACCCTCGACGGCTGCCTCCGGCTGGCCCTTTCCACCCA", ascii_to_phred64("bbbbbbbbbbbbbbbbbbbba`bbbbbbbbbb`abb_aacbbbbb]___]\[\^^[aOcBBBBBBBBBBBBBBBBB"), 7), ("s4_8 990:2:4:11272:4315#1/1 orig_bc=AAAAAAAAAAAT new_bc=AAAAAAAAAAAT bc_diffs=0", "GTACTCACCGCCCGTCACGCCATGGGAGTTGGGCTTACCTGAAGCCCGCGAGCTAACCGGAAAGGGGGGGATGTGG", ascii_to_phred64("bbbb_bbbbbbbbbb```Q```BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB"), 8), ("s4_9 990:2:4:11272:4315#1/1 orig_bc=AAAAAAAAAAAT new_bc=AAAAAAAAAAAT bc_diffs=0", "GGCTACCTTGTTACGACTTCACCCCCGTCGCTCGGCGTACCTTCGACCGCTGCCTCCTTTTGGTTATATCTCCGGG", ascii_to_phred64("``Q``````_``````````K]]aBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB"), 9), ("Unassigned_10 990:2:4:11272:5533#1/1 orig_bc=GAAAAAAAAAAT new_bc=GAAAAAAAAAAT bc_diffs=0", "GCACACACCGCCCGTCACACCACGAGAGTCGGCAACACCCGAAGTCGGTGAGGTAACCCCGAAAGGGGAGCCAGCC", ascii_to_phred64("``Q``````_``````````K]]aBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB"), 10), ("s4_11 990:2:4:11272:5533#0/1 orig_bc=AAAAAAAAAAAT new_bc=AAAAAAAAAAAT bc_diffs=0", "GGATACCTTGTTACGACTTCACCCCAATCATCGACCCCACCTTCGGCGGCTGGCTCCCCTTTCGGGGGTACCTCAC", ascii_to_phred64("bbbbbbbbbbbbbbbbbbbbbXbbb_bbbabbb`aZ[U]\OTYXV`TbBBBBBBBBBBBBBBBBBBBBBBBBBBBB"), 11)] fastq1_expected_default = [ ("s1_0 990:2:4:11271:5323#1/1 orig_bc=AAAAAAAAAAAA new_bc=AAAAAAAAAAAA bc_diffs=0", "GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC", 
ascii_to_phred64("bbbbbbbbbbbbbbbbbbbbbbbbbY``\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`"), 0), ("s2_1 990:2:4:11271:5323#1/1 orig_bc=AAAAAAAAAAAC new_bc=AAAAAAAAAAAC bc_diffs=0", "GGTTACCTTGTTACGACTTCACCCCAATCATCGGCCCCACCTTAGACAGCTGACTCCTAAAAGGTTATCTCACCGG", ascii_to_phred64("bbcbbbbbbbbbbbbbbbbbbbbbbbbbb_bbbbbbbbaba_b^bY_`aa^bPb`bbbbHYGYZTbb^_ab[^baT"), 1), ("s1_2 990:2:4:11272:9538#1/1 orig_bc=AAAAAAAAAAAA new_bc=AAAAAAAAAAAA bc_diffs=0", "GCACACACCGCCCGTCACACCATCCGAGTTGGAGGTACCCGAAGCCGGTAGTCTAACCGCAAGGAGGACGCTGTCG", ascii_to_phred64("b_bbbbbbbbbbbbbbbbbbbbbbbbbbabaa^a`[bbbb`bbbbTbbabb]b][_a`a]acaaacbaca_a^`aa"), 2), ("s4_3 990:2:4:11272:9538#1/1 orig_bc=AAAAAAAAAAAT new_bc=AAAAAAAAAAAT bc_diffs=0", "GGCTACCTTGTTACGACTTCACCCTCCTCACTAAACGTACCTTCGACAGCGTCCTCCTTGCGGTTAGACTACCGGC", ascii_to_phred64("bb^bbbbbbbbbbbbbbbbbbbbbbbabbbb``bbb`__bbbbbbIWRXX`R``\`\Y\^__ba^a[Saaa_]O]O"), 3), ("s1_4 990:2:4:11272:7447#1/1 orig_bc=AAAAAAAAAAAA new_bc=AAAAAAAAAAAA bc_diffs=0", "GCACACACCGCCCGTCACACCATCCGAGTTGGGGGTACCCGAAGCCGG", ascii_to_phred64("b`bbbbbbbbbbbbbbb`^bbbbbYbbbbb\___`_bbab^aaaU^\`"), 4), ("s1_5 990:2:4:11272:7447#1/1 orig_bc=AAAAAAAAAAAA new_bc=AAAAAAAAAAAA bc_diffs=0", "GGATACCTTGTTACGACTTCACCCTCCTCACTCATCGTACCCTCGACA", ascii_to_phred64("b`bbbbbbbbbbbbbbb`^bbbbbYbbbbb\___`_bbab^aaaU^\`"), 5), ("s2_6 990:2:4:11272:19991#1/1 orig_bc=AAAAAAAAAAAC new_bc=AAAAAAAAAAAC bc_diffs=0", "GCACTCACCGCCCGTCACGCCACGGAAGCCGGCTGCACCTGAAGCCGG", ascii_to_phred64("bbbbbbbbbbbbbbbbbbbbbXbbb_bbbabbb`aZ[U]\OTYXV`Tb"), 6), ("s2_7 990:2:4:11272:19991#1/1 orig_bc=AAAAAAAAAAAC new_bc=AAAAAAAAAAAC bc_diffs=0", "GGCTACCTTGTTACGACTTCGCCCCAGTCACCGACCACACCCTCGACGGCTGCCTCCGG", ascii_to_phred64("bbbbbbbbbbbbbbbbbbbba`bbbbbbbbbb`abb_aacbbbbb]___]\[\^^[aOc"), 7), ("s4_8 990:2:4:11272:5533#0/1 orig_bc=AAAAAAAAAAAT new_bc=AAAAAAAAAAAT bc_diffs=0", "GGATACCTTGTTACGACTTCACCCCAATCATCGACCCCACCTTCGGCG", ascii_to_phred64("bbbbbbbbbbbbbbbbbbbbbXbbb_bbbabbb`aZ[U]\OTYXV`Tb"), 8)] 
fastq1_expected_single_barcode = [ ("s1_0 990:2:4:11271:5323#1/1 orig_bc=AAAAAAAAAAAA new_bc=AAAAAAAAAAAA bc_diffs=0", "GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC", ascii_to_phred64("bbbbbbbbbbbbbbbbbbbbbbbbbY``\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`"), 0), ("s1_1 990:2:4:11271:5323#1/1 orig_bc=AAAAAAAAAAAA new_bc=AAAAAAAAAAAA bc_diffs=0", "GGTTACCTTGTTACGACTTCACCCCAATCATCGGCCCCACCTTAGACAGCTGACTCCTAAAAGGTTATCTCACCGG", ascii_to_phred64("bbcbbbbbbbbbbbbbbbbbbbbbbbbbb_bbbbbbbbaba_b^bY_`aa^bPb`bbbbHYGYZTbb^_ab[^baT"), 1), ("s1_2 990:2:4:11272:9538#1/1 orig_bc=AAAAAAAAAAAA new_bc=AAAAAAAAAAAA bc_diffs=0", "GCACACACCGCCCGTCACACCATCCGAGTTGGAGGTACCCGAAGCCGGTAGTCTAACCGCAAGGAGGACGCTGTCG", ascii_to_phred64("b_bbbbbbbbbbbbbbbbbbbbbbbbbbabaa^a`[bbbb`bbbbTbbabb]b][_a`a]acaaacbaca_a^`aa"), 2), ("s1_3 990:2:4:11272:9538#1/1 orig_bc=AAAAAAAAAAAA new_bc=AAAAAAAAAAAA bc_diffs=0", "GGCTACCTTGTTACGACTTCACCCTCCTCACTAAACGTACCTTCGACAGCGTCCTCCTTGCGGTTAGACTACCGGC", ascii_to_phred64("bb^bbbbbbbbbbbbbbbbbbbbbbbabbbb``bbb`__bbbbbbIWRXX`R``\`\Y\^__ba^a[Saaa_]O]O"), 3), ("s1_4 990:2:4:11272:7447#1/1 orig_bc=AAAAAAAAAAAA new_bc=AAAAAAAAAAAA bc_diffs=0", "GCACACACCGCCCGTCACACCATCCGAGTTGGGGGTACCCGAAGCCGG", ascii_to_phred64("b`bbbbbbbbbbbbbbb`^bbbbbYbbbbb\___`_bbab^aaaU^\`"), 4), ("s1_5 990:2:4:11272:7447#1/1 orig_bc=AAAAAAAAAAAA new_bc=AAAAAAAAAAAA bc_diffs=0", "GGATACCTTGTTACGACTTCACCCTCCTCACTCATCGTACCCTCGACA", ascii_to_phred64("b`bbbbbbbbbbbbbbb`^bbbbbYbbbbb\___`_bbab^aaaU^\`"), 5), ("s1_6 990:2:4:11272:19991#1/1 orig_bc=AAAAAAAAAAAA new_bc=AAAAAAAAAAAA bc_diffs=0", "GCACTCACCGCCCGTCACGCCACGGAAGCCGGCTGCACCTGAAGCCGG", ascii_to_phred64("bbbbbbbbbbbbbbbbbbbbbXbbb_bbbabbb`aZ[U]\OTYXV`Tb"), 6), ("s1_7 990:2:4:11272:19991#1/1 orig_bc=AAAAAAAAAAAA new_bc=AAAAAAAAAAAA bc_diffs=0", "GGCTACCTTGTTACGACTTCGCCCCAGTCACCGACCACACCCTCGACGGCTGCCTCCGG", ascii_to_phred64("bbbbbbbbbbbbbbbbbbbba`bbbbbbbbbb`abb_aacbbbbb]___]\[\^^[aOc"), 7), ("s1_8 990:2:4:11272:5533#0/1 
orig_bc=AAAAAAAAAAAA new_bc=AAAAAAAAAAAA bc_diffs=0", "GGATACCTTGTTACGACTTCACCCCAATCATCGACCCCACCTTCGGCG", ascii_to_phred64("bbbbbbbbbbbbbbbbbbbbbXbbb_bbbabbb`aZ[U]\OTYXV`Tb"), 8)] fastq2_expected_default = [ ("s1_0 M00176:17:000000000-A0CNA:1:1:15487:1773 1:N:0:0 orig_bc=AAAAAAAAAAAA new_bc=AAAAAAAAAAAA bc_diffs=0", "GCACTCACCGCCCGTCACACCACGAAAGTTGGTAACACCCGAAGCCGGTGAGATAACCTTTTAGGAGTCAGCTGTC", ascii_to_phred64("bbbbbbbbbb```````````````Y``\`bbbbbbbbbbbbb`bbbbab`a`_[ba_aa]b^_bIWTTQ^YR^U`"), 0), ("s2_1 M00176:17:000000000-A0CNA:1:1:17088:1773 1:N:0:0 orig_bc=AAAAAAAAAAAC new_bc=AAAAAAAAAAAC bc_diffs=0", "GGTTACCTTGTTACGACTTCACCCCAATCATCGGCCCCACCTTAGACAGCTGACTCCTAAAAGGTTATCTCACCGG", ascii_to_phred64("bbcbbbbbbbbbbbbbbbbbbbbbbbbbb_bbbbbbbbaba_b^bY_`aa^bPb`bbbbHYGYZTbb^_ab[^baT"), 1), ("s1_2 M00176:17:000000000-A0CNA:1:1:16738:1773 1:N:0:0 orig_bc=AAAAAAAAAAAA new_bc=AAAAAAAAAAAA bc_diffs=0", "GCACACACCGCCCGTCACACCATCCGAGTTGGAGGTACCCGAAGCCGGTAGTCTAACCGCAAGGAGGACGCTGTCG", ascii_to_phred64("b_bbbbbbbbbbbbbbbbbbbbbbbbbbabaa^a`[bbbb`bbbbTbbabb]b][_a`a]acaaacbaca_a^`aa"), 2), ("s4_3 M00176:17:000000000-A0CNA:1:1:12561:1773 1:N:0:0 orig_bc=AAAAAAAAAAAT new_bc=AAAAAAAAAAAT bc_diffs=0", "GGCTACCTTGTTACGACTTCACCCTCCTCACTAAACGTACCTTCGACAGCGTCCTCCTTGCGGTTAGACTACCGGC", ascii_to_phred64("bb^bbb````bbbbbbbbbbbbbbbbabbbb``bbb`__bbbbbbIWRXX`R``\`\Y\^__ba^a[Saaa_]O]O"), 3), ("s1_4 M00176:17:000000000-A0CNA:1:1:14596:1773 1:N:0:0 orig_bc=AAAAAAAAAAAA new_bc=AAAAAAAAAAAA bc_diffs=0", "GCACACACCGCCCGTCACACCATCCGAGTTGGGGGTACCCGAAGCCGG", ascii_to_phred64("b`bbbbbbbbbbbbbbb`^bbbbbYbbbbb\___`_bbab^aaaU^\`"), 4), ("s1_5 M00176:17:000000000-A0CNA:1:1:12515:1774 1:N:0:0 orig_bc=AAAAAAAAAAAA new_bc=AAAAAAAAAAAA bc_diffs=0", "GGATACCTTGTTACGACTTCACCCTCCTCACTCATCGTACCCTCGACA", ascii_to_phred64("b`bbbbbbbbbbbbbbb`^bbbbbYbbbbb\___`_bbab^aaaU^\`"), 5), ("s2_6 M00176:17:000000000-A0CNA:1:1:17491:1774 1:N:0:0 orig_bc=AAAAAAAAAAAC new_bc=AAAAAAAAAAAC bc_diffs=0", 
"GCACTCACCGCCCGTCACGCCACGGAAGCCGGCTGCACCTGAAGCCGG", ascii_to_phred64("bbbbbbbbbbbbbbbbbbbbbXbbb_bbbabbb`aZ[U]\OTYXV`Tb"), 6), ("s2_7 M00176:17:000000000-A0CNA:1:1:16427:1774 1:N:0:0 orig_bc=AAAAAAAAAAAC new_bc=AAAAAAAAAAAC bc_diffs=0", "GGCTACCTTGTTACGACTTCGCCCCAGTCACCGACCACACCCTCGACGGCTGCCTCCGG", ascii_to_phred64("bbbbbbbbbbbbbbbbbbbba`bbbbbbbbbb`abb_aacbbbbb]___]\[\^^[aOc"), 7), ("s4_8 M00176:17:000000000-A0CNA:1:1:18209:1775 1:N:0:0 orig_bc=AAAAAAAAAAAT new_bc=AAAAAAAAAAAT bc_diffs=0", "GGATACCTTGTTACGACTTCACCCCAATCATCGACCCCACCTTCGGCG", ascii_to_phred64("bbbbbbbbbbbbbbbbbbbbbXbbb_bbbabbb`aZ[U]\OTYXV`Tb"), 8)] forward_reads = """@MISEQ03:64:000000000-A2H3D:1:1101:14358:1530 1:N:0:TCCACAGGAGT TNCAGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCGCGTAGGTGGTTTGTTAAGTTGGATGTGAAATCCCCGGGCTCAACCTGGGAACTGCATTCAAAACTGACAAGCTAGAGTATGGTAGAGGGTGGTGGAATTTCCTGTGTAGCGGTGAAATGCGTAGATATAGGAAGGAACACCAGTGGCGAAGGCGACCACCTGGACTGAAACTGACACTGAGGGGCGAAAGCGGGGGGGGCAAACG + ?#5<????DDDDDDDDEEEEFFHHHHHHHHHHHHHHDCCHHFGDEHEH>CCE5AEEHHHHHHHHHHHHHHHHHFFFFHHHHHHEEADEEEEEEEEEEEEEEEEEEEEEEE?BEEEEEEEEEEEAEEEE0?A:?EE)8;)0ACEEECECCECAACEE?>)8CCC?CCA8?88ACC*A*::A??:0?C?.?0:?8884>'.''..'0?8C?C**0:0::?ECEE?############################ @MISEQ03:64:000000000-A2H3D:1:1101:14206:1564 1:N:0:TCCACAGGAGT TACGTAGGGTGCGAGCGTTAATCGGAATTACTGGGCGTAAAGCGTGCGCAGGCGGTTTTGTAAGTCAGATGTGAAAGCCCCGGGCTCAACCTGGGAACTGCGTTTGAAACTACAAGGCTAGAGTGTAGCAGAGGGGGGTAGAATTCCACGTGTAGCGGTGAAATGCGTAGAGATGGGGAGGAATACCAATGGCGAAGGCAGCCCCCGGGGTTAACACTGACGCCAAGGCACGAAAGCGGGGGGGGCAAACG + ?????BB?DDDDDD@DDCEEFFH>EEFHHHHHHGHHHCEEFFDC5EECCCCCCDECEHF;?DFDDFHDDBBDF?CFDCCFEA@@::;EEEEEEEECBA,BBE?:>AA?CA*:**0:??A:8*:*0*0**0*:?CE?DD'..0????:*:?*0?EC*'.)4.?A***00)'.00*0*08)8??8*0:CEE*0:082.4;**?AEAA?############################################# @MISEQ03:64:000000000-A2H3D:1:1101:14943:1619 1:N:0:TCCACAGGAGT 
TACAGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCGCGTAGGTGGTTTGTTAAGTTGGATGTGAAAGCCCCGGGCTCAACCTGGGAACTGCATTCAAAACTGACAAGCTAGAGTATGGTAGAGGGTGGTGGAATTTCCTGTGTAGCGGTGAAATGCGTAGATATAGGAAGGAACACCAGGGGCGAAGGCGACCACCTGGACTGAAACTGACACTGAGGGGCGAAAGCGGGGGGGGCAAAAG + ?AAAABBADDDEDEDDGGGGGGIIHHHIIIIIIIHHHCCHHFFEFHHHDCDH5CFHIIHIIIIHHHHHHHHHHHHHHHHHHHGGGEGGGGDDEGEGGGGGGGGGGGGGGEEEGCCGGGGGGEGCEEEECE?ECGE.84.8CEEGGGE:CCCC0:?C<8.48CC:C??.8.8?C:*:0:*9)??CCEE**)0'''42<2C8'8..8801**0*.1*1?:?EEEC?########################### @MISEQ03:64:000000000-A2H3D:1:1101:15764:1660 1:N:0:TCCACAGGAGT TACGAAGGGGGCTAGCGTTGCTCGGAATCACTGGGCGTAAAGCGCACGTAGGCGGATTGTTAAGTCAGGGGTGAAATCCTGGAGCTCAACTCCAGAACTGCCTTTGATACTGGCGATCTTGAGTCCGGGAGAGGTGAGTGGAACTGCGAGTGTAGAGGTGAAATTCGTAGATATTCGCAAGAACACCAGTGGCGAAGGCGGCTCACTGGCCCGGAACTGACGCTGAGGGGCGAAAGCTGGGGGAGCAAACG + ???????@DDDDDDBDFEEFEFHEHHHHHHHHHHHHHEHHHHFEHHHHAEFHGEHAHHHHHHHHHHHHHHH=@FEFEEFEFEDAEEEFFE=CEBCFFFCECEFEFFFCEEEFFCD>>FFFEFF*?EED;?8AEE08*A*1)?E::???;>2?*01::A?EEEFEEE?:C.8:CE?:?8EE8AECEFE?C0::8'488DE>882)*1?A*8A######################################## @MISEQ03:64:000000000-A2H3D:1:1101:15211:1752 1:N:0:TCCACAGGAGT TACAGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCGCGTAGGTGGTTTGTTAAGTTGGATGTGAAATCCCCGGGCTCAACCTGGGAACTGCATTCAAAACTGACAAGCTAGAGTATGGTAGAGGGGGGTGGAATTTCCTGTGTAGCGGGGAAATGCGTAGATATGGGAAGGAACACCAGGGGCGAAGGCGACCACCTGGACTGATACTGACACTGGGGTGGGAAAGGGGGGGGAGGAAAAG + ?????<B?DBBDDDBACCEEFFHFHHHHHHHHHHHHH5>EFFFEAACEC7>E5AFEFHHHHHHF=GHFGHFHHHHFHFHH;CED8@DDDE=4@EEEEECE=CECECEECCBB34,=CAB,40:?EEEE:?AAAE8'.4'..8*:AEEECCCA::A################################################################################################ @MISEQ03:64:000000000-A2H3D:1:1101:15201:1774 1:N:0:TCCACAGGAGT TACAGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCGCGTAGGTGGTTTGTTAAGTTGGATGTGAAATCCCCGGGCTCAACCTGGGAACTGCATTCAAAACTGACAAGCTAGAGTATGGTAGAGGGTGGTGGAATTTCCTGTGTAGCGGGGAAATGCGTAGATATAGGAAGGAACACCAGTGGCGAAGGCGACCACCTGGACTGATACTGACACTGAGGTGCGAAAGCGGGGGGGGCAAACG + 
?????BB?DDDDDDBDEEEEFFHFHHHHHHHHHHHFH>CEHDGDDCDE5CCEACFHHHHHHHHFFHHHHHHHHFHHFHHHHHHDEBFEEE@DEEEEEEEEEEEEEEBBCBECEEEEEEEEEEEEEEE?ACCEEEA)84)0.?EEE:AEACA?0?CEDD'.4?A:ACA)0'80:A:?*0*0)8CEEEEE?*0*)88888A'.5;2A)*0000*8:*0:?CEEEE?A*?A####################### @MISEQ03:64:000000000-A2H3D:1:1101:15976:1791 1:N:0:TCCACAGGAGT TACAGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCGCGTAGGTGGTTCGTTAAGTTGGATGTGAAATCCCCGGGCTCAACCTGGGAACTGCATTCAAAACTGACAAGCTAGAGTATGGTAGAGGGTGGTGGAATTTCCTGTGTAGCGGGGAAATGCGTAGATATGGGAAGGAACACCGGGGGGGAGGGGGGCTCTCGGGTCCTTTTCGGCGGCTGGGGGCGGAAGGCAGGGGGGGCAACCG + ?????BB?DDDDDDDDEEEEFFIFHHHHHHIIIHIFHCCHF@F>CECHCDDECCFEADEHHHHHHHHFGHHHHHHFHHHHHHF8:<DEEEADEEFFFFFFABEFFEFFECBCEEFEFFFFEACEEEEE*10*1??.08.888AEF?EEEC1:1:??>>'88AC?::?AA################################################################################## @MISEQ03:64:000000000-A2H3D:1:1101:16031:1840 1:N:0:TCCACAGGAGT TACAGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCGCGTAGGTGGTTTGTTAAGTTGGATGTGAAAGCCCCGGGCTCAACCTGGGAACTGCATTCAAAACTGACAAGCTAGAGTATGGTAGAGGGTGGTGGAATTTCCTGTGTAGCGGGGAAATGCGTAGATATAGGAAGGAACACCAGGGGCGAAGGCGACCACCTGGACGGATACTGACACTGAGGGGCGAAAGGGTGGGGAGAAAAAG + ?????BB?DDDDDDDDGFEGGGIHHHHIIIIIIIHFE>CFFFFDCHCH>>CE-5EEIIHHHIHHHHHHHHHHGHFDFHFHEHGBEEEEGGEDGGGGEGGEGGGGGCEGCCEEGGG><CEECCGCEEEG?C:1?EG.8<.88CCCEEGE?C?C*:1:<>'.8?8:C:?00.0?:?*1::*9CC?EEEG*?############################################################## @MISEQ03:64:000000000-A2H3D:1:1101:12964:1852 1:N:0:TCCACAGGAGT TNCAGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGCAAAGCGCGCGTAGGTGGTTTGTTAAGTTGGATGTGAAAGCCCCGGGCTCAACCTGGGAACTGCATTCAAAACTGACAAGCTAGAGTATGGTAGAGGGTGGTGGAATTTCCTGTGAAGCGGGGAAATGCGTAGATATAGGAAGGAACACCAGGGGCGAAGGCAACCACCGGGACTGAAACTGAACCGGAGGGGGGAAAGCGGGGGGGGAAAACG + ?#55????DDDDDDDDEEEEFFHHBHHHFFGHHFHDC+5>C/?E7DCHCCCD5AECFHHHFHHHHHHHHHFFFFFHFFDFEFF5)@=DEFDEFEEFF;AEAABC,4BECCC=B,5?C8?CC?CC*:?E010:?EA)0.)08C?A:?A######################################################################################################## 
@MISEQ03:64:000000000-A2H3D:1:1101:17245:1906 1:N:0:TCCACAGGAGT TACAGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCGCGTAGGTGGTTTGTTAAGTTGGATGTGAAAGCCCCGGGCTCAACCTGGGAACTGCATTCAAAACTGACAAGCTAGAGTATGGTAGAGGGTGGTGGAATTTCCTGTGTAGCGGTGAAATGCGTAGATATAGGAGGGAACACAAGGGGCGAAGGCGACCACCGGGACGGAAACTGCAACTGGGGGGGGAAAGCGGGGGGGGAAACAG + AAA??BB?DDDDDDDDGGEGGGIHGHIIIIIIHF?CFCCDFFFDCHEHC>DH5AFEHIHHHHHHHHHHHHHHFFFFFHHHHHGDBEEGGGGGGG@EGEGGGGGGGCGEGACC>EGEGGGGC:C0CEEG:0::CEE)88)08?:CECCE:C*10*104A.4CE:*:?C8)'8CC############################################################################## @MISEQ03:64:000000000-A2H3D:1:1101:18171:1983 1:N:0:TCCACAGGAGT GACGTAGGAGGCGAGCGTTGTCCGGATTCATTGGGCGTAAAGAGCGCGCAGGCGGCTTGGTAAGTCGGATGTGAAATCCCGAGGCTCAACCTCGGGTCTGCATCCGATACTGCCCGGCTAGAGGTAGGTAGGGGAGATCGGAATTCCTGGTGTAGCGGTGAAATGCGCAGATATCAGGAGGAACACCGGGGGCGAAGGCGGATCTCTGGGCCTTCCCTGACGCTCAGGCGCGAAAGCGGGGGGGGCGAACG + ??????B?DDDDDDDDFFEFFFIHFEEEHHIHHHFHHEHHFGFFFHCEHEHCDECCEFFE4DDFDBEEEEEFFFFEEFFCE8B>BEFEEFFCEFE>8>EFFE*A?A?ADDAAEE8E>;>EA:??1*:?111?C<88AA08?ACECF:*:?*08:0:8<.4?EE*A:))'..0*01*?:08?A*?CA################################################################# @MISEQ03:64:000000000-A2H3D:1:1101:14225:2009 1:N:0:TCCACAGGAGT TACAGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCGCGTAGGTGGTTTGTTAAGTTGGATGTGAAAGCCCCGGGCTCAACCTGGGAACTGCATTCAAAACTGACAAGCTAGAGTATGGTAGAGGGTGGTGGAATTTCCTGTGTAGCGGTGAAATGCGTAGATATAGGAAGGAACACCAGTGGCGAAGGCGACCACCTGGACTGAAACGGACACTGAGGGGCGAAAGCGGGGGGGGCAAACG + ?????BB?DDDDDDBDEEEEFFHHHHIIIIHHIIIIHHEHIFGEHHHHCCEHAEFHIIHIIIIHHHHHHHHHHFHHHHHHHHFFFEFFFFFEFFFFFFEFFFFFFEFFFEFCACEFFFFFF:C?CEEE*?:AAEE88;088?AEFCEAEECEEEFE>?).?ECCEEE8?4AFFE0?*0088ACFFFAAC*0?C888>>CD?D;8CE*:*:A?CF*::)0?DD?:::?######################## @MISEQ03:64:000000000-A2H3D:1:1101:16656:2052 1:N:0:TCCACAGGAGT 
TACAGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCGCGTAGGTGGTTTGTTAAGTTGGATGTGAAAGCCCCGGGCTCAACCTGGGAACTGCATTCAAAACTGACAAGCTAGAGTATGGTAGAGGGTGGTGGAATTTCCTGTGTAGCGGTGAAATGCGTAGATATAGGAAGGAACACCAGGGGCGAAGGCGACCACCGGGACTGAAACTGACACTGAGGGGGGAAAGCGGGGGGGGAAAACA + ?????BB?BDDDDDDDGFFEGGIIHHIIIIIHHHHIHCCFFDEEEHEHFFEH5AFHHIHIHIHGGHHHHHHHFHHFHHHHHHGEG@EGEGGEGGGGCEGGEGGGGEGGACECGGGGGGGGEGGCCEGG?CCCEGC088)0.?EGG?EC*::C*:??<8.48?C:?C808.8CEE############################################################################# @MISEQ03:64:000000000-A2H3D:1:1101:18209:2060 1:N:0:TCCACAGGAGT TACAGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCACGTAGGCGGCTCAGCAAGTCAGGGGTGAAATCCCGGGGCTCAACCCCGGAACTGCCCTTGAAACTGCTAAGCTAGAATCTTGGAGAGGCGAGTGGAATTCCGAGTGTAGAGGGGAAATTCGTAGATATTCGGAAGAACACCAGGGGCGAAGGCGACCCCCTGGACAAGCATTGACGCTGAGGGGGGAAAGCGGGGGGGGCAAAAG + ?????BB?BDDDDDDDECEEFFHHHHAHFHHHHHHHHCCHHH=@DEEHFHFCGHHB)?ECGHHH?DHHHHHCCCFFHHHFEEEEEEEEEEEEEB)>EDACEECEECEEECEE:*0A:AEAECA:0::ACE??E?.8'4.88?EC*00:08).0:*00?)..8AAAAA*0)0::?::?0A8)?C:?A################################################################# @MISEQ03:64:000000000-A2H3D:1:1101:13824:2095 1:N:0:TCCACAGGAGT TACGTAGGGGGCTAGCGTTGTCCGGAATCATTGGGCGTAAAGCGCGTGTAGGCGGCCCGGTAAGTCCGCTGTGAAAGTCGGGGGCTCAACCCTCGAAAGCCGGGGGATACTGTCGGGCTAGAGTACGGAAGAGGCGAGTGGAATTCCTGGTGTAGCGGTGAAATGCGCAGATATCAGGAGGAACACCCATTGCGAAGGCAGCTCGCTGGGACGTTACTGAGGCTGAGACCGGAAAGGGGGGGGGGCAAAAG + ??A??BBADDDDDDBDFEEFFFHHHHHFHHHIHHFHHCCHHFHDCCDEEHHFIHAHHHHH@EFFDFFEBDEDEFFECBBEEEED?28CCFFECE;EF8?ECD;'488?EEFCE:A>>?>EECEE::A8E8.8?8).'.'08AEE*?:*::*001:?<D.'8??*:*))'''01***10*088CEEEEA8C############################################################# @MISEQ03:64:000000000-A2H3D:1:1101:17839:2106 1:N:0:TCCACAGGAGT TACAGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCGCGTAGGTGGTTTGTTAAGTTGGATGTGAAAGCCCCGGGCTCAACCTGGGAACTGCATTCAAAACTGACAAGCTAGAGTATGGTAGAGGGTGGTGGAATTTCCTGTGTAGCGGTGAAATGCGTAGATATAGGAAGGAACACCAGTGGCGAAGGCGACAACCGGGACGGAAACTGACACTGAGGGGCGAAAGCGGGGGGGGCAAAAG + 
AAA?ABB?DDDDDDDEGFEGGFHIHHIIIIIIIIDFH5CFHHGHEH=DC>CE5AEEHFHIHIFHHHHHHHHHFHHFHHHHHHGGGGGEEGGGGGDEGGGGGGGGGGGGGCE>AEGEGGGGEEECEGEE1:??CEC08>.88CEEECG*:C?CC:?0.4.4CE?CECC?)4?CC:*11?:?)CCEGG).9*1:?8<2<<C#################################################### @MISEQ03:64:000000000-A2H3D:1:1101:17831:2124 1:N:0:TCCACAGGAGT TACAGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCGCGTAGGTGGTTTGTTAAGTTGGATGTGAAAGCCCCGGGCTCAACCTGGGAACTGCATTCAAAACTGACAAGCTAGAGTATGGTAGAGGGGGGGGGAATTTCCTGTGTAGCGGTGAAATGCGTAGATATAGGAAGGAACACCAGTGGCGAAGGCGACCACCTGGACTGAAACTGACACTGAGGGGCGAAAGCGGGGGGAGCAAACG + AAAAABB?DDDDDDDDGFGFGGIIHHIIIIIHIIDFH>CFHHGDCFDH>CDHAEFEHIEFFGGHHHHHHHFH=CFFHHHHEHG8DEEGEGGGGGDEEEEGEEGGGCGGEEECCACCEGGGCEE::?CE0?CCEGE'.<'..4CEGEGGEEEE*::C>20>?C?*1:C..'8:??*:*?*0)??9CEG8?*1*8'4.44?58<28?C############################################# @MISEQ03:64:000000000-A2H3D:1:1101:12555:2129 1:N:0:TCCACAGGAGT TACAGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCGCGTAGGTGGTTCGTTAAGTTGGATGTGAAAGCCCCGGGCTCAACCTGGGAACTGCATTCAAAACTGACGAGCTAGAGTATGGTAGAGGGTGGTGGAATTTCCTGTGTAGCGGTGAAATGCGTAGATATAGGAAGGAACACCAGTGGCGAAGGCGACCACCTGGACGGAAACTGACACTGAGGTGCGAAAGCGTGGGGACCAACCG + ????ABBADDDDDEDDGGGGGGIIHHIIIIIHIIHHHFFHHHFHHHHH>CDHAEFHFGHHFHHHHHHHHHFHHFHFFHHHHHGBEEAD;DGGGGEGGGCGCEEEGEGGGCE>>>CEDGDGE:C:CGGG:?C??EE08<)0?ECEGEGCCECEEGGGGG08CECE?CE8)4?CC:*:*:0989*9CEC8C*:?C'842.8'.4.2?E9?*:?'.8).::::?CC:*110*0C8C<8??C############# @MISEQ03:64:000000000-A2H3D:1:1101:13627:2170 1:N:0:TCCACAGGAGT GACAGAGGGTGCAAACGTTGTTCGGAATTACTGGGCATAAAGAGCACGTAGGTGGTCTGCTAAGTCACGTGTGAAATCCCCCGGCTCAACCGGGGAATTGCGCGTGATACTGGCCGGCTCGAGGTGGGTAGGGGGGAGCGGAACTCCAGGGGGAGCGGGGAAATGCGTAGATATCTGGAGGAACACCGGGGGCGAAAGCGGCTCACGGGACCCAATCTGACACTGAGGGGCGAAAGCTAGGGTGGCAAACG + ?????BB?DDDDDDDDEFFFFFHHHHHIHIIHIIFHCEHIIHBFHIHHAAFH5CF@FHHHGHIIGHHHHFHIHIIIHIIIHHHHHHHHHHFHHHFFEFEFEDBE<>BBEEFECECE?D'..2AD)8A>40?AED''''.4<D>>AC**1?).2'888D'5<EACEEEAEDEFEE:*??*08A?AAC)58'4>2<>D8A:A82'.*:*.'?>E)AA#################################### 
@MISEQ03:64:000000000-A2H3D:1:1101:11781:2223 1:N:0:TCCACAGGAGT TACGTAGGGCGCAAGCGTTATCCGGAATTATTGGGCGTAAAGAGCTCGTAGGCGGTTTGTCGCGTCTGCCGTGAAAGTCCGGGGCTCAACTCCGGATCTGCGGTGGGTACGGGCAGACTAGAGTGATGTAGGGGAGATTGGAATTCCTGGTGTAGCGGGGAAATGCGCAGATATCAGGAGGAACACCGATGGCGAAGGCAGGTCTCTGGGCATTAACTGACGCTGAGGAGCGAAAGCAGGGGGGGCGAACG + ???A?BB?DDDDDDDDEEECFFHHHHHIHHHIIIHHHECEHFCFHGGH>CFEFEHHHHHFFDFHCDEFFHHEBFFFF?BBEEEEEEEFFFBEEEEAEDEFEDD.8A8.ACEDDD;AEFFFFEF:*1:?ACCFFD8<AE?EFFFF:EEEEFFFA:CEDD'.8??CEF?ADDDFF:C:?::?AEEFFFA>8'08:2448DE?E?8:*:*1A***0*:AA*?AEEEEE?######################### @MISEQ03:64:000000000-A2H3D:1:1101:17996:2254 1:N:0:TCCACAGGAGT TACAGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCGCGTAGGTGGTTTGTTAAGTTGGATGTGAAAGCCCCGGGCTCAACCTGGGAACTGCATTCAAAACTGACAAGCTAGAGTATGGTAGAGGGTGGTGGAATTTCCTGTGTAGCGGGGAAATGCGTAGATATAGGAAGGAACACCAGGGGCGAAGGCGACCACCGGGCCTGAAACTGACACTGAGGGGGGAAAGCGGGGGGGGAAAACG + ?????BB?DDDDDDDDGGGGGGIHHHHIIIIHHHFFH>CHFHGHHHEHCCCE5AFEHIHHHHHHHHHHHHHHHHHHHHHHHHGGEEGGEGEGGGEGEGGGCGGGGGGGECGEECGAECGGEEEC**CE?C::CCC.8<)08?CCC:CCCEC?CC?:8>'4<?.1C:8082CCGG*:*:0C8?EC*0C89.?############################################################ @MISEQ03:64:000000000-A2H3D:1:1101:13712:2276 1:N:0:TCCACAGGAGT TACAGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCGCGTAGGTGGTTTGTTAAGTTGGATGTGAAAGCCCCGGGCTCAACCTGGGAACTGCATTCAAAACTGACAAGCTAGAGTATGGTAGAGGGTGGTGGAATTTCCTGTGTAGCGGGGAAATGCGTAGATATAGGAAGGAACACCAGTGGCGAAGGCGACCACCTGGACTGATACTGACACTGAGGGGGGAAAGCGGGGGGGGAAAACG + ?????BBADDDDDDDDGGEGGGIHHHHIIIIIIIIIHCCHHDCECHEHCDEH-AFEHIHIHHIHHHHHHHHHHHFFFHHHHHGEGEDDEEDDDGGGGGEGGGGGEEEGEEGEGGGGGGGCEGEGCEGG:C::CEE)88.8?EGGG:C?:?:C??:*52'.888:CEE).2CCGE*C??:C.?EGGGGC9*118>>.4>C''.8<CC*?*:**00*01?:CEGCC########################### @MISEQ03:64:000000000-A2H3D:1:1101:15819:2301 1:N:0:TCCACAGGAGT 
TACGTAGGGTGCGAGCGTTAATCGGAATTACTGGGCGTAAAGCGTGCGCAGGCGGTGAGTTAAGTCTGCTGTGAAAGCCCCGGGCTCAACCTGGGAACTGCATTCAAAACTGACAAGCTAGAGTATGGTAGAGGGTGGGGGAATTTCCTGTGTAGCGGGGAAATGCGTAGATATAGGAAGGAACACCAGGGGCGAAGGCGACCACCTGGACTGAAACTGACACTGAGGGGCGAAAGCGGGGGGGGCAAACG + ?????BB?DEDDDDDDFDEEFFHHHEFHHIIIIIIHHCHHFHHHCEEACCHHHEH)<<CCDGFFDFFFBCB@DFHFHFHHEEFB8@EEEFFEEFFFFFFFFFEFCEFFFCAAC?EF??AC???0*:?C*:::?EE)0>'.42AAECEFE:*0:AAC?D'..8C?:?A)).0001*11::??8A**?A################################################################ @MISEQ03:64:000000000-A2H3D:1:1101:11958:2304 1:N:0:TCCACAGGAGT TACAGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCGCGTAGGTGGTTTGTTAAGTTGGATGTGAAAGCCCCGGGCTCAACCTGGGAACTGCATTCAAAACTGACAAGCTAGAGTATGGTAGAGGGTGGGGGAATTTCCTGTGTAGCGGGGAAATGCGTAGATATAGGAAGGAACACCAGGGGCGAAGGCGACCACCGGGACTGAAACTGACACTGAGGGGCGAAAGCGTGGGGGGCAAACG + ????ABBADDDDDDDDEEEEFFHHHHHIFHHIIIHFEECEFGDECECE5>C:55EEHIHIFGHFGHHHHHFHFFHHFHHHHHFBFEEDEEFFFFEFFFEFEFABEEFFFEEBEFFEFF=::AE*:AEE0?:?CFE8A>'.<?EEE??E?A??CEEF<>'.8AC?ECE)848?0**::AAC???EEE)*0)084'48<'8'882<CA).2<408?*1:??EEE############################# @MISEQ03:64:000000000-A2H3D:1:1101:19110:2311 1:N:0:TCCACAGGAGT TACAGAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGCGCGTAGGTGGTTTGTTAAGTTGGATGTGAAATCCCCGGGCTCAACCTGGGAACTGCATTCAAAACTGACAAGCTAGAGTATGGTAGAGGGGGGTGGAATTTCCTGTGTAGCGGTGAAATGCGTAGATATAGGAAGGAACACCAGTGGCGAAGGCGACCACCTGGACTAAAACTGACACTGAGGGGCGAAAGCGGGGGGGGCAAACG + ????9BB?BBBBBBBBEEECFAFHHHHHHHFHHHHHHCCEFF/CDCCE>DCH5AECFHHHFHHHHHHHHHHHGFHHCFHHHHHEEEDEDEED@EEEEEEEEEEEEEEEEE;;BEEE?EEEEEE?*?CA?EE::?8'.''..?CEE*::/:?A:C?E??82?CCEEEE))4?EEEEA:?*80?AEEC################################################################# """ # For reference. Data used to make the 'joined_reads' reference string. 
reverse_reads = """@MISEQ03:64:000000000-A2H3D:1:1101:14358:1530 2:N:0:TCCACAGGAGT ACGGACTACAAGGGTTTCTAATCCTGTTTGCTCCCCACGCTTTCGCACCTCAGTGTCAGTATCAGTCCAGGTGGTCGCCTTCGCCACTGGTGTTCCTTCCTATATCTACGCATTTCACCGCTACACAGGAAATTCCACCACCCTCTACCATACTCTAGCTTGTCAGTTTTGAATGCAGTTCCCAGGTTGAGCCCGGGGATTTCACATCCAACTTAACAACCCACATACCCGCCTTTTCGCCCAGGTAATCC + ?????@@BDDBDDDBBCFFCFFHIIIIIIIIFGHHHHEHHHIIIHHHHHFHIIHIGHHIDGGHHHHIIIIICEFHIHHCDEHHHHHHFHHCFHDF?FHHFHHHFFDFFFDEDDD..=DDDE@<BFEEFCFFCECE==CACFE?*0:*CCAA?:*:*:0*A?A80:???A?*00:**0*1*:C??C?A?01*0?);>>'.8::A?############################################### @MISEQ03:64:000000000-A2H3D:1:1101:14206:1564 2:N:0:TCCACAGGAGT ACGGACTACAGGGGTTTCTAATCCTGTTTGCTCCCCACGCTTTCGTGCATGAGCGTCAGTGTTAACCCAGGGGGCTGCCTTCGCCATTGGTATTCCTCCACATCTCTACGCATTTCACTGCTACACGTGGAATTCTACCCCCCTCTGCTACACTCTAGCCTTGTAGTTTCAAACGCAGTTCCCAGGTTGAGCCCGGGGCTTTCACATCTGCCTTACAAAACCGCCTGCGCACGCTTTACGCCCCGTAATTC + ?????@@BDDBDDD?@CFFFFFHHHHHFFHHHHHHHHHHH@FFHEDFFH@FHBGCDHHHBFHHHHHHHEHHHHDCCEFFDFFFEE:=?FF?DFDFDFFF==BEE=DBDDEEEEEB,4??EE@EEE,3,3*3,?:?*0ACCEDD88:***?*0:*0***0*?C?00:AE:?EE:*A8'.?:CAA?A80*0*??AA88;28;C################################################## @MISEQ03:64:000000000-A2H3D:1:1101:14943:1619 2:N:0:TCCACAGGAGT ACGGACTACAGGGGTATCTAATCCTGTTTGCTCCCCACGCTTTCGCACCTCAGTGTCAGTATCAGTCCAGGTGGTCGCCTTCGCCACTGGTGTTCCTTCCTATATCTACGCATTTCACCGCTACACAGGAAATTCCACCACCCTCTACCATACTCTAGCTTGTCAGTTTTGAATGCAGTTCCAAGGTTGAGCCCGGGGCTTTCACATCCAACTTACAAAACCACCTACCCGACCTTTACGCCCAGAAATTC + ?????@@BDDDDDD?AFFFFFFIIHHIHIIHIIIIHIHHHHHHHHHHHHHHHHHHIHHHIIHHIHIIIIII?EFEGHHHHHIIHHDHHFD@FFEHFHFHFHFHFFFFFFFFEEEEFFFDEB<E@EFEEABA=B=CAEFEEFEA?A:*AC::??:**10??:00::*??EC*?:?C*::A*?C*1:8A################################################################ @MISEQ03:64:000000000-A2H3D:1:1101:15764:1660 2:N:0:TCCACAGGAGT 
ACGGACTACCGGGGTATCTAATCCTGTTTGCTCCCCTAGCTTTCGCACCTCAGCGTCAGTACCGGGCCAGTGAGCCGCCTTCGCCACTGGTGTTCTTGCGAATATCTACGAATTTCACCTCTACACTCGCAGTTCCACTCACCTCTCCCGGACTCAAGACCGCCAGTACCAAAGGCAGTTCTGGAGTTGAGCTCCAGGTTTTCACCCCTGCTTTAAAAATCCCCCAACGCGGCCTTTCCCCCCAGTGACTC + ?????@=BB?BBBB<?>ACFFCECFCHCFHH=CGHHDFH=E?ACDEHHCCFFGHHDHH@CBEFHHCHHHF,5@?DF)4<C3D4DD+=,BD5;DE=DBDE=<E<;?E?B;3?;A?;;;EBBC:??EEEEE?;AA*:A??################################################################################################################# @MISEQ03:64:000000000-A2H3D:1:1101:15211:1752 2:N:0:TCCACAGGAGT ACGGACTACCGGGGTTTCTAATCCTGTTTGCTCCCCACGCTTTCGCACCTCAGTGTCAGTATCAGTCCAGGTGGTCGCCTTCGCCACTGGTGTTCCTTCCTATATCTACGCATTTCACCGCTACACAGGAAATTCCACCACCCTCTACCATACTCTAGCTTGTCAGTTTTGAATGCAGTTCCCAGGTTGAGCCCGGGGATTTCCCATACAACTTAACAAACCACTTACGCGGGTTTTCGCCCCACAAATTC + ?????@@BB-<@BBBBEA?FFFA8ACFCBHHHGHHHHBHHHHFCDDD7CHHFE?CCDDCFGBHHHGGHFGFGFFHFDCDHHC?=CDHFFDCDDDF,EFF5?BFEDBBDB@@EEACCE;,?::@BEEEEACC*??//:AA*8AAAEE?ECC##################################################################################################### @MISEQ03:64:000000000-A2H3D:1:1101:15201:1774 2:N:0:TCCACAGGAGT ACGGACTACCGGGGTTTCTAATCCTGTTTGCTCCCCACGCTTTCGCACCTCAGTGTCAGTATCAGTCCAGGTGGTCGCCTTCGCCACTGGTGTTCCTTCCTATATCTACGCATTTCACCGCTACACAGGAAATTCCACCACCCTCTACCATACTCTAGCTTGTCAGTTTTGAATGCAGTTCCCAGGTTAAGCCCGGGGATTTCCAATCCAACTAAACAAAACACACACCCGCTCTTTACGCCCACCATTTC + ?????@=BBBBBBB<=CFFFFFFHFCFCEHHDGHHEFEHHHHHHHHHHHFHHGC-AEGFCGHHHFFHFHHBFGDEDDCEDDH+DDDHHF,,7D@DFDFFFBFFEDEDEEDE:@:B?C;,3@<><EEEE*BEEC?E*0AEEEEE*8*:CCE:CA*?*A?:AAA######################################################################################### @MISEQ03:64:000000000-A2H3D:1:1101:15976:1791 2:N:0:TCCACAGGAGT ACGGACTACAAGGGTTTCTAATCCCGTTTGCTCCCCTAGCTTTCGCACCTCAGCGTCAGAAATGGACCAGAGAGCCGCCTTCGCCACCGGTGTTCCTTCCTATATCTACGCATTTCACCGCTACACAGGAAATTCCACCACCCTCTACCATACTCTAGCTTGTCAGTTTTGAATGCAGTTCCCAGGTTGAGCCCGGGGTTTTCCAATCCAATTTAACGACCAACCACGCGGCGCTTTAGGCCCGGTAATGC + 
?<???@=BBBBBBB556CFFBCEFFEHHHHHHHHHE=EHHHHHHHHHHHHHHHFHHEDCGFHHHHHHHHH;A?EFHHHHHHHHH+=EHHC)8@+?BFFFDFEEEEE;DDEEEEBCEECEEEBEEEEEEEEEEE:?*/:ACEE)*8*:C:A*0?:A*:C?A:?**:AECE?*?:*:C:????C##################################################################### @MISEQ03:64:000000000-A2H3D:1:1101:16031:1840 2:N:0:TCCACAGGAGT ACGGACTACTGGGGTATCTAATCCTGTTTGCTCCCCACGCTTTCGCACCTCAGTGTCAGTATCAGTCCAGGTGGTCGCCTTCGCCACTGGTGTTCCTTCCTATATCTACGCATTTCACCGCTACACAGGAAATTCCACCACCCTTAACAATACTCTAGCTTGTCAGTTTTAAATGCAGTCCCCAGGTTGAGCCCGGGGCTTTCACATCAAATTTACAAAACCACCCACGCGCGTTCTACGCCGGACAATCC + ?????==<BBBBBBBBFFFFF?FFF?FFFHHFEFFHHHH:@>CHEDHHHFFFGBCCDDFGGHHHHEHHHHH5AE+C*>==+EDHHDEFCFCDF3@.D=,CFH=,@,4DFFE:=DDDDEB:)1:1;;?B;BE;??,?EE;AEE??**0*/:0??:***:?E*:8?A*:CEE################################################################################# @MISEQ03:64:000000000-A2H3D:1:1101:12964:1852 2:N:0:TCCACAGGAGT ACGGACTACTCGGGTATCTAATCCTGTTTGCTCCCCACGCTTTCGCACCTCAGTGTCAGTATCAGTCCAGGTGGTCGCCTTCGCCACTGGTGTTCCTTCCTATATCTACGCATTTCACCGCTACACAGGAAATTCAACCACCCTCTACCATACTCTAGCTTGTAAGTTTTGAATGCAGTTCCAAGGTTGAGCCCGGGGCTTTCACACCCAACTTAACAACCCACCTACGCGGCATTTACGCCCACTACTTC + ?????@=BBBB9?=55AACCC?ECFFFHB>FFHGFHFHHHHHHHHHHHHFHHHGGGHHHGGHHHHHHDDFEGH;EBCEHD+AFE@C+@F=.7D+@CDCFFHHFFFD?DF@E+=:BDDB;D=@BE?BCE*,33,,?,3;;:?).0**::***0/*/0A??:*:****00/**8*0?AE:?AAC**0):??C############################################################# @MISEQ03:64:000000000-A2H3D:1:1101:17245:1906 2:N:0:TCCACAGGAGT ACGGACTACAGGGGTTTCTAATCCTGTTTGCTCCCCACGCTTTCGCACCTCAGTGTCAGTATCAGTCCAGGTGGTCGCCTTCGCCACTGGTGTTCCTTCCTATATCTACGCATTTCACCGCTACACAGGAAATTCCACCACCCTCTACCATACTCTAGCTTGTCAGTTTTGAATGCAGTTCCCAGGTTGAGCCCGGGGCTTTCAAATCCAACTTAACAAACAACCAACCGGCGCTTTACGCCCAGTAATTC + ?????@@BDDDDBDABEFFFFFHHHHHHHHGGHGHHHHHHEH@FEHEEHHHFHHH=EGHHHHDGHHHHFHHGGHHHCCEDEHHHHHHHHHDFHHF=DBDFHFD?BB.BF;@DDDD.=DD*>6)@==ABAACBB5=B,=,88A)???:E*::::??*:**1**8??CCCEE8A:A::AAACAC??A)1:0**1*)48;'42A?EA**1?*1*0::??:ACEF############################## 
@MISEQ03:64:000000000-A2H3D:1:1101:18171:1983 2:N:0:TCCACAGGAGT ACGGACTACCGGGGTTTCTAATCCTGTTCGCTCCCCACGCTTTCGCGCCTGAGCGTCAGGTAAGGCCCAGAGATCCGCCTTCGCCACCGGTGTTCCTCCTGATATCTGCGCATTTCACCGCTACACCAGGAATTCCGATCTCCCCTACATACCTCTAGCCGGGCAGTATCGGATGCAGACCCGAGGTTGAGCCCCGGGATTTCACATCGGCTTACCAAAGCGCCCGGCGCCCCCTTTACGCCCCAGAAACC + ?????@@BDBDDDD=BCFFFFFIIIIHHFEHHHHIHIHHHEFCGDEHHHEFFEGC>EEHI?EHHGHHHHFH+C=,,?FHDDHFEE@EFE=1;A;EECCE==BEB,BBC=@@<?EE:?E:8>24;:CEAA8?CC*??:0?;*1?AE?CE*10:0*1:CAA*;22;2A##################################################################################### @MISEQ03:64:000000000-A2H3D:1:1101:14225:2009 2:N:0:TCCACAGGAGT ACGGACTACAGGGGTATCTAATCCTGTTTGCTCCCCACGCTTTCGCACCTCAGTGTCAGTATCAGTCCAGGTGGTCGCCTTCGCCACTGGTGTTCCTTCCTATATCTACGCATTTCACCGCTACACAGGAAATTCCACCACCCTCTACCATACTCTAGCTTGTCAGTTTTGAATGCAGTTCCCAGGTTGAGCCCGGGGCTTTCACATCCAACTTAACAAACAACCACGCCGCGCTTTAGCCCAGGTAATTC + ?????@@BDDDDDD??CFFFFFHIHHHHIIIIHIIHIHHHHHIHHHHHHFFHHIHHHIHHHHHIIHIHIIIFFFEGHHEDEHHHHDHHHHCFFDFFHHHHHHFFFFFFF@EDEED=DDEED@EBFCEEEFECCCEEEFB<CA888:AEEFEFEA??CCEFF:?:ACCFFE?E:AC?:*:?EFE*:):???::A).;D>D>8:?################################################ @MISEQ03:64:000000000-A2H3D:1:1101:16656:2052 2:N:0:TCCACAGGAGT ACGGACTACCCGGGTATCTAATCCTGTTTGCTCCCCACGCTTTCGCACCTCAGTGTCAGTATCAGTCCAGGTGGTCGCCTTCGCCACTGGTGTTCCTTCCTATATCTACGCATTTCACCGCTACACAGGAAATTCCACCCCCCTCTACCATACTCTAGCTTGTCAGTTTTGAATGCAGTTCCAAGGTTGAGCCCGGGGCTTTCACACCCAACTTAACAAACCACCTACGCGCGCTTTACGCCCAGCATTTC + ?????@@BDDDDDD<@CFFFFFIHHFHHFHHHHHHHIHHHHHFHCEHHHHIIFHHAFHHHFFHIIHHIIIHGHFEH<DDEDH;DEHHHHHFFH;FFHFHFFFFD?FBFF=BDEDDDFEEAE@BEFFFF<BE=B,=,5?*).;>8A:*:::?E?*::A::?AE8AEFEEEC?A:CE?AEA:EF*008:?EF*:C)8;D228A0:??:*.8A8):*:*1::CE############################## @MISEQ03:64:000000000-A2H3D:1:1101:18209:2060 2:N:0:TCCACAGGAGT 
ACGGACTACTAGGGTATCTAATCCTGTTTGCTCCCCACGCTTTCGCACCTCAGCGTCAATACTTGTCCAGCGAGTCCCCTTCGCCACTGGTGTTCTTCGGAATATCTACGAATTTCACCTCTACACCCGGAATTCCACTCCCCCTTCCAAGATTCCAGCTTAGCAGTTTCAAGGGCAGTTCCGGGGTTGGCACCCGGGATTTCACCCCTGCCTTGCTCAACCCCCCACGGGGCCTTTACCCCCAGCATTCC + =9<=9<7<@@@@@@<@A8A>C>8E>-8AE;C99CEEECC>>EECE@CCDE,C@E++5>E-A=E-C@@=@5@C>C<D-5A5CC<CD+<4DE3=<C+4==+<@D++2:9DEE1<9DE######################################################################################################################################## @MISEQ03:64:000000000-A2H3D:1:1101:13824:2095 2:N:0:TCCACAGGAGT ACGGACTACCAGGGTTTCTAATCCTGTTTGCTCCCCACGCTTTCGCGTCTCAGCGTCAGTAACGTCCCAGCGAGCTGCCTTCGCAATGGGTGTTCCTCCTGATATCTGCGCATTTCACCGCTACACCAGGAATTCCACTCCCCCCTTCCGTACCCTAGCCCGACAGTACCCCCCGGCTTCCGAGGCTTGACCCCCCGCCTTTCACACCGGACTTACCGGGCCGCCTACCCGGCCTTTCGCCCCACCGTTTC + ??<??@@BBBBBBBBBCFCFFFHHHHHHBHHGHHHHHCHHEH>GDEHCA:DFGHHEEEEFFHHHHHHDHED7<CHEGHFFDFFHEDHHHDDDE@D??DD;=B,,5B,,56)):?;BEE?*1::):?**8AEAC*?*:?################################################################################################################# @MISEQ03:64:000000000-A2H3D:1:1101:17839:2106 2:N:0:TCCACAGGAGT ACGGACTACAAGGGTATCTAATCCTGTTTGCTCCCCACGCTTTCGCACCTCAGTGTCAGTATCAGTCCAGGTGGTCGCCTTCGCCACTGGTGTTCCTTCCTATATCTACGCATTTCACCGCTACACAGGAAATTCCACCACCCTCTACCATACTCTAGCTTGTCAGTTTTGAATGAAGTTCCAAGTTTGAGCCCGGGGCTTTCACATCCAATTTAAAAAACAACCAACGCGCGCTTCACCCCAGGTAATAC + ?????@@BBBBBBB<5ACFFFFHHHHHHHHHHHHHHHHHHHHFFHHHHHHHHHGHHHFHGHHHHFGHHHHH?EFEECCEEEHHHEHHHHHDFHDFDHDHHHHFFDFFFHFEDDDD,5DD@BB<EEEEECBB?B3B;,?,3?E8CE?*?A*/:/:::??AE::**0:AEE################################################################################## @MISEQ03:64:000000000-A2H3D:1:1101:17831:2124 2:N:0:TCCACAGGAGT ACGGACTACAAGGGTATCTAATCCTGTTTGCTCCCCACGCTTTCGCACCTCAGTGTCAGTATCAGTCCAGGTGGTCGCCTTCGCCACTGGTGTTCCTTCCTATATCTACGCATTTCACCGCTACACAGGAAATTCCACCACCCTCTACCATACTCTAGCTTGTCAGTTTCGAATGCAGTTCCCAGGTTGAGCCCGGGCCTTCAACCTCCACTTTACAAAACAACCAAACGCCGCTTACCGCCACGAAATCC + 
?????@=BBBBBBB5<CFFFFCFHHHHHFHHHHHHHHHHHHHFHEEHHEHFGHGH
codeparrot/github-code-clean
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- import functools from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union import warnings from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.paging import ItemPaged from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import HttpResponse from azure.core.polling import LROPoller, NoPolling, PollingMethod from azure.core.rest import HttpRequest from azure.core.tracing.decorator import distributed_trace from azure.mgmt.core.exceptions import ARMErrorFormat from azure.mgmt.core.polling.arm_polling import ARMPolling from msrest import Serializer from .. 
import models as _models from .._vendor import _convert_request, _format_url_section T = TypeVar('T') JSONType = Any ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] _SERIALIZER = Serializer() _SERIALIZER.client_side_validation = False def build_get_request( subscription_id: str, resource_group_name: str, resource_name: str, **kwargs: Any ) -> HttpRequest: api_version = "2018-01-22" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}') path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'), "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'), "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'), } url = _format_url_section(url, **path_format_arguments) # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs ) def build_create_or_update_request_initial( subscription_id: str, resource_group_name: str, resource_name: str, *, json: JSONType = None, content: Any = None, if_match: Optional[str] = None, **kwargs: Any ) -> HttpRequest: content_type = kwargs.pop('content_type', None) # type: Optional[str] api_version = "2018-01-22" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}') path_format_arguments = { "subscriptionId": 
_SERIALIZER.url("subscription_id", subscription_id, 'str'), "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'), "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'), } url = _format_url_section(url, **path_format_arguments) # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] if if_match is not None: header_parameters['If-Match'] = _SERIALIZER.header("if_match", if_match, 'str') if content_type is not None: header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str') header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="PUT", url=url, params=query_parameters, headers=header_parameters, json=json, content=content, **kwargs ) def build_update_request_initial( subscription_id: str, resource_group_name: str, resource_name: str, *, json: JSONType = None, content: Any = None, **kwargs: Any ) -> HttpRequest: content_type = kwargs.pop('content_type', None) # type: Optional[str] api_version = "2018-01-22" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}') path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'), "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'), "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'), } url = _format_url_section(url, **path_format_arguments) # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = 
kwargs.pop("headers", {}) # type: Dict[str, Any] if content_type is not None: header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str') header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="PATCH", url=url, params=query_parameters, headers=header_parameters, json=json, content=content, **kwargs ) def build_delete_request_initial( subscription_id: str, resource_group_name: str, resource_name: str, **kwargs: Any ) -> HttpRequest: api_version = "2018-01-22" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}') path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'), "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'), "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'), } url = _format_url_section(url, **path_format_arguments) # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="DELETE", url=url, params=query_parameters, headers=header_parameters, **kwargs ) def build_list_by_subscription_request( subscription_id: str, **kwargs: Any ) -> HttpRequest: api_version = "2018-01-22" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Devices/IotHubs') path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'), } url = _format_url_section(url, **path_format_arguments) # Construct parameters query_parameters = 
kwargs.pop("params", {}) # type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs ) def build_list_by_resource_group_request( subscription_id: str, resource_group_name: str, **kwargs: Any ) -> HttpRequest: api_version = "2018-01-22" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs') path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'), "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'), } url = _format_url_section(url, **path_format_arguments) # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs ) def build_get_stats_request( subscription_id: str, resource_group_name: str, resource_name: str, **kwargs: Any ) -> HttpRequest: api_version = "2018-01-22" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/IotHubStats') path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'), "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'), 
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'), } url = _format_url_section(url, **path_format_arguments) # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs ) def build_get_valid_skus_request( subscription_id: str, resource_group_name: str, resource_name: str, **kwargs: Any ) -> HttpRequest: api_version = "2018-01-22" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/skus') path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'), "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'), "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'), } url = _format_url_section(url, **path_format_arguments) # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs ) def build_list_event_hub_consumer_groups_request( subscription_id: str, resource_group_name: str, resource_name: str, event_hub_endpoint_name: str, **kwargs: Any ) -> HttpRequest: api_version = "2018-01-22" accept = "application/json" # Construct URL url = kwargs.pop("template_url", 
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/eventHubEndpoints/{eventHubEndpointName}/ConsumerGroups') path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'), "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'), "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'), "eventHubEndpointName": _SERIALIZER.url("event_hub_endpoint_name", event_hub_endpoint_name, 'str'), } url = _format_url_section(url, **path_format_arguments) # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs ) def build_get_event_hub_consumer_group_request( subscription_id: str, resource_group_name: str, resource_name: str, event_hub_endpoint_name: str, name: str, **kwargs: Any ) -> HttpRequest: api_version = "2018-01-22" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/eventHubEndpoints/{eventHubEndpointName}/ConsumerGroups/{name}') path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'), "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'), "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'), "eventHubEndpointName": _SERIALIZER.url("event_hub_endpoint_name", event_hub_endpoint_name, 'str'), "name": _SERIALIZER.url("name", name, 'str'), } url = _format_url_section(url, **path_format_arguments) # 
Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs ) def build_create_event_hub_consumer_group_request( subscription_id: str, resource_group_name: str, resource_name: str, event_hub_endpoint_name: str, name: str, **kwargs: Any ) -> HttpRequest: api_version = "2018-01-22" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/eventHubEndpoints/{eventHubEndpointName}/ConsumerGroups/{name}') path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'), "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'), "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'), "eventHubEndpointName": _SERIALIZER.url("event_hub_endpoint_name", event_hub_endpoint_name, 'str'), "name": _SERIALIZER.url("name", name, 'str'), } url = _format_url_section(url, **path_format_arguments) # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="PUT", url=url, params=query_parameters, headers=header_parameters, **kwargs ) def build_delete_event_hub_consumer_group_request( subscription_id: str, resource_group_name: str, resource_name: str, event_hub_endpoint_name: str, name: str, 
**kwargs: Any ) -> HttpRequest: api_version = "2018-01-22" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/eventHubEndpoints/{eventHubEndpointName}/ConsumerGroups/{name}') path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'), "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'), "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'), "eventHubEndpointName": _SERIALIZER.url("event_hub_endpoint_name", event_hub_endpoint_name, 'str'), "name": _SERIALIZER.url("name", name, 'str'), } url = _format_url_section(url, **path_format_arguments) # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="DELETE", url=url, params=query_parameters, headers=header_parameters, **kwargs ) def build_list_jobs_request( subscription_id: str, resource_group_name: str, resource_name: str, **kwargs: Any ) -> HttpRequest: api_version = "2018-01-22" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/jobs') path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'), "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'), "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'), } url = _format_url_section(url, **path_format_arguments) # Construct parameters query_parameters = kwargs.pop("params", {}) # type: 
Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs ) def build_get_job_request( subscription_id: str, resource_group_name: str, resource_name: str, job_id: str, **kwargs: Any ) -> HttpRequest: api_version = "2018-01-22" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/jobs/{jobId}') path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'), "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'), "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'), "jobId": _SERIALIZER.url("job_id", job_id, 'str'), } url = _format_url_section(url, **path_format_arguments) # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs ) def build_get_quota_metrics_request( subscription_id: str, resource_group_name: str, resource_name: str, **kwargs: Any ) -> HttpRequest: api_version = "2018-01-22" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/quotaMetrics') path_format_arguments = { "subscriptionId": 
_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )


def build_check_name_availability_request(
    subscription_id: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP POST request that checks IoT hub name availability for a subscription."""
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]

    api_version = "2018-01-22"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Devices/checkNameAvailability')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="POST",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        json=json,
        content=content,
        **kwargs
    )


def build_list_keys_request(
    subscription_id: str,
    resource_group_name: str,
    resource_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP POST request that lists the shared access policies of an IoT hub."""
    api_version = "2018-01-22"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/listkeys')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="POST",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )


def build_get_keys_for_key_name_request(
    subscription_id: str,
    resource_group_name: str,
    resource_name: str,
    key_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP POST request that fetches the keys for a single named access policy."""
    api_version = "2018-01-22"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/IotHubKeys/{keyName}/listkeys')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
        "keyName": _SERIALIZER.url("key_name", key_name, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="POST",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )


def build_export_devices_request(
    subscription_id: str,
    resource_group_name: str,
    resource_name: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP POST request that starts an export of all device identities from an IoT hub."""
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]

    api_version = "2018-01-22"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/exportDevices')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="POST",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        json=json,
        content=content,
        **kwargs
    )


def build_import_devices_request(
    subscription_id: str,
    resource_group_name: str,
    resource_name: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP POST request that starts an import of device identities into an IoT hub."""
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]

    api_version = "2018-01-22"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/importDevices')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="POST",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        json=json,
        content=content,
        **kwargs
    )


class IotHubResourceOperations(object):
    """IotHubResourceOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.iothub.v2018_01_22.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        # Wiring supplied by the generated service client; not for direct use.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @distributed_trace
    def get(
        self,
        resource_group_name: str,
        resource_name: str,
        **kwargs: Any
    ) -> "_models.IotHubDescription":
        """Get the non-security related metadata of an IoT hub.
Get the non-security related metadata of an IoT hub.

        :param resource_group_name: The name of the resource group that contains the IoT hub.
        :type resource_group_name: str
        :param resource_name: The name of the IoT hub.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: IotHubDescription, or the result of cls(response)
        :rtype: ~azure.mgmt.iothub.v2018_01_22.models.IotHubDescription
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.IotHubDescription"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_get_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('IotHubDescription', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}'}  # type: ignore


    def _create_or_update_initial(
        self,
        resource_group_name: str,
        resource_name: str,
        iot_hub_description: "_models.IotHubDescription",
        if_match: Optional[str] = None,
        **kwargs: Any
    ) -> "_models.IotHubDescription":
        # Initial (non-polling) service call for the create-or-update LRO;
        # begin_create_or_update drives the polling on top of this.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.IotHubDescription"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        _json = self._serialize.body(iot_hub_description, 'IotHubDescription')

        request = build_create_or_update_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            content_type=content_type,
            json=_json,
            if_match=if_match,
            template_url=self._create_or_update_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # Both success codes deserialize to the same model.
        if response.status_code == 200:
            deserialized = self._deserialize('IotHubDescription', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('IotHubDescription', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}'}  # type: ignore


    @distributed_trace
    def begin_create_or_update(
        self,
        resource_group_name: str,
        resource_name: str,
        iot_hub_description: "_models.IotHubDescription",
        if_match: Optional[str] = None,
        **kwargs: Any
    ) -> LROPoller["_models.IotHubDescription"]:
        """Create or update the metadata of an IoT hub.

        Create or update the metadata of an Iot hub.
The usual pattern to modify a property is to retrieve the
        IoT hub metadata and security metadata, and then combine them with the modified values in a
        new body to update the IoT hub. If certain properties are missing in the JSON, updating IoT
        Hub may cause these values to fallback to default, which may lead to unexpected behavior.

        :param resource_group_name: The name of the resource group that contains the IoT hub.
        :type resource_group_name: str
        :param resource_name: The name of the IoT hub.
        :type resource_name: str
        :param iot_hub_description: The IoT hub metadata and security metadata.
        :type iot_hub_description: ~azure.mgmt.iothub.v2018_01_22.models.IotHubDescription
        :param if_match: ETag of the IoT Hub. Do not specify for creating a brand new IoT Hub.
         Required to update an existing IoT Hub.
        :type if_match: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if
         no Retry-After header is present.
        :return: An instance of LROPoller that returns either IotHubDescription or the result of
         cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.mgmt.iothub.v2018_01_22.models.IotHubDescription]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.IotHubDescription"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh operation: issue the initial request; cls=lambda keeps the
            # raw pipeline response so the poller can drive the LRO from it.
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                iot_hub_description=iot_hub_description,
                if_match=if_match,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response of the long-running operation.
            response = pipeline_response.http_response
            deserialized = self._deserialize('IotHubDescription', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized


        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}'}  # type: ignore

    def _update_initial(
        self,
        resource_group_name: str,
        resource_name: str,
        iot_hub_tags: "_models.TagsResource",
        **kwargs: Any
    ) -> "_models.IotHubDescription":
        # Initial (non-polling) service call for the tags-update LRO;
        # begin_update drives the polling on top of this.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.IotHubDescription"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        _json = self._serialize.body(iot_hub_tags, 'TagsResource')

        request = build_update_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            content_type=content_type,
            json=_json,
            template_url=self._update_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('IotHubDescription', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    _update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}'}  # type: ignore


    @distributed_trace
    def begin_update(
        self,
        resource_group_name: str,
        resource_name: str,
        iot_hub_tags: "_models.TagsResource",
        **kwargs: Any
    ) -> LROPoller["_models.IotHubDescription"]:
        """Update an existing IoT Hubs tags.

        Update an existing IoT Hub tags. to update other fields use the CreateOrUpdate method.

        :param resource_group_name: Resource group identifier.
        :type resource_group_name: str
        :param resource_name: Name of iot hub to update.
        :type resource_name: str
        :param iot_hub_tags: Updated tag information to set into the iot hub instance.
:type iot_hub_tags: ~azure.mgmt.iothub.v2018_01_22.models.TagsResource
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if
         no Retry-After header is present.
        :return: An instance of LROPoller that returns either IotHubDescription or the result of
         cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.mgmt.iothub.v2018_01_22.models.IotHubDescription]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.IotHubDescription"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh operation: issue the initial request; cls=lambda keeps the
            # raw pipeline response so the poller can drive the LRO from it.
            raw_result = self._update_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                iot_hub_tags=iot_hub_tags,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response of the long-running operation.
            response = pipeline_response.http_response
            deserialized = self._deserialize('IotHubDescription', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized


        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}'}  # type: ignore

    def _delete_initial(
        self,
        resource_group_name: str,
        resource_name: str,
        **kwargs: Any
    ) -> Optional[Union["_models.IotHubDescription", "_models.ErrorDetails"]]:
        # Initial (non-polling) service call for the delete LRO. Note 404 is
        # accepted as a terminal status here and deserialized as ErrorDetails
        # instead of being raised.
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional[Union["_models.IotHubDescription", "_models.ErrorDetails"]]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_delete_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            template_url=self._delete_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202, 204, 404]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('IotHubDescription', pipeline_response)

        if response.status_code == 202:
            deserialized = self._deserialize('IotHubDescription', pipeline_response)

        if response.status_code == 404:
            deserialized = self._deserialize('ErrorDetails', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    _delete_initial.metadata = {'url':
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}'}  # type: ignore


    @distributed_trace
    def begin_delete(
        self,
        resource_group_name: str,
        resource_name: str,
        **kwargs: Any
    ) -> LROPoller[Union["_models.IotHubDescription", "_models.ErrorDetails"]]:
        """Delete an IoT hub.

        Delete an IoT hub.

        :param resource_group_name: The name of the resource group that contains the IoT hub.
        :type resource_group_name: str
        :param resource_name: The name of the IoT hub.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if
         no Retry-After header is present.
        :return: An instance of LROPoller that returns either IotHubDescription or ErrorDetails or
         the result of cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.mgmt.iothub.v2018_01_22.models.IotHubDescription or
         ~azure.mgmt.iothub.v2018_01_22.models.ErrorDetails]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[Union["_models.IotHubDescription", "_models.ErrorDetails"]]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh operation: issue the initial request; cls=lambda keeps the
            # raw pipeline response so the poller can drive the LRO from it.
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response of the long-running operation.
            response = pipeline_response.http_response
            deserialized = self._deserialize('IotHubDescription', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized


        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}'}  # type: ignore

    @distributed_trace
    def list_by_subscription(
        self,
        **kwargs: Any
    ) -> Iterable["_models.IotHubDescriptionListResult"]:
        """Get all the IoT hubs in a subscription.

        Get all the IoT hubs in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either IotHubDescriptionListResult or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.iothub.v2018_01_22.models.IotHubDescriptionListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.IotHubDescriptionListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if not next_link: request = build_list_by_subscription_request( subscription_id=self._config.subscription_id, template_url=self.list_by_subscription.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) else: request = build_list_by_subscription_request( subscription_id=self._config.subscription_id, template_url=next_link, ) request = _convert_request(request) request.url = self._client.format_url(request.url) request.method = "GET" return request def extract_data(pipeline_response): deserialized = self._deserialize("IotHubDescriptionListResult", pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, iter(list_of_elem) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return ItemPaged( get_next, extract_data ) list_by_subscription.metadata = {'url': 
'/subscriptions/{subscriptionId}/providers/Microsoft.Devices/IotHubs'}  # type: ignore

    @distributed_trace
    def list_by_resource_group(
        self,
        resource_group_name: str,
        **kwargs: Any
    ) -> Iterable["_models.IotHubDescriptionListResult"]:
        """Get all the IoT hubs in a resource group.

        Get all the IoT hubs in a resource group.

        :param resource_group_name: The name of the resource group that contains the IoT hub.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either IotHubDescriptionListResult or the result of
         cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.iothub.v2018_01_22.models.IotHubDescriptionListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.IotHubDescriptionListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # Build the first-page request, or a follow-up request from next_link.
            if not next_link:
                request = build_list_by_resource_group_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    template_url=self.list_by_resource_group.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_by_resource_group_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and return (continuation link, item iterator).
            deserialized = self._deserialize("IotHubDescriptionListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch one page, raising mapped errors on non-200 responses.
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response


        return ItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs'}  # type: ignore

    @distributed_trace
    def get_stats(
        self,
        resource_group_name: str,
        resource_name: str,
        **kwargs: Any
    ) -> "_models.RegistryStatistics":
        """Get the statistics from an IoT hub.

        Get the statistics from an IoT hub.

        :param resource_group_name: The name of the resource group that contains the IoT hub.
        :type resource_group_name: str
        :param resource_name: The name of the IoT hub.
:type resource_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: RegistryStatistics, or the result of cls(response) :rtype: ~azure.mgmt.iothub.v2018_01_22.models.RegistryStatistics :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.RegistryStatistics"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_get_stats_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, resource_name=resource_name, template_url=self.get_stats.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('RegistryStatistics', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get_stats.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/IotHubStats'} # type: ignore @distributed_trace def get_valid_skus( self, resource_group_name: str, resource_name: str, **kwargs: Any ) -> Iterable["_models.IotHubSkuDescriptionListResult"]: """Get the list of valid SKUs for an IoT hub. Get the list of valid SKUs for an IoT hub. :param resource_group_name: The name of the resource group that contains the IoT hub. :type resource_group_name: str :param resource_name: The name of the IoT hub. 
:type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either IotHubSkuDescriptionListResult or the result of
         cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.iothub.v2018_01_22.models.IotHubSkuDescriptionListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.IotHubSkuDescriptionListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # Build the first-page request, or a follow-up request from next_link.
            if not next_link:
                request = build_get_valid_skus_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    resource_name=resource_name,
                    template_url=self.get_valid_skus.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_get_valid_skus_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    resource_name=resource_name,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and return (continuation link, item iterator).
            deserialized = self._deserialize("IotHubSkuDescriptionListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch one page, raising mapped errors on non-200 responses.
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response


        return ItemPaged(
            get_next, extract_data
        )
    get_valid_skus.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/skus'}  # type: ignore

    @distributed_trace
    def list_event_hub_consumer_groups(
        self,
        resource_group_name: str,
        resource_name: str,
        event_hub_endpoint_name: str,
        **kwargs: Any
    ) -> Iterable["_models.EventHubConsumerGroupsListResult"]:
        """Get a list of the consumer groups in the Event Hub-compatible device-to-cloud endpoint in an
        IoT hub.

        Get a list of the consumer groups in the Event Hub-compatible device-to-cloud endpoint in an
        IoT hub.

        :param resource_group_name: The name of the resource group that contains the IoT hub.
        :type resource_group_name: str
        :param resource_name: The name of the IoT hub.
        :type resource_name: str
        :param event_hub_endpoint_name: The name of the Event Hub-compatible endpoint.
        :type event_hub_endpoint_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either EventHubConsumerGroupsListResult or the result
         of cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.iothub.v2018_01_22.models.EventHubConsumerGroupsListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.EventHubConsumerGroupsListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # Build the first-page request, or a follow-up request from next_link.
            if not next_link:
                request = build_list_event_hub_consumer_groups_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    resource_name=resource_name,
                    event_hub_endpoint_name=event_hub_endpoint_name,
                    template_url=self.list_event_hub_consumer_groups.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_event_hub_consumer_groups_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    resource_name=resource_name,
                    event_hub_endpoint_name=event_hub_endpoint_name,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and return (continuation link, item iterator).
            deserialized = self._deserialize("EventHubConsumerGroupsListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch one page, raising mapped errors on non-200 responses.
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response


        return ItemPaged(
            get_next, extract_data
        )
    list_event_hub_consumer_groups.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/eventHubEndpoints/{eventHubEndpointName}/ConsumerGroups'}  # type: ignore

    @distributed_trace
    def get_event_hub_consumer_group(
        self,
        resource_group_name: str,
        resource_name: str,
        event_hub_endpoint_name: str,
        name: str,
        **kwargs: Any
    ) -> "_models.EventHubConsumerGroupInfo":
        """Get a consumer group from the Event Hub-compatible device-to-cloud endpoint for an IoT hub.

        Get a consumer group from the Event Hub-compatible device-to-cloud endpoint for an IoT hub.

        :param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str :param resource_name: The name of the IoT hub. :type resource_name: str :param event_hub_endpoint_name: The name of the Event Hub-compatible endpoint in the IoT hub. :type event_hub_endpoint_name: str :param name: The name of the consumer group to retrieve. :type name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: EventHubConsumerGroupInfo, or the result of cls(response) :rtype: ~azure.mgmt.iothub.v2018_01_22.models.EventHubConsumerGroupInfo :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.EventHubConsumerGroupInfo"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_get_event_hub_consumer_group_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, resource_name=resource_name, event_hub_endpoint_name=event_hub_endpoint_name, name=name, template_url=self.get_event_hub_consumer_group.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('EventHubConsumerGroupInfo', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get_event_hub_consumer_group.metadata = {'url': 
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/eventHubEndpoints/{eventHubEndpointName}/ConsumerGroups/{name}'} # type: ignore @distributed_trace def create_event_hub_consumer_group( self, resource_group_name: str, resource_name: str, event_hub_endpoint_name: str, name: str, **kwargs: Any ) -> "_models.EventHubConsumerGroupInfo": """Add a consumer group to an Event Hub-compatible endpoint in an IoT hub. Add a consumer group to an Event Hub-compatible endpoint in an IoT hub. :param resource_group_name: The name of the resource group that contains the IoT hub. :type resource_group_name: str :param resource_name: The name of the IoT hub. :type resource_name: str :param event_hub_endpoint_name: The name of the Event Hub-compatible endpoint in the IoT hub. :type event_hub_endpoint_name: str :param name: The name of the consumer group to add. :type name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: EventHubConsumerGroupInfo, or the result of cls(response) :rtype: ~azure.mgmt.iothub.v2018_01_22.models.EventHubConsumerGroupInfo :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.EventHubConsumerGroupInfo"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_create_event_hub_consumer_group_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, resource_name=resource_name, event_hub_endpoint_name=event_hub_endpoint_name, name=name, template_url=self.create_event_hub_consumer_group.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: 
map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('EventHubConsumerGroupInfo', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized create_event_hub_consumer_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/eventHubEndpoints/{eventHubEndpointName}/ConsumerGroups/{name}'} # type: ignore @distributed_trace def delete_event_hub_consumer_group( self, resource_group_name: str, resource_name: str, event_hub_endpoint_name: str, name: str, **kwargs: Any ) -> None: """Delete a consumer group from an Event Hub-compatible endpoint in an IoT hub. Delete a consumer group from an Event Hub-compatible endpoint in an IoT hub. :param resource_group_name: The name of the resource group that contains the IoT hub. :type resource_group_name: str :param resource_name: The name of the IoT hub. :type resource_name: str :param event_hub_endpoint_name: The name of the Event Hub-compatible endpoint in the IoT hub. :type event_hub_endpoint_name: str :param name: The name of the consumer group to delete. 
:type name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: None, or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_delete_event_hub_consumer_group_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, resource_name=resource_name, event_hub_endpoint_name=event_hub_endpoint_name, name=name, template_url=self.delete_event_hub_consumer_group.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) delete_event_hub_consumer_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/eventHubEndpoints/{eventHubEndpointName}/ConsumerGroups/{name}'} # type: ignore @distributed_trace def list_jobs( self, resource_group_name: str, resource_name: str, **kwargs: Any ) -> Iterable["_models.JobResponseListResult"]: """Get a list of all the jobs in an IoT hub. For more information, see: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry. Get a list of all the jobs in an IoT hub. For more information, see: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry. 
:param resource_group_name: The name of the resource group that contains the IoT hub. :type resource_group_name: str :param resource_name: The name of the IoT hub. :type resource_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either JobResponseListResult or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.iothub.v2018_01_22.models.JobResponseListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.JobResponseListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if not next_link: request = build_list_jobs_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, resource_name=resource_name, template_url=self.list_jobs.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) else: request = build_list_jobs_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, resource_name=resource_name, template_url=next_link, ) request = _convert_request(request) request.url = self._client.format_url(request.url) request.method = "GET" return request def extract_data(pipeline_response): deserialized = self._deserialize("JobResponseListResult", pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, iter(list_of_elem) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = 
self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return ItemPaged( get_next, extract_data ) list_jobs.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/jobs'} # type: ignore @distributed_trace def get_job( self, resource_group_name: str, resource_name: str, job_id: str, **kwargs: Any ) -> "_models.JobResponse": """Get the details of a job from an IoT hub. For more information, see: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry. Get the details of a job from an IoT hub. For more information, see: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry. :param resource_group_name: The name of the resource group that contains the IoT hub. :type resource_group_name: str :param resource_name: The name of the IoT hub. :type resource_name: str :param job_id: The job identifier. 
:type job_id: str :keyword callable cls: A custom type or function that will be passed the direct response :return: JobResponse, or the result of cls(response) :rtype: ~azure.mgmt.iothub.v2018_01_22.models.JobResponse :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.JobResponse"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_get_job_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, resource_name=resource_name, job_id=job_id, template_url=self.get_job.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('JobResponse', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get_job.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/jobs/{jobId}'} # type: ignore @distributed_trace def get_quota_metrics( self, resource_group_name: str, resource_name: str, **kwargs: Any ) -> Iterable["_models.IotHubQuotaMetricInfoListResult"]: """Get the quota metrics for an IoT hub. Get the quota metrics for an IoT hub. :param resource_group_name: The name of the resource group that contains the IoT hub. :type resource_group_name: str :param resource_name: The name of the IoT hub. 
:type resource_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either IotHubQuotaMetricInfoListResult or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.iothub.v2018_01_22.models.IotHubQuotaMetricInfoListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.IotHubQuotaMetricInfoListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if not next_link: request = build_get_quota_metrics_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, resource_name=resource_name, template_url=self.get_quota_metrics.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) else: request = build_get_quota_metrics_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, resource_name=resource_name, template_url=next_link, ) request = _convert_request(request) request.url = self._client.format_url(request.url) request.method = "GET" return request def extract_data(pipeline_response): deserialized = self._deserialize("IotHubQuotaMetricInfoListResult", pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, iter(list_of_elem) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response) raise HttpResponseError(response=response, 
model=error, error_format=ARMErrorFormat) return pipeline_response return ItemPaged( get_next, extract_data ) get_quota_metrics.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/quotaMetrics'} # type: ignore @distributed_trace def check_name_availability( self, operation_inputs: "_models.OperationInputs", **kwargs: Any ) -> "_models.IotHubNameAvailabilityInfo": """Check if an IoT hub name is available. Check if an IoT hub name is available. :param operation_inputs: Set the name parameter in the OperationInputs structure to the name of the IoT hub to check. :type operation_inputs: ~azure.mgmt.iothub.v2018_01_22.models.OperationInputs :keyword callable cls: A custom type or function that will be passed the direct response :return: IotHubNameAvailabilityInfo, or the result of cls(response) :rtype: ~azure.mgmt.iothub.v2018_01_22.models.IotHubNameAvailabilityInfo :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.IotHubNameAvailabilityInfo"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) content_type = kwargs.pop('content_type', "application/json") # type: Optional[str] _json = self._serialize.body(operation_inputs, 'OperationInputs') request = build_check_name_availability_request( subscription_id=self._config.subscription_id, content_type=content_type, json=_json, template_url=self.check_name_availability.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, 
pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('IotHubNameAvailabilityInfo', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized check_name_availability.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Devices/checkNameAvailability'} # type: ignore @distributed_trace def list_keys( self, resource_group_name: str, resource_name: str, **kwargs: Any ) -> Iterable["_models.SharedAccessSignatureAuthorizationRuleListResult"]: """Get the security metadata for an IoT hub. For more information, see: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-security. Get the security metadata for an IoT hub. For more information, see: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-security. :param resource_group_name: The name of the resource group that contains the IoT hub. :type resource_group_name: str :param resource_name: The name of the IoT hub. 
:type resource_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either SharedAccessSignatureAuthorizationRuleListResult or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.iothub.v2018_01_22.models.SharedAccessSignatureAuthorizationRuleListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.SharedAccessSignatureAuthorizationRuleListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if not next_link: request = build_list_keys_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, resource_name=resource_name, template_url=self.list_keys.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) else: request = build_list_keys_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, resource_name=resource_name, template_url=next_link, ) request = _convert_request(request) request.url = self._client.format_url(request.url) request.method = "GET" return request def extract_data(pipeline_response): deserialized = self._deserialize("SharedAccessSignatureAuthorizationRuleListResult", pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, iter(list_of_elem) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response) raise 
HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return ItemPaged( get_next, extract_data ) list_keys.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/listkeys'} # type: ignore @distributed_trace def get_keys_for_key_name( self, resource_group_name: str, resource_name: str, key_name: str, **kwargs: Any ) -> "_models.SharedAccessSignatureAuthorizationRule": """Get a shared access policy by name from an IoT hub. For more information, see: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-security. Get a shared access policy by name from an IoT hub. For more information, see: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-security. :param resource_group_name: The name of the resource group that contains the IoT hub. :type resource_group_name: str :param resource_name: The name of the IoT hub. :type resource_name: str :param key_name: The name of the shared access policy. 
:type key_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: SharedAccessSignatureAuthorizationRule, or the result of cls(response) :rtype: ~azure.mgmt.iothub.v2018_01_22.models.SharedAccessSignatureAuthorizationRule :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.SharedAccessSignatureAuthorizationRule"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_get_keys_for_key_name_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, resource_name=resource_name, key_name=key_name, template_url=self.get_keys_for_key_name.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('SharedAccessSignatureAuthorizationRule', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get_keys_for_key_name.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/IotHubKeys/{keyName}/listkeys'} # type: ignore @distributed_trace def export_devices( self, resource_group_name: str, resource_name: str, export_devices_parameters: "_models.ExportDevicesRequest", **kwargs: Any ) -> "_models.JobResponse": """Exports all the device identities in the IoT hub identity registry to an Azure Storage blob container. 
For more information, see: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry#import-and-export-device-identities. Exports all the device identities in the IoT hub identity registry to an Azure Storage blob container. For more information, see: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry#import-and-export-device-identities. :param resource_group_name: The name of the resource group that contains the IoT hub. :type resource_group_name: str :param resource_name: The name of the IoT hub. :type resource_name: str :param export_devices_parameters: The parameters that specify the export devices operation. :type export_devices_parameters: ~azure.mgmt.iothub.v2018_01_22.models.ExportDevicesRequest :keyword callable cls: A custom type or function that will be passed the direct response :return: JobResponse, or the result of cls(response) :rtype: ~azure.mgmt.iothub.v2018_01_22.models.JobResponse :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.JobResponse"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) content_type = kwargs.pop('content_type', "application/json") # type: Optional[str] _json = self._serialize.body(export_devices_parameters, 'ExportDevicesRequest') request = build_export_devices_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, resource_name=resource_name, content_type=content_type, json=_json, template_url=self.export_devices.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = 
self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('JobResponse', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized export_devices.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/exportDevices'} # type: ignore @distributed_trace def import_devices( self, resource_group_name: str, resource_name: str, import_devices_parameters: "_models.ImportDevicesRequest", **kwargs: Any ) -> "_models.JobResponse": """Import, update, or delete device identities in the IoT hub identity registry from a blob. For more information, see: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry#import-and-export-device-identities. Import, update, or delete device identities in the IoT hub identity registry from a blob. For more information, see: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry#import-and-export-device-identities. :param resource_group_name: The name of the resource group that contains the IoT hub. :type resource_group_name: str :param resource_name: The name of the IoT hub. :type resource_name: str :param import_devices_parameters: The parameters that specify the import devices operation. 
:type import_devices_parameters: ~azure.mgmt.iothub.v2018_01_22.models.ImportDevicesRequest :keyword callable cls: A custom type or function that will be passed the direct response :return: JobResponse, or the result of cls(response) :rtype: ~azure.mgmt.iothub.v2018_01_22.models.JobResponse :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.JobResponse"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) content_type = kwargs.pop('content_type', "application/json") # type: Optional[str] _json = self._serialize.body(import_devices_parameters, 'ImportDevicesRequest') request = build_import_devices_request( subscription_id=self._config.subscription_id, resource_group_name=resource_group_name, resource_name=resource_name, content_type=content_type, json=_json, template_url=self.import_devices.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('JobResponse', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized import_devices.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/importDevices'} # type: ignore
codeparrot/github-code-clean
# Use of this source code is governed by the MIT license. __license__ = "MIT" try: from collections.abc import Callable # Python 3.6 except ImportError as e: from collections import Callable import re import sys import warnings try: import thlib.side.soupsieve as soupsieve except ImportError as e: soupsieve = None warnings.warn( 'The soupsieve package is not installed. CSS selectors cannot be used.' ) from bs4.formatter import ( Formatter, HTMLFormatter, XMLFormatter, ) DEFAULT_OUTPUT_ENCODING = "utf-8" PY3K = (sys.version_info[0] > 2) nonwhitespace_re = re.compile(r"\S+") # NOTE: This isn't used as of 4.7.0. I'm leaving it for a little bit on # the off chance someone imported it for their own use. whitespace_re = re.compile(r"\s+") def _alias(attr): """Alias one attribute name to another for backward compatibility""" @property def alias(self): return getattr(self, attr) @alias.setter def alias(self): return setattr(self, attr) return alias # These encodings are recognized by Python (so PageElement.encode # could theoretically support them) but XML and HTML don't recognize # them (so they should not show up in an XML or HTML document as that # document's encoding). # # If an XML document is encoded in one of these encodings, no encoding # will be mentioned in the XML declaration. If an HTML document is # encoded in one of these encodings, and the HTML document has a # <meta> tag that mentions an encoding, the encoding will be given as # the empty string. # # Source: # https://docs.python.org/3/library/codecs.html#python-specific-encodings PYTHON_SPECIFIC_ENCODINGS = set([ "idna", "mbcs", "oem", "palmos", "punycode", "raw_unicode_escape", "undefined", "unicode_escape", "raw-unicode-escape", "unicode-escape", "string-escape", "string_escape", ]) class NamespacedAttribute(str): """A namespaced string (e.g. 'xml:lang') that remembers the namespace ('xml') and the name ('lang') that were used to create it. 
""" def __new__(cls, prefix, name=None, namespace=None): if not name: # This is the default namespace. Its name "has no value" # per https://www.w3.org/TR/xml-names/#defaulting name = None if name is None: obj = str.__new__(cls, prefix) elif prefix is None: # Not really namespaced. obj = str.__new__(cls, name) else: obj = str.__new__(cls, prefix + ":" + name) obj.prefix = prefix obj.name = name obj.namespace = namespace return obj class AttributeValueWithCharsetSubstitution(str): """A stand-in object for a character encoding specified in HTML.""" class CharsetMetaAttributeValue(AttributeValueWithCharsetSubstitution): """A generic stand-in for the value of a meta tag's 'charset' attribute. When Beautiful Soup parses the markup '<meta charset="utf8">', the value of the 'charset' attribute will be one of these objects. """ def __new__(cls, original_value): obj = str.__new__(cls, original_value) obj.original_value = original_value return obj def encode(self, encoding): """When an HTML document is being encoded to a given encoding, the value of a meta tag's 'charset' is the name of the encoding. """ if encoding in PYTHON_SPECIFIC_ENCODINGS: return '' return encoding class ContentMetaAttributeValue(AttributeValueWithCharsetSubstitution): """A generic stand-in for the value of a meta tag's 'content' attribute. When Beautiful Soup parses the markup: <meta http-equiv="content-type" content="text/html; charset=utf8"> The value of the 'content' attribute will be one of these objects. """ CHARSET_RE = re.compile(r"((^|;)\s*charset=)([^;]*)", re.M) def __new__(cls, original_value): match = cls.CHARSET_RE.search(original_value) if match is None: # No substitution necessary. 
return str.__new__(str, original_value) obj = str.__new__(cls, original_value) obj.original_value = original_value return obj def encode(self, encoding): if encoding in PYTHON_SPECIFIC_ENCODINGS: return '' def rewrite(match): return match.group(1) + encoding return self.CHARSET_RE.sub(rewrite, self.original_value) class PageElement(object): """Contains the navigational information for some part of the page: that is, its current location in the parse tree. NavigableString, Tag, etc. are all subclasses of PageElement. """ def setup(self, parent=None, previous_element=None, next_element=None, previous_sibling=None, next_sibling=None): """Sets up the initial relations between this element and other elements. :param parent: The parent of this element. :param previous_element: The element parsed immediately before this one. :param next_element: The element parsed immediately before this one. :param previous_sibling: The most recently encountered element on the same level of the parse tree as this one. :param previous_sibling: The next element to be encountered on the same level of the parse tree as this one. """ self.parent = parent self.previous_element = previous_element if previous_element is not None: self.previous_element.next_element = self self.next_element = next_element if self.next_element is not None: self.next_element.previous_element = self self.next_sibling = next_sibling if self.next_sibling is not None: self.next_sibling.previous_sibling = self if (previous_sibling is None and self.parent is not None and self.parent.contents): previous_sibling = self.parent.contents[-1] self.previous_sibling = previous_sibling if previous_sibling is not None: self.previous_sibling.next_sibling = self def format_string(self, s, formatter): """Format the given string using the given formatter. :param s: A string. :param formatter: A Formatter object, or a string naming one of the standard formatters. 
""" if formatter is None: return s if not isinstance(formatter, Formatter): formatter = self.formatter_for_name(formatter) output = formatter.substitute(s) return output def formatter_for_name(self, formatter): """Look up or create a Formatter for the given identifier, if necessary. :param formatter: Can be a Formatter object (used as-is), a function (used as the entity substitution hook for an XMLFormatter or HTMLFormatter), or a string (used to look up an XMLFormatter or HTMLFormatter in the appropriate registry. """ if isinstance(formatter, Formatter): return formatter if self._is_xml: c = XMLFormatter else: c = HTMLFormatter if isinstance(formatter, Callable): return c(entity_substitution=formatter) return c.REGISTRY[formatter] @property def _is_xml(self): """Is this element part of an XML tree or an HTML tree? This is used in formatter_for_name, when deciding whether an XMLFormatter or HTMLFormatter is more appropriate. It can be inefficient, but it should be called very rarely. """ if self.known_xml is not None: # Most of the time we will have determined this when the # document is parsed. return self.known_xml # Otherwise, it's likely that this element was created by # direct invocation of the constructor from within the user's # Python code. if self.parent is None: # This is the top-level object. It should have .known_xml set # from tree creation. If not, take a guess--BS is usually # used on HTML markup. return getattr(self, 'is_xml', False) return self.parent._is_xml nextSibling = _alias("next_sibling") # BS3 previousSibling = _alias("previous_sibling") # BS3 def replace_with(self, replace_with): """Replace this PageElement with another one, keeping the rest of the tree the same. :param replace_with: A PageElement. :return: `self`, no longer part of the tree. 
""" if self.parent is None: raise ValueError( "Cannot replace one element with another when the " "element to be replaced is not part of a tree.") if replace_with is self: return if replace_with is self.parent: raise ValueError("Cannot replace a Tag with its parent.") old_parent = self.parent my_index = self.parent.index(self) self.extract(_self_index=my_index) old_parent.insert(my_index, replace_with) return self replaceWith = replace_with # BS3 def unwrap(self): """Replace this PageElement with its contents. :return: `self`, no longer part of the tree. """ my_parent = self.parent if self.parent is None: raise ValueError( "Cannot replace an element with its contents when that" "element is not part of a tree.") my_index = self.parent.index(self) self.extract(_self_index=my_index) for child in reversed(self.contents[:]): my_parent.insert(my_index, child) return self replace_with_children = unwrap replaceWithChildren = unwrap # BS3 def wrap(self, wrap_inside): """Wrap this PageElement inside another one. :param wrap_inside: A PageElement. :return: `wrap_inside`, occupying the position in the tree that used to be occupied by `self`, and with `self` inside it. """ me = self.replace_with(wrap_inside) wrap_inside.append(me) return wrap_inside def extract(self, _self_index=None): """Destructively rips this element out of the tree. :param _self_index: The location of this element in its parent's .contents, if known. Passing this in allows for a performance optimization. :return: `self`, no longer part of the tree. """ if self.parent is not None: if _self_index is None: _self_index = self.parent.index(self) del self.parent.contents[_self_index] #Find the two elements that would be next to each other if #this element (and any children) hadn't been parsed. Connect #the two. 
last_child = self._last_descendant() next_element = last_child.next_element if (self.previous_element is not None and self.previous_element is not next_element): self.previous_element.next_element = next_element if next_element is not None and next_element is not self.previous_element: next_element.previous_element = self.previous_element self.previous_element = None last_child.next_element = None self.parent = None if (self.previous_sibling is not None and self.previous_sibling is not self.next_sibling): self.previous_sibling.next_sibling = self.next_sibling if (self.next_sibling is not None and self.next_sibling is not self.previous_sibling): self.next_sibling.previous_sibling = self.previous_sibling self.previous_sibling = self.next_sibling = None return self def _last_descendant(self, is_initialized=True, accept_self=True): """Finds the last element beneath this object to be parsed. :param is_initialized: Has `setup` been called on this PageElement yet? :param accept_self: Is `self` an acceptable answer to the question? """ if is_initialized and self.next_sibling is not None: last_child = self.next_sibling.previous_element else: last_child = self while isinstance(last_child, Tag) and last_child.contents: last_child = last_child.contents[-1] if not accept_self and last_child is self: last_child = None return last_child # BS3: Not part of the API! _lastRecursiveChild = _last_descendant def insert(self, position, new_child): """Insert a new PageElement in the list of this PageElement's children. This works the same way as `list.insert`. :param position: The numeric position that should be occupied in `self.children` by the new PageElement. :param new_child: A PageElement. 
""" if new_child is None: raise ValueError("Cannot insert None into a tag.") if new_child is self: raise ValueError("Cannot insert a tag into itself.") if (isinstance(new_child, str) and not isinstance(new_child, NavigableString)): new_child = NavigableString(new_child) from bs4 import BeautifulSoup if isinstance(new_child, BeautifulSoup): # We don't want to end up with a situation where one BeautifulSoup # object contains another. Insert the children one at a time. for subchild in list(new_child.contents): self.insert(position, subchild) position += 1 return position = min(position, len(self.contents)) if hasattr(new_child, 'parent') and new_child.parent is not None: # We're 'inserting' an element that's already one # of this object's children. if new_child.parent is self: current_index = self.index(new_child) if current_index < position: # We're moving this element further down the list # of this object's children. That means that when # we extract this element, our target index will # jump down one. position -= 1 new_child.extract() new_child.parent = self previous_child = None if position == 0: new_child.previous_sibling = None new_child.previous_element = self else: previous_child = self.contents[position - 1] new_child.previous_sibling = previous_child new_child.previous_sibling.next_sibling = new_child new_child.previous_element = previous_child._last_descendant(False) if new_child.previous_element is not None: new_child.previous_element.next_element = new_child new_childs_last_element = new_child._last_descendant(False) if position >= len(self.contents): new_child.next_sibling = None parent = self parents_next_sibling = None while parents_next_sibling is None and parent is not None: parents_next_sibling = parent.next_sibling parent = parent.parent if parents_next_sibling is not None: # We found the element that comes next in the document. 
break if parents_next_sibling is not None: new_childs_last_element.next_element = parents_next_sibling else: # The last element of this tag is the last element in # the document. new_childs_last_element.next_element = None else: next_child = self.contents[position] new_child.next_sibling = next_child if new_child.next_sibling is not None: new_child.next_sibling.previous_sibling = new_child new_childs_last_element.next_element = next_child if new_childs_last_element.next_element is not None: new_childs_last_element.next_element.previous_element = new_childs_last_element self.contents.insert(position, new_child) def append(self, tag): """Appends the given PageElement to the contents of this one. :param tag: A PageElement. """ self.insert(len(self.contents), tag) def extend(self, tags): """Appends the given PageElements to this one's contents. :param tags: A list of PageElements. """ for tag in tags: self.append(tag) def insert_before(self, *args): """Makes the given element(s) the immediate predecessor of this one. All the elements will have the same parent, and the given elements will be immediately before this one. :param args: One or more PageElements. """ parent = self.parent if parent is None: raise ValueError( "Element has no parent, so 'before' has no meaning.") if any(x is self for x in args): raise ValueError("Can't insert an element before itself.") for predecessor in args: # Extract first so that the index won't be screwed up if they # are siblings. if isinstance(predecessor, PageElement): predecessor.extract() index = parent.index(self) parent.insert(index, predecessor) def insert_after(self, *args): """Makes the given element(s) the immediate successor of this one. The elements will have the same parent, and the given elements will be immediately after this one. :param args: One or more PageElements. """ # Do all error checking before modifying the tree. 
parent = self.parent if parent is None: raise ValueError( "Element has no parent, so 'after' has no meaning.") if any(x is self for x in args): raise ValueError("Can't insert an element after itself.") offset = 0 for successor in args: # Extract first so that the index won't be screwed up if they # are siblings. if isinstance(successor, PageElement): successor.extract() index = parent.index(self) parent.insert(index+1+offset, successor) offset += 1 def find_next(self, name=None, attrs={}, text=None, **kwargs): """Find the first PageElement that matches the given criteria and appears later in the document than this PageElement. All find_* methods take a common set of arguments. See the online documentation for detailed explanations. :param name: A filter on tag name. :param attrs: A dictionary of filters on attribute values. :param text: A filter for a NavigableString with specific text. :kwargs: A dictionary of filters on attribute values. :return: A PageElement. :rtype: bs4.element.Tag | bs4.element.NavigableString """ return self._find_one(self.find_all_next, name, attrs, text, **kwargs) findNext = find_next # BS3 def find_all_next(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Find all PageElements that match the given criteria and appear later in the document than this PageElement. All find_* methods take a common set of arguments. See the online documentation for detailed explanations. :param name: A filter on tag name. :param attrs: A dictionary of filters on attribute values. :param text: A filter for a NavigableString with specific text. :param limit: Stop looking after finding this many results. :kwargs: A dictionary of filters on attribute values. :return: A ResultSet containing PageElements. 
""" return self._find_all(name, attrs, text, limit, self.next_elements, **kwargs) findAllNext = find_all_next # BS3 def find_next_sibling(self, name=None, attrs={}, text=None, **kwargs): """Find the closest sibling to this PageElement that matches the given criteria and appears later in the document. All find_* methods take a common set of arguments. See the online documentation for detailed explanations. :param name: A filter on tag name. :param attrs: A dictionary of filters on attribute values. :param text: A filter for a NavigableString with specific text. :kwargs: A dictionary of filters on attribute values. :return: A PageElement. :rtype: bs4.element.Tag | bs4.element.NavigableString """ return self._find_one(self.find_next_siblings, name, attrs, text, **kwargs) findNextSibling = find_next_sibling # BS3 def find_next_siblings(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Find all siblings of this PageElement that match the given criteria and appear later in the document. All find_* methods take a common set of arguments. See the online documentation for detailed explanations. :param name: A filter on tag name. :param attrs: A dictionary of filters on attribute values. :param text: A filter for a NavigableString with specific text. :param limit: Stop looking after finding this many results. :kwargs: A dictionary of filters on attribute values. :return: A ResultSet of PageElements. :rtype: bs4.element.ResultSet """ return self._find_all(name, attrs, text, limit, self.next_siblings, **kwargs) findNextSiblings = find_next_siblings # BS3 fetchNextSiblings = find_next_siblings # BS2 def find_previous(self, name=None, attrs={}, text=None, **kwargs): """Look backwards in the document from this PageElement and find the first PageElement that matches the given criteria. All find_* methods take a common set of arguments. See the online documentation for detailed explanations. :param name: A filter on tag name. 
:param attrs: A dictionary of filters on attribute values. :param text: A filter for a NavigableString with specific text. :kwargs: A dictionary of filters on attribute values. :return: A PageElement. :rtype: bs4.element.Tag | bs4.element.NavigableString """ return self._find_one( self.find_all_previous, name, attrs, text, **kwargs) findPrevious = find_previous # BS3 def find_all_previous(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Look backwards in the document from this PageElement and find all PageElements that match the given criteria. All find_* methods take a common set of arguments. See the online documentation for detailed explanations. :param name: A filter on tag name. :param attrs: A dictionary of filters on attribute values. :param text: A filter for a NavigableString with specific text. :param limit: Stop looking after finding this many results. :kwargs: A dictionary of filters on attribute values. :return: A ResultSet of PageElements. :rtype: bs4.element.ResultSet """ return self._find_all(name, attrs, text, limit, self.previous_elements, **kwargs) findAllPrevious = find_all_previous # BS3 fetchPrevious = find_all_previous # BS2 def find_previous_sibling(self, name=None, attrs={}, text=None, **kwargs): """Returns the closest sibling to this PageElement that matches the given criteria and appears earlier in the document. All find_* methods take a common set of arguments. See the online documentation for detailed explanations. :param name: A filter on tag name. :param attrs: A dictionary of filters on attribute values. :param text: A filter for a NavigableString with specific text. :kwargs: A dictionary of filters on attribute values. :return: A PageElement. 
:rtype: bs4.element.Tag | bs4.element.NavigableString """ return self._find_one(self.find_previous_siblings, name, attrs, text, **kwargs) findPreviousSibling = find_previous_sibling # BS3 def find_previous_siblings(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns all siblings to this PageElement that match the given criteria and appear earlier in the document. All find_* methods take a common set of arguments. See the online documentation for detailed explanations. :param name: A filter on tag name. :param attrs: A dictionary of filters on attribute values. :param text: A filter for a NavigableString with specific text. :param limit: Stop looking after finding this many results. :kwargs: A dictionary of filters on attribute values. :return: A ResultSet of PageElements. :rtype: bs4.element.ResultSet """ return self._find_all(name, attrs, text, limit, self.previous_siblings, **kwargs) findPreviousSiblings = find_previous_siblings # BS3 fetchPreviousSiblings = find_previous_siblings # BS2 def find_parent(self, name=None, attrs={}, **kwargs): """Find the closest parent of this PageElement that matches the given criteria. All find_* methods take a common set of arguments. See the online documentation for detailed explanations. :param name: A filter on tag name. :param attrs: A dictionary of filters on attribute values. :kwargs: A dictionary of filters on attribute values. :return: A PageElement. :rtype: bs4.element.Tag | bs4.element.NavigableString """ # NOTE: We can't use _find_one because findParents takes a different # set of arguments. r = None l = self.find_parents(name, attrs, 1, **kwargs) if l: r = l[0] return r findParent = find_parent # BS3 def find_parents(self, name=None, attrs={}, limit=None, **kwargs): """Find all parents of this PageElement that match the given criteria. All find_* methods take a common set of arguments. See the online documentation for detailed explanations. :param name: A filter on tag name. 
:param attrs: A dictionary of filters on attribute values. :param limit: Stop looking after finding this many results. :kwargs: A dictionary of filters on attribute values. :return: A PageElement. :rtype: bs4.element.Tag | bs4.element.NavigableString """ return self._find_all(name, attrs, None, limit, self.parents, **kwargs) findParents = find_parents # BS3 fetchParents = find_parents # BS2 @property def next(self): """The PageElement, if any, that was parsed just after this one. :return: A PageElement. :rtype: bs4.element.Tag | bs4.element.NavigableString """ return self.next_element @property def previous(self): """The PageElement, if any, that was parsed just before this one. :return: A PageElement. :rtype: bs4.element.Tag | bs4.element.NavigableString """ return self.previous_element #These methods do the real heavy lifting. def _find_one(self, method, name, attrs, text, **kwargs): r = None l = method(name, attrs, text, 1, **kwargs) if l: r = l[0] return r def _find_all(self, name, attrs, text, limit, generator, **kwargs): "Iterates over a generator looking for things that match." if text is None and 'string' in kwargs: text = kwargs['string'] del kwargs['string'] if isinstance(name, SoupStrainer): strainer = name else: strainer = SoupStrainer(name, attrs, text, **kwargs) if text is None and not limit and not attrs and not kwargs: if name is True or name is None: # Optimization to find all tags. result = (element for element in generator if isinstance(element, Tag)) return ResultSet(strainer, result) elif isinstance(name, str): # Optimization to find all tags with a given name. if name.count(':') == 1: # This is a name with a prefix. If this is a namespace-aware document, # we need to match the local name against tag.name. If not, # we need to match the fully-qualified name against tag.name. 
prefix, local_name = name.split(':', 1) else: prefix = None local_name = name result = (element for element in generator if isinstance(element, Tag) and ( element.name == name ) or ( element.name == local_name and (prefix is None or element.prefix == prefix) ) ) return ResultSet(strainer, result) results = ResultSet(strainer) while True: try: i = next(generator) except StopIteration: break if i: found = strainer.search(i) if found: results.append(found) if limit and len(results) >= limit: break return results #These generators can be used to navigate starting from both #NavigableStrings and Tags. @property def next_elements(self): """All PageElements that were parsed after this one. :yield: A sequence of PageElements. """ i = self.next_element while i is not None: yield i i = i.next_element @property def next_siblings(self): """All PageElements that are siblings of this one but were parsed later. :yield: A sequence of PageElements. """ i = self.next_sibling while i is not None: yield i i = i.next_sibling @property def previous_elements(self): """All PageElements that were parsed before this one. :yield: A sequence of PageElements. """ i = self.previous_element while i is not None: yield i i = i.previous_element @property def previous_siblings(self): """All PageElements that are siblings of this one but were parsed earlier. :yield: A sequence of PageElements. """ i = self.previous_sibling while i is not None: yield i i = i.previous_sibling @property def parents(self): """All PageElements that are parents of this PageElement. :yield: A sequence of PageElements. """ i = self.parent while i is not None: yield i i = i.parent @property def decomposed(self): """Check whether a PageElement has been decomposed. :rtype: bool """ return getattr(self, '_decomposed', False) or False # Old non-property versions of the generators, for backwards # compatibility with BS3. 
def nextGenerator(self): return self.next_elements def nextSiblingGenerator(self): return self.next_siblings def previousGenerator(self): return self.previous_elements def previousSiblingGenerator(self): return self.previous_siblings def parentGenerator(self): return self.parents class NavigableString(str, PageElement): """A Python Unicode string that is part of a parse tree. When Beautiful Soup parses the markup <b>penguin</b>, it will create a NavigableString for the string "penguin". """ PREFIX = '' SUFFIX = '' # We can't tell just by looking at a string whether it's contained # in an XML document or an HTML document. known_xml = None def __new__(cls, value): """Create a new NavigableString. When unpickling a NavigableString, this method is called with the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be passed in to the superclass's __new__ or the superclass won't know how to handle non-ASCII characters. """ if isinstance(value, str): u = str.__new__(cls, value) else: u = str.__new__(cls, value, DEFAULT_OUTPUT_ENCODING) u.setup() return u def __copy__(self): """A copy of a NavigableString has the same contents and class as the original, but it is not connected to the parse tree. """ return type(self)(self) def __getnewargs__(self): return (str(self),) def __getattr__(self, attr): """text.string gives you text. This is for backwards compatibility for Navigable*String, but for CData* it lets you get the string without the CData wrapper.""" if attr == 'string': return self else: raise AttributeError( "'%s' object has no attribute '%s'" % ( self.__class__.__name__, attr)) def output_ready(self, formatter="minimal"): """Run the string through the provided formatter. :param formatter: A Formatter object, or a string naming one of the standard formatters. """ output = self.format_string(self, formatter) return self.PREFIX + output + self.SUFFIX @property def name(self): """Since a NavigableString is not a Tag, it has no .name. 
This property is implemented so that code like this doesn't crash when run on a mixture of Tag and NavigableString objects: [x.name for x in tag.children] """ return None @name.setter def name(self, name): """Prevent NavigableString.name from ever being set.""" raise AttributeError("A NavigableString cannot be given a name.") class PreformattedString(NavigableString): """A NavigableString not subject to the normal formatting rules. This is an abstract class used for special kinds of strings such as comments (the Comment class) and CDATA blocks (the CData class). """ PREFIX = '' SUFFIX = '' def output_ready(self, formatter=None): """Make this string ready for output by adding any subclass-specific prefix or suffix. :param formatter: A Formatter object, or a string naming one of the standard formatters. The string will be passed into the Formatter, but only to trigger any side effects: the return value is ignored. :return: The string, with any subclass-specific prefix and suffix added on. """ if formatter is not None: ignore = self.format_string(self, formatter) return self.PREFIX + self + self.SUFFIX class CData(PreformattedString): """A CDATA block.""" PREFIX = '<![CDATA[' SUFFIX = ']]>' class ProcessingInstruction(PreformattedString): """A SGML processing instruction.""" PREFIX = '<?' SUFFIX = '>' class XMLProcessingInstruction(ProcessingInstruction): """An XML processing instruction.""" PREFIX = '<?' SUFFIX = '?>' class Comment(PreformattedString): """An HTML or XML comment.""" PREFIX = '<!--' SUFFIX = '-->' class Declaration(PreformattedString): """An XML declaration.""" PREFIX = '<?' SUFFIX = '?>' class Doctype(PreformattedString): """A document type declaration.""" @classmethod def for_name_and_ids(cls, name, pub_id, system_id): """Generate an appropriate document type declaration for a given public ID and system ID. :param name: The name of the document's root element, e.g. 'html'. :param pub_id: The Formal Public Identifier for this document type, e.g. 
'-//W3C//DTD XHTML 1.1//EN' :param system_id: The system identifier for this document type, e.g. 'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd' :return: A Doctype. """ value = name or '' if pub_id is not None: value += ' PUBLIC "%s"' % pub_id if system_id is not None: value += ' "%s"' % system_id elif system_id is not None: value += ' SYSTEM "%s"' % system_id return Doctype(value) PREFIX = '<!DOCTYPE ' SUFFIX = '>\n' class Stylesheet(NavigableString): """A NavigableString representing an stylesheet (probably CSS). Used to distinguish embedded stylesheets from textual content. """ pass class Script(NavigableString): """A NavigableString representing an executable script (probably Javascript). Used to distinguish executable code from textual content. """ pass class TemplateString(NavigableString): """A NavigableString representing a string found inside an HTML template embedded in a larger document. Used to distinguish such strings from the main body of the document. """ pass class Tag(PageElement): """Represents an HTML or XML tag that is part of a parse tree, along with its attributes and contents. When Beautiful Soup parses the markup <b>penguin</b>, it will create a Tag object representing the <b> tag. """ def __init__(self, parser=None, builder=None, name=None, namespace=None, prefix=None, attrs=None, parent=None, previous=None, is_xml=None, sourceline=None, sourcepos=None, can_be_empty_element=None, cdata_list_attributes=None, preserve_whitespace_tags=None ): """Basic constructor. :param parser: A BeautifulSoup object. :param builder: A TreeBuilder. :param name: The name of the tag. :param namespace: The URI of this Tag's XML namespace, if any. :param prefix: The prefix for this Tag's XML namespace, if any. :param attrs: A dictionary of this Tag's attribute values. :param parent: The PageElement to use as this Tag's parent. :param previous: The PageElement that was parsed immediately before this tag. :param is_xml: If True, this is an XML tag. 
Otherwise, this is an HTML tag. :param sourceline: The line number where this tag was found in its source document. :param sourcepos: The character position within `sourceline` where this tag was found. :param can_be_empty_element: If True, this tag should be represented as <tag/>. If False, this tag should be represented as <tag></tag>. :param cdata_list_attributes: A list of attributes whose values should be treated as CDATA if they ever show up on this tag. :param preserve_whitespace_tags: A list of tag names whose contents should have their whitespace preserved. """ if parser is None: self.parser_class = None else: # We don't actually store the parser object: that lets extracted # chunks be garbage-collected. self.parser_class = parser.__class__ if name is None: raise ValueError("No value provided for new tag's name.") self.name = name self.namespace = namespace self.prefix = prefix if ((not builder or builder.store_line_numbers) and (sourceline is not None or sourcepos is not None)): self.sourceline = sourceline self.sourcepos = sourcepos if attrs is None: attrs = {} elif attrs: if builder is not None and builder.cdata_list_attributes: attrs = builder._replace_cdata_list_attribute_values( self.name, attrs) else: attrs = dict(attrs) else: attrs = dict(attrs) # If possible, determine ahead of time whether this tag is an # XML tag. if builder: self.known_xml = builder.is_xml else: self.known_xml = is_xml self.attrs = attrs self.contents = [] self.setup(parent, previous) self.hidden = False if builder is None: # In the absence of a TreeBuilder, use whatever values were # passed in here. They're probably None, unless this is a copy of some # other tag. self.can_be_empty_element = can_be_empty_element self.cdata_list_attributes = cdata_list_attributes self.preserve_whitespace_tags = preserve_whitespace_tags else: # Set up any substitutions for this tag, such as the charset in a META tag. 
builder.set_up_substitutions(self) # Ask the TreeBuilder whether this tag might be an empty-element tag. self.can_be_empty_element = builder.can_be_empty_element(name) # Keep track of the list of attributes of this tag that # might need to be treated as a list. # # For performance reasons, we store the whole data structure # rather than asking the question of every tag. Asking would # require building a new data structure every time, and # (unlike can_be_empty_element), we almost never need # to check this. self.cdata_list_attributes = builder.cdata_list_attributes # Keep track of the names that might cause this tag to be treated as a # whitespace-preserved tag. self.preserve_whitespace_tags = builder.preserve_whitespace_tags parserClass = _alias("parser_class") # BS3 def __copy__(self): """A copy of a Tag is a new Tag, unconnected to the parse tree. Its contents are a copy of the old Tag's contents. """ clone = type(self)( None, self.builder, self.name, self.namespace, self.prefix, self.attrs, is_xml=self._is_xml, sourceline=self.sourceline, sourcepos=self.sourcepos, can_be_empty_element=self.can_be_empty_element, cdata_list_attributes=self.cdata_list_attributes, preserve_whitespace_tags=self.preserve_whitespace_tags ) for attr in ('can_be_empty_element', 'hidden'): setattr(clone, attr, getattr(self, attr)) for child in self.contents: clone.append(child.__copy__()) return clone @property def is_empty_element(self): """Is this tag an empty-element tag? (aka a self-closing tag) A tag that has contents is never an empty-element tag. A tag that has no contents may or may not be an empty-element tag. It depends on the builder used to create the tag. If the builder has a designated list of empty-element tags, then only a tag whose name shows up in that list is considered an empty-element tag. If the builder has no designated list of empty-element tags, then any tag with no contents is an empty-element tag. 
""" return len(self.contents) == 0 and self.can_be_empty_element isSelfClosing = is_empty_element # BS3 @property def string(self): """Convenience property to get the single string within this PageElement. TODO It might make sense to have NavigableString.string return itself. :return: If this element has a single string child, return value is that string. If this element has one child tag, return value is the 'string' attribute of the child tag, recursively. If this element is itself a string, has no children, or has more than one child, return value is None. """ if len(self.contents) != 1: return None child = self.contents[0] if isinstance(child, NavigableString): return child return child.string @string.setter def string(self, string): """Replace this PageElement's contents with `string`.""" self.clear() self.append(string.__class__(string)) def _all_strings(self, strip=False, types=(NavigableString, CData)): """Yield all strings of certain classes, possibly stripping them. :param strip: If True, all strings will be stripped before being yielded. :types: A tuple of NavigableString subclasses. Any strings of a subclass not found in this list will be ignored. By default, this means only NavigableString and CData objects will be considered. So no comments, processing instructions, etc. :yield: A sequence of strings. """ for descendant in self.descendants: if ( (types is None and not isinstance(descendant, NavigableString)) or (types is not None and type(descendant) not in types)): continue if strip: descendant = descendant.strip() if len(descendant) == 0: continue yield descendant strings = property(_all_strings) @property def stripped_strings(self): """Yield all strings in the document, stripping them first. :yield: A sequence of stripped strings. """ for string in self._all_strings(True): yield string def get_text(self, separator="", strip=False, types=(NavigableString, CData)): """Get all child strings, concatenated using the given separator. 
:param separator: Strings will be concatenated using this separator. :param strip: If True, strings will be stripped before being concatenated. :types: A tuple of NavigableString subclasses. Any strings of a subclass not found in this list will be ignored. By default, this means only NavigableString and CData objects will be considered. So no comments, processing instructions, stylesheets, etc. :return: A string. """ return separator.join([s for s in self._all_strings( strip, types=types)]) getText = get_text text = property(get_text) def decompose(self): """Recursively destroys this PageElement and its children. This element will be removed from the tree and wiped out; so will everything beneath it. The behavior of a decomposed PageElement is undefined and you should never use one for anything, but if you need to _check_ whether an element has been decomposed, you can use the `decomposed` property. """ self.extract() i = self while i is not None: n = i.next_element i.__dict__.clear() i.contents = [] i._decomposed = True i = n def clear(self, decompose=False): """Wipe out all children of this PageElement by calling extract() on them. :param decompose: If this is True, decompose() (a more destructive method) will be called instead of extract(). """ if decompose: for element in self.contents[:]: if isinstance(element, Tag): element.decompose() else: element.extract() else: for element in self.contents[:]: element.extract() def smooth(self): """Smooth out this element's children by consolidating consecutive strings. This makes pretty-printed output look more natural following a lot of operations that modified the tree. """ # Mark the first position of every pair of children that need # to be consolidated. Do this rather than making a copy of # self.contents, since in most cases very few strings will be # affected. marked = [] for i, a in enumerate(self.contents): if isinstance(a, Tag): # Recursively smooth children. 
a.smooth() if i == len(self.contents)-1: # This is the last item in .contents, and it's not a # tag. There's no chance it needs any work. continue b = self.contents[i+1] if (isinstance(a, NavigableString) and isinstance(b, NavigableString) and not isinstance(a, PreformattedString) and not isinstance(b, PreformattedString) ): marked.append(i) # Go over the marked positions in reverse order, so that # removing items from .contents won't affect the remaining # positions. for i in reversed(marked): a = self.contents[i] b = self.contents[i+1] b.extract() n = NavigableString(a+b) a.replace_with(n) def index(self, element): """Find the index of a child by identity, not value. Avoids issues with tag.contents.index(element) getting the index of equal elements. :param element: Look for this PageElement in `self.contents`. """ for i, child in enumerate(self.contents): if child is element: return i raise ValueError("Tag.index: element not in tag") def get(self, key, default=None): """Returns the value of the 'key' attribute for the tag, or the value given for 'default' if it doesn't have that attribute.""" return self.attrs.get(key, default) def get_attribute_list(self, key, default=None): """The same as get(), but always returns a list. :param key: The attribute to look for. :param default: Use this value if the attribute is not present on this PageElement. :return: A list of values, probably containing only a single value. """ value = self.get(key, default) if not isinstance(value, list): value = [value] return value def has_attr(self, key): """Does this PageElement have an attribute with the given name?""" return key in self.attrs def __hash__(self): return str(self).__hash__() def __getitem__(self, key): """tag[key] returns the value of the 'key' attribute for the Tag, and throws an exception if it's not there.""" return self.attrs[key] def __iter__(self): "Iterating over a Tag iterates over its contents." 
return iter(self.contents) def __len__(self): "The length of a Tag is the length of its list of contents." return len(self.contents) def __contains__(self, x): return x in self.contents def __bool__(self): "A tag is non-None even if it has no contents." return True def __setitem__(self, key, value): """Setting tag[key] sets the value of the 'key' attribute for the tag.""" self.attrs[key] = value def __delitem__(self, key): "Deleting tag[key] deletes all 'key' attributes for the tag." self.attrs.pop(key, None) def __call__(self, *args, **kwargs): """Calling a Tag like a function is the same as calling its find_all() method. Eg. tag('a') returns a list of all the A tags found within this tag.""" return self.find_all(*args, **kwargs) def __getattr__(self, tag): """Calling tag.subtag is the same as calling tag.find(name="subtag")""" #print("Getattr %s.%s" % (self.__class__, tag)) if len(tag) > 3 and tag.endswith('Tag'): # BS3: soup.aTag -> "soup.find("a") tag_name = tag[:-3] warnings.warn( '.%(name)sTag is deprecated, use .find("%(name)s") instead. If you really were looking for a tag called %(name)sTag, use .find("%(name)sTag")' % dict( name=tag_name ) ) return self.find(tag_name) # We special case contents to avoid recursion. 
elif not tag.startswith("__") and not tag == "contents": return self.find(tag) raise AttributeError( "'%s' object has no attribute '%s'" % (self.__class__, tag)) def __eq__(self, other): """Returns true iff this Tag has the same name, the same attributes, and the same contents (recursively) as `other`.""" if self is other: return True if (not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other)): return False for i, my_child in enumerate(self.contents): if my_child != other.contents[i]: return False return True def __ne__(self, other): """Returns true iff this Tag is not identical to `other`, as defined in __eq__.""" return not self == other def __repr__(self, encoding="unicode-escape"): """Renders this PageElement as a string. :param encoding: The encoding to use (Python 2 only). :return: Under Python 2, a bytestring; under Python 3, a Unicode string. """ if PY3K: # "The return value must be a string object", i.e. Unicode return self.decode() else: # "The return value must be a string object", i.e. a bytestring. # By convention, the return value of __repr__ should also be # an ASCII string. return self.encode(encoding) def __unicode__(self): """Renders this PageElement as a Unicode string.""" return self.decode() def __str__(self): """Renders this PageElement as a generic string. :return: Under Python 2, a UTF-8 bytestring; under Python 3, a Unicode string. """ if PY3K: return self.decode() else: return self.encode() if PY3K: __str__ = __repr__ = __unicode__ def encode(self, encoding=DEFAULT_OUTPUT_ENCODING, indent_level=None, formatter="minimal", errors="xmlcharrefreplace"): """Render a bytestring representation of this PageElement and its contents. :param encoding: The destination encoding. :param indent_level: Each line of the rendering will be indented this many spaces. Used internally in recursive calls while pretty-printing. 
:param formatter: A Formatter object, or a string naming one of the standard formatters. :param errors: An error handling strategy such as 'xmlcharrefreplace'. This value is passed along into encode() and its value should be one of the constants defined by Python. :return: A bytestring. """ # Turn the data structure into Unicode, then encode the # Unicode. u = self.decode(indent_level, encoding, formatter) return u.encode(encoding, errors) def decode(self, indent_level=None, eventual_encoding=DEFAULT_OUTPUT_ENCODING, formatter="minimal"): """Render a Unicode representation of this PageElement and its contents. :param indent_level: Each line of the rendering will be indented this many spaces. Used internally in recursive calls while pretty-printing. :param eventual_encoding: The tag is destined to be encoded into this encoding. This method is _not_ responsible for performing that encoding. This information is passed in so that it can be substituted in if the document contains a <META> tag that mentions the document's encoding. :param formatter: A Formatter object, or a string naming one of the standard formatters. """ # First off, turn a non-Formatter `formatter` into a Formatter # object. This will stop the lookup from happening over and # over again. 
if not isinstance(formatter, Formatter): formatter = self.formatter_for_name(formatter) attributes = formatter.attributes(self) attrs = [] for key, val in attributes: if val is None: decoded = key else: if isinstance(val, list) or isinstance(val, tuple): val = ' '.join(val) elif not isinstance(val, str): val = str(val) elif ( isinstance(val, AttributeValueWithCharsetSubstitution) and eventual_encoding is not None ): val = val.encode(eventual_encoding) text = formatter.attribute_value(val) decoded = ( str(key) + '=' + formatter.quoted_attribute_value(text)) attrs.append(decoded) close = '' closeTag = '' prefix = '' if self.prefix: prefix = self.prefix + ":" if self.is_empty_element: close = formatter.void_element_close_prefix or '' else: closeTag = '</%s%s>' % (prefix, self.name) pretty_print = self._should_pretty_print(indent_level) space = '' indent_space = '' if indent_level is not None: indent_space = (' ' * (indent_level - 1)) if pretty_print: space = indent_space indent_contents = indent_level + 1 else: indent_contents = None contents = self.decode_contents( indent_contents, eventual_encoding, formatter ) if self.hidden: # This is the 'document root' object. s = contents else: s = [] attribute_string = '' if attrs: attribute_string = ' ' + ' '.join(attrs) if indent_level is not None: # Even if this particular tag is not pretty-printed, # we should indent up to the start of the tag. s.append(indent_space) s.append('<%s%s%s%s>' % ( prefix, self.name, attribute_string, close)) if pretty_print: s.append("\n") s.append(contents) if pretty_print and contents and contents[-1] != "\n": s.append("\n") if pretty_print and closeTag: s.append(space) s.append(closeTag) if indent_level is not None and closeTag and self.next_sibling: # Even if this particular tag is not pretty-printed, # we're now done with the tag, and we should add a # newline if appropriate. 
s.append("\n") s = ''.join(s) return s def _should_pretty_print(self, indent_level): """Should this tag be pretty-printed? Most of them should, but some (such as <pre> in HTML documents) should not. """ return ( indent_level is not None and ( not self.preserve_whitespace_tags or self.name not in self.preserve_whitespace_tags ) ) def prettify(self, encoding=None, formatter="minimal"): """Pretty-print this PageElement as a string. :param encoding: The eventual encoding of the string. If this is None, a Unicode string will be returned. :param formatter: A Formatter object, or a string naming one of the standard formatters. :return: A Unicode string (if encoding==None) or a bytestring (otherwise). """ if encoding is None: return self.decode(True, formatter=formatter) else: return self.encode(encoding, True, formatter=formatter) def decode_contents(self, indent_level=None, eventual_encoding=DEFAULT_OUTPUT_ENCODING, formatter="minimal"): """Renders the contents of this tag as a Unicode string. :param indent_level: Each line of the rendering will be indented this many spaces. Used internally in recursive calls while pretty-printing. :param eventual_encoding: The tag is destined to be encoded into this encoding. decode_contents() is _not_ responsible for performing that encoding. This information is passed in so that it can be substituted in if the document contains a <META> tag that mentions the document's encoding. :param formatter: A Formatter object, or a string naming one of the standard Formatters. """ # First off, turn a string formatter into a Formatter object. This # will stop the lookup from happening over and over again. 
if not isinstance(formatter, Formatter): formatter = self.formatter_for_name(formatter) pretty_print = (indent_level is not None) s = [] for c in self: text = None if isinstance(c, NavigableString): text = c.output_ready(formatter) elif isinstance(c, Tag): s.append(c.decode(indent_level, eventual_encoding, formatter)) preserve_whitespace = ( self.preserve_whitespace_tags and self.name in self.preserve_whitespace_tags ) if text and indent_level and not preserve_whitespace: text = text.strip() if text: if pretty_print and not preserve_whitespace: s.append(" " * (indent_level - 1)) s.append(text) if pretty_print and not preserve_whitespace: s.append("\n") return ''.join(s) def encode_contents( self, indent_level=None, encoding=DEFAULT_OUTPUT_ENCODING, formatter="minimal"): """Renders the contents of this PageElement as a bytestring. :param indent_level: Each line of the rendering will be indented this many spaces. Used internally in recursive calls while pretty-printing. :param eventual_encoding: The bytestring will be in this encoding. :param formatter: A Formatter object, or a string naming one of the standard Formatters. :return: A bytestring. """ contents = self.decode_contents(indent_level, encoding, formatter) return contents.encode(encoding) # Old method for BS3 compatibility def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING, prettyPrint=False, indentLevel=0): """Deprecated method for BS3 compatibility.""" if not prettyPrint: indentLevel = None return self.encode_contents( indent_level=indentLevel, encoding=encoding) #Soup methods def find(self, name=None, attrs={}, recursive=True, text=None, **kwargs): """Look in the children of this PageElement and find the first PageElement that matches the given criteria. All find_* methods take a common set of arguments. See the online documentation for detailed explanations. :param name: A filter on tag name. :param attrs: A dictionary of filters on attribute values. 
:param recursive: If this is True, find() will perform a recursive search of this PageElement's children. Otherwise, only the direct children will be considered. :param limit: Stop looking after finding this many results. :kwargs: A dictionary of filters on attribute values. :return: A PageElement. :rtype: bs4.element.Tag | bs4.element.NavigableString """ r = None l = self.find_all(name, attrs, recursive, text, 1, **kwargs) if l: r = l[0] return r findChild = find #BS2 def find_all(self, name=None, attrs={}, recursive=True, text=None, limit=None, **kwargs): """Look in the children of this PageElement and find all PageElements that match the given criteria. All find_* methods take a common set of arguments. See the online documentation for detailed explanations. :param name: A filter on tag name. :param attrs: A dictionary of filters on attribute values. :param recursive: If this is True, find_all() will perform a recursive search of this PageElement's children. Otherwise, only the direct children will be considered. :param limit: Stop looking after finding this many results. :kwargs: A dictionary of filters on attribute values. :return: A ResultSet of PageElements. :rtype: bs4.element.ResultSet """ generator = self.descendants if not recursive: generator = self.children return self._find_all(name, attrs, text, limit, generator, **kwargs) findAll = find_all # BS3 findChildren = find_all # BS2 #Generator methods @property def children(self): """Iterate over all direct children of this PageElement. :yield: A sequence of PageElements. """ # return iter() to make the purpose of the method clear return iter(self.contents) # XXX This seems to be untested. @property def descendants(self): """Iterate over all children of this PageElement in a breadth-first sequence. :yield: A sequence of PageElements. 
""" if not len(self.contents): return stopNode = self._last_descendant().next_element current = self.contents[0] while current is not stopNode: yield current current = current.next_element # CSS selector code def select_one(self, selector, namespaces=None, **kwargs): """Perform a CSS selection operation on the current element. :param selector: A CSS selector. :param namespaces: A dictionary mapping namespace prefixes used in the CSS selector to namespace URIs. By default, Beautiful Soup will use the prefixes it encountered while parsing the document. :param kwargs: Keyword arguments to be passed into SoupSieve's soupsieve.select() method. :return: A Tag. :rtype: bs4.element.Tag """ value = self.select(selector, namespaces, 1, **kwargs) if value: return value[0] return None def select(self, selector, namespaces=None, limit=None, **kwargs): """Perform a CSS selection operation on the current element. This uses the SoupSieve library. :param selector: A string containing a CSS selector. :param namespaces: A dictionary mapping namespace prefixes used in the CSS selector to namespace URIs. By default, Beautiful Soup will use the prefixes it encountered while parsing the document. :param limit: After finding this number of results, stop looking. :param kwargs: Keyword arguments to be passed into SoupSieve's soupsieve.select() method. :return: A ResultSet of Tags. :rtype: bs4.element.ResultSet """ if namespaces is None: namespaces = self._namespaces if limit is None: limit = 0 if soupsieve is None: raise NotImplementedError( "Cannot execute CSS selectors because the soupsieve package is not installed." ) results = soupsieve.select(selector, self, namespaces, limit, **kwargs) # We do this because it's more consistent and because # ResultSet.__getattr__ has a helpful error message. 
return ResultSet(None, results) # Old names for backwards compatibility def childGenerator(self): """Deprecated generator.""" return self.children def recursiveChildGenerator(self): """Deprecated generator.""" return self.descendants def has_key(self, key): """Deprecated method. This was kind of misleading because has_key() (attributes) was different from __in__ (contents). has_key() is gone in Python 3, anyway. """ warnings.warn('has_key is deprecated. Use has_attr("%s") instead.' % ( key)) return self.has_attr(key) # Next, a couple classes to represent queries and their results. class SoupStrainer(object): """Encapsulates a number of ways of matching a markup element (tag or string). This is primarily used to underpin the find_* methods, but you can create one yourself and pass it in as `parse_only` to the `BeautifulSoup` constructor, to parse a subset of a large document. """ def __init__(self, name=None, attrs={}, text=None, **kwargs): """Constructor. The SoupStrainer constructor takes the same arguments passed into the find_* methods. See the online documentation for detailed explanations. :param name: A filter on tag name. :param attrs: A dictionary of filters on attribute values. :param text: A filter for a NavigableString with specific text. :kwargs: A dictionary of filters on attribute values. """ self.name = self._normalize_search_value(name) if not isinstance(attrs, dict): # Treat a non-dict value for attrs as a search for the 'class' # attribute. kwargs['class'] = attrs attrs = None if 'class_' in kwargs: # Treat class_="foo" as a search for the 'class' # attribute, overriding any non-dict value for attrs. 
kwargs['class'] = kwargs['class_'] del kwargs['class_'] if kwargs: if attrs: attrs = attrs.copy() attrs.update(kwargs) else: attrs = kwargs normalized_attrs = {} for key, value in list(attrs.items()): normalized_attrs[key] = self._normalize_search_value(value) self.attrs = normalized_attrs self.text = self._normalize_search_value(text) def _normalize_search_value(self, value): # Leave it alone if it's a Unicode string, a callable, a # regular expression, a boolean, or None. if (isinstance(value, str) or isinstance(value, Callable) or hasattr(value, 'match') or isinstance(value, bool) or value is None): return value # If it's a bytestring, convert it to Unicode, treating it as UTF-8. if isinstance(value, bytes): return value.decode("utf8") # If it's listlike, convert it into a list of strings. if hasattr(value, '__iter__'): new_value = [] for v in value: if (hasattr(v, '__iter__') and not isinstance(v, bytes) and not isinstance(v, str)): # This is almost certainly the user's mistake. In the # interests of avoiding infinite loops, we'll let # it through as-is rather than doing a recursive call. new_value.append(v) else: new_value.append(self._normalize_search_value(v)) return new_value # Otherwise, convert it into a Unicode string. # The unicode(str()) thing is so this will do the same thing on Python 2 # and Python 3. return str(str(value)) def __str__(self): """A human-readable representation of this SoupStrainer.""" if self.text: return self.text else: return "%s|%s" % (self.name, self.attrs) def search_tag(self, markup_name=None, markup_attrs={}): """Check whether a Tag with the given name and attributes would match this SoupStrainer. Used prospectively to decide whether to even bother creating a Tag object. :param markup_name: A tag name as found in some markup. :param markup_attrs: A dictionary of attributes as found in some markup. :return: True if the prospective tag would match this SoupStrainer; False otherwise. 
""" found = None markup = None if isinstance(markup_name, Tag): markup = markup_name markup_attrs = markup call_function_with_tag_data = ( isinstance(self.name, Callable) and not isinstance(markup_name, Tag)) if ((not self.name) or call_function_with_tag_data or (markup and self._matches(markup, self.name)) or (not markup and self._matches(markup_name, self.name))): if call_function_with_tag_data: match = self.name(markup_name, markup_attrs) else: match = True markup_attr_map = None for attr, match_against in list(self.attrs.items()): if not markup_attr_map: if hasattr(markup_attrs, 'get'): markup_attr_map = markup_attrs else: markup_attr_map = {} for k, v in markup_attrs: markup_attr_map[k] = v attr_value = markup_attr_map.get(attr) if not self._matches(attr_value, match_against): match = False break if match: if markup: found = markup else: found = markup_name if found and self.text and not self._matches(found.string, self.text): found = None return found # For BS3 compatibility. searchTag = search_tag def search(self, markup): """Find all items in `markup` that match this SoupStrainer. Used by the core _find_all() method, which is ultimately called by all find_* methods. :param markup: A PageElement or a list of them. """ # print('looking for %s in %s' % (self, markup)) found = None # If given a list of items, scan it for a text element that # matches. if hasattr(markup, '__iter__') and not isinstance(markup, (Tag, str)): for element in markup: if isinstance(element, NavigableString) \ and self.search(element): found = element break # If it's a Tag, make sure its name or attributes match. # Don't bother with Tags if we're searching for text. elif isinstance(markup, Tag): if not self.text or self.name or self.attrs: found = self.search_tag(markup) # If it's text, make sure the text matches. 
elif isinstance(markup, NavigableString) or \ isinstance(markup, str): if not self.name and not self.attrs and self._matches(markup, self.text): found = markup else: raise Exception( "I don't know how to match against a %s" % markup.__class__) return found def _matches(self, markup, match_against, already_tried=None): # print(u"Matching %s against %s" % (markup, match_against)) result = False if isinstance(markup, list) or isinstance(markup, tuple): # This should only happen when searching a multi-valued attribute # like 'class'. for item in markup: if self._matches(item, match_against): return True # We didn't match any particular value of the multivalue # attribute, but maybe we match the attribute value when # considered as a string. if self._matches(' '.join(markup), match_against): return True return False if match_against is True: # True matches any non-None value. return markup is not None if isinstance(match_against, Callable): return match_against(markup) # Custom callables take the tag as an argument, but all # other ways of matching match the tag name as a string. original_markup = markup if isinstance(markup, Tag): markup = markup.name # Ensure that `markup` is either a Unicode string, or None. markup = self._normalize_search_value(markup) if markup is None: # None matches None, False, an empty string, an empty list, and so on. return not match_against if (hasattr(match_against, '__iter__') and not isinstance(match_against, str)): # We're asked to match against an iterable of items. # The markup must be match at least one item in the # iterable. We'll try each one in turn. # # To avoid infinite recursion we need to keep track of # items we've already seen. 
if not already_tried: already_tried = set() for item in match_against: if item.__hash__: key = item else: key = id(item) if key in already_tried: continue else: already_tried.add(key) if self._matches(original_markup, item, already_tried): return True else: return False # Beyond this point we might need to run the test twice: once against # the tag's name and once against its prefixed name. match = False if not match and isinstance(match_against, str): # Exact string match match = markup == match_against if not match and hasattr(match_against, 'search'): # Regexp match return match_against.search(markup) if (not match and isinstance(original_markup, Tag) and original_markup.prefix): # Try the whole thing again with the prefixed tag name. return self._matches( original_markup.prefix + ':' + original_markup.name, match_against ) return match class ResultSet(list): """A ResultSet is just a list that keeps track of the SoupStrainer that created it.""" def __init__(self, source, result=()): """Constructor. :param source: A SoupStrainer. :param result: A list of PageElements. """ super(ResultSet, self).__init__(result) self.source = source def __getattr__(self, key): """Raise a helpful exception to explain a common code fix.""" raise AttributeError( "ResultSet object has no attribute '%s'. You're probably treating a list of elements like a single element. Did you call find_all() when you meant to call find()?" % key )
codeparrot/github-code-clean
# # gPrime - A web-based genealogy program # # Copyright (C) 2011 Michiel D. Nauta # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # """ Unittest that tests the code involved in merging """ import unittest from .. import (Person, Surname, Name, NameType, Family, FamilyRelType, Event, EventType, Source, Place, PlaceName, Citation, Date, Repository, RepositoryType, Media, Note, NoteType, StyledText, StyledTextTag, StyledTextTagType, Tag, ChildRef, ChildRefType, Attribute, MediaRef, AttributeType, Url, UrlType, Address, EventRef, EventRoleType, RepoRef, FamilyRelType, LdsOrd, MediaRef, PersonRef, PlaceType, SrcAttribute, SrcAttributeType) from ..privacybase import PrivacyBase from ..urlbase import UrlBase from ..addressbase import AddressBase from ..attrbase import AttributeBase from ..ldsordbase import LdsOrdBase from ..mediabase import MediaBase from ..notebase import NoteBase from ..citationbase import CitationBase from ..surnamebase import SurnameBase from ..tagbase import TagBase from ..const import IDENTICAL, EQUAL, DIFFERENT class PrivacyBaseTest: def test_privacy_merge(self): self.assertEqual(self.phoenix.to_struct(), self.titanic.to_struct()) self.titanic.set_privacy(True) self.ref_obj.set_privacy(True) self.phoenix.merge(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) class 
NoteBaseTest: def test_note_merge(self): note_handle = '123456' self.titanic.add_note(note_handle) self.ref_obj.add_note(note_handle) self.phoenix.merge(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) class CitationBaseTest: def test_citation_merge(self): citation = Citation() citation.set_reference_handle('123456') citation.set_page('p.10') self.titanic.add_citation(citation.handle) self.ref_obj.add_citation(citation.handle) self.phoenix.merge(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) class MediaBaseTest: def test_media_merge(self): mediaref = MediaRef() mediaref.set_reference_handle('123456') self.titanic.add_media_reference(mediaref) self.ref_obj.add_media_reference(mediaref) self.phoenix.merge(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) class AttrBaseTest: def test_attribute_merge(self): attr = Attribute() attr.set_type(AttributeType.AGE) attr.set_value(10) self.titanic.add_attribute(attr) self.ref_obj.add_attribute(attr) self.phoenix.merge(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) class UrlBaseTest: def test_url_merge(self): url = Url() url.set_path('http://example.com') self.titanic.add_url(url) self.ref_obj.add_url(url) self.phoenix.merge(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) #=========================================================== class PrivacyCheck(unittest.TestCase): def test_privacy(self): known_values = ( (False, False, False), (True, False, True), (False, True, True), (True, True, True) ) phoenix = PrivacyBase() titanic = PrivacyBase() for value1, value2, value_merge in known_values: phoenix.set_privacy(value1) titanic.set_privacy(value2) phoenix._merge_privacy(titanic) self.assertEqual(phoenix.get_privacy(), value_merge) class UrlCheck(unittest.TestCase, PrivacyBaseTest): def setUp(self): self.phoenix = Url() 
self.phoenix.set_path('http://example1.com') self.phoenix.set_description('hello world') self.phoenix.set_type(UrlType.WEB_HOME) self.titanic = Url(self.phoenix) self.ref_obj = Url(self.phoenix) def test_path_equivalence(self): self.assertEqual(self.phoenix.is_equivalent(self.titanic), IDENTICAL) self.titanic.set_path('http://example2.com') self.assertEqual(self.phoenix.is_equivalent(self.titanic), DIFFERENT) def test_type_equivalence(self): self.titanic.set_type(UrlType.UNKNOWN) self.assertEqual(self.phoenix.is_equivalent(self.titanic), DIFFERENT) def test_desc_equivalence(self): self.titanic.set_description('goodby') self.assertEqual(self.phoenix.is_equivalent(self.titanic), DIFFERENT) def test_privacy_equivalence(self): self.titanic.set_privacy(True) self.assertEqual(self.phoenix.is_equivalent(self.titanic), EQUAL) def test_merge_path(self): self.titanic.set_path('example2.com') self.phoenix.merge(self.titanic) self.assertEqual(self.phoenix.is_equal(self.ref_obj), True) class UrlBaseCheck(unittest.TestCase): def setUp(self): self.phoenix = UrlBase() self.titanic = UrlBase() url = Url() url.set_path('example.com') self.phoenix.add_url(url) def test_identical(self): ref_url_list = UrlBase(self.phoenix) url = Url() url.set_path('example.com') self.titanic.add_url(url) self.phoenix._merge_url_list(self.titanic) self.assertEqual(self.phoenix.to_struct(), ref_url_list.to_struct()) def test_equal(self): ref_url_list = UrlBase(self.phoenix) ref_url = ref_url_list.get_url_list()[0] ref_url.set_privacy(True) url = Url() url.set_path('example.com') url.set_privacy(True) self.titanic.add_url(url) self.phoenix._merge_url_list(self.titanic) self.assertEqual(self.phoenix.to_struct(), ref_url_list.to_struct()) def test_different(self): ref_url_list = UrlBase(self.phoenix) url = Url() url.set_path('other.com') ref_url_list.add_url(url) self.titanic.add_url(url) self.phoenix._merge_url_list(self.titanic) self.assertEqual(self.phoenix.to_struct(), ref_url_list.to_struct()) class 
class AddressCheck(unittest.TestCase, PrivacyBaseTest, NoteBaseTest,
                   CitationBaseTest):
    """Equivalence and merge behaviour of Address objects."""

    def setUp(self):
        self.phoenix = Address()
        self.phoenix.set_city('Amsterdam')
        self.titanic = Address(self.phoenix)
        self.ref_obj = Address(self.phoenix)

    def test_location_equivalence(self):
        self.assertEqual(self.phoenix.is_equivalent(self.titanic), IDENTICAL)
        self.titanic.set_city('Rotterdam')
        self.assertEqual(self.phoenix.is_equivalent(self.titanic), DIFFERENT)

    def test_date_equivalence(self):
        date = Date()
        date.set_yr_mon_day(1999, 12, 5)
        self.titanic.set_date_object(date)
        self.assertEqual(self.phoenix.is_equivalent(self.titanic), DIFFERENT)

    def test_privacy_equivalence(self):
        self.titanic.set_privacy(True)
        self.assertEqual(self.phoenix.is_equivalent(self.titanic), EQUAL)

    def test_location_merge(self):
        # a DIFFERENT address does not overwrite phoenix's location
        self.titanic.set_city('Rotterdam')
        self.phoenix.merge(self.titanic)
        self.assertEqual(self.phoenix.is_equal(self.ref_obj), True)


class AddressBaseCheck(unittest.TestCase):
    """Merging of address lists (AddressBase._merge_address_list)."""

    def setUp(self):
        self.phoenix = AddressBase()
        self.titanic = AddressBase()
        self.ref_list = AddressBase()
        address = Address()
        address.set_city('Amsterdam')
        self.phoenix.add_address(address)

    def test_identical(self):
        address = Address()
        address.set_city('Amsterdam')
        self.ref_list.add_address(address)
        self.titanic.add_address(address)
        self.phoenix._merge_address_list(self.titanic)
        self.assertEqual(self.phoenix.to_struct(), self.ref_list.to_struct())

    def test_equal(self):
        # equal address carrying a note: the note ref is merged in
        note_handle = '123456'
        address = Address()
        address.set_city('Amsterdam')
        address.add_note(note_handle)
        self.titanic.add_address(address)
        self.ref_list.add_address(address)
        self.phoenix._merge_address_list(self.titanic)
        self.assertEqual(self.phoenix.to_struct(), self.ref_list.to_struct())

    def test_different(self):
        # different address: appended after phoenix's existing entries
        address = Address()
        address.set_country('Netherlands')
        self.titanic.add_address(address)
        self.ref_list = AddressBase(self.phoenix)
        self.ref_list.add_address(address)
        self.phoenix._merge_address_list(self.titanic)
        self.assertEqual(self.phoenix.to_struct(), self.ref_list.to_struct())


class AttributeCheck(unittest.TestCase, PrivacyBaseTest, NoteBaseTest,
                     CitationBaseTest):
    """Equivalence and merge behaviour of Attribute objects."""

    def setUp(self):
        self.phoenix = Attribute()
        self.phoenix.set_type(AttributeType.AGE)
        self.phoenix.set_value(10)
        self.titanic = Attribute(self.phoenix)
        self.ref_obj = Attribute(self.phoenix)

    def test_type_equivalence(self):
        self.assertEqual(self.phoenix.is_equivalent(self.titanic), IDENTICAL)
        self.titanic.set_type(AttributeType.MOTHER_AGE)
        self.assertEqual(self.phoenix.is_equivalent(self.titanic), DIFFERENT)

    def test_value_equivalence(self):
        self.titanic.set_value(12)
        self.assertEqual(self.phoenix.is_equivalent(self.titanic), DIFFERENT)

    def test_privacy_equivalence(self):
        self.titanic.set_privacy(True)
        self.assertEqual(self.phoenix.is_equivalent(self.titanic), EQUAL)

    def test_value_merge(self):
        # a DIFFERENT value does not overwrite phoenix's value
        self.titanic.set_value(12)
        self.phoenix.merge(self.titanic)
        self.assertEqual(self.phoenix.is_equal(self.ref_obj), True)


class AttributeBaseCheck(unittest.TestCase):
    """Merging of attribute lists (AttributeBase._merge_attribute_list)."""

    def setUp(self):
        self.phoenix = AttributeBase()
        self.titanic = AttributeBase()
        self.ref_list = AttributeBase()
        attr = Attribute()
        attr.set_type(AttributeType.AGE)
        attr.set_value(10)
        self.phoenix.add_attribute(attr)

    def test_identical(self):
        attr = Attribute()
        attr.set_type(AttributeType.AGE)
        attr.set_value(10)
        self.ref_list.add_attribute(attr)
        self.titanic.add_attribute(attr)
        self.phoenix._merge_attribute_list(self.titanic)
        self.assertEqual(self.phoenix.to_struct(), self.ref_list.to_struct())

    def test_equal(self):
        # equal attribute carrying a note: the note ref is merged in
        note_handle = '123456'
        attr = Attribute()
        attr.set_type(AttributeType.AGE)
        attr.set_value(10)
        attr.add_note(note_handle)
        self.titanic.add_attribute(attr)
        self.ref_list.add_attribute(attr)
        self.phoenix._merge_attribute_list(self.titanic)
        self.assertEqual(self.phoenix.to_struct(), self.ref_list.to_struct())

    def test_different(self):
        # different value: attribute is appended to phoenix's list
        attr = Attribute()
        attr.set_type(AttributeType.AGE)
        attr.set_value(12)
        self.titanic.add_attribute(attr)
        self.ref_list = AttributeBase(self.phoenix)
        self.ref_list.add_attribute(attr)
        self.phoenix._merge_attribute_list(self.titanic)
        self.assertEqual(self.phoenix.to_struct(), self.ref_list.to_struct())
= AttributeBase(self.phoenix) self.ref_list.add_attribute(attr) self.phoenix._merge_attribute_list(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.ref_list.to_struct()) class ChildRefCheck(unittest.TestCase, PrivacyBaseTest, NoteBaseTest, CitationBaseTest): def setUp(self): self.phoenix = ChildRef() self.phoenix.set_reference_handle('123456') self.phoenix.set_father_relation(ChildRefType.UNKNOWN) self.phoenix.set_mother_relation(ChildRefType.UNKNOWN) self.titanic = ChildRef() self.titanic.set_reference_handle('123456') self.titanic.set_father_relation(ChildRefType.UNKNOWN) self.titanic.set_mother_relation(ChildRefType.UNKNOWN) self.ref_obj = ChildRef() self.ref_obj.set_reference_handle('123456') self.ref_obj.set_father_relation(ChildRefType.UNKNOWN) self.ref_obj.set_mother_relation(ChildRefType.UNKNOWN) def test_handle_equivalence(self): self.assertEqual(self.phoenix.is_equivalent(self.titanic), IDENTICAL) self.titanic.set_reference_handle('654321') self.assertEqual(self.phoenix.is_equivalent(self.titanic), DIFFERENT) def test_privacy_equivalence(self): self.titanic.set_privacy(True) self.assertEqual(self.phoenix.is_equivalent(self.titanic), EQUAL) def test_mrel_merge(self): self.titanic.set_mother_relation(ChildRefType.BIRTH) self.ref_obj.set_mother_relation(ChildRefType.BIRTH) self.phoenix.merge(self.titanic) self.assertEqual(self.phoenix.is_equal(self.ref_obj), True) def test_frel_merge(self): self.titanic.set_father_relation(ChildRefType.ADOPTED) self.ref_obj.set_father_relation(ChildRefType.ADOPTED) self.phoenix.merge(self.titanic) self.assertEqual(self.phoenix.is_equal(self.ref_obj), True) class EventCheck(unittest.TestCase, PrivacyBaseTest, NoteBaseTest, CitationBaseTest, MediaBaseTest, AttrBaseTest): def setUp(self): self.phoenix = Event() self.phoenix.set_description("hello world") self.titanic = Event.from_struct(self.phoenix.to_struct()) self.ref_obj = Event.from_struct(self.phoenix.to_struct()) class EventRefCheck(unittest.TestCase, 
PrivacyBaseTest, NoteBaseTest, AttrBaseTest): def setUp(self): self.phoenix = EventRef() self.phoenix.set_reference_handle('123456') self.titanic = EventRef(self.phoenix) self.ref_obj = EventRef(self.phoenix) def test_handle_equivalence(self): self.assertEqual(self.phoenix.is_equivalent(self.titanic), IDENTICAL) self.titanic.set_reference_handle('654321') self.assertEqual(self.phoenix.is_equivalent(self.titanic), DIFFERENT) def test_role_equivalence(self): self.titanic.set_role(EventRoleType.WITNESS) self.assertEqual(self.phoenix.is_equivalent(self.titanic), DIFFERENT) def test_privacy_equivalence(self): self.titanic.set_privacy(True) self.assertEqual(self.phoenix.is_equivalent(self.titanic), EQUAL) def test_replace(self): attr1 = Attribute() attr1.set_type(AttributeType.AGE) attr1.set_value(10) citation1 = Citation() citation1.set_reference_handle('123456') citation1.set_page('p.10') citation2 = Citation() citation2.set_reference_handle('234567') citation2.set_page('p.20') attr1.add_citation(citation1.handle) attr1.add_citation(citation2.handle) attr2 = Attribute() attr2.set_type(AttributeType.AGE) attr2.set_value(10) citation3 = Citation() citation3.set_reference_handle('123456') citation3.set_page('p.10') citation4 = Citation() citation4.set_reference_handle('654321') citation4.set_page('p.20') attr2.add_citation(citation3.handle) attr2.add_citation(citation4.handle) self.phoenix.add_attribute(attr1) self.ref_obj.add_attribute(attr2) self.phoenix.replace_citation_references('234567','654321') self.assertTrue(self.phoenix.is_equal(self.ref_obj)) class FamilyCheck(unittest.TestCase, PrivacyBaseTest, NoteBaseTest, CitationBaseTest, MediaBaseTest, AttrBaseTest): def setUp(self): self.phoenix = Family() self.phoenix.set_father_handle('123456') self.phoenix.set_mother_handle('654321') self.phoenix.set_relationship(FamilyRelType.MARRIED) self.titanic = Family() self.titanic.set_father_handle('123456') self.titanic.set_mother_handle('654321') 
class FamilyCheck(unittest.TestCase, PrivacyBaseTest, NoteBaseTest,
                  CitationBaseTest, MediaBaseTest, AttrBaseTest):
    """Merge, child-ref and event-ref handling for Family objects."""

    def setUp(self):
        self.phoenix = Family()
        self.phoenix.set_father_handle('123456')
        self.phoenix.set_mother_handle('654321')
        self.phoenix.set_relationship(FamilyRelType.MARRIED)
        self.titanic = Family()
        self.titanic.set_father_handle('123456')
        self.titanic.set_mother_handle('654321')
        self.titanic.set_relationship(FamilyRelType.MARRIED)
        self.ref_obj = Family()
        self.ref_obj.set_father_handle('123456')
        self.ref_obj.set_mother_handle('654321')
        self.ref_obj.set_relationship(FamilyRelType.MARRIED)

    def test_relation_merge(self):
        # a known relationship from titanic replaces UNKNOWN
        self.phoenix.set_relationship(FamilyRelType.UNKNOWN)
        self.titanic.set_relationship(FamilyRelType.UNMARRIED)
        self.ref_obj.set_relationship(FamilyRelType.UNMARRIED)
        self.phoenix.merge(self.titanic)
        self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct())

    def test_eventref_merge(self):
        evtref = EventRef()
        evtref.set_role(EventRoleType.WITNESS)
        self.titanic.add_event_ref(evtref)
        self.ref_obj.add_event_ref(evtref)
        self.phoenix.merge(self.titanic)
        self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct())

    def test_ldsord_merge(self):
        ldsord = LdsOrd()
        ldsord.set_temple('London')
        self.titanic.add_lds_ord(ldsord)
        self.ref_obj.add_lds_ord(ldsord)
        self.phoenix.merge(self.titanic)
        self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct())

    def test_childref_merge(self):
        childref = ChildRef()
        childref.set_reference_handle('123456')
        self.titanic.add_child_ref(childref)
        self.ref_obj.add_child_ref(childref)
        self.phoenix.merge(self.titanic)
        self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct())

    def test_mergechildref_identical(self):
        childref1 = ChildRef()
        childref1.set_reference_handle('123456')
        childref2 = ChildRef()
        childref2.set_reference_handle('123456')
        childref3 = ChildRef()
        childref3.set_reference_handle('123456')
        self.phoenix.add_child_ref(childref1)
        self.titanic.add_child_ref(childref2)
        self.ref_obj.add_child_ref(childref3)
        self.phoenix._merge_child_ref_list(self.titanic)
        self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct())

    def test_mergechildref_equal(self):
        childref1 = ChildRef()
        childref1.set_reference_handle('123456')
        childref2 = ChildRef()
        childref2.set_reference_handle('123456')
        childref2.add_note('N1')
        childref3 = ChildRef()
        childref3.set_reference_handle('123456')
        childref3.add_note('N1')
        self.phoenix.add_child_ref(childref1)
        self.titanic.add_child_ref(childref2)
        self.ref_obj.add_child_ref(childref3)
        self.phoenix._merge_child_ref_list(self.titanic)
        self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct())

    def test_mergechildref_different(self):
        childref1 = ChildRef()
        childref1.set_reference_handle('123456')
        childref2 = ChildRef()
        childref2.set_reference_handle('654321')
        childref3 = ChildRef()
        childref3.set_reference_handle('123456')
        childref4 = ChildRef()
        childref4.set_reference_handle('654321')
        self.phoenix.add_child_ref(childref1)
        self.titanic.add_child_ref(childref2)
        self.ref_obj.add_child_ref(childref3)
        self.ref_obj.add_child_ref(childref4)
        self.phoenix._merge_child_ref_list(self.titanic)
        self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct())

    def test_replace_childref_absent(self):
        # replacement handle not yet present: simple in-place substitution
        childref1 = ChildRef()
        childref1.set_reference_handle('234567')
        childref2 = ChildRef()
        childref2.set_reference_handle('345678')
        childref3 = ChildRef()
        childref3.set_reference_handle('765432')
        childref4 = ChildRef()
        childref4.set_reference_handle('345678')
        self.phoenix.add_child_ref(childref1)
        self.phoenix.add_child_ref(childref2)
        self.ref_obj.add_child_ref(childref3)
        self.ref_obj.add_child_ref(childref4)
        self.phoenix.replace_handle_reference('Person', '234567', '765432')
        self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct())

    def test_replace_childref_identical(self):
        # replacement collides with an identical existing ref: deduplicated
        childref1 = ChildRef()
        childref1.set_reference_handle('234567')
        childref2 = ChildRef()
        childref2.set_reference_handle('765432')
        childref3 = ChildRef()
        childref3.set_reference_handle('765432')
        self.phoenix.add_child_ref(childref1)
        self.phoenix.add_child_ref(childref2)
        self.ref_obj.add_child_ref(childref3)
        self.phoenix.replace_handle_reference('Person', '234567', '765432')
        self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct())

    def test_replace_childref_equal(self):
        # equal refs merge; privacy from the replaced ref survives
        childref1 = ChildRef()
        childref1.set_reference_handle('234567')
        childref1.set_privacy(True)
        childref2 = ChildRef()
        childref2.set_reference_handle('765432')
        childref3 = ChildRef()
        childref3.set_reference_handle('765432')
        childref3.set_privacy(True)
        self.phoenix.add_child_ref(childref1)
        self.phoenix.add_child_ref(childref2)
        self.ref_obj.add_child_ref(childref3)
        self.phoenix.replace_handle_reference('Person', '234567', '765432')
        self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct())

    def test_replace_childref_different(self):
        # impossible, is_equivalent is only DIFFERENT if handles differ.
        childref1 = ChildRef()
        childref1.set_reference_handle('234567')
        childref1.set_mother_relation('Adopted')
        childref2 = ChildRef()
        childref2.set_reference_handle('765432')
        childref3 = ChildRef()
        childref3.set_reference_handle('765432')
        self.phoenix.add_child_ref(childref1)
        self.phoenix.add_child_ref(childref2)
        self.ref_obj.add_child_ref(childref3)
        self.phoenix.replace_handle_reference('Person', '234567', '765432')
        self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct())

    def test_mergeeventref_identical(self):
        eventref1 = EventRef()
        eventref1.set_role(EventRoleType.WITNESS)
        eventref2 = EventRef()
        eventref2.set_role(EventRoleType.WITNESS)
        eventref3 = EventRef()
        eventref3.set_role(EventRoleType.WITNESS)
        self.phoenix.add_event_ref(eventref1)
        self.titanic.add_event_ref(eventref2)
        self.ref_obj.add_event_ref(eventref3)
        self.phoenix._merge_event_ref_list(self.titanic)
        self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct())

    def test_mergeeventref_equal(self):
        eventref1 = EventRef()
        eventref1.set_role(EventRoleType.WITNESS)
        eventref2 = EventRef()
        eventref2.set_role(EventRoleType.WITNESS)
        eventref2.add_note('N1')
        eventref3 = EventRef()
        eventref3.set_role(EventRoleType.WITNESS)
        eventref3.add_note('N1')
        self.phoenix.add_event_ref(eventref1)
        self.titanic.add_event_ref(eventref2)
        self.ref_obj.add_event_ref(eventref3)
        self.phoenix._merge_event_ref_list(self.titanic)
        self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct())

    def test_mergeeventref_different(self):
        eventref1 = EventRef()
        eventref1.set_role(EventRoleType.WITNESS)
        eventref2 = EventRef()
        eventref2.set_role(EventRoleType.CLERGY)
        eventref3 = EventRef()
        eventref3.set_role(EventRoleType.WITNESS)
        eventref4 = EventRef()
        eventref4.set_role(EventRoleType.CLERGY)
        self.phoenix.add_event_ref(eventref1)
        self.titanic.add_event_ref(eventref2)
        self.ref_obj.add_event_ref(eventref3)
        self.ref_obj.add_event_ref(eventref4)
        self.phoenix._merge_event_ref_list(self.titanic)
        self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct())

    def test_replace_event_absent(self):
        eventref1 = EventRef()
        eventref1.set_reference_handle('123456')
        eventref2 = EventRef()
        eventref2.set_reference_handle('234567')
        eventref3 = EventRef()
        eventref3.set_reference_handle('654321')
        eventref4 = EventRef()
        eventref4.set_reference_handle('234567')
        self.phoenix.add_event_ref(eventref1)
        self.phoenix.add_event_ref(eventref2)
        self.ref_obj.add_event_ref(eventref3)
        self.ref_obj.add_event_ref(eventref4)
        self.phoenix.replace_handle_reference('Event', '123456', '654321')
        self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct())

    def test_replace_event_identical(self):
        eventref1 = EventRef()
        eventref1.set_reference_handle('123456')
        eventref2 = EventRef()
        eventref2.set_reference_handle('654321')
        eventref3 = EventRef()
        eventref3.set_reference_handle('654321')
        self.phoenix.add_event_ref(eventref1)
        self.phoenix.add_event_ref(eventref2)
        self.ref_obj.add_event_ref(eventref3)
        self.phoenix.replace_handle_reference('Event', '123456', '654321')
        self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct())

    def test_replace_event_equal(self):
        eventref1 = EventRef()
        eventref1.set_reference_handle('123456')
        eventref1.set_privacy(True)
        eventref2 = EventRef()
        eventref2.set_reference_handle('654321')
        eventref3 = EventRef()
        eventref3.set_reference_handle('654321')
        eventref3.set_privacy(True)
        self.phoenix.add_event_ref(eventref1)
        self.phoenix.add_event_ref(eventref2)
        self.ref_obj.add_event_ref(eventref3)
        self.phoenix.replace_handle_reference('Event', '123456', '654321')
        self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct())

    def test_replace_event_different(self):
        # refs differing in role both survive the handle replacement
        eventref1 = EventRef()
        eventref1.set_reference_handle('123456')
        eventref1.set_role(EventRoleType.WITNESS)
        eventref2 = EventRef()
        eventref2.set_reference_handle('654321')
        eventref3 = EventRef()
        eventref3.set_reference_handle('654321')
        eventref3.set_role(EventRoleType.WITNESS)
        eventref4 = EventRef()
        eventref4.set_reference_handle('654321')
        self.phoenix.add_event_ref(eventref1)
        self.phoenix.add_event_ref(eventref2)
        self.ref_obj.add_event_ref(eventref3)
        self.ref_obj.add_event_ref(eventref4)
        self.phoenix.replace_handle_reference('Event', '123456', '654321')
        self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct())

    def test_replace_event_order_first(self):
        # deduplication keeps the earlier slot in the list
        eventref1 = EventRef()
        eventref1.set_reference_handle('123456')
        eventref2 = EventRef()
        eventref2.set_reference_handle('234567')
        eventref3 = EventRef()
        eventref3.set_reference_handle('654321')
        eventref4 = EventRef()
        eventref4.set_reference_handle('123456')
        eventref5 = EventRef()
        eventref5.set_reference_handle('234567')
        self.phoenix.add_event_ref(eventref1)
        self.phoenix.add_event_ref(eventref2)
        self.phoenix.add_event_ref(eventref3)
        self.ref_obj.add_event_ref(eventref4)
        self.ref_obj.add_event_ref(eventref5)
        self.phoenix.replace_handle_reference('Event', '654321', '123456')
        self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct())

    def test_replace_event_order_last(self):
        eventref1 = EventRef()
        eventref1.set_reference_handle('123456')
        eventref2 = EventRef()
        eventref2.set_reference_handle('234567')
        eventref3 = EventRef()
        eventref3.set_reference_handle('654321')
        eventref4 = EventRef()
        eventref4.set_reference_handle('234567')
        eventref5 = EventRef()
        eventref5.set_reference_handle('654321')
        self.phoenix.add_event_ref(eventref1)
        self.phoenix.add_event_ref(eventref2)
        self.phoenix.add_event_ref(eventref3)
        self.ref_obj.add_event_ref(eventref4)
        self.ref_obj.add_event_ref(eventref5)
        self.phoenix.replace_handle_reference('Event', '123456', '654321')
        self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct())


class LdsordCheck(unittest.TestCase, PrivacyBaseTest, NoteBaseTest,
                  CitationBaseTest):
    """Equivalence behaviour of LdsOrd objects."""

    def setUp(self):
        self.phoenix = LdsOrd()
        self.phoenix.set_temple('London, England')
        self.titanic = LdsOrd(self.phoenix)
        self.ref_obj = LdsOrd(self.phoenix)

    def test_type_equivalence(self):
        self.assertEqual(self.phoenix.is_equivalent(self.titanic), IDENTICAL)
        self.titanic.set_type(LdsOrd.CONFIRMATION)
        self.assertEqual(self.phoenix.is_equivalent(self.titanic), DIFFERENT)

    def test_date_equivalence(self):
        date = Date()
        date.set_yr_mon_day(1999, 12, 5)
        self.titanic.set_date_object(date)
        self.assertEqual(self.phoenix.is_equivalent(self.titanic), DIFFERENT)

    def test_temple_equivalence(self):
        self.titanic.set_temple('Baton Rouge, Louisiana')
        self.assertEqual(self.phoenix.is_equivalent(self.titanic), DIFFERENT)

    def test_status_equivalence(self):
        self.titanic.set_status(LdsOrd.STATUS_CLEARED)
        self.assertEqual(self.phoenix.is_equivalent(self.titanic), DIFFERENT)

    def test_famc_equivalence(self):
        self.titanic.set_family_handle('F1')
        self.assertEqual(self.phoenix.is_equivalent(self.titanic), DIFFERENT)

    def test_privacy_equivalence(self):
        self.titanic.set_privacy(True)
        self.assertEqual(self.phoenix.is_equivalent(self.titanic), EQUAL)
class LdsordBaseCheck(unittest.TestCase):
    """Merging of LDS ordinance lists (LdsOrdBase._merge_lds_ord_list)."""

    def setUp(self):
        self.phoenix = LdsOrdBase()
        self.titanic = LdsOrdBase()
        self.ref_list = LdsOrdBase()
        ldsord = LdsOrd()
        ldsord.set_temple('London, England')
        self.phoenix.add_lds_ord(ldsord)

    def test_identical(self):
        ldsord = LdsOrd()
        ldsord.set_temple('London, England')
        self.titanic.add_lds_ord(ldsord)
        self.ref_list.add_lds_ord(ldsord)
        self.phoenix._merge_lds_ord_list(self.titanic)
        self.assertEqual(self.phoenix.to_struct(), self.ref_list.to_struct())

    def test_equal(self):
        # equal ordinance that is private: privacy is merged in
        ldsord = LdsOrd()
        ldsord.set_temple('London, England')
        ldsord.set_privacy(True)
        self.titanic.add_lds_ord(ldsord)
        self.ref_list.add_lds_ord(ldsord)
        self.phoenix._merge_lds_ord_list(self.titanic)
        self.assertEqual(self.phoenix.to_struct(), self.ref_list.to_struct())

    def test_different(self):
        # different temple: ordinance is appended to phoenix's list
        ldsord = LdsOrd()
        ldsord.set_temple('Baton Rouge, Louisiana')
        self.titanic.add_lds_ord(ldsord)
        self.ref_list = LdsOrdBase(self.phoenix)
        self.ref_list.add_lds_ord(ldsord)
        self.phoenix._merge_lds_ord_list(self.titanic)
        self.assertEqual(self.phoenix.to_struct(), self.ref_list.to_struct())


class MediaBaseCheck(unittest.TestCase):
    """Merging and handle replacement of media reference lists."""

    def setUp(self):
        self.phoenix = MediaBase()
        self.titanic = MediaBase()
        self.ref_list = MediaBase()
        mediaref = MediaRef()
        mediaref.set_reference_handle('123456')
        mediaref.set_rectangle('10 10 90 90')
        self.phoenix.add_media_reference(mediaref)

    def test_merge_identical(self):
        mediaref = MediaRef()
        mediaref.set_reference_handle('123456')
        mediaref.set_rectangle('10 10 90 90')
        self.titanic.add_media_reference(mediaref)
        self.ref_list.add_media_reference(mediaref)
        self.phoenix._merge_media_list(self.titanic)
        self.assertEqual(self.phoenix.to_struct(), self.ref_list.to_struct())

    def test_merge_equal(self):
        mediaref = MediaRef()
        mediaref.set_reference_handle('123456')
        mediaref.set_rectangle('10 10 90 90')
        mediaref.set_privacy(True)
        self.titanic.add_media_reference(mediaref)
        self.ref_list.add_media_reference(mediaref)
        self.phoenix._merge_media_list(self.titanic)
        self.assertEqual(self.phoenix.to_struct(), self.ref_list.to_struct())

    def test_merge_different(self):
        # same handle but different rectangle counts as a different ref
        mediaref1 = MediaRef()
        mediaref1.set_reference_handle('123456')
        mediaref1.set_rectangle('10 10 90 90')
        mediaref2 = MediaRef()
        mediaref2.set_reference_handle('123456')
        mediaref2.set_rectangle('20 10 90 90')
        self.titanic.add_media_reference(mediaref2)
        self.ref_list.add_media_reference(mediaref1)
        self.ref_list.add_media_reference(mediaref2)
        self.phoenix._merge_media_list(self.titanic)
        self.assertEqual(self.phoenix.to_struct(), self.ref_list.to_struct())

    def test_replace_absent(self):
        mediaref1 = MediaRef()
        mediaref1.set_reference_handle('654321')
        mediaref1.set_rectangle('10 10 90 90')
        self.ref_list.add_media_reference(mediaref1)
        self.phoenix.replace_media_references('123456', '654321')
        self.assertEqual(self.phoenix.to_struct(), self.ref_list.to_struct())

    def test_replace_identical(self):
        mediaref1 = MediaRef()
        mediaref1.set_reference_handle('654321')
        mediaref1.set_rectangle('10 10 90 90')
        mediaref2 = MediaRef()
        mediaref2.set_reference_handle('654321')
        mediaref2.set_rectangle('10 10 90 90')
        self.phoenix.add_media_reference(mediaref1)
        self.ref_list.add_media_reference(mediaref2)
        self.phoenix.replace_media_references('123456', '654321')
        self.assertEqual(self.phoenix.to_struct(), self.ref_list.to_struct())

    def test_replace_equal(self):
        mediaref1 = MediaRef()
        mediaref1.set_reference_handle('654321')
        mediaref1.set_rectangle('10 10 90 90')
        mediaref1.set_privacy(True)
        mediaref2 = MediaRef()
        mediaref2.set_reference_handle('654321')
        mediaref2.set_rectangle('10 10 90 90')
        mediaref2.set_privacy(True)
        self.phoenix.add_media_reference(mediaref1)
        self.ref_list.add_media_reference(mediaref2)
        self.phoenix.replace_media_references('123456', '654321')
        self.assertEqual(self.phoenix.to_struct(), self.ref_list.to_struct())

    def test_replace_different(self):
        mediaref1 = MediaRef()
        mediaref1.set_reference_handle('654321')
        mediaref1.set_rectangle('20 20 90 90')
        mediaref2 = MediaRef()
        mediaref2.set_reference_handle('654321')
        mediaref2.set_rectangle('10 10 90 90')
        mediaref3 = MediaRef()
        mediaref3.set_reference_handle('654321')
        mediaref3.set_rectangle('20 20 90 90')
        self.phoenix.add_media_reference(mediaref1)
        self.ref_list.add_media_reference(mediaref2)
        self.ref_list.add_media_reference(mediaref3)
        self.phoenix.replace_media_references('123456', '654321')
        self.assertEqual(self.phoenix.to_struct(), self.ref_list.to_struct())

    def test_replace_order_first(self):
        # replacement ends up in the earlier list position
        mediaref1 = MediaRef()
        mediaref1.set_reference_handle('234567')
        mediaref1.set_rectangle('10 10 90 90')
        mediaref2 = MediaRef()
        mediaref2.set_reference_handle('654321')
        mediaref2.set_rectangle('10 10 90 90')
        mediaref3 = MediaRef()
        mediaref3.set_reference_handle('123456')
        mediaref3.set_rectangle('10 10 90 90')
        mediaref4 = MediaRef()
        mediaref4.set_reference_handle('234567')
        mediaref4.set_rectangle('10 10 90 90')
        self.phoenix.add_media_reference(mediaref1)
        self.phoenix.add_media_reference(mediaref2)
        self.ref_list.add_media_reference(mediaref3)
        self.ref_list.add_media_reference(mediaref4)
        self.phoenix.replace_media_references('654321', '123456')
        self.assertEqual(self.phoenix.to_struct(), self.ref_list.to_struct())

    def test_replace_order_last(self):
        mediaref1 = MediaRef()
        mediaref1.set_reference_handle('234567')
        mediaref1.set_rectangle('10 10 90 90')
        mediaref2 = MediaRef()
        mediaref2.set_reference_handle('654321')
        mediaref2.set_rectangle('10 10 90 90')
        mediaref3 = MediaRef()
        mediaref3.set_reference_handle('234567')
        mediaref3.set_rectangle('10 10 90 90')
        mediaref4 = MediaRef()
        mediaref4.set_reference_handle('654321')
        mediaref4.set_rectangle('10 10 90 90')
        self.phoenix.add_media_reference(mediaref1)
        self.phoenix.add_media_reference(mediaref2)
        self.ref_list.add_media_reference(mediaref3)
        self.ref_list.add_media_reference(mediaref4)
        self.phoenix.replace_media_references('123456', '654321')
        self.assertEqual(self.phoenix.to_struct(), self.ref_list.to_struct())


class MediaCheck(unittest.TestCase, PrivacyBaseTest, AttrBaseTest,
                 NoteBaseTest, CitationBaseTest):
    """Merge behaviour of Media objects; all checks come from the mixins."""

    def setUp(self):
        self.phoenix = Media()
        self.phoenix.set_path('example.png')
        self.titanic = Media.from_struct(self.phoenix.to_struct())
        self.ref_obj = Media.from_struct(self.phoenix.to_struct())
class MediaRefCheck(unittest.TestCase, PrivacyBaseTest, AttrBaseTest,
                    CitationBaseTest, NoteBaseTest):
    """Equivalence behaviour of MediaRef objects."""

    def setUp(self):
        self.phoenix = MediaRef()
        self.phoenix.set_rectangle("10 10 90 90")
        self.titanic = MediaRef(self.phoenix)
        self.ref_obj = MediaRef(self.phoenix)

    def test_ref_equivalence(self):
        self.assertEqual(self.phoenix.is_equivalent(self.titanic), IDENTICAL)
        self.titanic.set_reference_handle('123456')
        self.assertEqual(self.phoenix.is_equivalent(self.titanic), DIFFERENT)

    def test_rect_equivalence(self):
        self.titanic.set_rectangle("20 20 80 80")
        self.assertEqual(self.phoenix.is_equivalent(self.titanic), DIFFERENT)

    def test_privacy_equivalence(self):
        self.titanic.set_privacy(True)
        self.assertEqual(self.phoenix.is_equivalent(self.titanic), EQUAL)


class NameCheck(unittest.TestCase, PrivacyBaseTest, NoteBaseTest,
                CitationBaseTest):
    """Equivalence behaviour of Name objects."""

    def setUp(self):
        self.phoenix = Name()
        self.phoenix.set_first_name('Willem')
        surname = Surname()
        surname.set_surname("Oranje")
        self.phoenix.add_surname(surname)
        self.titanic = Name(self.phoenix)
        self.ref_obj = Name(self.phoenix)

    def test_datalist_equivalence(self):
        self.assertEqual(self.phoenix.is_equivalent(self.titanic), IDENTICAL)
        self.titanic.set_first_name('Maurits')
        self.assertEqual(self.phoenix.is_equivalent(self.titanic), DIFFERENT)

    def test_date_equivalence(self):
        date = Date()
        date.set_yr_mon_day(1999, 12, 5)
        self.titanic.set_date_object(date)
        self.assertEqual(self.phoenix.is_equivalent(self.titanic), DIFFERENT)

    def test_surname_equivalence(self):
        surname = Surname()
        surname.set_surname("Nassau")
        self.titanic.add_surname(surname)
        self.assertEqual(self.phoenix.is_equivalent(self.titanic), DIFFERENT)

    def test_privacy_equivalence(self):
        self.titanic.set_privacy(True)
        self.assertEqual(self.phoenix.is_equivalent(self.titanic), EQUAL)


class NoteCheck(unittest.TestCase, PrivacyBaseTest):
    """Merge behaviour of Note objects; checks come from the privacy mixin."""

    def setUp(self):
        self.phoenix = Note("hello world")
        self.titanic = Note("hello world")
        self.ref_obj = Note("hello world")


class NoteBaseCheck(unittest.TestCase):
    """Merging and replacement of note handle lists (NoteBase)."""

    def setUp(self):
        self.phoenix = NoteBase()
        self.titanic = NoteBase()
        note = Note("hello world")
        note.set_handle('123456')
        self.phoenix.add_note(note.get_handle())

    def test_identical(self):
        ref_note_list = NoteBase(self.phoenix)
        self.titanic.add_note(self.phoenix.get_note_list()[0])
        self.phoenix._merge_note_list(self.titanic)
        self.assertEqual(self.phoenix.to_struct(), ref_note_list.to_struct())

    def test_different(self):
        ref_note_list = NoteBase(self.phoenix)
        note = Note("note other")
        self.titanic.add_note(note.get_handle())
        ref_note_list.add_note(note.get_handle())
        self.phoenix._merge_note_list(self.titanic)
        self.assertEqual(self.phoenix.to_struct(), ref_note_list.to_struct())

    def test_replace_nonew(self):
        note = Note("note other")
        note.set_handle('654321')
        ref_note_list = NoteBase()
        ref_note_list.add_note(note.get_handle())
        self.phoenix.replace_note_references('123456', '654321')
        self.assertEqual(self.phoenix.to_struct(), ref_note_list.to_struct())

    def test_replace_newpresent(self):
        # replacement handle already in the list: old one is dropped
        note = Note("note other")
        note.set_handle('654321')
        note2 = Note("yet another note")
        note2.set_handle('234567')
        self.phoenix.add_note(note2.get_handle())
        self.phoenix.add_note(note.get_handle())
        ref_note_list = NoteBase()
        ref_note_list.add_note(note2.get_handle())
        ref_note_list.add_note(note.get_handle())
        self.phoenix.replace_note_references('123456', '654321')
        self.assertEqual(self.phoenix.to_struct(), ref_note_list.to_struct())

    def todo_test_replace_child(self):
        # disabled (todo_ prefix keeps unittest from collecting it)
        ref_note_list = NoteBase()
        note = Note("")
        note.set_handle('123456')
        ref_note_list.add_note(note.get_handle())
        self.phoenix.replace_note_references('', '')
        self.assertEqual(self.phoenix.to_struct(), ref_note_list.to_struct())
class PersonCheck(unittest.TestCase, PrivacyBaseTest, MediaBaseTest,
                  AttrBaseTest, NoteBaseTest, CitationBaseTest):
    """Handle replacement in Person event/person reference lists.

    NOTE(review): this class continues past the end of the reviewed chunk;
    the body below ends at the exact point where the visible text is cut,
    so the remainder of the file supplies the continuation.
    """

    def setUp(self):
        self.phoenix = Person()
        name = Name()
        name.set_first_name('Adam')
        self.phoenix.set_primary_name(name)
        self.titanic = Person()
        self.titanic.set_primary_name(name)
        self.ref_obj = Person()
        self.ref_obj.set_primary_name(name)

    def test_replace_eventhandle_nonew(self):
        evtref = EventRef()
        evtref.set_reference_handle('123456')
        evtref2 = EventRef()
        evtref2.set_reference_handle('654321')
        self.phoenix.add_event_ref(evtref)
        self.ref_obj.add_event_ref(evtref2)
        self.phoenix._replace_handle_reference('Event', '123456', '654321')
        self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct())

    def test_replace_eventhandle_identical(self):
        evtref = EventRef()
        evtref.set_reference_handle('123456')
        evtref2 = EventRef()
        evtref2.set_reference_handle('234567')
        evtref3 = EventRef()
        evtref3.set_reference_handle('654321')
        self.phoenix.add_event_ref(evtref)
        self.phoenix.add_event_ref(evtref2)
        self.phoenix.add_event_ref(evtref3)
        self.ref_obj.add_event_ref(evtref2)
        self.ref_obj.add_event_ref(evtref3)
        self.phoenix._replace_handle_reference('Event', '123456', '654321')
        self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct())

    def test_replace_eventhandle_equal(self):
        evtref = EventRef()
        evtref.set_reference_handle('123456')
        evtref2 = EventRef()
        evtref2.set_reference_handle('234567')
        evtref3 = EventRef()
        evtref3.set_reference_handle('654321')
        evtref3.set_privacy(True)
        self.phoenix.add_event_ref(evtref)
        self.phoenix.add_event_ref(evtref2)
        self.phoenix.add_event_ref(evtref3)
        self.ref_obj.add_event_ref(evtref2)
        self.ref_obj.add_event_ref(evtref3)
        self.phoenix._replace_handle_reference('Event', '123456', '654321')
        self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct())

    def test_replace_eventhandle_different(self):
        evtref = EventRef()
        evtref.set_reference_handle('123456')
        evtref2 = EventRef()
        evtref2.set_reference_handle('234567')
        evtref3 = EventRef()
        evtref3.set_reference_handle('654321')
        self.phoenix.add_event_ref(evtref)
        self.phoenix.add_event_ref(evtref2)
        self.ref_obj.add_event_ref(evtref3)
        self.ref_obj.add_event_ref(evtref2)
        self.phoenix._replace_handle_reference('Event', '123456', '654321')
        self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct())

    def test_replace_birth_lower(self):
        # deduplication shifts birth_ref_index down by one
        evtref = EventRef()
        evtref.set_reference_handle('123456')
        evtref2 = EventRef()
        evtref2.set_reference_handle('654321')
        self.phoenix.add_event_ref(evtref)
        self.phoenix.add_event_ref(evtref2)
        self.phoenix.birth_ref_index = 2
        self.ref_obj.add_event_ref(evtref2)
        self.ref_obj.birth_ref_index = 1
        self.phoenix._replace_handle_reference('Event', '123456', '654321')
        self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct())

    def test_replace_birth_minusone(self):
        # the birth ref itself is removed: index resets to -1
        evtref = EventRef()
        evtref.set_reference_handle('654321')
        evtref2 = EventRef()
        evtref2.set_reference_handle('123456')
        self.phoenix.add_event_ref(evtref)
        self.phoenix.add_event_ref(evtref2)
        self.phoenix.birth_ref_index = 1
        self.ref_obj.add_event_ref(evtref2)
        self.ref_obj.birth_ref_index = -1
        self.phoenix._replace_handle_reference('Event', '123456', '654321')
        self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct())

    def test_replace_death_lower(self):
        evtref = EventRef()
        evtref.set_reference_handle('123456')
        evtref2 = EventRef()
        evtref2.set_reference_handle('654321')
        self.phoenix.add_event_ref(evtref)
        self.phoenix.add_event_ref(evtref2)
        self.phoenix.death_ref_index = 2
        self.ref_obj.add_event_ref(evtref2)
        self.ref_obj.death_ref_index = 1
        self.phoenix._replace_handle_reference('Event', '123456', '654321')
        self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct())

    def test_replace_death_minusone(self):
        evtref = EventRef()
        evtref.set_reference_handle('654321')
        evtref2 = EventRef()
        evtref2.set_reference_handle('123456')
        self.phoenix.add_event_ref(evtref)
        self.phoenix.add_event_ref(evtref2)
        self.phoenix.death_ref_index = 1
        self.ref_obj.add_event_ref(evtref2)
        self.ref_obj.death_ref_index = -1
        self.phoenix._replace_handle_reference('Event', '123456', '654321')
        self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct())

    def test_replace_personhandle_nonew(self):
        personref = PersonRef()
        personref.set_reference_handle('123456')
        self.phoenix.add_person_ref(personref)
        personref2 = PersonRef()
        personref2.set_reference_handle('654321')
        self.ref_obj.add_person_ref(personref2)
        self.phoenix._replace_handle_reference('Person', '123456', '654321')
        self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct())

    def test_replace_personhandle_identical(self):
        personref = PersonRef()
        personref.set_reference_handle('123456')
        personref2 = PersonRef()
        personref2.set_reference_handle('234567')
        personref3 = PersonRef()
        personref3.set_reference_handle('654321')
        self.phoenix.add_person_ref(personref)
        self.phoenix.add_person_ref(personref2)
        self.phoenix.add_person_ref(personref3)
        self.ref_obj.add_person_ref(personref2)
        self.ref_obj.add_person_ref(personref3)
        self.phoenix._replace_handle_reference('Person', '123456', '654321')
        self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct())

    def test_replace_personhandle_equal(self):
        personref = PersonRef()
        personref.set_reference_handle('123456')
        personref.set_privacy(True)
        personref2 = PersonRef()
        personref2.set_reference_handle('234567')
        personref3 = PersonRef()
        personref3.set_reference_handle('654321')
        personref4 = PersonRef()
        personref4.set_reference_handle('654321')
        personref4.set_privacy(True)
        self.phoenix.add_person_ref(personref)
        self.phoenix.add_person_ref(personref2)
        self.phoenix.add_person_ref(personref3)
        self.ref_obj.add_person_ref(personref2)
        self.ref_obj.add_person_ref(personref4)
        self.phoenix._replace_handle_reference('Person', '123456', '654321')
        self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct())

    def test_replace_personhandle_different(self):
        personref = PersonRef()
        personref.set_reference_handle('123456')
        personref2 = PersonRef()
        personref2.set_reference_handle('234567')
        personref3 = PersonRef()
        personref3.set_reference_handle('654321')
        self.phoenix.add_person_ref(personref)
        self.phoenix.add_person_ref(personref2)
        self.ref_obj.add_person_ref(personref3)
        self.ref_obj.add_person_ref(personref2)
        self.phoenix._replace_handle_reference('Person', '123456', '654321')
        self.assertEqual(self.phoenix.to_struct(),
self.ref_obj.to_struct()) def test_merge_person_primaryname(self): name = Name() name.set_first_name('Abel') self.titanic.set_primary_name(name) self.ref_obj.add_alternate_name(name) self.phoenix.merge(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) def test_merge_person_altname(self): name = Name() name.set_first_name('Abel') self.titanic.add_alternate_name(name) self.ref_obj.add_alternate_name(name) self.phoenix.merge(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) def test_merge_person_eventref(self): evtref = EventRef() evtref.set_reference_handle('123456') self.titanic.add_event_ref(evtref) self.ref_obj.add_event_ref(evtref) self.phoenix.merge(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) def test_merge_person_ldsord(self): ldsord = LdsOrd() ldsord.set_type(LdsOrd.BAPTISM) self.titanic.add_lds_ord(ldsord) self.ref_obj.add_lds_ord(ldsord) self.phoenix.merge(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) def test_merge_person_address(self): address = Address() address.set_city('The Hague') self.titanic.add_address(address) self.ref_obj.add_address(address) self.phoenix.merge(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) def test_merge_person_personref(self): personref = PersonRef() personref.set_reference_handle('123456') self.titanic.add_person_ref(personref) self.ref_obj.add_person_ref(personref) self.phoenix.merge(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) def todo_test_merge_person_aschild(self): pass def todo_test_merge_person_asparent(self): pass def test_altname_identical(self): name = Name() name.set_first_name('Abel') name2 = Name() name2.set_first_name('Abel') self.phoenix.add_alternate_name(name) self.titanic.add_alternate_name(name2) self.ref_obj.add_alternate_name(name) self.phoenix._merge_alternate_names(self.titanic) 
self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) def test_altname_equal(self): name = Name() name.set_first_name('Abel') name2 = Name() name2.set_first_name('Abel') name2.set_privacy(True) self.phoenix.add_alternate_name(name) self.titanic.add_alternate_name(name2) self.ref_obj.add_alternate_name(name2) self.phoenix._merge_alternate_names(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) def test_altname_different(self): name = Name() name.set_first_name('Abel') name2 = Name() name2.set_first_name('Cain') self.phoenix.add_alternate_name(name) self.titanic.add_alternate_name(name2) self.ref_obj.add_alternate_name(name) self.ref_obj.add_alternate_name(name2) self.phoenix._merge_alternate_names(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) def test_eventrefs_identical(self): evtref = EventRef() evtref.set_reference_handle('123456') evtref2 = EventRef() evtref2.set_reference_handle('123456') self.phoenix.add_event_ref(evtref) self.titanic.add_event_ref(evtref2) self.ref_obj.add_event_ref(evtref) self.phoenix._merge_event_ref_list(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) def test_eventrefs_equal(self): evtref = EventRef() evtref.set_reference_handle('123456') evtref2 = EventRef() evtref2.set_reference_handle('123456') evtref2.set_privacy(True) self.phoenix.add_event_ref(evtref) self.titanic.add_event_ref(evtref2) self.ref_obj.add_event_ref(evtref2) self.phoenix._merge_event_ref_list(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) def test_eventrefs_different(self): evtref = EventRef() evtref.set_reference_handle('123456') evtref2 = EventRef() evtref2.set_reference_handle('234567') self.phoenix.add_event_ref(evtref) self.titanic.add_event_ref(evtref2) self.ref_obj.add_event_ref(evtref) self.ref_obj.add_event_ref(evtref2) self.phoenix._merge_event_ref_list(self.titanic) 
self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) def test_eventrefs_birthref(self): evtref = EventRef() evtref.set_reference_handle('123456') evtref2 = EventRef() evtref2.set_reference_handle('234567') evtref3 = EventRef() evtref3.set_reference_handle('123456') self.phoenix.add_event_ref(evtref2) self.titanic.add_event_ref(evtref) self.titanic.birth_ref_index = 0 self.ref_obj.add_event_ref(evtref2) self.ref_obj.add_event_ref(evtref3) self.ref_obj.birth_ref_index = 1 self.phoenix._merge_event_ref_list(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) def test_eventrefs_deathref(self): evtref = EventRef() evtref.set_reference_handle('123456') evtref2 = EventRef() evtref2.set_reference_handle('234567') evtref3 = EventRef() evtref3.set_reference_handle('123456') self.phoenix.add_event_ref(evtref2) self.titanic.add_event_ref(evtref) self.titanic.death_ref_index = 0 self.ref_obj.add_event_ref(evtref2) self.ref_obj.add_event_ref(evtref3) self.ref_obj.death_ref_index = 1 self.phoenix._merge_event_ref_list(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) def test_merge_personrefs_identical(self): personref = PersonRef() personref.set_reference_handle('123456') self.phoenix.add_person_ref(personref) self.titanic.add_person_ref(personref) self.ref_obj.add_person_ref(personref) self.phoenix._merge_person_ref_list(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) def test_merge_personrefs_equal(self): personref = PersonRef() personref.set_reference_handle('123456') personref2 = PersonRef() personref2.set_reference_handle('123456') personref2.set_privacy(True) self.phoenix.add_person_ref(personref) self.titanic.add_person_ref(personref2) self.ref_obj.add_person_ref(personref2) self.phoenix._merge_person_ref_list(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) def test_merge_personrefs_different(self): personref = PersonRef() 
personref.set_reference_handle('123456') personref2 = PersonRef() personref2.set_reference_handle('234567') self.phoenix.add_person_ref(personref) self.titanic.add_person_ref(personref2) self.ref_obj.add_person_ref(personref) self.ref_obj.add_person_ref(personref2) self.phoenix._merge_person_ref_list(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) class PlaceCheck(unittest.TestCase, PrivacyBaseTest, MediaBaseTest, UrlBaseTest, NoteBaseTest, CitationBaseTest): def setUp(self): self.phoenix = Place() self.phoenix.set_title('Place 1') # __init__ copy has bad side effects, don't use it # self.titanic = Place(self.phoenix) self.titanic = Place() self.titanic.set_title('Place 1') # __init__ copy has bad side effects, don't use it # self.ref_obj = Place(self.phoenix) self.ref_obj = Place() self.ref_obj.set_title('Place 1') self.amsterdam = PlaceName() self.amsterdam.set_value('Amsterdam') self.rotterdam = PlaceName() self.rotterdam.set_value('Rotterdam') self.utrecht = PlaceName() self.utrecht.set_value('Utrecht') self.leiden = PlaceName() self.leiden.set_value('Leiden') def test_merge_primary_identical(self): self.phoenix.set_name(self.amsterdam) self.phoenix.set_type(PlaceType.CITY) self.titanic.set_title('Place 2') self.titanic.set_name(self.amsterdam) self.titanic.set_type(PlaceType.CITY) self.ref_obj.set_name(self.amsterdam) self.ref_obj.set_type(PlaceType.CITY) self.phoenix.merge(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) def test_merge_primary_different(self): self.phoenix.set_name(self.amsterdam) self.phoenix.set_type(PlaceType.CITY) self.titanic.set_title('Place 2') self.titanic.set_name(self.rotterdam) self.titanic.set_type(PlaceType.CITY) self.ref_obj.set_name(self.amsterdam) self.ref_obj.set_type(PlaceType.CITY) self.ref_obj.add_alternative_name(self.rotterdam) self.phoenix.merge(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) def 
test_merge_both_different(self): self.phoenix.set_name(self.amsterdam) self.phoenix.set_type(PlaceType.CITY) self.phoenix.add_alternative_name(self.utrecht) self.titanic.set_title('Place 2') self.titanic.set_name(self.rotterdam) self.titanic.set_type(PlaceType.CITY) self.titanic.add_alternative_name(self.leiden) self.ref_obj.set_name(self.amsterdam) self.ref_obj.set_type(PlaceType.CITY) # Base name shouldn't be in alt_names list # self.ref_obj.add_alternative_name(self.amsterdam) # alt_names must be in correct order for test to pass self.ref_obj.add_alternative_name(self.utrecht) self.ref_obj.add_alternative_name(self.rotterdam) self.ref_obj.add_alternative_name(self.leiden) self.phoenix.merge(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) def test_merge_alternative_identical(self): self.phoenix.set_name(self.amsterdam) self.phoenix.set_type(PlaceType.CITY) self.phoenix.add_alternative_name(self.rotterdam) self.titanic.set_title('Place 2') self.titanic.set_name(self.amsterdam) self.titanic.set_type(PlaceType.CITY) self.titanic.add_alternative_name(self.rotterdam) self.ref_obj.set_name(self.amsterdam) self.ref_obj.set_type(PlaceType.CITY) self.ref_obj.add_alternative_name(self.rotterdam) self.phoenix.merge(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) def test_merge_alternative_different(self): self.phoenix.set_name(self.amsterdam) self.phoenix.set_type(PlaceType.CITY) self.phoenix.add_alternative_name(self.rotterdam) self.titanic.set_title('Place 2') self.titanic.set_name(self.amsterdam) self.titanic.set_type(PlaceType.CITY) self.titanic.add_alternative_name(self.utrecht) self.ref_obj.set_name(self.amsterdam) self.ref_obj.set_type(PlaceType.CITY) self.ref_obj.add_alternative_name(self.rotterdam) self.ref_obj.add_alternative_name(self.utrecht) self.phoenix.merge(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) def test_merge_prialt_identical(self): 
self.phoenix.set_name(self.amsterdam) self.phoenix.set_type(PlaceType.CITY) self.phoenix.add_alternative_name(self.rotterdam) self.titanic.set_title('Place 2') self.titanic.set_name(self.rotterdam) self.titanic.set_type(PlaceType.CITY) self.ref_obj.set_name(self.amsterdam) self.ref_obj.set_type(PlaceType.CITY) self.ref_obj.add_alternative_name(self.rotterdam) self.phoenix.merge(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) def test_merge_prialt2(self): self.phoenix.set_name(self.amsterdam) self.phoenix.set_type(PlaceType.CITY) self.phoenix.add_alternative_name(self.rotterdam) self.titanic.set_title('Place 2') self.titanic.set_name(self.rotterdam) self.titanic.set_type(PlaceType.CITY) self.titanic.add_alternative_name(self.amsterdam) self.ref_obj.set_name(self.amsterdam) self.ref_obj.set_type(PlaceType.CITY) self.ref_obj.add_alternative_name(self.rotterdam) self.phoenix.merge(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) def test_merge_empty(self): self.phoenix.set_name(self.amsterdam) self.phoenix.set_type(PlaceType.CITY) self.phoenix.add_alternative_name(self.rotterdam) self.titanic.set_title('Place 2') # titanic gets empty name self.titanic.set_type(PlaceType.CITY) self.titanic.add_alternative_name(self.utrecht) self.titanic.add_alternative_name(PlaceName()) # empty alt_name self.ref_obj.set_name(self.amsterdam) self.ref_obj.set_type(PlaceType.CITY) self.ref_obj.add_alternative_name(self.rotterdam) self.ref_obj.add_alternative_name(self.utrecht) self.phoenix.merge(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) class RepoCheck(unittest.TestCase, PrivacyBaseTest, NoteBaseTest, UrlBaseTest): def setUp(self): self.phoenix = Repository() self.phoenix.set_name('Repo 1') self.phoenix.set_type(RepositoryType.LIBRARY) self.titanic = Repository() self.titanic.set_name('Repo 1') self.titanic.set_type(RepositoryType.LIBRARY) self.ref_obj = Repository() 
self.ref_obj.set_name('Repo 1') self.ref_obj.set_type(RepositoryType.LIBRARY) def test_address(self): address = Address() address.set_city('Amsterdam') self.titanic.add_address(address) self.ref_obj.add_address(address) self.phoenix.merge(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) def test_replace(self): address = Address() address.set_city('Utrecht') citation = Citation() citation.set_reference_handle('123456') address.add_citation(citation.handle) self.phoenix.add_address(address) address2 = Address() address2.set_city('Utrecht') citation2 = Citation() citation2.set_reference_handle('654321') address2.add_citation(citation2.handle) self.ref_obj.add_address(address2) self.phoenix.replace_citation_references('123456','654321') self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) class RepoRefCheck(unittest.TestCase, PrivacyBaseTest, NoteBaseTest): def setUp(self): self.phoenix = RepoRef() self.phoenix.set_reference_handle('123456') self.titanic = RepoRef(self.phoenix) self.ref_obj = RepoRef(self.phoenix) def test_handle_equivalence(self): self.assertEqual(self.phoenix.is_equivalent(self.titanic), IDENTICAL) self.titanic.set_reference_handle('654321') self.assertEqual(self.phoenix.is_equivalent(self.titanic), DIFFERENT) def test_callnr_equivalence(self): self.titanic.set_call_number('10') self.assertEqual(self.phoenix.is_equivalent(self.titanic), DIFFERENT) def test_privacy_equivalence(self): self.titanic.set_privacy(True) self.assertEqual(self.phoenix.is_equivalent(self.titanic), EQUAL) class SourceCheck(unittest.TestCase, PrivacyBaseTest, NoteBaseTest, MediaBaseTest): def setUp(self): self.phoenix = Source() self.phoenix.set_title("Source 1") self.titanic = Source() self.titanic.set_title("Source 1") self.ref_obj = Source() self.ref_obj.set_title("Source 1") def todo_test_replace(self): pass def test_merge_datamap(self): attr1 = SrcAttribute() attr1.set_type('A') attr1.set_value('a') attr2 = SrcAttribute() 
attr2.set_type('B') attr2.set_value('b') attr3 = SrcAttribute() attr3.set_type('B') attr3.set_value('bb') attr4 = SrcAttribute() attr4.set_type('C') attr4.set_value('c') self.phoenix.set_attribute_list([attr1, attr2]) self.titanic.set_attribute_list([attr3, attr4]) self.ref_obj.set_attribute_list([attr1, attr2, attr3, attr4]) self.phoenix.merge(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) def test_merge_reporef(self): reporef = RepoRef() reporef.set_reference_handle('123456') self.titanic.add_repo_reference(reporef) self.ref_obj.add_repo_reference(reporef) self.phoenix.merge(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) def test_merge_reporef_identical(self): reporef = RepoRef() reporef.set_reference_handle('123456') self.phoenix.add_repo_reference(reporef) self.titanic.add_repo_reference(reporef) self.ref_obj.add_repo_reference(reporef) self.phoenix._merge_reporef_list(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) def test_merge_reporef_equal(self): reporef = RepoRef() reporef.set_reference_handle('123456') reporef2 = RepoRef() reporef2.set_reference_handle('123456') reporef2.set_privacy(True) self.phoenix.add_repo_reference(reporef) self.titanic.add_repo_reference(reporef2) self.ref_obj.add_repo_reference(reporef2) self.phoenix._merge_reporef_list(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) def test_merge_reporef_different(self): reporef = RepoRef() reporef.set_reference_handle('123456') reporef2 = RepoRef() reporef2.set_reference_handle('234567') self.phoenix.add_repo_reference(reporef) self.titanic.add_repo_reference(reporef2) self.ref_obj.add_repo_reference(reporef) self.ref_obj.add_repo_reference(reporef2) self.phoenix._merge_reporef_list(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) def test_replace_reporef_nonew(self): reporef = RepoRef() 
reporef.set_reference_handle('123456') reporef2 = RepoRef() reporef2.set_reference_handle('654321') self.phoenix.add_repo_reference(reporef) self.ref_obj.add_repo_reference(reporef2) self.phoenix.replace_repo_references('123456','654321') self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) def test_replace_reporef_identical(self): reporef = RepoRef() reporef.set_reference_handle('123456') reporef2 = RepoRef() reporef2.set_reference_handle('234567') reporef3 = RepoRef() reporef3.set_reference_handle('654321') self.phoenix.add_repo_reference(reporef) self.phoenix.add_repo_reference(reporef2) self.phoenix.add_repo_reference(reporef3) self.ref_obj.add_repo_reference(reporef2) self.ref_obj.add_repo_reference(reporef3) self.phoenix.replace_repo_references('123456','654321') self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) def test_replace_reporef_equal(self): reporef = RepoRef() reporef.set_reference_handle('123456') reporef2 = RepoRef() reporef2.set_reference_handle('234567') reporef3 = RepoRef() reporef3.set_reference_handle('654321') reporef3.set_privacy(True) self.phoenix.add_repo_reference(reporef) self.phoenix.add_repo_reference(reporef2) self.phoenix.add_repo_reference(reporef3) self.ref_obj.add_repo_reference(reporef2) self.ref_obj.add_repo_reference(reporef3) self.phoenix.replace_repo_references('123456','654321') self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) def test_replace_reporef_different(self): reporef = RepoRef() reporef.set_reference_handle('123456') reporef2 = RepoRef() reporef2.set_reference_handle('234567') reporef3 = RepoRef() reporef3.set_reference_handle('654321') reporef3.set_call_number('100') reporef4 = RepoRef() reporef4.set_reference_handle('654321') self.phoenix.add_repo_reference(reporef) self.phoenix.add_repo_reference(reporef2) self.phoenix.add_repo_reference(reporef3) self.ref_obj.add_repo_reference(reporef4) self.ref_obj.add_repo_reference(reporef2) 
self.ref_obj.add_repo_reference(reporef3) self.phoenix.replace_repo_references('123456','654321') self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) class CitationBaseCheck(unittest.TestCase): def setUp(self): self.phoenix = CitationBase() citation = Citation() citation.set_reference_handle('123456') self.phoenix.add_citation(citation.handle) self.titanic = CitationBase() self.obj_list = CitationBase() def test_replace_nonew(self): citation = Citation() citation.set_reference_handle('654321') self.obj_list.add_citation(citation.handle) self.phoenix.replace_citation_references('123456','654321') self.assertEqual(self.phoenix.to_struct(), self.obj_list.to_struct()) def test_replace_newpresent(self): citation = Citation() citation.set_reference_handle('654321') citation.set_page('p.10') citation2 = Citation() citation2.set_reference_handle('234567') self.phoenix.add_citation(citation.handle) self.phoenix.add_citation(citation2.handle) self.obj_list.add_citation(citation2.handle) self.obj_list.add_citation(citation.handle) self.phoenix.replace_citation_references('123456','654321') self.assertEqual(self.phoenix.to_struct(), self.obj_list.to_struct()) def todo_test_replace_child(self): pass def test_merge_identical(self): citation = Citation() citation.set_reference_handle('123456') self.titanic.add_citation(citation.handle) self.obj_list.add_citation(citation.handle) self.phoenix._merge_citation_list(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.obj_list.to_struct()) def test_merge_different(self): citation = Citation() citation.set_reference_handle('234567') citation2 = Citation() citation2.set_reference_handle('123456') self.titanic.add_citation(citation.handle) self.obj_list.add_citation(citation2.handle) self.obj_list.add_citation(citation.handle) self.phoenix._merge_citation_list(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.obj_list.to_struct()) class CitationCheck(unittest.TestCase, PrivacyBaseTest, 
MediaBaseTest, NoteBaseTest): def setUp(self): self.phoenix = Citation() self.phoenix.set_reference_handle('123456') self.phoenix.set_page('p.10') self.titanic = Citation() self.titanic.set_reference_handle('123456') self.titanic.set_page('p.10') self.ref_obj = Citation() self.ref_obj.set_reference_handle('123456') self.ref_obj.set_page('p.10') def test_merge_confidence(self): known_values = ( (0, 0, 0), (0, 1, 0), (0, 2, 0), (0, 3, 0), (0, 4, 0), (1, 0, 0), (1, 1, 1), (1, 2, 1), (1, 3, 1), (1, 4, 4), (2, 0, 0), (2, 1, 1), (2, 2, 2), (2, 3, 3), (2, 4, 4), (3, 0, 0), (3, 1, 1), (3, 2, 3), (3, 3, 3), (3, 4, 4), (4, 0, 0), (4, 1, 4), (4, 2, 4), (4, 3, 4), (4, 4, 4)) for val1, val2, val_merge in known_values: self.phoenix.set_confidence_level(val1) self.titanic.set_confidence_level(val2) self.ref_obj.set_confidence_level(val_merge) self.phoenix.merge(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) def test_merge_datamap(self): attr1 = SrcAttribute() attr1.set_type('A') attr1.set_value('a') attr2 = SrcAttribute() attr2.set_type('B') attr2.set_value('b') attr3 = SrcAttribute() attr3.set_type('B') attr3.set_value('bb') attr4 = SrcAttribute() attr4.set_type('C') attr4.set_value('c') self.phoenix.set_attribute_list([attr1, attr2]) self.titanic.set_attribute_list([attr3, attr4]) self.ref_obj.set_attribute_list([attr1, attr2, attr3, attr4]) self.phoenix.merge(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.ref_obj.to_struct()) class SurnameCheck(unittest.TestCase): def setUp(self): self.phoenix = Surname() self.phoenix.set_prefix('van') self.titanic = Surname(self.phoenix) def test_datalist_equivalence(self): self.assertEqual(self.phoenix.is_equivalent(self.titanic), IDENTICAL) self.titanic.set_prefix('von') self.assertEqual(self.phoenix.is_equivalent(self.titanic), DIFFERENT) def test_primary_equivalence(self): self.titanic.set_primary(False) self.assertEqual(self.phoenix.is_equivalent(self.titanic), DIFFERENT) # A Surname can 
never be EQUAL to another Surname. # There is no merge method to check. class SurnameBaseCheck(unittest.TestCase): def setUp(self): self.phoenix = SurnameBase() surname = Surname() surname.set_surname("Oranje") self.phoenix.add_surname(surname) self.titanic = SurnameBase() self.ref_list = SurnameBase() def test_identical(self): surname = Surname() surname.set_surname("Oranje") self.ref_list.add_surname(surname) self.titanic.add_surname(surname) self.phoenix._merge_surname_list(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.ref_list.to_struct()) def test_different(self): surname = Surname() surname.set_surname("Biesterfelt") self.titanic.add_surname(surname) self.ref_list = SurnameBase(self.phoenix) self.ref_list.add_surname(surname) self.phoenix._merge_surname_list(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.ref_list.to_struct()) class TagBaseCheck(unittest.TestCase): def setUp(self): self.phoenix = TagBase() tag_handle = '123456' self.phoenix.add_tag(tag_handle) self.titanic = TagBase() def test_identical(self): self.ref_list = TagBase(self.phoenix) self.titanic.add_tag(self.phoenix.get_tag_list()[0]) self.phoenix._merge_tag_list(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.ref_list.to_struct()) def test_different(self): self.titanic.set_tag_list([]) tag_handle = '654321' self.titanic.add_tag(tag_handle) self.ref_list = TagBase(self.phoenix) self.ref_list.add_tag(tag_handle) self.phoenix._merge_tag_list(self.titanic) self.assertEqual(self.phoenix.to_struct(), self.ref_list.to_struct()) if __name__ == "__main__": unittest.main()
codeparrot/github-code-clean
import math import unittest from datetime import datetime, time, timedelta, tzinfo from asynctest.mock import patch from tests.unittest.base_custom import TestCustomField # Needs to be imported last from lib.data import timezones from lib.data.timedelta import format from ..custom import countdown class TimeZone(tzinfo): def __init__(self, offset): self.offset = offset def utcoffset(self, dt): return timedelta(seconds=self.offset) def dst(self, dt): return timedelta() class TestCustomCommandCustomCountdownParse(unittest.TestCase): def setUp(self): patcher = patch(countdown.__name__ + '.timezones', autospec=True) self.addCleanup(patcher.stop) self.mock_timezones = patcher.start() self.mock_timezones.abbreviations = { 'utc': TimeZone(0), 'utc-00:00': TimeZone(0), 'utc+00:00': TimeZone(0), 'utc-12:00': TimeZone(0), 'utc+12:00': TimeZone(0), 'utc-08:00': TimeZone(-8 * 3600), 'utc+08:00': TimeZone(8 * 3600) } self.mock_timezones.utc = TimeZone(0) def test(self): self.assertIsNone(countdown.parse_date_string('')) def test_time_of_day(self): self.assertEqual( countdown.parse_date_string('0:00'), countdown.DateTimeInstance(time(0, 0, 0, 0, timezones.utc), None, None, True)) self.assertEqual( countdown.parse_date_string('00:00'), countdown.DateTimeInstance(time(0, 0, 0, 0, timezones.utc), None, None, True)) self.assertEqual( countdown.parse_date_string('12:34'), countdown.DateTimeInstance(time(12, 34, 0, 0, timezones.utc), None, None, True)) self.assertEqual( countdown.parse_date_string('23:59'), countdown.DateTimeInstance(time(23, 59, 0, 0, timezones.utc), None, None, True)) self.assertIsNone(countdown.parse_date_string('24:00')) self.assertIsNone(countdown.parse_date_string('0:60')) self.assertIsNone(countdown.parse_date_string('000:00')) self.assertIsNone(countdown.parse_date_string('0:000')) def test_time_of_day_seconds(self): self.assertEqual( countdown.parse_date_string('0:00:00'), countdown.DateTimeInstance(time(0, 0, 0, 0, timezones.utc), None, None, True)) 
self.assertEqual( countdown.parse_date_string('0:00:59'), countdown.DateTimeInstance(time(0, 0, 59, 0, timezones.utc), None, None, True)) self.assertIsNone(countdown.parse_date_string('0:00:60')) def test_time_of_day_seconds_microseconds(self): self.assertEqual( countdown.parse_date_string('0:00:00.000000'), countdown.DateTimeInstance(time(0, 0, 0, 0, timezones.utc), None, None, True)) self.assertEqual( countdown.parse_date_string('0:00:00.000001'), countdown.DateTimeInstance(time(0, 0, 0, 1, timezones.utc), None, None, True)) self.assertEqual( countdown.parse_date_string('0:00:00.999999'), countdown.DateTimeInstance(time(0, 0, 0, 999999, timezones.utc), None, None, True)) self.assertIsNone(countdown.parse_date_string('0:00:00.0000000')) self.assertIsNone(countdown.parse_date_string('0:00:00.9999999')) def test_time_of_day_meridiem(self): self.assertEqual( countdown.parse_date_string('12:00AM'), countdown.DateTimeInstance(time(0, 0, 0, 0, timezones.utc), None, None, False)) self.assertEqual( countdown.parse_date_string('1:23PM'), countdown.DateTimeInstance(time(13, 23, 0, 0, timezones.utc), None, None, False)) self.assertEqual( countdown.parse_date_string('01:23am'), countdown.DateTimeInstance(time(1, 23, 0, 0, timezones.utc), None, None, False)) self.assertEqual( countdown.parse_date_string('11:59pm'), countdown.DateTimeInstance(time(23, 59, 0, 0, timezones.utc), None, None, False)) self.assertIsNone(countdown.parse_date_string('12:00BM')) self.assertIsNone(countdown.parse_date_string('0:00AM')) self.assertIsNone(countdown.parse_date_string('0:60AM')) self.assertIsNone(countdown.parse_date_string('13:00AM')) self.assertIsNone(countdown.parse_date_string('000:00AM')) self.assertIsNone(countdown.parse_date_string('0:000AM')) def test_time_of_day_seconds_meridiem(self): self.assertEqual( countdown.parse_date_string('12:00:00AM'), countdown.DateTimeInstance(time(0, 0, 0, 0, timezones.utc), None, None, False)) self.assertEqual( 
countdown.parse_date_string('12:00:59PM'), countdown.DateTimeInstance(time(12, 0, 59, 0, timezones.utc), None, None, False)) self.assertIsNone(countdown.parse_date_string('0:00:60AM')) def test_time_of_day_seconds_microseconds_meridiem(self): self.assertEqual( countdown.parse_date_string('12:00:00.000000AM'), countdown.DateTimeInstance(time(0, 0, 0, 0, timezones.utc), None, None, False)) self.assertEqual( countdown.parse_date_string('12:00:00.000001PM'), countdown.DateTimeInstance(time(12, 0, 0, 1, timezones.utc), None, None, False)) self.assertEqual( countdown.parse_date_string('12:00:00.999999AM'), countdown.DateTimeInstance(time(0, 0, 0, 999999, timezones.utc), None, None, False)) self.assertIsNone(countdown.parse_date_string('0:00:00.0000000AM')) self.assertIsNone(countdown.parse_date_string('0:00:00.9999999PM')) def test_time_of_day_timezone(self): self.assertEqual( countdown.parse_date_string('0:00 UTC'), countdown.DateTimeInstance(time(0, 0, 0, 0, timezones.utc), None, None, True)) self.assertEqual( countdown.parse_date_string('0:00 utc'), countdown.DateTimeInstance(time(0, 0, 0, 0, timezones.utc), None, None, True)) self.assertEqual( countdown.parse_date_string('0:00 UTC-00:00'), countdown.DateTimeInstance(time(0, 0, 0, 0, timezones.utc), None, None, True)) self.assertEqual( countdown.parse_date_string('12:00AM UTC+12:00'), countdown.DateTimeInstance( time(0, 0, 0, 0, self.mock_timezones.abbreviations['utc+12:00']), None, None, False)) self.assertIsNone(countdown.parse_date_string('0:00 ABC')) def test_day_of_week_time_of_day(self): self.assertEqual( countdown.parse_date_string('Sunday 0:00'), countdown.DateTimeInstance(time(0, 0, 0, 0, timezones.utc), countdown.SUNDAY, None, True)) self.assertEqual( countdown.parse_date_string('Monday 0:00'), countdown.DateTimeInstance(time(0, 0, 0, 0, timezones.utc), countdown.MONDAY, None, True)) self.assertEqual( countdown.parse_date_string('Tuesday 0:00'), countdown.DateTimeInstance(time(0, 0, 0, 0, timezones.utc), 
countdown.TUESDAY, None, True)) self.assertEqual( countdown.parse_date_string('Wednesday 0:00'), countdown.DateTimeInstance(time(0, 0, 0, 0, timezones.utc), countdown.WEDNESDAY, None, True)) self.assertEqual( countdown.parse_date_string('Thursday 0:00'), countdown.DateTimeInstance(time(0, 0, 0, 0, timezones.utc), countdown.THURSDAY, None, True)) self.assertEqual( countdown.parse_date_string('Friday 0:00'), countdown.DateTimeInstance(time(0, 0, 0, 0, timezones.utc), countdown.FRIDAY, None, True)) self.assertEqual( countdown.parse_date_string('Saturday 0:00'), countdown.DateTimeInstance(time(0, 0, 0, 0, timezones.utc), countdown.SATURDAY, None, True)) self.assertEqual( countdown.parse_date_string('SUNDAY 0:00'), countdown.DateTimeInstance(time(0, 0, 0, 0, timezones.utc), countdown.SUNDAY, None, True)) self.assertEqual( countdown.parse_date_string('sunday 0:00'), countdown.DateTimeInstance(time(0, 0, 0, 0, timezones.utc), countdown.SUNDAY, None, True)) self.assertEqual( countdown.parse_date_string('Sun 0:00'), countdown.DateTimeInstance(time(0, 0, 0, 0, timezones.utc), countdown.SUNDAY, None, True)) self.assertEqual( countdown.parse_date_string('Mon 0:00'), countdown.DateTimeInstance(time(0, 0, 0, 0, timezones.utc), countdown.MONDAY, None, True)) self.assertEqual( countdown.parse_date_string('Tue 0:00'), countdown.DateTimeInstance(time(0, 0, 0, 0, timezones.utc), countdown.TUESDAY, None, True)) self.assertEqual( countdown.parse_date_string('Wed 0:00'), countdown.DateTimeInstance(time(0, 0, 0, 0, timezones.utc), countdown.WEDNESDAY, None, True)) self.assertEqual( countdown.parse_date_string('Thu 0:00'), countdown.DateTimeInstance(time(0, 0, 0, 0, timezones.utc), countdown.THURSDAY, None, True)) self.assertEqual( countdown.parse_date_string('Fri 0:00'), countdown.DateTimeInstance(time(0, 0, 0, 0, timezones.utc), countdown.FRIDAY, None, True)) self.assertEqual( countdown.parse_date_string('Sat 0:00'), countdown.DateTimeInstance(time(0, 0, 0, 0, timezones.utc), 
countdown.SATURDAY, None, True)) self.assertEqual( countdown.parse_date_string('SUN 0:00'), countdown.DateTimeInstance(time(0, 0, 0, 0, timezones.utc), countdown.SUNDAY, None, True)) self.assertEqual( countdown.parse_date_string('sun 0:00'), countdown.DateTimeInstance(time(0, 0, 0, 0, timezones.utc), countdown.SUNDAY, None, True)) self.assertIsNone(countdown.parse_date_string('abc 0:00')) def test_negative_timezone_time_of_day(self): self.assertEqual( countdown.parse_date_string('8:00PM UTC-08:00'), countdown.DateTimeInstance( time(20, 0, 0, 0, self.mock_timezones.abbreviations['utc-08:00']), None, None, False)) def test_month_day_time_of_day(self): self.assertEqual( countdown.parse_date_string('1/1 0:00'), countdown.DateTimeInstance( time(0, 0, 0, 0, timezones.utc), None, countdown.Date(None, 1, 1), True)) self.assertEqual( countdown.parse_date_string('12-31 0:00'), countdown.DateTimeInstance( time(0, 0, 0, 0, timezones.utc), None, countdown.Date(None, 12, 31), True)) self.assertEqual( countdown.parse_date_string('2/29 0:00'), countdown.DateTimeInstance( time(0, 0, 0, 0, timezones.utc), None, countdown.Date(None, 2, 29), True)) self.assertIsNone(countdown.parse_date_string('1/32 0:00')) self.assertIsNone(countdown.parse_date_string('13/1 0:00')) self.assertIsNone(countdown.parse_date_string('2/30 0:00')) def test_month_day_year_time_of_day(self): self.assertEqual( countdown.parse_date_string('1/1/2000 0:00'), countdown.DateTimeInstance( time(0, 0, 0, 0, timezones.utc), None, countdown.Date(2000, 1, 1), True)) self.assertEqual( countdown.parse_date_string('12-31-2016 0:00'), countdown.DateTimeInstance( time(0, 0, 0, 0, timezones.utc), None, countdown.Date(2016, 12, 31), True)) self.assertEqual( countdown.parse_date_string('2/29/2000 0:00'), countdown.DateTimeInstance( time(0, 0, 0, 0, timezones.utc), None, countdown.Date(2000, 2, 29), True)) self.assertIsNone(countdown.parse_date_string('1/32/2015 0:00')) self.assertIsNone(countdown.parse_date_string('13/1/2014 
0:00')) self.assertIsNone(countdown.parse_date_string('2/30/2000 0:00')) self.assertIsNone(countdown.parse_date_string('2/29/2001 0:00')) def test_many(self): self.assertEqual( countdown.parse_date_string('6/15/2000 10:48:23.987654PM UTC'), countdown.DateTimeInstance( time(22, 48, 23, 987654, timezones.utc), None, countdown.Date(2000, 6, 15), False)) self.assertEqual( countdown.parse_date_string('Wed 16:49:31.456187 UTC'), countdown.DateTimeInstance(time(16, 49, 31, 456187, timezones.utc), countdown.WEDNESDAY, None, True)) self.assertIsNone( countdown.parse_date_string('UTC 16:49:31.456187 Wed')) self.assertIsNone( countdown.parse_date_string('UTC 10:48:23.987654PM 6/15/2000')) class TestCustomCommandCustomCountdownNextDatetime(unittest.TestCase): def setUp(self): self.now = datetime(2000, 1, 1, tzinfo=timezones.utc) patcher = patch('lib.data.timezones', autospec=True) self.addCleanup(patcher.stop) self.mock_timezones = patcher.start() self.mock_timezones.abbreviations = {'utc-08:00': TimeZone(-8 * 3600), 'utc+08:00': TimeZone(8 * 3600)} self.mock_timezones.utc = TimeZone(0) def test_time_of_day(self): self.assertEqual( countdown.next_datetime(self.now, time(0, 0, 0, 0, timezones.utc), None, None, True), countdown.DateTime(datetime(2000, 1, 2, 0, 0, 0, 0, timezones.utc), True)) self.assertEqual( countdown.next_datetime(self.now, time(0, 1, 0, 0, timezones.utc), None, None, False), countdown.DateTime(datetime(2000, 1, 1, 0, 1, 0, 0, timezones.utc), False)) self.assertEqual( countdown.next_datetime(self.now, time(23, 59, 59, 999999, timezones.utc), None, None, False), countdown.DateTime(datetime(2000, 1, 1, 23, 59, 59, 999999, timezones.utc), False)) def test_time_of_day_day_of_week(self): self.assertEqual( countdown.next_datetime(self.now, time(0, 0, 0, 0, timezones.utc), countdown.SUNDAY, None, True), countdown.DateTime(datetime(2000, 1, 2, 0, 0, 0, 0, timezones.utc), True)) self.assertEqual( countdown.next_datetime(self.now, time(0, 0, 0, 0, timezones.utc), 
countdown.MONDAY, None, True), countdown.DateTime(datetime(2000, 1, 3, 0, 0, 0, 0, timezones.utc), True)) self.assertEqual( countdown.next_datetime(self.now, time(0, 0, 0, 0, timezones.utc), countdown.TUESDAY, None, True), countdown.DateTime(datetime(2000, 1, 4, 0, 0, 0, 0, timezones.utc), True)) self.assertEqual( countdown.next_datetime(self.now, time(0, 0, 0, 0, timezones.utc), countdown.WEDNESDAY, None, True), countdown.DateTime(datetime(2000, 1, 5, 0, 0, 0, 0, timezones.utc), True)) self.assertEqual( countdown.next_datetime(self.now, time(0, 0, 0, 0, timezones.utc), countdown.THURSDAY, None, True), countdown.DateTime(datetime(2000, 1, 6, 0, 0, 0, 0, timezones.utc), True)) self.assertEqual( countdown.next_datetime(self.now, time(0, 0, 0, 0, timezones.utc), countdown.FRIDAY, None, True), countdown.DateTime(datetime(2000, 1, 7, 0, 0, 0, 0, timezones.utc), True)) self.assertEqual( countdown.next_datetime(self.now, time(0, 0, 0, 0, timezones.utc), countdown.SATURDAY, None, True), countdown.DateTime(datetime(2000, 1, 8, 0, 0, 0, 0, timezones.utc), True)) def test_time_of_day_day_of_week_timezone(self): self.assertEqual( countdown.next_datetime( self.now, time(20, 0, 0, 0, self.mock_timezones.abbreviations['utc-08:00']), countdown.FRIDAY, None, True), countdown.DateTime(datetime(2000, 1, 1, 4, 0, 0, 0, timezones.utc), True)) self.assertEqual( countdown.next_datetime( self.now, time(4, 0, 0, 0, self.mock_timezones.abbreviations['utc+08:00']), countdown.SUNDAY, None, True), countdown.DateTime(datetime(2000, 1, 1, 20, 0, 0, 0, timezones.utc), True)) def test_time_of_day_month_day(self): self.assertEqual( countdown.next_datetime(self.now, time(0, 0, 0, 0, timezones.utc), None, countdown.Date(None, 1, 1), True), countdown.DateTime(datetime(2001, 1, 1, 0, 0, 0, 0, timezones.utc), True)) self.assertEqual( countdown.next_datetime(self.now, time(0, 0, 0, 0, timezones.utc), None, countdown.Date(None, 12, 31), True), countdown.DateTime(datetime(2000, 12, 31, 0, 0, 0, 0, 
timezones.utc), True)) def test_time_of_day_year_month_day(self): self.assertIsNone( countdown.next_datetime(self.now, time(0, 0, 0, 0, timezones.utc), None, countdown.Date(2000, 1, 1), True)) self.assertEqual( countdown.next_datetime(self.now, time(0, 0, 0, 0, timezones.utc), None, countdown.Date(2000, 12, 31), True), countdown.DateTime(datetime(2000, 12, 31, 0, 0, 0, 0, timezones.utc), True)) class TesCustomCommandtCustomCountdownPastDatetime(unittest.TestCase): def setUp(self): self.now = datetime(2000, 1, 1, tzinfo=timezones.utc) patcher = patch('lib.data.timezones', autospec=True) self.addCleanup(patcher.stop) self.mock_timezones = patcher.start() self.mock_timezones.abbreviations = {'utc-08:00': TimeZone(-8 * 3600), 'utc+08:00': TimeZone(8 * 3600)} self.mock_timezones.utc = TimeZone(0) def test_time_of_day(self): self.assertEqual( countdown.past_datetime(self.now, time(0, 0, 0, 0, timezones.utc), None, None, True), countdown.DateTime(datetime(2000, 1, 1, 0, 0, 0, 0, timezones.utc), True)) self.assertEqual( countdown.past_datetime(self.now, time(0, 1, 0, 0, timezones.utc), None, None, False), countdown.DateTime(datetime(1999, 12, 31, 0, 1, 0, 0, timezones.utc), False)) self.assertEqual( countdown.past_datetime(self.now, time(23, 59, 59, 999999, timezones.utc), None, None, False), countdown.DateTime(datetime(1999, 12, 31, 23, 59, 59, 999999, timezones.utc), False)) def test_time_of_day_day_of_week(self): self.assertEqual( countdown.past_datetime(self.now, time(0, 0, 0, 0, timezones.utc), countdown.SUNDAY, None, True), countdown.DateTime(datetime(1999, 12, 26, 0, 0, 0, 0, timezones.utc), True)) self.assertEqual( countdown.past_datetime(self.now, time(0, 0, 0, 0, timezones.utc), countdown.MONDAY, None, True), countdown.DateTime(datetime(1999, 12, 27, 0, 0, 0, 0, timezones.utc), True)) self.assertEqual( countdown.past_datetime(self.now, time(0, 0, 0, 0, timezones.utc), countdown.TUESDAY, None, True), countdown.DateTime(datetime(1999, 12, 28, 0, 0, 0, 0, 
timezones.utc), True)) self.assertEqual( countdown.past_datetime(self.now, time(0, 0, 0, 0, timezones.utc), countdown.WEDNESDAY, None, True), countdown.DateTime(datetime(1999, 12, 29, 0, 0, 0, 0, timezones.utc), True)) self.assertEqual( countdown.past_datetime(self.now, time(0, 0, 0, 0, timezones.utc), countdown.THURSDAY, None, True), countdown.DateTime(datetime(1999, 12, 30, 0, 0, 0, 0, timezones.utc), True)) self.assertEqual( countdown.past_datetime(self.now, time(0, 0, 0, 0, timezones.utc), countdown.FRIDAY, None, True), countdown.DateTime(datetime(1999, 12, 31, 0, 0, 0, 0, timezones.utc), True)) self.assertEqual( countdown.past_datetime(self.now, time(0, 0, 0, 0, timezones.utc), countdown.SATURDAY, None, True), countdown.DateTime(datetime(2000, 1, 1, 0, 0, 0, 0, timezones.utc), True)) def test_time_of_day_day_of_week_timezone(self): self.assertEqual( countdown.past_datetime( self.now, time(20, 0, 0, 0, self.mock_timezones.abbreviations['utc-08:00']), countdown.SATURDAY, None, True), countdown.DateTime(datetime(1999, 12, 26, 4, 0, 0, 0, timezones.utc), True)) self.assertEqual( countdown.past_datetime( self.now, time(4, 0, 0, 0, self.mock_timezones.abbreviations['utc+08:00']), countdown.SATURDAY, None, True), countdown.DateTime(datetime(1999, 12, 31, 20, 0, 0, 0, timezones.utc), True)) def test_time_of_day_month_day(self): self.assertEqual( countdown.past_datetime(self.now, time(0, 0, 0, 0, timezones.utc), None, countdown.Date(None, 1, 1), True), countdown.DateTime(datetime(2000, 1, 1, 0, 0, 0, 0, timezones.utc), True)) self.assertEqual( countdown.past_datetime(self.now, time(0, 0, 0, 0, timezones.utc), None, countdown.Date(None, 12, 31), True), countdown.DateTime(datetime(1999, 12, 31, 0, 0, 0, 0, timezones.utc), True)) def test_time_of_day_year_month_day(self): self.assertEqual( countdown.past_datetime(self.now, time(0, 0, 0, 0, timezones.utc), None, countdown.Date(2000, 1, 1), True), countdown.DateTime(datetime(2000, 1, 1, 0, 0, 0, 0, timezones.utc), True)) 
self.assertIsNone( countdown.past_datetime(self.now, time(0, 0, 0, 0, timezones.utc), None, countdown.Date(2000, 12, 31), True)) class TestCustomCommandCustomCountdownParseCooldown(unittest.TestCase): def test_percent(self): self.assertEqual(countdown.parse_cooldown('0%'), 0.0) self.assertEqual(countdown.parse_cooldown('100%'), 1.0) self.assertEqual(countdown.parse_cooldown('42%'), 0.42) self.assertIsNone(countdown.parse_cooldown('101%')) self.assertIsNone(countdown.parse_cooldown('1000%')) self.assertIsNone(countdown.parse_cooldown('-0%')) def test_weeks(self): self.assertEqual(countdown.parse_cooldown('0w'), timedelta(weeks=0)) self.assertEqual(countdown.parse_cooldown('1w'), timedelta(weeks=1)) self.assertEqual(countdown.parse_cooldown('2w'), timedelta(weeks=2)) self.assertEqual(countdown.parse_cooldown('15961w'), timedelta(weeks=15961)) def test_days(self): self.assertEqual(countdown.parse_cooldown('0d'), timedelta(days=0)) self.assertEqual(countdown.parse_cooldown('1d'), timedelta(days=1)) self.assertEqual(countdown.parse_cooldown('2d'), timedelta(days=2)) self.assertEqual(countdown.parse_cooldown('89156d'), timedelta(days=89156)) def test_hours(self): self.assertEqual(countdown.parse_cooldown('0h'), timedelta(hours=0)) self.assertEqual(countdown.parse_cooldown('1h'), timedelta(hours=1)) self.assertEqual(countdown.parse_cooldown('23h'), timedelta(hours=23)) self.assertIsNone(countdown.parse_cooldown('24h')) def test_minutes(self): self.assertEqual(countdown.parse_cooldown('0m'), timedelta(minutes=0)) self.assertEqual(countdown.parse_cooldown('1m'), timedelta(minutes=1)) self.assertEqual(countdown.parse_cooldown('59m'), timedelta(minutes=59)) self.assertIsNone(countdown.parse_cooldown('60m')) def test_seconds(self): self.assertEqual(countdown.parse_cooldown('0s'), timedelta(seconds=0)) self.assertEqual(countdown.parse_cooldown('1s'), timedelta(seconds=1)) self.assertEqual(countdown.parse_cooldown('59s'), timedelta(seconds=59)) 
self.assertIsNone(countdown.parse_cooldown('60s')) def test_multiple(self): self.assertEqual(countdown.parse_cooldown('1w1d'), timedelta(weeks=1, days=1)) self.assertEqual(countdown.parse_cooldown('1d1h'), timedelta(days=1, hours=1)) self.assertEqual(countdown.parse_cooldown('1h1m'), timedelta(hours=1, minutes=1)) self.assertEqual(countdown.parse_cooldown('1m1s'), timedelta(minutes=1, seconds=1)) self.assertIsNone(countdown.parse_cooldown('1d1w')) self.assertIsNone(countdown.parse_cooldown('1h1d')) self.assertIsNone(countdown.parse_cooldown('1m1h')) self.assertIsNone(countdown.parse_cooldown('1s1m')) self.assertIsNone(countdown.parse_cooldown('1s1m1h1d1w')) self.assertEqual(countdown.parse_cooldown('0w0d0h0m0s'), timedelta()) self.assertEqual( countdown.parse_cooldown('1w1d1h1m1s'), timedelta(weeks=1, days=1, hours=1, minutes=1, seconds=1)) self.assertEqual( countdown.parse_cooldown('2w13d23h59m59s'), timedelta(weeks=3, days=6, hours=23, minutes=59, seconds=59)) class TestCustomCommandCustomCountdownTestCooldown(unittest.TestCase): def test(self): self.assertEqual( countdown.test_cooldown(None, datetime(2000, 1, 1, 0, 0, 0, 0), datetime(2000, 1, 2, 0, 0, 0, 0), datetime(1999, 12, 31, 0, 0, 0, 0)), 0) def test_timedelta(self): duration = timedelta(hours=1) past = datetime(2000, 1, 1, 0, 0, 0, 0) future = datetime(2000, 1, 2, 0, 0, 0, 0) self.assertEqual( countdown.test_cooldown(duration, past, future, datetime(1999, 12, 31, 0, 0, 0, 0)), -math.inf) self.assertEqual( countdown.test_cooldown(duration, past, future, datetime(2000, 1, 1, 0, 0, 0, 0)), -1) self.assertEqual( countdown.test_cooldown(duration, past, future, datetime(2000, 1, 1, 0, 59, 59, 999999)), -1) self.assertEqual( countdown.test_cooldown(duration, past, future, datetime(2000, 1, 1, 1, 0, 0, 0)), 0) self.assertEqual( countdown.test_cooldown(duration, past, future, datetime(2000, 1, 1, 12, 0, 0, 0)), 0) self.assertEqual( countdown.test_cooldown(duration, past, future, datetime(2000, 1, 1, 23, 0, 0, 0)), 
0) self.assertEqual( countdown.test_cooldown(duration, past, future, datetime(2000, 1, 1, 23, 0, 0, 1)), 1) self.assertEqual( countdown.test_cooldown(duration, past, future, datetime(2000, 1, 2, 0, 0, 0, 0)), 1) self.assertEqual( countdown.test_cooldown(duration, past, future, datetime(2000, 1, 3, 0, 0, 0, 0)), math.inf) def test_timedelta_over_half(self): duration = timedelta(hours=20) past = datetime(2000, 1, 1, 0, 0, 0, 0) future = datetime(2000, 1, 2, 0, 0, 0, 0) self.assertEqual( countdown.test_cooldown(duration, past, future, datetime(1999, 12, 31, 0, 0, 0, 0)), -math.inf) self.assertEqual( countdown.test_cooldown(duration, past, future, datetime(2000, 1, 1, 0, 0, 0, 0)), -1) self.assertEqual( countdown.test_cooldown(duration, past, future, datetime(2000, 1, 1, 3, 59, 59, 999999)), -1) test = countdown.test_cooldown(duration, past, future, datetime(2000, 1, 1, 4, 0, 0, 0)) self.assertTrue(math.isnan(test), test) test = countdown.test_cooldown(duration, past, future, datetime(2000, 1, 1, 12, 0, 0, 0)) self.assertTrue(math.isnan(test), test) test = countdown.test_cooldown(duration, past, future, datetime(2000, 1, 1, 20, 0, 0, 0)) self.assertTrue(math.isnan(test), test) self.assertEqual( countdown.test_cooldown(duration, past, future, datetime(2000, 1, 1, 20, 0, 0, 1)), 1) self.assertEqual( countdown.test_cooldown(duration, past, future, datetime(2000, 1, 2, 0, 0, 0, 0)), 1) self.assertEqual( countdown.test_cooldown(duration, past, future, datetime(2000, 1, 3, 0, 0, 0, 0)), math.inf) def test_timedelta_over_full(self): duration = timedelta(days=2) past = datetime(2000, 1, 1, 0, 0, 0, 0) future = datetime(2000, 1, 2, 0, 0, 0, 0) self.assertEqual( countdown.test_cooldown(duration, past, future, datetime(1999, 12, 31, 0, 0, 0, 0)), -math.inf) test = countdown.test_cooldown(duration, past, future, datetime(2000, 1, 1, 0, 0, 0, 0)) self.assertTrue(math.isnan(test), test) test = countdown.test_cooldown(duration, past, future, datetime(2000, 1, 1, 12, 0, 0, 0)) 
self.assertTrue(math.isnan(test), test) test = countdown.test_cooldown(duration, past, future, datetime(2000, 1, 2, 0, 0, 0, 0)) self.assertTrue(math.isnan(test), test) self.assertEqual( countdown.test_cooldown(duration, past, future, datetime(2000, 1, 3, 0, 0, 0, 0)), math.inf) def test_float(self): duration = 1 / 24 past = datetime(2000, 1, 1, 0, 0, 0, 0) future = datetime(2000, 1, 2, 0, 0, 0, 0) self.assertEqual( countdown.test_cooldown(duration, past, future, datetime(1999, 12, 31, 0, 0, 0, 0)), -math.inf) self.assertEqual( countdown.test_cooldown(duration, past, future, datetime(2000, 1, 1, 0, 0, 0, 0)), -1) self.assertEqual( countdown.test_cooldown(duration, past, future, datetime(2000, 1, 1, 0, 59, 59, 999999)), -1) self.assertEqual( countdown.test_cooldown(duration, past, future, datetime(2000, 1, 1, 1, 0, 0, 0)), 0) self.assertEqual( countdown.test_cooldown(duration, past, future, datetime(2000, 1, 1, 12, 0, 0, 0)), 0) self.assertEqual( countdown.test_cooldown(duration, past, future, datetime(2000, 1, 1, 23, 0, 0, 0)), 0) self.assertEqual( countdown.test_cooldown(duration, past, future, datetime(2000, 1, 1, 23, 0, 0, 1)), 1) self.assertEqual( countdown.test_cooldown(duration, past, future, datetime(2000, 1, 2, 0, 0, 0, 0)), 1) self.assertEqual( countdown.test_cooldown(duration, past, future, datetime(2000, 1, 3, 0, 0, 0, 0)), math.inf) def test_float_over_half(self): duration = 20 / 24 past = datetime(2000, 1, 1, 0, 0, 0, 0) future = datetime(2000, 1, 2, 0, 0, 0, 0) self.assertEqual( countdown.test_cooldown(duration, past, future, datetime(1999, 12, 31, 0, 0, 0, 0)), -math.inf) self.assertEqual( countdown.test_cooldown(duration, past, future, datetime(2000, 1, 1, 0, 0, 0, 0)), -1) self.assertEqual( countdown.test_cooldown(duration, past, future, datetime(2000, 1, 1, 3, 59, 59, 999999)), -1) test = countdown.test_cooldown(duration, past, future, datetime(2000, 1, 1, 4, 0, 0, 0)) self.assertTrue(math.isnan(test), test) test = 
countdown.test_cooldown(duration, past, future, datetime(2000, 1, 1, 12, 0, 0, 0)) self.assertTrue(math.isnan(test), test) test = countdown.test_cooldown(duration, past, future, datetime(2000, 1, 1, 20, 0, 0, 0)) self.assertTrue(math.isnan(test), test) self.assertEqual( countdown.test_cooldown(duration, past, future, datetime(2000, 1, 1, 20, 0, 0, 1)), 1) self.assertEqual( countdown.test_cooldown(duration, past, future, datetime(2000, 1, 2, 0, 0, 0, 0)), 1) self.assertEqual( countdown.test_cooldown(duration, past, future, datetime(2000, 1, 3, 0, 0, 0, 0)), math.inf) def test_float_over_full(self): duration = 2.0 past = datetime(2000, 1, 1, 0, 0, 0, 0) future = datetime(2000, 1, 2, 0, 0, 0, 0) self.assertEqual( countdown.test_cooldown(duration, past, future, datetime(1999, 12, 31, 0, 0, 0, 0)), -math.inf) test = countdown.test_cooldown(duration, past, future, datetime(2000, 1, 1, 0, 0, 0, 0)) self.assertTrue(math.isnan(test), test) test = countdown.test_cooldown(duration, past, future, datetime(2000, 1, 1, 12, 0, 0, 0)) self.assertTrue(math.isnan(test), test) test = countdown.test_cooldown(duration, past, future, datetime(2000, 1, 2, 0, 0, 0, 0)) self.assertTrue(math.isnan(test), test) self.assertEqual( countdown.test_cooldown(duration, past, future, datetime(2000, 1, 3, 0, 0, 0, 0)), math.inf) class TestCustomCommandCustomCountdownParseNextPastCooldown(unittest.TestCase): def setUp(self): self.now = datetime(2000, 1, 1, tzinfo=timezones.utc) def test_blank(self): times = '' self.assertEqual( countdown.parse_next_past_cooldown(times, self.now), countdown.NextPastCooldown(None, None, None)) def test(self): times = 'abcd' self.assertEqual( countdown.parse_next_past_cooldown(times, self.now), countdown.NextPastCooldown(None, None, None)) def test_cooldown(self): times = '1h' self.assertEqual( countdown.parse_next_past_cooldown(times, self.now), countdown.NextPastCooldown(None, None, None)) def test_single_exact_next(self): times = '1/2/2000 0:00' self.assertEqual( 
countdown.parse_next_past_cooldown(times, self.now), countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), None, None)) def test_single_exact_past(self): times = '12/31/1999 12:00AM' self.assertEqual( countdown.parse_next_past_cooldown(times, self.now), countdown.NextPastCooldown( None, countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), False), None)) def test_multiple_exact(self): times = '12/31/1999 12:00AM,1/2/2000 0:00' self.assertEqual( countdown.parse_next_past_cooldown(times, self.now), countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), False), 0)) def test_hour_minute(self): times = '0:00' self.assertEqual( countdown.parse_next_past_cooldown(times, self.now), countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), countdown.DateTime( datetime(2000, 1, 1, 0, 0, tzinfo=timezones.utc), True), 0)) def test_day_of_week(self): times = 'Sun 0:00' self.assertEqual( countdown.parse_next_past_cooldown(times, self.now), countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), countdown.DateTime( datetime(1999, 12, 26, 0, 0, tzinfo=timezones.utc), True), 0)) def test_month_day(self): times = '1/1 0:00' self.assertEqual( countdown.parse_next_past_cooldown(times, self.now), countdown.NextPastCooldown( countdown.DateTime( datetime(2001, 1, 1, 0, 0, tzinfo=timezones.utc), True), countdown.DateTime( datetime(2000, 1, 1, 0, 0, tzinfo=timezones.utc), True), 0)) def test_multiple(self): times = '12/25/1999 6:00PM,1/31 0:00,7:00AM,19:00,Wed 20:00' self.assertEqual( countdown.parse_next_past_cooldown(times, self.now), countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 1, 7, 0, tzinfo=timezones.utc), False), countdown.DateTime( datetime(1999, 12, 31, 19, 0, 
tzinfo=timezones.utc), True), 0)) def test_multiple_2(self): times = '6:00AM,18:00,12/31 23:00,1/1 1:00AM' self.assertEqual( countdown.parse_next_past_cooldown(times, self.now), countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 1, 1, 0, tzinfo=timezones.utc), False), countdown.DateTime( datetime(1999, 12, 31, 23, 0, tzinfo=timezones.utc), True), 0)) def test_hour_minute_cooldown(self): times = '0:00,1h' self.assertEqual( countdown.parse_next_past_cooldown(times, self.now), countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), countdown.DateTime( datetime(2000, 1, 1, 0, 0, tzinfo=timezones.utc), True), 0)) def test_cooldown_hour_minute(self): times = '4h,12:00' self.assertEqual( countdown.parse_next_past_cooldown(times, self.now), countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 1, 12, 0, tzinfo=timezones.utc), True), countdown.DateTime( datetime(1999, 12, 31, 12, 0, tzinfo=timezones.utc), True), 0)) def test_cooldown_hour_minute_early_cooldown(self): times = '4h,23:00' self.assertEqual( countdown.parse_next_past_cooldown(times, self.now), countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 1, 23, 0, tzinfo=timezones.utc), True), countdown.DateTime( datetime(1999, 12, 31, 23, 0, tzinfo=timezones.utc), True), -1)) def test_cooldown_hour_minute_late_cooldown(self): times = '4h,1:00' self.assertEqual( countdown.parse_next_past_cooldown(times, self.now), countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 1, 1, 0, tzinfo=timezones.utc), True), countdown.DateTime( datetime(1999, 12, 31, 1, 0, tzinfo=timezones.utc), True), 1)) def test_cooldown_hour_minute_overlap_cooldown(self): times = '18h,12:00' self.assertEqual( countdown.parse_next_past_cooldown(times, self.now), countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 1, 12, 0, tzinfo=timezones.utc), True), countdown.DateTime( datetime(1999, 12, 31, 12, 0, tzinfo=timezones.utc), True), math.nan)) 
def test_cooldown_hour_minute_multiple(self): times = '50%,9:00,21:00' self.assertEqual( countdown.parse_next_past_cooldown(times, self.now), countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 1, 9, 0, tzinfo=timezones.utc), True), countdown.DateTime( datetime(1999, 12, 31, 21, 0, tzinfo=timezones.utc), True), -1)) class TestCustomCommandCustomCountdownFieldCountdown(TestCustomField): def setUp(self): super().setUp() self.args = self.args._replace(field='countdown', param='') patcher = patch(countdown.__name__ + '.parse_next_past_cooldown', autospec=True) self.addCleanup(patcher.stop) self.mock_parse = patcher.start() self.mock_parse.return_value = countdown.NextPastCooldown(None, None, None) async def test(self): self.args = self.args._replace(field='') self.assertIsNone(await countdown.fieldCountdown(self.args)) self.assertFalse(self.mock_parse.called) async def test_none_time(self): self.args = self.args._replace(param=None) self.assertIsNone(await countdown.fieldCountdown(self.args)) self.assertFalse(self.mock_parse.called) async def test_invalid_time(self): self.assertIsNone(await countdown.fieldCountdown(self.args)) self.assertTrue(self.mock_parse.called) async def test_default(self): self.mock_parse.return_value = countdown.NextPastCooldown( None, countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), None) self.assertEqual( await countdown.fieldCountdown(self.args), 'has passed') self.assertTrue(self.mock_parse.called) async def test_default_prefix_suffix(self): self.args = self.args._replace(prefix='[', suffix=']') self.mock_parse.return_value = countdown.NextPastCooldown( None, countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), None) self.assertEqual( await countdown.fieldCountdown(self.args), 'has passed') self.assertTrue(self.mock_parse.called) async def test_default_default(self): self.args = self.args._replace(default='Kappa') self.mock_parse.return_value = countdown.NextPastCooldown( 
None, countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), None) self.assertEqual(await countdown.fieldCountdown(self.args), 'Kappa') self.assertTrue(self.mock_parse.called) async def test_default_default_prefix_suffix(self): self.args = self.args._replace(default='Kappa', prefix='[', suffix=']') self.mock_parse.return_value = countdown.NextPastCooldown( None, countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), None) self.assertEqual(await countdown.fieldCountdown(self.args), 'Kappa') self.assertTrue(self.mock_parse.called) async def test_time(self): self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), None, None) self.assertEqual(await countdown.fieldCountdown(self.args), format(timedelta(days=1))) self.assertTrue(self.mock_parse.called) async def test_time_prefix(self): self.args = self.args._replace(prefix='[') self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), None, None) self.assertEqual(await countdown.fieldCountdown(self.args), '[' + format(timedelta(days=1))) self.assertTrue(self.mock_parse.called) async def test_time_prefix_blank(self): self.args = self.args._replace(prefix='') self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), None, None) self.assertEqual(await countdown.fieldCountdown(self.args), format(timedelta(days=1))) self.assertTrue(self.mock_parse.called) async def test_time_suffix(self): self.args = self.args._replace(suffix=']') self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), None, None) self.assertEqual(await countdown.fieldCountdown(self.args), format(timedelta(days=1)) + ']') self.assertTrue(self.mock_parse.called) async def test_time_suffix_blank(self): 
self.args = self.args._replace(suffix='') self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), None, None) self.assertEqual(await countdown.fieldCountdown(self.args), format(timedelta(days=1))) self.assertTrue(self.mock_parse.called) async def test_not_cooldown(self): self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), 0) self.assertEqual(await countdown.fieldCountdown(self.args), format(timedelta(days=1))) self.assertTrue(self.mock_parse.called) self.mock_parse.reset_mock() self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), 1) self.assertEqual(await countdown.fieldCountdown(self.args), format(timedelta(days=1))) self.assertTrue(self.mock_parse.called) self.mock_parse.reset_mock() self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), math.inf) self.assertEqual(await countdown.fieldCountdown(self.args), format(timedelta(days=1))) self.assertTrue(self.mock_parse.called) async def test_cooldown(self): self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), -math.inf) self.assertEqual(await countdown.fieldCountdown(self.args), 'has passed') self.assertTrue(self.mock_parse.called) self.mock_parse.reset_mock() self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), 
countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), -1) self.assertEqual(await countdown.fieldCountdown(self.args), 'has passed') self.assertTrue(self.mock_parse.called) self.mock_parse.reset_mock() self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), math.nan) self.assertEqual(await countdown.fieldCountdown(self.args), 'has passed') self.assertTrue(self.mock_parse.called) async def test_cooldown_prefix_suffix(self): self.args = self.args._replace(prefix='[', suffix=']') self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), -1) self.assertEqual( await countdown.fieldCountdown(self.args), 'has passed') self.assertTrue(self.mock_parse.called) async def test_cooldown_default(self): self.args = self.args._replace(default='Kappa') self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), -1) self.assertEqual(await countdown.fieldCountdown(self.args), 'Kappa') self.assertTrue(self.mock_parse.called) async def test_cooldown_default_prefix_suffix(self): self.args = self.args._replace(default='Kappa', prefix='[', suffix=']') self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), -1) self.assertEqual(await countdown.fieldCountdown(self.args), 'Kappa') self.assertTrue(self.mock_parse.called) class TestCustomCommandCustomCountdownFieldSince(TestCustomField): def setUp(self): super().setUp() self.args = 
self.args._replace(field='since', param='') patcher = patch(countdown.__name__ + '.parse_next_past_cooldown', autospec=True) self.addCleanup(patcher.stop) self.mock_parse = patcher.start() self.mock_parse.return_value = countdown.NextPastCooldown(None, None, None) async def test(self): self.args = self.args._replace(field='') self.assertIsNone(await countdown.fieldSince(self.args)) self.assertFalse(self.mock_parse.called) async def test_none_time(self): self.args = self.args._replace(param=None) self.assertIsNone(await countdown.fieldSince(self.args)) self.assertFalse(self.mock_parse.called) async def test_invalid_time(self): self.assertIsNone(await countdown.fieldSince(self.args)) self.assertTrue(self.mock_parse.called) async def test_default(self): self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), None, None) self.assertEqual(await countdown.fieldSince(self.args), 'is coming') self.assertTrue(self.mock_parse.called) async def test_default_prefix_suffix(self): self.args = self.args._replace(prefix='[', suffix=']') self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), None, None) self.assertEqual(await countdown.fieldSince(self.args), 'is coming') self.assertTrue(self.mock_parse.called) async def test_default_default(self): self.args = self.args._replace(default='Kappa') self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), None, None) self.assertEqual(await countdown.fieldSince(self.args), 'Kappa') self.assertTrue(self.mock_parse.called) async def test_default_default_prefix_suffix(self): self.args = self.args._replace(default='Kappa', prefix='[', suffix=']') self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), None, None) 
self.assertEqual(await countdown.fieldSince(self.args), 'Kappa') self.assertTrue(self.mock_parse.called) async def test_time(self): self.mock_parse.return_value = countdown.NextPastCooldown( None, countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), None) self.assertEqual(await countdown.fieldSince(self.args), format(timedelta(days=1))) self.assertTrue(self.mock_parse.called) async def test_time_prefix(self): self.args = self.args._replace(prefix='[') self.mock_parse.return_value = countdown.NextPastCooldown( None, countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), None) self.assertEqual(await countdown.fieldSince(self.args), '[' + format(timedelta(days=1))) self.assertTrue(self.mock_parse.called) async def test_time_prefix_blank(self): self.args = self.args._replace(prefix='') self.mock_parse.return_value = countdown.NextPastCooldown( None, countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), None) self.assertEqual(await countdown.fieldSince(self.args), format(timedelta(days=1))) self.assertTrue(self.mock_parse.called) async def test_time_suffix(self): self.args = self.args._replace(suffix=']') self.mock_parse.return_value = countdown.NextPastCooldown( None, countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), None) self.assertEqual(await countdown.fieldSince(self.args), format(timedelta(days=1)) + ']') self.assertTrue(self.mock_parse.called) async def test_time_suffix_blank(self): self.args = self.args._replace(suffix='') self.mock_parse.return_value = countdown.NextPastCooldown( None, countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), None) self.assertEqual(await countdown.fieldSince(self.args), format(timedelta(days=1))) self.assertTrue(self.mock_parse.called) async def test_not_cooldown(self): self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), 
countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), 0) self.assertEqual(await countdown.fieldSince(self.args), format(timedelta(days=1))) self.assertTrue(self.mock_parse.called) self.mock_parse.reset_mock() self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), -1) self.assertEqual(await countdown.fieldSince(self.args), format(timedelta(days=1))) self.assertTrue(self.mock_parse.called) self.mock_parse.reset_mock() self.assertEqual(await countdown.fieldSince(self.args), format(timedelta(days=1))) self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), -math.inf) self.assertEqual(await countdown.fieldSince(self.args), format(timedelta(days=1))) self.assertTrue(self.mock_parse.called) async def test_cooldown(self): self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), math.inf) self.assertEqual(await countdown.fieldSince(self.args), 'is coming') self.assertTrue(self.mock_parse.called) self.mock_parse.reset_mock() self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), 1) self.assertEqual(await countdown.fieldSince(self.args), 'is coming') self.assertTrue(self.mock_parse.called) self.mock_parse.reset_mock() self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), 
True), math.nan) self.assertEqual(await countdown.fieldSince(self.args), 'is coming') self.assertTrue(self.mock_parse.called) async def test_cooldown_prefix_suffix(self): self.args = self.args._replace(prefix='[', suffix=']') self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), 1) self.assertEqual(await countdown.fieldSince(self.args), 'is coming') self.assertTrue(self.mock_parse.called) async def test_cooldown_default(self): self.args = self.args._replace(default='Kappa') self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), 1) self.assertEqual(await countdown.fieldSince(self.args), 'Kappa') self.assertTrue(self.mock_parse.called) async def test_cooldown_default_prefix_suffix(self): self.args = self.args._replace(default='Kappa', prefix='[', suffix=']') self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), 1) self.assertEqual(await countdown.fieldSince(self.args), 'Kappa') self.assertTrue(self.mock_parse.called) class TestCustomCommandCustomCountdownFieldNext(TestCustomField): def setUp(self): super().setUp() self.args = self.args._replace(field='next', param='') patcher = patch(countdown.__name__ + '.parse_next_past_cooldown', autospec=True) self.addCleanup(patcher.stop) self.mock_parse = patcher.start() self.mock_parse.return_value = countdown.NextPastCooldown(None, None, None) async def test(self): self.args = self.args._replace(field='') self.assertIsNone(await countdown.fieldNext(self.args)) self.assertFalse(self.mock_parse.called) async def test_none_time(self): self.args = 
self.args._replace(param=None) self.assertIsNone(await countdown.fieldNext(self.args)) self.assertFalse(self.mock_parse.called) async def test_invalid_time(self): self.assertIsNone(await countdown.fieldNext(self.args)) self.assertTrue(self.mock_parse.called) async def test_default(self): self.mock_parse.return_value = countdown.NextPastCooldown( None, countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), None) self.assertEqual(await countdown.fieldNext(self.args), 'None') self.assertTrue(self.mock_parse.called) async def test_default_prefix_suffix(self): self.args = self.args._replace(prefix='[', suffix=']') self.mock_parse.return_value = countdown.NextPastCooldown( None, countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), None) self.assertEqual(await countdown.fieldNext(self.args), 'None') self.assertTrue(self.mock_parse.called) async def test_default_default(self): self.args = self.args._replace(default='Kappa') self.mock_parse.return_value = countdown.NextPastCooldown( None, countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), None) self.assertEqual(await countdown.fieldNext(self.args), 'Kappa') self.assertTrue(self.mock_parse.called) async def test_default_default_prefix_suffix(self): self.args = self.args._replace(default='Kappa', prefix='[', suffix=']') self.mock_parse.return_value = countdown.NextPastCooldown( None, countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), None) self.assertEqual(await countdown.fieldNext(self.args), 'Kappa') self.assertTrue(self.mock_parse.called) async def test_time(self): self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), None, None) self.assertEqual(await countdown.fieldNext(self.args), '01/02/2000 00:00 UTC') self.assertTrue(self.mock_parse.called) async def test_time_12_hour(self): self.mock_parse.return_value = countdown.NextPastCooldown( 
countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), False), None, None) self.assertEqual(await countdown.fieldNext(self.args), '01/02/2000 12:00AM UTC') self.assertTrue(self.mock_parse.called) async def test_future(self): self.args = self.args._replace(field='future') self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), None, None) self.assertEqual(await countdown.fieldNext(self.args), '01/02/2000 00:00 UTC') self.assertTrue(self.mock_parse.called) async def test_time_prefix(self): self.args = self.args._replace(prefix='[') self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), None, None) self.assertEqual(await countdown.fieldNext(self.args), '[01/02/2000 00:00 UTC') self.assertTrue(self.mock_parse.called) async def test_time_prefix_blank(self): self.args = self.args._replace(prefix='') self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), None, None) self.assertEqual(await countdown.fieldNext(self.args), '01/02/2000 00:00 UTC') self.assertTrue(self.mock_parse.called) async def test_time_suffix(self): self.args = self.args._replace(suffix=']') self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), None, None) self.assertEqual(await countdown.fieldNext(self.args), '01/02/2000 00:00 UTC]') self.assertTrue(self.mock_parse.called) async def test_time_suffix_blank(self): self.args = self.args._replace(suffix='') self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), None, None) self.assertEqual(await countdown.fieldNext(self.args), '01/02/2000 00:00 UTC') self.assertTrue(self.mock_parse.called) async def test_cooldown(self): 
self.mock_parse.reset_mock() self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), 0) self.assertEqual(await countdown.fieldNext(self.args), '01/02/2000 00:00 UTC') self.assertTrue(self.mock_parse.called) self.mock_parse.reset_mock() self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), math.inf) self.assertEqual(await countdown.fieldNext(self.args), '01/02/2000 00:00 UTC') self.assertTrue(self.mock_parse.called) self.mock_parse.reset_mock() self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), 1) self.assertEqual(await countdown.fieldNext(self.args), '01/02/2000 00:00 UTC') self.assertTrue(self.mock_parse.called) self.mock_parse.reset_mock() self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), math.nan) self.assertEqual(await countdown.fieldNext(self.args), '01/02/2000 00:00 UTC') self.assertTrue(self.mock_parse.called) self.mock_parse.reset_mock() self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), -1) self.assertEqual(await countdown.fieldNext(self.args), '01/02/2000 00:00 UTC') self.assertTrue(self.mock_parse.called) self.mock_parse.reset_mock() self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, 
tzinfo=timezones.utc), True), countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), -math.inf) self.assertEqual(await countdown.fieldNext(self.args), '01/02/2000 00:00 UTC') self.assertTrue(self.mock_parse.called) class TestCustomCommandCustomCountdownFieldPrevious(TestCustomField): def setUp(self): super().setUp() self.args = self.args._replace(field='previous', param='') patcher = patch(countdown.__name__ + '.parse_next_past_cooldown', autospec=True) self.addCleanup(patcher.stop) self.mock_parse = patcher.start() self.mock_parse.return_value = countdown.NextPastCooldown(None, None, None) async def test(self): self.args = self.args._replace(field='') self.assertIsNone(await countdown.fieldPrevious(self.args)) self.assertFalse(self.mock_parse.called) async def test_none_time(self): self.args = self.args._replace(param=None) self.assertIsNone(await countdown.fieldPrevious(self.args)) self.assertFalse(self.mock_parse.called) async def test_invalid_time(self): self.assertIsNone(await countdown.fieldPrevious(self.args)) self.assertTrue(self.mock_parse.called) async def test_default(self): self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), None, None) self.assertEqual(await countdown.fieldPrevious(self.args), 'None') self.assertTrue(self.mock_parse.called) async def test_default_prefix_suffix(self): self.args = self.args._replace(prefix='[', suffix=']') self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), None, None) self.assertEqual(await countdown.fieldPrevious(self.args), 'None') self.assertTrue(self.mock_parse.called) async def test_default_default(self): self.args = self.args._replace(default='Kappa') self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), None, None) self.assertEqual(await 
countdown.fieldPrevious(self.args), 'Kappa') self.assertTrue(self.mock_parse.called) async def test_default_default_prefix_suffix(self): self.args = self.args._replace(default='Kappa', prefix='[', suffix=']') self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), None, None) self.assertEqual(await countdown.fieldPrevious(self.args), 'Kappa') self.assertTrue(self.mock_parse.called) async def test_time(self): self.mock_parse.return_value = countdown.NextPastCooldown( None, countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), None) self.assertEqual(await countdown.fieldPrevious(self.args), '12/31/1999 00:00 UTC') self.assertTrue(self.mock_parse.called) async def test_time_12_hour(self): self.mock_parse.return_value = countdown.NextPastCooldown( None, countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), False), None) self.assertEqual(await countdown.fieldPrevious(self.args), '12/31/1999 12:00AM UTC') self.assertTrue(self.mock_parse.called) async def test_past(self): self.args = self.args._replace(field='past') self.mock_parse.return_value = countdown.NextPastCooldown( None, countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), None) self.assertEqual(await countdown.fieldPrevious(self.args), '12/31/1999 00:00 UTC') self.assertTrue(self.mock_parse.called) async def test_prev(self): self.args = self.args._replace(field='prev') self.mock_parse.return_value = countdown.NextPastCooldown( None, countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), None) self.assertEqual(await countdown.fieldPrevious(self.args), '12/31/1999 00:00 UTC') self.assertTrue(self.mock_parse.called) async def test_time_prefix(self): self.args = self.args._replace(prefix='[') self.mock_parse.return_value = countdown.NextPastCooldown( None, countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), None) 
self.assertEqual(await countdown.fieldPrevious(self.args), '[12/31/1999 00:00 UTC') self.assertTrue(self.mock_parse.called) async def test_time_prefix_blank(self): self.args = self.args._replace(prefix='') self.mock_parse.return_value = countdown.NextPastCooldown( None, countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), None) self.assertEqual(await countdown.fieldPrevious(self.args), '12/31/1999 00:00 UTC') self.assertTrue(self.mock_parse.called) async def test_time_suffix(self): self.args = self.args._replace(suffix=']') self.mock_parse.return_value = countdown.NextPastCooldown( None, countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), None) self.assertEqual(await countdown.fieldPrevious(self.args), '12/31/1999 00:00 UTC]') self.assertTrue(self.mock_parse.called) async def test_time_suffix_blank(self): self.args = self.args._replace(suffix='') self.mock_parse.return_value = countdown.NextPastCooldown( None, countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), None) self.assertEqual(await countdown.fieldPrevious(self.args), '12/31/1999 00:00 UTC') self.assertTrue(self.mock_parse.called) async def test_cooldown(self): self.mock_parse.reset_mock() self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), 0) self.assertEqual(await countdown.fieldPrevious(self.args), '12/31/1999 00:00 UTC') self.assertTrue(self.mock_parse.called) self.mock_parse.reset_mock() self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), math.inf) self.assertEqual(await countdown.fieldPrevious(self.args), '12/31/1999 00:00 UTC') self.assertTrue(self.mock_parse.called) self.mock_parse.reset_mock() 
self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), 1) self.assertEqual(await countdown.fieldPrevious(self.args), '12/31/1999 00:00 UTC') self.assertTrue(self.mock_parse.called) self.mock_parse.reset_mock() self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), math.nan) self.assertEqual(await countdown.fieldPrevious(self.args), '12/31/1999 00:00 UTC') self.assertTrue(self.mock_parse.called) self.mock_parse.reset_mock() self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), -1) self.assertEqual(await countdown.fieldPrevious(self.args), '12/31/1999 00:00 UTC') self.assertTrue(self.mock_parse.called) self.mock_parse.reset_mock() self.mock_parse.return_value = countdown.NextPastCooldown( countdown.DateTime( datetime(2000, 1, 2, 0, 0, tzinfo=timezones.utc), True), countdown.DateTime( datetime(1999, 12, 31, 0, 0, tzinfo=timezones.utc), True), -math.inf) self.assertEqual(await countdown.fieldPrevious(self.args), '12/31/1999 00:00 UTC') self.assertTrue(self.mock_parse.called)
# codeparrot/github-code-clean -- dataset boundary marker: the content above
# (countdown custom-field unit tests) and below (SAPL/Django materia forms)
# are two unrelated source files concatenated by the corpus tooling.
from distutils.command.clean import clean import logging import os from crispy_forms.bootstrap import Alert, InlineRadios from crispy_forms.layout import (HTML, Button, Field, Fieldset, Layout, Row, Div) from django import forms from django.conf import settings from django.contrib.contenttypes.models import ContentType from django.core.exceptions import ObjectDoesNotExist, ValidationError from django.core.files.base import File from django.db import models, transaction from django.db.models import Max, Q, F from django.forms import ModelChoiceField, ModelForm, widgets from django.forms.forms import Form from django.forms.models import ModelMultipleChoiceField from django.forms.widgets import CheckboxSelectMultiple, HiddenInput, Select from django.urls.base import reverse from django.utils import timezone from django.utils.encoding import force_text from django.utils.html import format_html from django.utils.safestring import mark_safe from django.utils.translation import ugettext_lazy as _ import django_filters from cmj.mixins import GoogleRecapthaMixin from cmj.utils import CHOICE_SIGNEDS import sapl from sapl.base.models import AppConfig, Autor, TipoAutor from sapl.comissoes.models import Comissao, Participacao, Composicao from sapl.compilacao.models import (STATUS_TA_IMMUTABLE_PUBLIC, STATUS_TA_PRIVATE) from sapl.crispy_layout_mixin import (SaplFormLayout, form_actions, to_column, to_row) from sapl.crispy_layout_mixin import SaplFormHelper from sapl.materia.models import (AssuntoMateria, Autoria, MateriaAssunto, MateriaLegislativa, Orgao, RegimeTramitacao, TipoDocumento, TipoProposicao, StatusTramitacao, UnidadeTramitacao) from sapl.norma.models import (LegislacaoCitada, NormaJuridica, TipoNormaJuridica) from sapl.parlamentares.models import Legislatura, Partido, Parlamentar from sapl.protocoloadm.models import Protocolo, DocumentoAdministrativo, Anexado from sapl.settings import MAX_DOC_UPLOAD_SIZE from sapl.utils import (YES_NO_CHOICES, 
SEPARADOR_HASH_PROPOSICAO, ChoiceWithoutValidationField, MateriaPesquisaOrderingFilter, RangeWidgetOverride, autor_label, autor_modal, gerar_hash_arquivo, models_with_gr_for_model, qs_override_django_filter, choice_anos_com_materias, FilterOverridesMetaMixin, FileFieldCheckMixin, lista_anexados) from .models import (AcompanhamentoMateria, Anexada, Autoria, DespachoInicial, DocumentoAcessorio, Numeracao, Proposicao, Relatoria, TipoMateriaLegislativa, Tramitacao, UnidadeTramitacao) def CHOICE_TRAMITACAO(): return [('', 'Ambos'), (1, 'Sim'), (0, 'Não')] def CHOICE_TIPO_LISTAGEM(): return [ (1, _('Detalhada')), (2, _('Simplificada')), ] class AdicionarVariasAutoriasFilterSet(django_filters.FilterSet): class Meta: model = Autor fields = ['nome'] def __init__(self, *args, **kwargs): super(AdicionarVariasAutoriasFilterSet, self).__init__(*args, **kwargs) row1 = to_row([('nome', 12)]) self.form.helper = SaplFormHelper() self.form.helper.form_method = 'GET' self.form.helper.layout = Layout( Fieldset(_('Filtrar Autores'), row1, form_actions(label='Filtrar')) ) class OrgaoForm(ModelForm): class Meta: model = Orgao fields = ['nome', 'sigla', 'unidade_deliberativa', 'endereco', 'telefone'] @transaction.atomic def save(self, commit=True): orgao = super(OrgaoForm, self).save(commit) content_type = ContentType.objects.get_for_model(Orgao) object_id = orgao.pk tipo = TipoAutor.objects.get(content_type=content_type) nome = orgao.nome + ' - ' + orgao.sigla Autor.objects.create( content_type=content_type, object_id=object_id, tipo=tipo, nome=nome ) return orgao class ReceberProposicaoForm(Form): cod_hash = forms.CharField(label='Código do Documento', required=True) def __init__(self, *args, **kwargs): row1 = to_row([('cod_hash', 12)]) self.helper = SaplFormHelper() self.helper.layout = Layout( Fieldset( _('Incorporar Proposição'), row1, form_actions(label='Buscar Proposição') ) ) super(ReceberProposicaoForm, self).__init__(*args, **kwargs) class 
MateriaSimplificadaForm(FileFieldCheckMixin, ModelForm): logger = logging.getLogger(__name__) class Meta: model = MateriaLegislativa fields = ['tipo', 'numero', 'ano', 'data_apresentacao', 'numero_protocolo', 'regime_tramitacao', 'em_tramitacao', 'ementa', 'tipo_apresentacao', 'texto_original'] widgets = { 'numero_protocolo': forms.TextInput(attrs={'readonly': True}), } def __init__(self, *args, **kwargs): row1 = to_row([('tipo', 6), ('numero', 3), ('ano', 3)]) row2 = to_row([('data_apresentacao', 6), ('numero_protocolo', 6)]) row3 = to_row([('regime_tramitacao', 6), ('em_tramitacao', 3), ('tipo_apresentacao', 3)]) row4 = to_row([('ementa', 12)]) row5 = to_row([('texto_original', 12)]) self.helper = SaplFormHelper() self.helper.layout = Layout( Fieldset( _('Formulário Simplificado'), row1, row2, row3, row4, row5, form_actions(label='Salvar') ) ) super(MateriaSimplificadaForm, self).__init__(*args, **kwargs) class MateriaLegislativaForm(FileFieldCheckMixin, ModelForm): logger = logging.getLogger(__name__) tipo_autor = ModelChoiceField(label=_('Tipo Autor'), required=False, queryset=TipoAutor.objects.all(), empty_label=_('------'),) autor = forms.ModelChoiceField(required=False, empty_label='------', queryset=Autor.objects.all() ) class Meta: model = MateriaLegislativa exclude = ['texto_articulado', 'autores', 'proposicao', 'anexadas', 'data_ultima_atualizacao', '_paginas', 'checkcheck', 'arquivado', 'metadata'] widgets = { 'user': forms.HiddenInput(), 'ip': forms.HiddenInput(), } def __init__(self, *args, **kwargs): super(MateriaLegislativaForm, self).__init__(*args, **kwargs) self.fields['ementa'].widget.attrs['maxlength'] = 1000 if self.instance and self.instance.pk: self.fields['tipo_autor'] = forms.CharField(required=False, widget=forms.HiddenInput()) self.fields['autor'] = forms.CharField(required=False, widget=forms.HiddenInput()) p = Protocolo.objects.filter( numero=kwargs['instance'].numero_protocolo, ano=kwargs['instance'].ano) if 
kwargs['instance'].numero_protocolo and p.exists(): if not kwargs['initial']['user'].is_superuser: self.fields['numero_protocolo'].widget.attrs['readonly'] = True self.fields['numero_protocolo'].help_text = p[0].epigrafe def clean(self): super(MateriaLegislativaForm, self).clean() if not self.is_valid(): return self.cleaned_data cleaned_data = self.cleaned_data data_apresentacao = cleaned_data['data_apresentacao'] ano = cleaned_data['ano'] protocolo = cleaned_data['numero_protocolo'] protocolo_antigo = self.instance.numero_protocolo if protocolo: pn = Protocolo.objects.filter(numero=protocolo, ano=ano) if not pn.exists(): self.logger.error("Protocolo %s/%s não" " existe." % (protocolo, ano)) raise ValidationError(_('Protocolo %s/%s não' ' existe' % (protocolo, ano))) if not cleaned_data['user'].is_superuser: if pn.first().conteudo_protocolado and \ pn.first().conteudo_protocolado != self.instance: self.logger.error("Protocolo %s/%s ja possui" " documento vinculado." % (protocolo, ano)) raise ValidationError(_('Protocolo %s/%s ja possui' ' documento vinculado - %s.' % (protocolo, ano, pn.first().conteudo_protocolado))) if pn.first().tipo_conteudo_protocolado != cleaned_data['tipo']: self.logger.error("Tipo do Protocolo ({}) deve ser o mesmo do Tipo Matéria ({})." 
.format(cleaned_data['tipo'], pn.first().tipo_conteudo_protocolado)) raise ValidationError( _('Tipo do Protocolo deve ser o mesmo do Tipo de Matéria')) elif protocolo_antigo and not protocolo: if not cleaned_data['user'].is_superuser: self.logger.error( "Usuário não possui permissão para desvincular protocolo via edição de matéria") raise ValidationError( _('Usuário não possui permissão para desvincular protocolo via edição de matéria')) ano_origem_externa = cleaned_data['ano_origem_externa'] data_origem_externa = cleaned_data['data_origem_externa'] if ano_origem_externa and data_origem_externa and \ ano_origem_externa != data_origem_externa.year: self.logger.error("O ano de origem externa da matéria ({}) é " " diferente do ano na data de origem externa ({})." .format(ano_origem_externa, data_origem_externa)) raise ValidationError(_("O ano de origem externa da matéria não " "pode ser diferente do ano na data de " "origem externa")) texto_original = self.cleaned_data.get('texto_original', False) if texto_original and texto_original.size > MAX_DOC_UPLOAD_SIZE: raise ValidationError("O arquivo Texto Original deve ser menor que {0:.1f} mb, o tamanho atual desse arquivo é {1:.1f} mb" .format((MAX_DOC_UPLOAD_SIZE / 1024) / 1024, (texto_original.size / 1024) / 1024)) return cleaned_data def save(self, commit=False): iold = None if not self.instance.pk: primeiro_autor = True else: primeiro_autor = False iold = MateriaLegislativa.objects.get(pk=self.instance.pk) ano = self.cleaned_data['ano'] protocolo = self.cleaned_data['numero_protocolo'] ano_antigo = iold.ano if iold else 0 protocolo_antigo = iold.numero_protocolo if iold else 0 materia = super(MateriaLegislativaForm, self).save(commit) materia.save() if protocolo: pn = Protocolo.objects.filter(numero=protocolo, ano=ano).first() pn.conteudo_protocolado = materia pn.tipo_conteudo_protocolado = materia.tipo pn.save() if protocolo_antigo and protocolo != protocolo_antigo: po = Protocolo.objects.filter( 
numero=protocolo_antigo, ano=ano_antigo).first() if po: po.conteudo_protocolado = None po.tipo_conteudo_protocolado = None po.save() if self.cleaned_data['autor']: autoria = Autoria() autoria.primeiro_autor = primeiro_autor autoria.materia = materia autoria.autor = self.cleaned_data['autor'] autoria.save() return materia class UnidadeTramitacaoForm(ModelForm): logger = logging.getLogger(__name__) class Meta: model = UnidadeTramitacao fields = ['comissao', 'orgao', 'parlamentar'] def clean(self): super(UnidadeTramitacaoForm, self).clean() if not self.is_valid(): return self.cleaned_data cleaned_data = self.cleaned_data for key in list(cleaned_data.keys()): if cleaned_data[key] is None: del cleaned_data[key] if len(cleaned_data) != 1: msg = _('Somente um campo deve ser preenchido!') self.logger.error("Somente um campo deve ser preenchido!") raise ValidationError(msg) return cleaned_data def save(self, commit=False): unidade = super(UnidadeTramitacaoForm, self).save(commit) cd = self.cleaned_data if not cd.get('orgao'): unidade.orgao = None if not cd.get('parlamentar'): unidade.parlamentar = None if not cd.get('comissao'): unidade.comissao = None unidade.save() return unidade class AcompanhamentoMateriaForm(GoogleRecapthaMixin, ModelForm): class Meta: model = AcompanhamentoMateria fields = ['email'] def __init__(self, *args, **kwargs): kwargs['title_label'] = _('Acompanhamento de Matéria por e-mail') kwargs['action_label'] = _('Cadastrar') super().__init__(*args, **kwargs) class DocumentoAcessorioForm(FileFieldCheckMixin, ModelForm): data = forms.DateField(required=True) numero_protocolo = forms.IntegerField( required=False, label=_('Número do Protocolo')) ano_protocolo = forms.IntegerField( required=False, label=_('Ano do Protocolo')) class Meta: model = DocumentoAcessorio fields = ['tipo', 'nome', 'data', 'autor', 'ementa', 'indexacao', 'arquivo', 'numero_protocolo', 'ano_protocolo'] def __init__(self, *args, **kwargs): self.user = kwargs['initial'].pop('user', 
None) super().__init__(*args, **kwargs) if self.instance.pk: if self.instance.protocolo_gr.exists(): self.fields['numero_protocolo'].initial = self.instance.protocolo_gr.first( ).numero self.fields['ano_protocolo'].initial = self.instance.protocolo_gr.first( ).ano if not self.user or not self.user.is_superuser: self.fields['numero_protocolo'].widget.attrs['readonly'] = True self.fields['ano_protocolo'].widget.attrs['readonly'] = True def clean(self): super(DocumentoAcessorioForm, self).clean() if not self.is_valid(): return self.cleaned_data cd = self.cleaned_data arquivo = cd.get('arquivo', False) if arquivo and arquivo.size > MAX_DOC_UPLOAD_SIZE: raise ValidationError("O arquivo Texto Integral deve ser menor que {0:.1f} mb, o tamanho atual desse arquivo é {1:.1f} mb" .format((MAX_DOC_UPLOAD_SIZE / 1024) / 1024, (arquivo.size / 1024) / 1024)) if not self.instance.pk or self.user.is_superuser: if cd['numero_protocolo'] and cd['ano_protocolo']: p_list = Protocolo.objects.filter( numero=cd['numero_protocolo'], ano=cd['ano_protocolo']) if not p_list.exists(): raise ValidationError(_('Protocolo não encontrado!')) return cd def save(self, commit=True): cd = self.cleaned_data documento = super().save(commit) p = Protocolo.objects.filter( numero=cd['numero_protocolo'], ano=cd['ano_protocolo']).first() if p: p.tipo_conteudo_protocolado = documento.tipo p.conteudo_protocolado = documento p.save() return documento class DocumentoAcessorioProtocoloForm(FileFieldCheckMixin, ModelForm): logger = logging.getLogger(__name__) data = forms.DateField(required=True) materia = forms.ModelChoiceField( label=MateriaLegislativa._meta.verbose_name, required=False, queryset=MateriaLegislativa.objects.all(), empty_label='Selecione', widget=forms.HiddenInput()) tipo_materia = forms.ModelChoiceField( label=TipoMateriaLegislativa._meta.verbose_name, required=False, queryset=TipoMateriaLegislativa.objects.all(), empty_label='Selecione') numero_materia = forms.CharField( label='Número', 
required=False) ano_materia = forms.CharField( label='Ano', required=False) class Meta: model = DocumentoAcessorio fields = ['tipo', 'nome', 'data', 'autor', 'ementa', 'indexacao', 'arquivo', 'tipo_materia', 'numero_materia', 'ano_materia', 'materia' ] def clean(self): super(DocumentoAcessorioProtocoloForm, self).clean() if not self.is_valid(): return self.cleaned_data cleaned_data = self.cleaned_data arquivo = self.cleaned_data.get('arquivo', False) if arquivo and arquivo.size > MAX_DOC_UPLOAD_SIZE: raise ValidationError("O arquivo Texto Integral deve ser menor que {0:.1f} mb, o tamanho atual desse arquivo é {1:.1f} mb" .format((MAX_DOC_UPLOAD_SIZE / 1024) / 1024, (arquivo.size / 1024) / 1024)) tm, am, nm = (cleaned_data.get('tipo_materia', ''), cleaned_data.get('ano_materia', ''), cleaned_data.get('numero_materia', '')) if tm and am and nm: try: self.logger.debug("Tentando obter objeto MateriaLegislativa (tipo_id={}, ano={}, numero={})." .format(tm, am, nm)) materia_de_vinculo = MateriaLegislativa.objects.get( tipo_id=tm, ano=am, numero=nm ) except ObjectDoesNotExist: self.logger.error("Objeto MateriaLegislativa vinculada (tipo_id={}, ano={}, numero={}) não existe!" .format(tm, am, nm)) raise ValidationError(_('Matéria Vinculada não existe!')) else: self.logger.info("MateriaLegislativa vinculada (tipo_id={}, ano={}, numero={}) com sucesso." 
.format(tm, am, nm)) cleaned_data['materia'] = materia_de_vinculo return self.cleaned_data def save(self, commit=True): documento = super(DocumentoAcessorioProtocoloForm, self).save(commit) protocolo = self.initial['protocolo'] protocolo.tipo_conteudo_protocolado = documento.tipo protocolo.conteudo_protocolado = documento protocolo.save() return documento def __init__(self, *args, **kwargs): fields = [] row1 = to_row( [('tipo', 4), ('nome', 5), ('data', 3)]) row2 = to_row( [('autor', 5), ('arquivo', 7), ]) row3 = to_row( [('ementa', 8), ('indexacao', 4), ]) fields.append( Fieldset(_('Dados do Documento Acessório'), row1, row2, row3) ) row0 = to_row( [('tipo_materia', 6), ('numero_materia', 3), ('ano_materia', 3)]) fields.append( Fieldset(_('Vincular a Matéria Legislativa'), row0, to_column( (Alert('<strong></strong><br><span></span>', css_class="ementa_materia hidden alert-info", dismiss=False), 12))) ) self.helper = SaplFormHelper() self.helper.layout = SaplFormLayout(*fields) super().__init__(*args, **kwargs) class RelatoriaForm(ModelForm): logger = logging.getLogger(__name__) composicao = forms.ModelChoiceField( required=True, empty_label='---------', queryset=Composicao.objects.all(), label=_('Composição') ) class Meta: model = Relatoria fields = [ 'comissao', 'data_designacao_relator', 'data_destituicao_relator', 'tipo_fim_relatoria', 'composicao', 'parlamentar' ] widgets = {'comissao': forms.Select(attrs={'disabled': 'disabled'})} def __init__(self, *args, **kwargs): row1 = to_row([('comissao', 12)]) row2 = to_row([('data_designacao_relator', 4), ('data_destituicao_relator', 4), ('tipo_fim_relatoria', 4)]) row3 = to_row([('composicao', 4), ('parlamentar', 8)]) self.helper = SaplFormHelper() self.helper.layout = SaplFormLayout( Fieldset(_('Relatoria'), row1, row2, row3)) super().__init__(*args, **kwargs) comissao_pk = kwargs['initial']['comissao'] composicoes = Composicao.objects.filter(comissao_id=comissao_pk) self.fields['composicao'].choices = [('', 
'---------')] + \ [(c.pk, c) for c in composicoes] # UPDATE if self.initial.get('composicao') and self.initial.get('parlamentar'): parlamentares = [(p.parlamentar.id, p.parlamentar) for p in Participacao.objects.filter(composicao__comissao_id=comissao_pk, composicao_id=self.initial['composicao'])] self.fields['parlamentar'].choices = [ ('', '---------')] + parlamentares # INSERT else: self.fields['parlamentar'].choices = [('', '---------')] def clean(self): super().clean() cleaned_data = self.cleaned_data if not self.is_valid(): return cleaned_data try: self.logger.debug("Tentando obter objeto Comissao.") comissao = Comissao.objects.get(id=self.initial['comissao']) except ObjectDoesNotExist as e: self.logger.error( "Objeto Comissao não encontrado com id={}. A localização atual deve ser uma comissão. ".format( self.initial['comissao']) + str(e)) msg = _('A localização atual deve ser uma comissão.') raise ValidationError(msg) else: cleaned_data['comissao'] = comissao if cleaned_data['data_designacao_relator'] < cleaned_data['composicao'].periodo.data_inicio \ or cleaned_data['data_designacao_relator'] > cleaned_data['composicao'].periodo.data_fim: raise ValidationError( _('Data de designação deve estar dentro do período da composição.')) return cleaned_data class TramitacaoForm(ModelForm): urgente = forms.ChoiceField(required=True, choices=YES_NO_CHOICES, initial=False, label=_("Urgente?")) logger = logging.getLogger(__name__) class Meta: model = Tramitacao fields = ['data_tramitacao', 'unidade_tramitacao_local', 'status', 'turno', 'urgente', 'unidade_tramitacao_destino', 'data_encaminhamento', 'data_fim_prazo', 'texto', 'user', 'ip'] widgets = {'user': forms.HiddenInput(), 'ip': forms.HiddenInput()} def __init__(self, *args, **kwargs): super(TramitacaoForm, self).__init__(*args, **kwargs) self.fields['data_tramitacao'].initial = timezone.now().date() ust = UnidadeTramitacao.objects.select_related().all() unidade_tramitacao_destino = [('', '---------')] + [(ut.pk, 
ut) for ut in ust if ut.comissao and ut.comissao.ativa] unidade_tramitacao_destino.extend( [(ut.pk, ut) for ut in ust if ut.orgao]) unidade_tramitacao_destino.extend( [(ut.pk, ut) for ut in ust if ut.parlamentar]) self.fields['unidade_tramitacao_destino'].choices = unidade_tramitacao_destino def clean(self): super(TramitacaoForm, self).clean() if not self.is_valid(): return self.cleaned_data cleaned_data = self.cleaned_data if 'data_encaminhamento' in cleaned_data: data_enc_form = cleaned_data['data_encaminhamento'] if 'data_fim_prazo' in cleaned_data: data_prazo_form = cleaned_data['data_fim_prazo'] if 'data_tramitacao' in cleaned_data: data_tram_form = cleaned_data['data_tramitacao'] ultima_tramitacao = Tramitacao.objects.filter( materia_id=self.instance.materia_id).exclude( id=self.instance.id).order_by( '-data_tramitacao', '-id').first() if not self.instance.data_tramitacao: if ultima_tramitacao: destino = ultima_tramitacao.unidade_tramitacao_destino if (destino != self.cleaned_data['unidade_tramitacao_local']): self.logger.error("A origem da nova tramitação ({}) não é igual ao " "destino da última adicionada ({})!" .format(self.cleaned_data['unidade_tramitacao_local'], destino)) msg = _('A origem da nova tramitação deve ser igual ao ' 'destino da última adicionada!') raise ValidationError(msg) if cleaned_data['data_tramitacao'] > timezone.now().date(): self.logger.error('A data de tramitação informada ({}) não é ' + 'menor ou igual a data de hoje!'.format(cleaned_data['data_tramitacao'])) msg = _( 'A data de tramitação deve ser ' + 'menor ou igual a data de hoje!') raise ValidationError(msg) if (ultima_tramitacao and data_tram_form < ultima_tramitacao.data_tramitacao): msg = _('A data da nova tramitação deve ser ' + 'maior que a data da última tramitação!') self.logger.error("A data da nova tramitação ({}) deve ser " "maior que a data da última tramitação ({})!" 
.format(data_tram_form, ultima_tramitacao.data_tramitacao)) raise ValidationError(msg) if data_enc_form: if data_enc_form < data_tram_form: msg = _('A data de encaminhamento deve ser ' + 'maior que a data de tramitação!') self.logger.error("A data de encaminhamento ({}) deve ser " "maior que a data de tramitação! ({})" .format(data_enc_form, data_tram_form)) raise ValidationError(msg) if data_prazo_form: if data_prazo_form < data_tram_form: msg = _('A data fim de prazo deve ser ' + 'maior que a data de tramitação!') self.logger.error("A data fim de prazo ({}) deve ser " + "maior que a data de tramitação ({})!" .format(data_prazo_form, data_tram_form)) raise ValidationError(msg) return cleaned_data @transaction.atomic def save(self, commit=True): tramitacao = super(TramitacaoForm, self).save(commit) materia = tramitacao.materia materia.em_tramitacao = False if tramitacao.status.indicador == "F" else True materia.save() tramitar_anexadas = sapl.base.models.AppConfig.attr( 'tramitacao_materia') if tramitar_anexadas: lista_tramitacao = [] anexadas_list = lista_anexados(materia) for ma in anexadas_list: if not ma.tramitacao_set.all() \ or ma.tramitacao_set.first().unidade_tramitacao_destino == tramitacao.unidade_tramitacao_local: ma.em_tramitacao = False if tramitacao.status.indicador == "F" else True ma.save() lista_tramitacao.append(Tramitacao( status=tramitacao.status, materia=ma, data_tramitacao=tramitacao.data_tramitacao, unidade_tramitacao_local=tramitacao.unidade_tramitacao_local, data_encaminhamento=tramitacao.data_encaminhamento, unidade_tramitacao_destino=tramitacao.unidade_tramitacao_destino, urgente=tramitacao.urgente, turno=tramitacao.turno, texto=tramitacao.texto, data_fim_prazo=tramitacao.data_fim_prazo, user=tramitacao.user, ip=tramitacao.ip )) Tramitacao.objects.bulk_create(lista_tramitacao) return tramitacao # Compara se os campos de duas tramitações são iguais, # exceto os campos id, documento_id e timestamp def compara_tramitacoes_mat(tramitacao1, 
tramitacao2): if not tramitacao1 or not tramitacao2: return False lst_items = ['id', 'materia_id', 'timestamp'] values = [(k, v) for k, v in tramitacao1.__dict__.items() if ((k not in lst_items) and (k[0] != '_'))] other_values = [(k, v) for k, v in tramitacao2.__dict__.items() if (k not in lst_items and k[0] != '_')] return values == other_values class TramitacaoUpdateForm(TramitacaoForm): unidade_tramitacao_local = forms.ModelChoiceField( queryset=UnidadeTramitacao.objects.all(), widget=forms.HiddenInput()) data_tramitacao = forms.DateField(widget=forms.HiddenInput()) logger = logging.getLogger(__name__) class Meta: model = Tramitacao fields = ['data_tramitacao', 'unidade_tramitacao_local', 'status', 'turno', 'urgente', 'unidade_tramitacao_destino', 'data_encaminhamento', 'data_fim_prazo', 'texto', 'user', 'ip' ] widgets = { 'data_encaminhamento': forms.DateInput(format='%d/%m/%Y'), 'data_fim_prazo': forms.DateInput(format='%d/%m/%Y'), 'user': forms.HiddenInput(), 'ip': forms.HiddenInput() } def clean(self): super(TramitacaoUpdateForm, self).clean() if not self.is_valid(): return self.cleaned_data cd = self.cleaned_data obj = self.instance ultima_tramitacao = Tramitacao.objects.filter( materia_id=obj.materia_id).order_by( '-data_tramitacao', '-id').first() # Se a Tramitação que está sendo editada não for a mais recente, # ela não pode ter seu destino alterado. if ultima_tramitacao != obj: if cd['unidade_tramitacao_destino'] != \ obj.unidade_tramitacao_destino: self.logger.error("Você não pode mudar a Unidade de Destino desta " "tramitação para {}, pois irá conflitar com a Unidade " "Local da tramitação seguinte ({})." 
.format(cd['unidade_tramitacao_destino'], obj.unidade_tramitacao_destino)) raise ValidationError( 'Você não pode mudar a Unidade de Destino desta ' 'tramitação, pois irá conflitar com a Unidade ' 'Local da tramitação seguinte') cd['data_tramitacao'] = obj.data_tramitacao cd['unidade_tramitacao_local'] = obj.unidade_tramitacao_local return cd @transaction.atomic def save(self, commit=True): ant_tram_principal = Tramitacao.objects.get(id=self.instance.id) nova_tram_principal = super(TramitacaoUpdateForm, self).save(commit) materia = nova_tram_principal.materia if materia.em_tramitacao != (False if nova_tram_principal.status.indicador == "F" else True): materia.em_tramitacao = False if nova_tram_principal.status.indicador == "F" else True materia.save() tramitar_anexadas = sapl.base.models.AppConfig.attr( 'tramitacao_materia') if tramitar_anexadas: anexadas_list = lista_anexados(materia) for ma in anexadas_list: tram_anexada = ma.tramitacao_set.first() if compara_tramitacoes_mat(ant_tram_principal, tram_anexada): tram_anexada.status = nova_tram_principal.status tram_anexada.data_tramitacao = nova_tram_principal.data_tramitacao tram_anexada.unidade_tramitacao_local = nova_tram_principal.unidade_tramitacao_local tram_anexada.data_encaminhamento = nova_tram_principal.data_encaminhamento tram_anexada.unidade_tramitacao_destino = nova_tram_principal.unidade_tramitacao_destino tram_anexada.urgente = nova_tram_principal.urgente tram_anexada.turno = nova_tram_principal.turno tram_anexada.texto = nova_tram_principal.texto tram_anexada.data_fim_prazo = nova_tram_principal.data_fim_prazo tram_anexada.user = nova_tram_principal.user tram_anexada.ip = nova_tram_principal.ip tram_anexada.save() ma.em_tramitacao = False if nova_tram_principal.status.indicador == "F" else True ma.save() return nova_tram_principal class LegislacaoCitadaForm(ModelForm): tipo = forms.ModelChoiceField( label=_('Tipo Norma'), required=True, queryset=TipoNormaJuridica.objects.all(), 
empty_label='Selecione', ) numero = forms.CharField(label='Número', required=True) ano = forms.CharField(label='Ano', required=True) logger = logging.getLogger(__name__) class Meta: model = LegislacaoCitada fields = ['tipo', 'numero', 'ano', 'disposicoes', 'parte', 'livro', 'titulo', 'capitulo', 'secao', 'subsecao', 'artigo', 'paragrafo', 'inciso', 'alinea', 'item'] def clean(self): super(LegislacaoCitadaForm, self).clean() if not self.is_valid(): return self.cleaned_data cleaned_data = self.cleaned_data try: self.logger.debug("Tentando obter objeto NormalJuridica (numero={}, ano={}, tipo={})." .format(cleaned_data['numero'], cleaned_data['ano'], cleaned_data['tipo'])) norma = NormaJuridica.objects.get( numero=cleaned_data['numero'], ano=cleaned_data['ano'], tipo=cleaned_data['tipo']) except ObjectDoesNotExist: self.logger.error("A norma a ser inclusa (numero={}, ano={}, tipo={}) " "não existe no cadastro de Normas." .format(cleaned_data['numero'], cleaned_data['ano'], cleaned_data['tipo'])) msg = _('A norma a ser inclusa não existe no cadastro' ' de Normas.') raise ValidationError(msg) else: cleaned_data['norma'] = norma filtro_base = LegislacaoCitada.objects.filter( materia=self.instance.materia, norma=self.cleaned_data['norma'], disposicoes=self.cleaned_data['disposicoes'], parte=self.cleaned_data['parte'], livro=self.cleaned_data['livro'], titulo=self.cleaned_data['titulo'], capitulo=self.cleaned_data['capitulo'], secao=self.cleaned_data['secao'], subsecao=self.cleaned_data['subsecao'], artigo=self.cleaned_data['artigo'], paragrafo=self.cleaned_data['paragrafo'], inciso=self.cleaned_data['inciso'], alinea=self.cleaned_data['alinea'], item=self.cleaned_data['item']) if not self.instance.id: if filtro_base.exists(): msg = _('Essa Legislação já foi cadastrada.') self.logger.error("Essa Legislação já foi cadastrada.") raise ValidationError(msg) else: if filtro_base.exclude(id=self.instance.id).exists(): msg = _('Essa Legislação já foi cadastrada.') 
self.logger.error("Essa Legislação já foi cadastrada.") raise ValidationError(msg) return cleaned_data def save(self, commit=False): legislacao = super(LegislacaoCitadaForm, self).save(commit) legislacao.norma = self.cleaned_data['norma'] legislacao.save() return legislacao class NumeracaoForm(ModelForm): logger = logging.getLogger(__name__) class Meta: model = Numeracao fields = ['tipo_materia', 'numero_materia', 'ano_materia', 'data_materia'] def clean(self): super(NumeracaoForm, self).clean() if not self.is_valid(): return self.cleaned_data try: self.logger.info("Tentando obter objeto MateriaLegislativa (numero={}, ano={}. tipo={})." .format(self.cleaned_data['numero_materia'], self.cleaned_data['ano_materia'], self.cleaned_data['tipo_materia'])) MateriaLegislativa.objects.get( numero=self.cleaned_data['numero_materia'], ano=self.cleaned_data['ano_materia'], tipo=self.cleaned_data['tipo_materia']) except ObjectDoesNotExist: msg = _('A matéria a ser inclusa não existe no cadastro' ' de matérias legislativas.') self.logger.error("A MateriaLegislativa a ser inclusa (numero={}, ano={}. tipo={}) não existe no cadastro de matérias legislativas." 
.format(self.cleaned_data['numero_materia'], self.cleaned_data['ano_materia'], self.cleaned_data['tipo_materia'])) raise ValidationError(msg) if Numeracao.objects.filter( materia=self.instance.materia, tipo_materia=self.cleaned_data['tipo_materia'], ano_materia=self.cleaned_data['ano_materia'], numero_materia=self.cleaned_data['numero_materia'] ).exists(): msg = _('Essa numeração já foi cadastrada.') self.logger.error("Essa numeração (materia={}, tipo_materia={}, ano_materia={}, numero_materia={}) " "já foi cadastrada.".format(self.instance.materia, self.cleaned_data['tipo_materia'], self.cleaned_data['ano_materia'], self.cleaned_data['numero_materia'])) raise ValidationError(msg) return self.cleaned_data class AnexadaForm(ModelForm): logger = logging.getLogger(__name__) tipo = forms.ModelChoiceField( label='Tipo', required=True, queryset=TipoMateriaLegislativa.objects.all(), empty_label='Selecione', ) numero = forms.IntegerField(label='Número', required=True) ano = forms.CharField(label='Ano', required=True) def __init__(self, *args, **kwargs): return super(AnexadaForm, self).__init__(*args, **kwargs) def clean(self): super(AnexadaForm, self).clean() if not self.is_valid(): return self.cleaned_data cleaned_data = self.cleaned_data data_anexacao = cleaned_data['data_anexacao'] data_desanexacao = cleaned_data['data_desanexacao'] if cleaned_data['data_desanexacao'] else data_anexacao if data_anexacao > data_desanexacao: self.logger.error( "Data de anexação posterior à data de desanexação.") raise ValidationError( _("Data de anexação posterior à data de desanexação.")) try: self.logger.info("Tentando obter objeto MateriaLegislativa (numero={}, ano={}, tipo={})." .format(cleaned_data['numero'], cleaned_data['ano'], cleaned_data['tipo'])) materia_anexada = MateriaLegislativa.objects.get( numero=cleaned_data['numero'], ano=cleaned_data['ano'], tipo=cleaned_data['tipo']) except ObjectDoesNotExist: msg = _('A {} {}/{} não existe no cadastro de matérias legislativas.' 
                    # (continuation of AnexadaForm.clean() started above)
                    .format(cleaned_data['tipo'], cleaned_data['numero'],
                            cleaned_data['ano']))
            self.logger.error("A matéria a ser anexada não existe no cadastro"
                              " de matérias legislativas.")
            raise ValidationError(msg)

        materia_principal = self.instance.materia_principal

        # A materia cannot be attached to itself.
        if materia_principal == materia_anexada:
            self.logger.error("Matéria não pode ser anexada a si mesma.")
            raise ValidationError(_('Matéria não pode ser anexada a si mesma'))

        # Reject duplicates (ignoring this very instance on edit).
        is_anexada = Anexada.objects.filter(
            materia_principal=materia_principal,
            materia_anexada=materia_anexada
        ).exclude(pk=self.instance.pk).exists()

        if is_anexada:
            self.logger.error("Matéria já se encontra anexada.")
            raise ValidationError(_('Matéria já se encontra anexada'))

        # Breadth-first walk over the attachment graph starting from the
        # materia being attached; if we ever reach materia_principal again
        # the attachment would create a cycle.
        ciclico = False
        anexadas_anexada = Anexada.objects.filter(
            materia_principal=materia_anexada)
        while anexadas_anexada and not ciclico:
            anexadas = []
            for anexa in anexadas_anexada:
                if materia_principal == anexa.materia_anexada:
                    ciclico = True
                else:
                    for a in Anexada.objects.filter(materia_principal=anexa.materia_anexada):
                        anexadas.append(a)
            anexadas_anexada = anexadas

        if ciclico:
            self.logger.error(
                "A matéria não pode ser anexada por uma de suas anexadas.")
            raise ValidationError(
                _("A matéria não pode ser anexada por uma de suas anexadas."))

        # Hand the resolved instance over to save().
        cleaned_data['materia_anexada'] = materia_anexada

        return cleaned_data

    def save(self, commit=False):
        """Persist the Anexada, wiring in the materia resolved by clean()."""
        anexada = super(AnexadaForm, self).save(commit)
        anexada.materia_anexada = self.cleaned_data['materia_anexada']
        anexada.save()
        return anexada

    class Meta:
        model = Anexada
        fields = ['tipo', 'numero', 'ano',
                  'data_anexacao', 'data_desanexacao']


class MateriaLegislativaFilterSet(django_filters.FilterSet):
    """django-filter FilterSet backing the MateriaLegislativa search screen."""

    ano = django_filters.ChoiceFilter(required=False,
                                      label='Ano da Matéria',
                                      choices=choice_anos_com_materias)

    numero = django_filters.CharFilter(
        label=_('Número'),
        method='filter_numero'
    )

    autoria__autor = django_filters.CharFilter(widget=forms.HiddenInput())

    autoria__primeiro_autor = django_filters.BooleanFilter(
        required=False,
        label=_('Primeiro Autor'))

    autoria__autor__parlamentar_set__filiacao__partido = django_filters.ModelChoiceFilter(
        queryset=Partido.objects.all(),
        label=_('Matérias por Partido'))

    ementa = django_filters.CharFilter(
        label=_(
            'Pesquisar expressões na ementa'),
        help_text=_(
            '"Para busca no conteúdo das matérias, use a Busca Textual acima"'),
        method='filter_ementa'
    )

    indexacao = django_filters.CharFilter(lookup_expr='icontains',
                                          label=_('Indexação'))

    em_tramitacao = django_filters.ChoiceFilter(required=False,
                                                label='Em tramitação',
                                                choices=CHOICE_TRAMITACAO)

    materiaassunto__assunto = django_filters.ModelChoiceFilter(
        queryset=AssuntoMateria.objects.all(),
        label=_('Assunto'))

    numeracao__numero_materia = django_filters.NumberFilter(
        required=False,
        label=_('Número do processo'))

    signeds = django_filters.ChoiceFilter(
        required=False,
        choices=CHOICE_SIGNEDS,
        label=_('Com Assinatura Digital?'),
        method='filter_signeds')

    o = MateriaPesquisaOrderingFilter(help_text='')

    tipo_listagem = forms.ChoiceField(
        required=False,
        choices=CHOICE_TIPO_LISTAGEM,
        label=_('Tipo da Pesquisa'))

    class Meta(FilterOverridesMetaMixin):
        model = MateriaLegislativa
        fields = ['numero',
                  'numero_protocolo',
                  'numeracao__numero_materia',
                  'ano',
                  'tipo',
                  'data_apresentacao',
                  'data_publicacao',
                  'autoria__autor__tipo',
                  'autoria__primeiro_autor',
                  'autoria__autor__parlamentar_set__filiacao__partido',
                  'relatoria__parlamentar_id',
                  'tramitacao__unidade_tramitacao_destino',
                  'tramitacao__status',
                  'materiaassunto__assunto',
                  'em_tramitacao',
                  'tipo_origem_externa',
                  'numero_origem_externa',
                  'ano_origem_externa',
                  'data_origem_externa',
                  'local_origem_externa',
                  ]

    def filter_signeds(self, queryset, name, value):
        """Filter by presence/absence of digital signatures in metadata."""
        q = Q()

        if not value:
            return queryset

        if value == '1':
            # At least one signature entry exists.
            q &= Q(metadata__signs__texto_original__signs__0__isnull=False)
        else:
            # No signature list, or an empty one.
            q &= (Q(metadata__signs__texto_original__signs__isnull=True) |
                  Q(metadata__signs__texto_original__signs__len=0))

        return queryset.filter(q)

    def filter_numero(self, qs, name, value):
        """Filter by número, stripping thousands separators first."""
        value = value.replace('.', '')
value = value.replace(',', '') if len(value) > 2: qs = qs.filter(numero__icontains=value) else: qs = qs.filter(numero=value) return qs def filter_ementa(self, queryset, name, value): texto = value.split() q = Q() for t in texto: q &= Q(ementa__icontains=t) return queryset.filter(q) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # self.filters['tipo'].label = 'Tipo de Matéria' self.filters[ 'autoria__autor__parlamentar_set__filiacao__partido' ].label = 'Partido do Autor' self.filters['autoria__autor__tipo'].label = _('Tipo de Autor') self.filters['relatoria__parlamentar_id'].label = _('Relatoria') self.filters['tramitacao__unidade_tramitacao_destino'].label = _( 'Unidade de tramitação atual') self.filters['tramitacao__status'].label = _( 'Status da tramitação atual') self.filters['tramitacao__status'].label = _( 'Status da tramitação atual') self.filters['o'].label = _('Ordenação') self.form.fields['tipo_listagem'] = self.tipo_listagem row1 = to_row( [ ('em_tramitacao', 2), ('tipo', 4), ('numero', 3), ('ano', 3), ] ) row2 = to_row( [ ('ementa', 12) ] ) # row2 = to_row( # [ # ('numeracao__numero_materia', 3), # ('numero_protocolo', 3), # ] #) row3 = to_row( [('data_apresentacao', 6), ('data_publicacao', 6)]) row4 = to_row([ ('autoria__autor', 0), (Button('pesquisar', 'Selecionar Autor', css_class='btn btn-secondary btn-sm'), 2), (Button('limpar', 'limpar Autor', css_class='btn btn-secondary btn-sm'), 2), #('autoria__primeiro_autor', 2), ('autoria__autor__tipo', 4), ('autoria__autor__parlamentar_set__filiacao__partido', 4) ]) row6 = to_row( [('relatoria__parlamentar_id', 6), ]) row7 = to_row( [('tramitacao__unidade_tramitacao_destino', 6), ('tramitacao__status', 6), ]) row9 = to_row( [('materiaassunto__assunto', 6), ('indexacao', 6)]) row8 = to_row( [ ('o', 5), ('signeds', 4), ('tipo_listagem', 3) ]) # row10 = to_row([ # ('tipo_origem_externa', 4), # ('numero_origem_externa', 4), # ('ano_origem_externa', 4), # ]) # row11 = to_row([ # 
('data_origem_externa', 8), # ('local_origem_externa', 4) # ]) self.form.helper = SaplFormHelper() self.form.helper.form_method = 'GET' self.form.helper.layout = Layout( Fieldset( _( ''' Pesquisa Parametrizada<br> <small> <strong class="text-red">TODOS OS CAMPOS SÃO OPCIONAIS!</strong> </small> ''' ), row1, to_row([ (row2, 'col'), (form_actions(label=_('Processar Pesquisa')), 'col-md-auto mt-3 pt-3') ]) ), Fieldset( _('Como listar os resultados da pesquisa'), row8, ), # Fieldset(_('Origem externa'), # row10, row11 # ), Fieldset( _('Pesquisa Avançada'), HTML(autor_label), HTML(autor_modal), row4, row3, row7, # row6, # row9, ) ) @property def qs(self): qs = qs_override_django_filter(self) if hasattr(self.form, 'cleaned_data') and self.form.cleaned_data[ 'autoria__autor__parlamentar_set__filiacao__partido']: q_data_inicio_e_fim = Q(data_apresentacao__gte=F( 'autoria__autor__parlamentar_set__filiacao__data'), data_apresentacao__lte=F( 'autoria__autor__parlamentar_set__filiacao__data_desfiliacao')) q_data_inicio = Q( data_apresentacao__gte=F( 'autoria__autor__parlamentar_set__filiacao__data'), autoria__autor__parlamentar_set__filiacao__data_desfiliacao__isnull=True ) qs = qs.filter( q_data_inicio_e_fim | q_data_inicio ) return qs def pega_ultima_tramitacao(): return Tramitacao.objects.values( 'materia_id').annotate(data_encaminhamento=Max( 'data_encaminhamento'), id=Max('id')).values_list('id', flat=True) def filtra_tramitacao_status(status): lista = pega_ultima_tramitacao() return Tramitacao.objects.filter( id__in=lista, status=status).distinct().values_list('materia_id', flat=True) def filtra_tramitacao_destino(destino): lista = pega_ultima_tramitacao() return Tramitacao.objects.filter( id__in=lista, unidade_tramitacao_destino=destino).distinct().values_list( 'materia_id', flat=True) def filtra_tramitacao_destino_and_status(status, destino): lista = pega_ultima_tramitacao() return Tramitacao.objects.filter( id__in=lista, status=status, 
        # (continuation of filtra_tramitacao_destino_and_status() above)
        unidade_tramitacao_destino=destino).distinct().values_list(
            'materia_id', flat=True)


class DespachoInicialCreateForm(forms.Form):
    """Form to create initial dispatches for several comissões at once."""

    comissao = forms.ModelMultipleChoiceField(
        queryset=Comissao.objects.filter(ativa=True),
        widget=forms.CheckboxSelectMultiple(),
        label=Comissao._meta.verbose_name_plural)

    def __init__(self, *args, **kwargs):
        row1 = to_row(
            [('comissao', 12), ])

        self.helper = SaplFormHelper()
        self.helper.form_method = 'POST'
        self.helper.layout = SaplFormLayout(row1)

        super().__init__(*args, **kwargs)

    def clean(self):
        """Require at least one comissão and reject duplicates for the
        materia passed via self.initial['materia']."""
        super().clean()

        comissoes = self.cleaned_data.get('comissao')
        if not comissoes:
            msg = _('Você deve escolher pelo menos uma comissão.')
            raise ValidationError(msg)

        if not self.is_valid():
            return self.cleaned_data

        # Collect every duplicate so all of them are reported together.
        errors = []
        for comissao in comissoes:
            if DespachoInicial.objects.filter(
                materia=self.initial['materia'],
                comissao=comissao,
            ).exists():
                msg = _('Já existe um Despacho cadastrado para %s' % comissao)
                errors.append(msg)

        if errors:
            raise ValidationError(errors)

        return self.cleaned_data


class DespachoInicialForm(ModelForm):
    """Single-comissão variant of the initial dispatch form."""

    comissao = forms.ModelChoiceField(
        queryset=Comissao.objects.filter(ativa=True), label=_('Comissão'))

    class Meta:
        model = DespachoInicial
        fields = ['comissao']

    def clean(self):
        super(DespachoInicialForm, self).clean()

        if not self.is_valid():
            return self.cleaned_data

        # Uniqueness per (materia, comissao), ignoring this instance on edit.
        if DespachoInicial.objects.filter(
            materia=self.instance.materia,
            comissao=self.cleaned_data['comissao'],
        ).exclude(pk=self.instance.pk).exists():
            msg = _('Já existe um Despacho cadastrado para %s' %
                    self.cleaned_data['comissao'])
            raise ValidationError(msg)

        return self.cleaned_data


class AutoriaForm(ModelForm):
    """Form to register a single authorship (Autoria) for a materia."""

    tipo_autor = ModelChoiceField(label=_('Tipo Autor'),
                                  required=True,
                                  queryset=TipoAutor.objects.all(),
                                  empty_label=_('Selecione'),)

    data_relativa = forms.DateField(
        widget=forms.HiddenInput(), required=False)

    logger = logging.getLogger(__name__)

    def __init__(self, *args, **kwargs):
        super(AutoriaForm, self).__init__(*args, **kwargs)
        # (continuation of AutoriaForm.__init__() started above)
        self.fields['primeiro_autor'].required = True

        # Default "primeiro autor" to True only when the materia has no
        # authors yet.
        if 'initial' in kwargs and 'materia' in kwargs['initial']:
            materia = kwargs['initial']['materia']
            self.fields['primeiro_autor'].initial = Autoria.objects.filter(
                materia=materia).count() == 0

        row1 = to_row([('tipo_autor', 4),
                       ('autor', 4),
                       ('primeiro_autor', 4)])

        self.helper = SaplFormHelper()
        self.helper.layout = Layout(
            Fieldset(_('Autoria'),
                     row1, 'data_relativa',
                     form_actions(label='Salvar')))

        # On create, the autor choices are populated client-side after a
        # tipo_autor is picked.
        if not kwargs['instance']:
            self.fields['autor'].choices = []

    class Meta:
        model = Autoria
        fields = ['tipo_autor', 'autor', 'primeiro_autor', 'data_relativa']

    def clean(self):
        """Reject a second Autoria for the same (materia, autor) pair."""
        cd = super(AutoriaForm, self).clean()

        if not self.is_valid():
            return self.cleaned_data

        autorias = Autoria.objects.filter(
            materia=self.instance.materia, autor=cd['autor'])
        pk = self.instance.pk

        if ((not pk and autorias.exists()) or
                (pk and autorias.exclude(pk=pk).exists())):
            self.logger.error(
                "Esse Autor (pk={}) já foi cadastrado.".format(pk))
            raise ValidationError(_('Esse Autor já foi cadastrado.'))

        return cd


class AutoriaMultiCreateForm(Form):
    """Form to register several authors for a materia in one submission."""

    logger = logging.getLogger(__name__)

    tipo_autor = ModelChoiceField(label=_('Tipo Autor'),
                                  required=True,
                                  queryset=TipoAutor.objects.all(),
                                  empty_label=_('Selecione'),)

    data_relativa = forms.DateField(
        widget=forms.HiddenInput(), required=False)

    # Candidates shown as checkboxes (populated client-side).
    autor = ModelMultipleChoiceField(
        queryset=Autor.objects.all(),
        label=_('Possiveis Autores'),
        required=True,
        widget=CheckboxSelectMultiple)

    # Already-registered authors, carried hidden for the view's use.
    autores = ModelMultipleChoiceField(
        queryset=Autor.objects.all(),
        required=False,
        widget=HiddenInput)

    primeiro_autor = forms.ChoiceField(
        required=True,
        choices=YES_NO_CHOICES,
        label="Primeiro Autor?"
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Default "primeiro autor" to True only when no author exists yet.
        if 'initial' in kwargs and 'autores' in kwargs['initial']:
            self.fields['primeiro_autor'].initial = kwargs['initial']['autores'].count(
            ) == 0

        row1 = to_row([('tipo_autor', 10),
                       ('primeiro_autor', 2)])
        row2 = to_row([('autor', 12), ])

        self.helper = SaplFormHelper()
        self.helper.layout = Layout(
            Fieldset(
                _('Autorias'), row1, row2,
                'data_relativa',
                'autores',
                form_actions(label='Incluir Autores Selecionados')))

        # Choices are populated client-side after a tipo_autor is picked.
        self.fields['autor'].choices = []

    def clean(self):
        """Require at least one selected autor; ignore errors on the hidden
        'autores' carrier field."""
        cd = super().clean()

        if 'autores' in self.errors:
            del self.errors['autores']

        if 'autor' not in cd or not cd['autor'].exists():
            self.logger.error(
                "Ao menos um autor deve ser selecionado para inclusão")
            raise ValidationError(
                _('Ao menos um autor deve ser selecionado para inclusão'))

        return cd


class AcessorioEmLoteFilterSet(django_filters.FilterSet):
    """Filter for picking matérias to receive accessory documents in bulk."""

    class Meta(FilterOverridesMetaMixin):
        model = MateriaLegislativa
        fields = ['tipo', 'data_apresentacao']

    def __init__(self, *args, **kwargs):
        super(AcessorioEmLoteFilterSet, self).__init__(*args, **kwargs)

        self.filters['tipo'].label = 'Tipo de Matéria'
        self.filters['data_apresentacao'].label = 'Data (Inicial - Final)'
        self.form.fields['tipo'].required = True
        self.form.fields['data_apresentacao'].required = True

        row1 = to_row([('tipo', 12)])
        row2 = to_row([('data_apresentacao', 12)])

        self.form.helper = SaplFormHelper()
        self.form.helper.form_method = 'GET'
        self.form.helper.layout = Layout(
            Fieldset(_('Documentos Acessórios em Lote'),
                     row1, row2, form_actions(label='Pesquisar')))


class AnexadaEmLoteFilterSet(django_filters.FilterSet):
    """Filter for picking matérias to be attached (anexadas) in bulk."""

    class Meta(FilterOverridesMetaMixin):
        model = MateriaLegislativa
        fields = ['tipo', 'data_apresentacao']

    def __init__(self, *args, **kwargs):
        super(AnexadaEmLoteFilterSet, self).__init__(*args, **kwargs)

        self.filters['tipo'].label = 'Tipo de Matéria'
        self.filters['data_apresentacao'].label = 'Data (Inicial - Final)'

        row1 = to_row([('tipo', 12)])
        row2 =
            to_row([('data_apresentacao', 12)])

        self.form.helper = SaplFormHelper()
        self.form.helper.form_method = 'GET'
        self.form.helper.layout = Layout(
            Fieldset(_('Pesquisa de Matérias'),
                     row1, row2, form_actions(label='Pesquisar')))


class PrimeiraTramitacaoEmLoteFilterSet(django_filters.FilterSet):
    """Filter for selecting matérias that will get their FIRST tramitação."""

    class Meta(FilterOverridesMetaMixin):
        model = MateriaLegislativa
        fields = ['tipo', 'data_apresentacao']

    def __init__(self, *args, **kwargs):
        super(PrimeiraTramitacaoEmLoteFilterSet, self).__init__(
            *args, **kwargs)

        self.filters['tipo'].label = 'Tipo de Matéria'
        self.filters['data_apresentacao'].label = 'Data (Inicial - Final)'
        self.form.fields['tipo'].required = True
        self.form.fields['data_apresentacao'].required = False

        row1 = to_row([('tipo', 12)])
        row2 = to_row([('data_apresentacao', 12)])

        self.form.helper = SaplFormHelper()
        self.form.helper.form_method = 'GET'
        self.form.helper.layout = Layout(
            Fieldset(_('Primeira Tramitação'),
                     row1, row2, form_actions(label='Pesquisar')))


class TramitacaoEmLoteFilterSet(django_filters.FilterSet):
    """Filter for selecting matérias for bulk tramitação, keyed by the
    matérias' current status and destination unit."""

    class Meta(FilterOverridesMetaMixin):
        model = MateriaLegislativa
        fields = ['tipo', 'data_apresentacao',
                  'tramitacao__status',
                  'tramitacao__unidade_tramitacao_destino']

    def __init__(self, *args, **kwargs):
        super(TramitacaoEmLoteFilterSet, self).__init__(
            *args, **kwargs)

        self.filters['tipo'].label = _('Tipo de Matéria')
        self.filters['data_apresentacao'].label = _('Data (Inicial - Final)')
        self.filters['tramitacao__unidade_tramitacao_destino'
                     ].label = _('Unidade Destino (Último Destino)')
        self.filters['tramitacao__status'].label = _('Status')
        self.form.fields['tipo'].required = True
        self.form.fields['data_apresentacao'].required = False
        self.form.fields['tramitacao__status'].required = True
        self.form.fields[
            'tramitacao__unidade_tramitacao_destino'].required = True

        row1 = to_row([
            ('tipo', 4),
            ('tramitacao__unidade_tramitacao_destino', 4),
            ('tramitacao__status', 4)])
        row2 = to_row([('data_apresentacao', 12)])

        self.form.helper = SaplFormHelper()
        # (continuation of TramitacaoEmLoteFilterSet.__init__() above)
        self.form.helper.form_method = 'GET'
        self.form.helper.layout = Layout(
            Fieldset(_('Tramitação em Lote'),
                     row1, row2, form_actions(label=_('Pesquisar'))))


class TipoProposicaoForm(ModelForm):
    """Form for TipoProposicao: binds a proposição type to a concrete
    content type (e.g. a TipoMateriaLegislativa or TipoDocumento) via a
    generic relation driven by content_type + tipo_conteudo_related."""

    logger = logging.getLogger(__name__)

    content_type = forms.ModelChoiceField(
        queryset=ContentType.objects.all(),
        label=TipoProposicao._meta.get_field('content_type').verbose_name,
        required=True,
        help_text=TipoProposicao._meta.get_field('content_type').help_text)

    # Radio options are filled client-side; validation happens in clean().
    tipo_conteudo_related_radio = ChoiceWithoutValidationField(
        label="Seleção de Tipo", required=False,
        widget=forms.RadioSelect())

    # The chosen related object's pk, mirrored from the radio selection.
    tipo_conteudo_related = forms.IntegerField(
        widget=forms.HiddenInput(), required=True)

    class Meta:
        model = TipoProposicao
        fields = ['descricao',
                  'content_type',
                  'tipo_conteudo_related_radio',
                  'tipo_conteudo_related',
                  'perfis',
                  'tipo_autores',
                  'exige_assinatura_digital'
                  ]

        widgets = {'tipo_conteudo_related': forms.HiddenInput(),
                   'perfis': widgets.CheckboxSelectMultiple(),
                   'tipo_autores': widgets.CheckboxSelectMultiple()}

    def __init__(self, *args, **kwargs):
        tipo_select = Fieldset(
            TipoProposicao._meta.verbose_name,
            Row(
                to_column(
                    (
                        Row(
                            to_column(('descricao', 12)),
                            to_column(('exige_assinatura_digital', 12)),
                            to_column(('tipo_autores', 12)),
                            to_column(('perfis', 12)),
                        ), 5
                    )
                ),
                to_column(
                    (
                        Row(
                            to_column(('content_type', 12)),
                            to_column(('tipo_conteudo_related_radio', 12)),
                            to_column(('tipo_conteudo_related', 12)),
                        ), 7
                    )
                ),
            )
        )

        self.helper = SaplFormHelper()
        self.helper.layout = SaplFormLayout(tipo_select)

        super(TipoProposicaoForm, self).__init__(*args, **kwargs)

        # Offer only content types that have a generic relation to
        # TipoProposicao.
        content_types = ContentType.objects.get_for_models(
            *models_with_gr_for_model(TipoProposicao))

        self.fields['content_type'].choices = [
            (ct.pk, ct) for k, ct in content_types.items()]
        # Sort choices by content-type id.
        self.fields['content_type'].choices.sort(key=lambda x: x[0])

        if self.instance.pk:
            self.fields[
                'tipo_conteudo_related'].initial = self.instance.object_id

    def clean(self):
        super(TipoProposicaoForm, self).clean()

        if not self.is_valid():
            # (continuation of TipoProposicaoForm.clean() above)
            return self.cleaned_data

        cd = self.cleaned_data

        if not cd['tipo_autores'].exists():
            raise ValidationError(
                _('O Tipo de Proposição deve ser associado '
                  'a ao menos um Tipo de Autor.'))

        content_type = cd['content_type']

        if 'tipo_conteudo_related' not in cd or not cd[
                'tipo_conteudo_related']:
            self.logger.error("Seleção de Tipo não definida.")
            raise ValidationError(
                _('Seleção de Tipo não definida.'))

        # The hidden pk must point at an existing row of the chosen
        # content type's model.
        if not content_type.model_class().objects.filter(
                pk=cd['tipo_conteudo_related']).exists():
            self.logger.error("O Registro definido (%s) não está na base de %s."
                              % (cd['tipo_conteudo_related'], content_type))
            raise ValidationError(
                _('O Registro definido (%s) não está na base de %s.'
                  ) % (cd['tipo_conteudo_related'], content_type))

        # NOTE (translated from the original Portuguese comment): the
        # uniqueness of proposição type per content type was said to have
        # been disabled, since some institutions converge several
        # proposição types onto one matter type — yet the check below does
        # enforce it; confirm which behavior is intended.
        unique_value = self._meta.model.objects.filter(
            content_type=content_type,
            object_id=cd['tipo_conteudo_related'])

        if self.instance.pk:
            unique_value = unique_value.exclude(pk=self.instance.pk)

        unique_value = unique_value.first()

        if unique_value:
            raise ValidationError(
                _('Já existe um Tipo de Proposição (%s) '
                  'que foi defindo como (%s) para (%s)'
                  ) % (unique_value,
                       content_type,
                       unique_value.tipo_conteudo_related))

        return self.cleaned_data

    @transaction.atomic
    def save(self, commit=False):
        """Wire the generic relation before delegating to ModelForm.save."""
        tipo_proposicao = self.instance

        assert tipo_proposicao.content_type

        tipo_proposicao.tipo_conteudo_related = \
            tipo_proposicao.content_type.model_class(
            ).objects.get(pk=self.cleaned_data['tipo_conteudo_related'])

        return super().save(True)


class TramitacaoEmLoteForm(ModelForm):
    """Form that applies one tramitação to a batch of matérias (the batch
    itself comes from self.initial['materias'] in save())."""

    logger = logging.getLogger(__name__)

    class Meta:
        model = Tramitacao
        fields = ['data_tramitacao',
                  'unidade_tramitacao_local',
                  'status',
                  'urgente',
                  'turno',
                  'unidade_tramitacao_destino',
                  'data_encaminhamento',
                  'data_fim_prazo',
                  'texto',
                  'user',
                  'ip']

        widgets = {'user':
                   forms.HiddenInput(),
                   'ip': forms.HiddenInput()}

    def __init__(self, *args, **kwargs):
        super(TramitacaoEmLoteForm, self).__init__(*args, **kwargs)

        self.fields['data_tramitacao'].initial = timezone.now().date()

        # Order destination choices: active comissões first, then órgãos,
        # then parlamentares.
        ust = UnidadeTramitacao.objects.select_related().all()
        unidade_tramitacao_destino = [('', '---------')] + [(ut.pk, ut)
                                                            for ut in ust if ut.comissao and ut.comissao.ativa]
        unidade_tramitacao_destino.extend(
            [(ut.pk, ut) for ut in ust if ut.orgao])
        unidade_tramitacao_destino.extend(
            [(ut.pk, ut) for ut in ust if ut.parlamentar])
        self.fields['unidade_tramitacao_destino'].choices = unidade_tramitacao_destino
        self.fields['urgente'].label = "Urgente? *"

        row1 = to_row([
            ('data_tramitacao', 4),
            ('data_encaminhamento', 4),
            ('data_fim_prazo', 4)
        ])
        row2 = to_row([
            ('unidade_tramitacao_local', 6),
            ('unidade_tramitacao_destino', 6),
        ])
        row3 = to_row([
            ('status', 4),
            ('urgente', 4),
            ('turno', 4)
        ])
        row4 = to_row([
            ('texto', 12)
        ])

        # Raw HTML rendered inside the crispy layout: a check-all table of
        # the matérias selected for the batch.
        documentos_checkbox_HTML = '''
            <br\><br\><br\>
            <fieldset>
                <legend style="font-size: 24px;">Selecione as matérias para tramitação:</legend>
                <table class="table table-striped table-hover">
                    <div class="controls">
                        <div class="checkbox">
                            <label for="id_check_all">
                                <input type="checkbox" id="id_check_all" onchange="checkAll(this)" /> Marcar/Desmarcar Todos
                            </label>
                        </div>
                    </div>
                    <thead>
                        <tr><th>Matéria</th></tr>
                    </thead>
                    <tbody>
                        {% for materia in object_list %}
                        <tr>
                            <td>
                                <input type="checkbox" name="materias" value="{{materia.id}}" {% if check %} checked {% endif %}/>
                                <a href="{% url 'sapl.materia:materialegislativa_detail' materia.id %}">
                                    {{materia.tipo.sigla}} {{materia.tipo.descricao}} {{materia.numero}}/{{materia.ano}}
                                </a>
                            </td>
                        </tr>
                        {% endfor %}
                    </tbody>
                </table>
            </fieldset>
        '''

        self.helper = SaplFormHelper()
        self.helper.layout = Layout(
            Fieldset(
                'Detalhes da tramitação:',
                row1, row2, row3, row4,
                HTML(documentos_checkbox_HTML),
                form_actions(label='Salvar')
            )
        )

    def clean(self):
        """Validate date ordering for a brand-new tramitação: not in the
        future, and encaminhamento/fim-de-prazo not before tramitação."""
        cleaned_data = super(TramitacaoEmLoteForm, self).clean()

        if not self.is_valid():
            return self.cleaned_data

        if 'data_encaminhamento' in cleaned_data:
            data_enc_form = cleaned_data['data_encaminhamento']
        if 'data_fim_prazo' in cleaned_data:
            data_prazo_form = cleaned_data['data_fim_prazo']
        if 'data_tramitacao' in cleaned_data:
            data_tram_form = cleaned_data['data_tramitacao']

        # Only validate dates when this is a new tramitação.
        if not self.instance.data_tramitacao:

            if cleaned_data['data_tramitacao'] > timezone.now().date():
                self.logger.error('A data de tramitação ({}) deve ser '
                                  'menor ou igual a data de hoje ({})!'
                                  .format(cleaned_data['data_tramitacao'],
                                          timezone.now().date()))
                msg = _(
                    'A data de tramitação deve ser ' +
                    'menor ou igual a data de hoje!')
                raise ValidationError(msg)

            if data_enc_form:
                if data_enc_form < data_tram_form:
                    self.logger.error('A data de encaminhamento ({}) deve ser '
                                      'maior que a data de tramitação ({})!'
                                      .format(data_enc_form, data_tram_form))
                    msg = _('A data de encaminhamento deve ser ' +
                            'maior que a data de tramitação!')
                    raise ValidationError(msg)

            if data_prazo_form:
                if data_prazo_form < data_tram_form:
                    self.logger.error('A data fim de prazo ({}) deve ser '
                                      'maior que a data de tramitação ({})!'
                                      .format(data_prazo_form,
                                              data_tram_form))
                    msg = _('A data fim de prazo deve ser ' +
                            'maior que a data de tramitação!')
                    raise ValidationError(msg)

        return cleaned_data

    @transaction.atomic
    def save(self, commit=True):
        """Create one Tramitacao per selected materia; optionally cascade
        the tramitação onto each materia's attached (anexadas) matérias.

        Returns the last Tramitacao created.
        """
        cd = self.cleaned_data

        # Batch context supplied by the view, not by form fields.
        materias = self.initial['materias']
        user = self.initial['user'] if 'user' in self.initial else None
        ip = self.initial['ip'] if 'ip' in self.initial else ''
        tramitar_anexadas = AppConfig.attr('tramitacao_materia')
        for mat_id in materias:
            mat = MateriaLegislativa.objects.get(id=mat_id)
            tramitacao = Tramitacao.objects.create(
                status=cd['status'],
                materia=mat,
                data_tramitacao=cd['data_tramitacao'],
                unidade_tramitacao_local=cd['unidade_tramitacao_local'],
                unidade_tramitacao_destino=cd['unidade_tramitacao_destino'],
                data_encaminhamento=cd['data_encaminhamento'],
                urgente=cd['urgente'],
                turno=cd['turno'],
                texto=cd['texto'],
                data_fim_prazo=cd['data_fim_prazo'],
                user=user,
                ip=ip
            )
            # Status indicator "F" marks the end of tramitação.
            mat.em_tramitacao = False if tramitacao.status.indicador == "F" else True
            mat.save()

            if tramitar_anexadas:
                lista_tramitacao = []
                anexadas = lista_anexados(mat)
                for ml in anexadas:
                    # Cascade only onto anexadas that never tramitated or
                    # whose last destination matches this local unit.
                    if not ml.tramitacao_set.all() \
                            or ml.tramitacao_set.first() \
                            .unidade_tramitacao_destino == tramitacao.unidade_tramitacao_local:
                        ml.em_tramitacao = False if tramitacao.status.indicador == "F" else True
                        ml.save()
                        lista_tramitacao.append(Tramitacao(
                            status=tramitacao.status,
                            materia=ml,
                            data_tramitacao=tramitacao.data_tramitacao,
                            unidade_tramitacao_local=tramitacao.unidade_tramitacao_local,
                            data_encaminhamento=tramitacao.data_encaminhamento,
                            unidade_tramitacao_destino=tramitacao.unidade_tramitacao_destino,
                            urgente=tramitacao.urgente,
                            turno=tramitacao.turno,
                            texto=tramitacao.texto,
                            data_fim_prazo=tramitacao.data_fim_prazo,
                            user=tramitacao.user,
                            ip=tramitacao.ip
                        ))
                Tramitacao.objects.bulk_create(lista_tramitacao)

        return tramitacao


class ProposicaoForm(FileFieldCheckMixin, forms.ModelForm):
    """Form used by authors to create/edit a Proposição, optionally linking
    it to an existing materia or to another not-yet-received proposição."""

    logger = logging.getLogger(__name__)

    TIPO_TEXTO_CHOICE = [
        ('D', _('Arquivo Digital')),
        ('T',
         _('Texto Articulado'))
    ]

    tipo_materia = forms.ModelChoiceField(
        label=TipoMateriaLegislativa._meta.verbose_name,
        required=False,
        queryset=TipoMateriaLegislativa.objects.all(),
        empty_label='Selecione')

    numero_materia = forms.CharField(
        label='Número', required=False)

    ano_materia = forms.CharField(
        label='Ano', required=False)

    # Number/year of another proposição of the same author to link to.
    vinculo_numero = forms.CharField(
        label='Número', required=False,)

    vinculo_ano = forms.CharField(
        label='Ano', required=False)

    tipo_texto = forms.ChoiceField(
        label=_('Tipo do Texto da Proposição'),
        required=False,
        choices=TIPO_TEXTO_CHOICE,
        widget=widgets.RadioSelect())

    materia_de_vinculo = forms.ModelChoiceField(
        queryset=MateriaLegislativa.objects.all(),
        widget=widgets.HiddenInput(),
        required=False)

    proposicao_vinculada = forms.ModelChoiceField(
        queryset=Proposicao.objects.all(),
        widget=widgets.HiddenInput(),
        required=False)

    receber_recibo = forms.TypedChoiceField(
        choices=YES_NO_CHOICES,
        widget=widgets.HiddenInput(),
        required=False)

    numero_materia_futuro = forms.IntegerField(
        label='Número (Opcional)', required=False)

    especie = forms.ModelChoiceField(
        queryset=ContentType.objects.all(),
        label=_('Espécie da Proposição'),
        required=True)

    tipo = forms.ModelChoiceField(
        queryset=TipoProposicao.objects.all(),
        label=_('Tipo da Proposição'),
        required=True,
        help_text=_('<i class="text-red">NAD - Necessário Assinatura Digital</i>'))

    class Meta:
        model = Proposicao
        fields = ['tipo',
                  'receber_recibo',
                  'descricao',
                  'observacao',
                  'texto_original',
                  'materia_de_vinculo',
                  'proposicao_vinculada',

                  'tipo_materia',
                  'numero_materia',
                  'ano_materia',

                  'vinculo_numero',
                  'vinculo_ano',

                  'tipo_texto',
                  'hash_code',
                  'numero_materia_futuro',
                  'user',
                  'ip',
                  'ultima_edicao',
                  'especie']

        widgets = {
            'descricao': widgets.Textarea(attrs={'rows': 4}),
            'hash_code': forms.HiddenInput(),
            'user': forms.HiddenInput(),
            'ip': forms.HiddenInput(),
            'ultima_edicao': forms.HiddenInput()
        }

    def __init__(self, *args, **kwargs):
        """Build the layout, toggling fields on AppConfig switches."""
        self.texto_articulado_proposicao = AppConfig.attr(
            'texto_articulado_proposicao')

        self.receber_recibo = AppConfig.attr(
            'receber_recibo_proposicao')

        # NOTE(review): Meta.fields is mutated in place here, so these
        # toggles are effectively process-wide, not per-instance.
        if not self.texto_articulado_proposicao:
            if 'tipo_texto' in self._meta.fields:
                self._meta.fields.remove('tipo_texto')
        else:
            if 'tipo_texto' not in self._meta.fields:
                self._meta.fields.append('tipo_texto')

        fields = [
            to_row([
                ('especie', 5),
                ('tipo', 7)]
            ),
            to_row([
                ('descricao', 12),
                ('observacao', 12)
            ])
        ]

        if AppConfig.objects.last().escolher_numero_materia_proposicao:
            fields.append(to_column(('numero_materia_futuro', 12)),)
        else:
            if 'numero_materia_futuro' in self._meta.fields:
                self._meta.fields.remove('numero_materia_futuro')

        if self.texto_articulado_proposicao:
            fields.append(
                to_column((InlineRadios('tipo_texto'), 5)),)

        fields.append(to_column((
            'texto_original', 7 if self.texto_articulado_proposicao else 12)))

        fields.append(
            Div(
                to_row([
                    (
                        Fieldset(_('Víncular a Proposição ainda não recebida')), 12),
                    (
                        HTML(
                            '<small class="form-text text-muted">Esta proposição é parte de outra de sua própria autoria? '
                            'Exemplo: Você está está registrando um '
                            'documento acessório de uma proposição que '
                            'ainda não foi recebida pelo protocolo, '
                            'informe aqui que proposição é essa! (Caso a proposição já tenha sido recebida pelo protocolo, o sistema fará vínculo automatícamente com a matéria, e não com a proposição.</small>'
                        ), 12),
                    (
                        Div(
                            to_row(
                                [
                                    ('vinculo_numero', 6),
                                    ('vinculo_ano', 6),
                                    (
                                        Alert(
                                            '',
                                            css_class="ementa_proposicao hidden alert-info",
                                            dismiss=False
                                        ), 12
                                    )
                                ]
                            ),
                        ), 8
                    ),
                ]),
                css_id='vinculo_proposicao'
            )
        )

        fields.append(
            Div(
                to_row([
                    (Fieldset(_('Víncular a uma Matéria Legislativa')), 12),
                    (
                        HTML(
                            '<small class="form-text text-muted">Colabore com o protocolo informando que esta '
                            'proposição se trata de uma matéria anexada a outra. '
                            'Exemplo: Você está criando uma proposição que é uma emenda, '
                            'então informe aqui de que projeto é essa emenda.</small>'), 12),
                    (
                        Div(
                            to_row(
                                [
                                    ('tipo_materia', 6),
                                    ('numero_materia', 3),
                                    ('ano_materia', 3),
                                    (
                                        Alert(
                                            '',
                                            css_class="ementa_materia hidden alert-info",
                                            dismiss=False
                                        ), 12
                                    )
                                ]
                            ),
                        ), 12
                    ),
                ]),
                css_id="vinculo_materia"
            )
        )

        self.helper = SaplFormHelper()
        self.helper.layout = SaplFormLayout(*fields)

        super(ProposicaoForm, self).__init__(*args, **kwargs)

        # Offer only content types with a generic relation to TipoProposicao.
        content_types = ContentType.objects.get_for_models(
            *models_with_gr_for_model(TipoProposicao))

        self.fields['especie'].choices = [
            (ct.pk, ct) for k, ct in content_types.items()]
        # Sort choices by content-type id.
        self.fields['especie'].choices.sort(key=lambda x: x[0])

        if self.instance.pk:
            self.fields['especie'].initial = self.instance.tipo.content_type_id
            self.fields['tipo_texto'].initial = ''
            if self.instance.texto_original:
                self.fields['tipo_texto'].initial = 'D'
            if self.texto_articulado_proposicao:
                if self.instance.texto_articulado.exists():
                    self.fields['tipo_texto'].initial = 'T'

            if self.instance.materia_de_vinculo:
                self.fields[
                    'tipo_materia'
                ].initial = self.instance.materia_de_vinculo.tipo
                self.fields[
                    'numero_materia'
                ].initial = self.instance.materia_de_vinculo.numero
                self.fields[
                    'ano_materia'
                ].initial = self.instance.materia_de_vinculo.ano

            if self.instance.proposicao_vinculada:
                self.fields[
                    'vinculo_numero'
                ].initial = self.instance.proposicao_vinculada.numero_proposicao
                # Year comes from data_envio when sent, otherwise from the
                # last-edit timestamp.
                self.fields[
                    'vinculo_ano'
                ].initial = self.instance.proposicao_vinculada.data_envio.year \
                    if self.instance.proposicao_vinculada.data_envio else \
                    self.instance.proposicao_vinculada.ultima_edicao.year

    def clean_texto_original(self):
        """Reject uploads bigger than MAX_DOC_UPLOAD_SIZE."""
        texto_original = self.cleaned_data.get('texto_original', False)
        if texto_original and texto_original.size > MAX_DOC_UPLOAD_SIZE:
            raise ValidationError("O arquivo Texto Original deve ser menor que {0:.1f} mb, o tamanho atual desse arquivo é {1:.1f} mb"
                                  .format((MAX_DOC_UPLOAD_SIZE / 1024) / 1024,
                                          (texto_original.size / 1024) / 1024))
        return texto_original

    def gerar_hash(self, inst, receber_recibo):
        """Save `inst` and compute its verification hash_code.

        When the instituição issues receipts (receber_recibo), no hash is
        stored; otherwise the hash comes from the uploaded file or from the
        texto articulado.
        """
        inst.save()
        if receber_recibo == True:
            inst.hash_code = ''
        else:
            if inst.texto_original:
                inst.hash_code = gerar_hash_arquivo(
                    inst.texto_original.path, str(inst.pk))
            elif inst.texto_articulado.exists():
                ta = inst.texto_articulado.first()
                inst.hash_code = 'P' + ta.hash() + SEPARADOR_HASH_PROPOSICAO + str(inst.pk)

    def clean(self):
        """Validate the optional future número and resolve the materia or
        proposição this proposição is linked to."""
        super(ProposicaoForm, self).clean()

        if not self.is_valid():
            return self.cleaned_data

        cd = self.cleaned_data

        tm, am, nm = (cd.get('tipo_materia', ''),
                      cd.get('ano_materia', ''),
                      cd.get('numero_materia', ''))

        # A requested future número must not collide with an existing
        # materia of the same type for the current year.
        if cd['numero_materia_futuro'] and \
                'tipo' in cd and \
                MateriaLegislativa.objects.filter(tipo=cd['tipo'].tipo_conteudo_related,
                                                  ano=timezone.now().year,
                                                  numero=cd['numero_materia_futuro']):
            raise ValidationError(_("A matéria {} {}/{} já existe.".format(cd['tipo'].tipo_conteudo_related.descricao,
                                                                           cd['numero_materia_futuro'],
                                                                           timezone.now().year)))

        if tm and am and nm:
            try:
                self.logger.debug("Tentando obter objeto MateriaLegislativa (tipo_id={}, ano={}, numero={})."
                                  .format(tm, am, nm))
                materia_de_vinculo = MateriaLegislativa.objects.get(
                    tipo_id=tm,
                    ano=am,
                    numero=nm
                )
            except ObjectDoesNotExist:
                self.logger.error("Objeto MateriaLegislativa vinculada (tipo_id={}, ano={}, numero={}) não existe!"
                                  .format(tm, am, nm))
                raise ValidationError(_('Matéria Vinculada não existe!'))
            else:
                self.logger.info("MateriaLegislativa vinculada (tipo_id={}, ano={}, numero={}) com sucesso."
                                 .format(tm, am, nm))
                cd['materia_de_vinculo'] = materia_de_vinculo

        vn, va = (cd.get('vinculo_numero', ''),
                  cd.get('vinculo_ano', ''))

        if vn and va:
            # Linking to a materia and to a proposição are mutually
            # exclusive.
            if cd['materia_de_vinculo']:
                raise ValidationError(
                    _('Não é possível vincular a uma proposição e a uma matéria ao mesmo tempo!'))
            self.logger.debug("Tentando obter objeto Proposição (numero={}, ano={})."
                              .format(vn, va))

            # Match either a sent proposição (by data_envio year) or an
            # unsent one (by ultima_edicao year), owned by the current user.
            q = Q(
                autor=self.initial['user'].autor_set.first(),
                numero_proposicao=vn,
                data_envio__year=va
            ) | Q(
                autor=self.initial['user'].autor_set.first(),
                numero_proposicao=vn,
                data_envio__isnull=True,
                ultima_edicao__year=va
            )
            proposicao_vinculada = Proposicao.objects.filter(q).first()

            if not proposicao_vinculada:
                raise ValidationError(_('Proposição Vinculada não existe!'))
            else:
                self.logger.info("Proposição vinculada (ano={}, numero={}) com sucesso."
                                 .format(va, vn))
                # If the linked proposição has already been received (it
                # generated content), link to the materia instead.
                if not proposicao_vinculada.conteudo_gerado_related:
                    cd['proposicao_vinculada'] = proposicao_vinculada
                else:
                    cd['materia_de_vinculo'] = proposicao_vinculada.conteudo_gerado_related

        return cd

    def save(self, commit=True):
        """Save the proposição; on create, assign ano and the next
        numero_proposicao per the configured numbering sequence."""
        cd = self.cleaned_data
        inst = self.instance

        receber_recibo = AppConfig.objects.last().receber_recibo_proposicao

        if inst.pk:
            # Editing: keep file vs. texto-articulado storage consistent
            # with the chosen tipo_texto.
            if 'tipo_texto' in cd:

                if cd['tipo_texto'] == 'T' and inst.texto_original:
                    inst.texto_original.delete()

                elif cd['tipo_texto'] != 'T':
                    inst.texto_articulado.all().delete()

                if 'texto_original' in cd and\
                        not cd['texto_original'] and \
                        inst.texto_original:
                    inst.texto_original.delete()
            self.gerar_hash(inst, receber_recibo)

            return super().save(commit)

        inst.ano = timezone.now().year
        # 'A': number per author; 'B': number per year (all authors).
        sequencia_numeracao = AppConfig.attr('sequencia_numeracao_proposicao')
        if sequencia_numeracao == 'A':
            numero__max = Proposicao.objects.filter(
                autor=inst.autor,
                ano=timezone.now().year).aggregate(Max('numero_proposicao'))
        elif sequencia_numeracao == 'B':
            numero__max = Proposicao.objects.filter(
                ano=timezone.now().year).aggregate(Max('numero_proposicao'))
        # NOTE(review): any other sequencia_numeracao value would leave
        # numero__max unbound (NameError) — confirm 'A'/'B' are the only
        # configurable values.
        numero__max = numero__max['numero_proposicao__max']
        inst.numero_proposicao = (
            numero__max + 1) if numero__max else 1

        self.gerar_hash(inst, receber_recibo)

        inst.save()

        return inst


class DevolverProposicaoForm(forms.ModelForm):
    """Form used by the protocolo to return (devolver) a proposição to its
    author with a justification."""

    justificativa_devolucao = forms.CharField(
        required=False, widget=widgets.Textarea(attrs={'rows': 5}))

    logger = logging.getLogger(__name__)

    class Meta:
        model = Proposicao
        fields = [
            'justificativa_devolucao',
'observacao', ] def __init__(self, *args, **kwargs): # esta chamada isola o __init__ de ProposicaoForm super(DevolverProposicaoForm, self).__init__(*args, **kwargs) fields = [] fields.append( Fieldset( _('Registro de Devolução'), to_column(('justificativa_devolucao', 12)), to_column(('observacao', 12)), to_column( (form_actions(label=_('Devolver'), name='devolver', css_class='btn-danger float-right'), 12) ) ) ) self.helper = SaplFormHelper() self.helper.layout = Layout(*fields) def clean(self): super(DevolverProposicaoForm, self).clean() if not self.is_valid(): return self.cleaned_data cd = self.cleaned_data if 'justificativa_devolucao' not in cd or\ not cd['justificativa_devolucao']: # TODO Implementar notificação ao autor por email self.logger.error("Adicione uma Justificativa para devolução.") raise ValidationError( _('Adicione uma Justificativa para devolução.')) return cd @transaction.atomic def save(self, commit=False): cd = self.cleaned_data self.instance.data_devolucao = timezone.now() self.instance.data_recebimento = None self.instance.data_envio = None self.instance.save() if self.instance.texto_articulado.exists(): ta = self.instance.texto_articulado.first() ta.privacidade = STATUS_TA_PRIVATE ta.editing_locked = False ta.save() self.instance.results = { 'messages': { 'success': [_('Devolução efetuada com sucesso.'), ] }, 'url': reverse('sapl.materia:receber-proposicao') } return self.instance class ConfirmarProposicaoForm(ProposicaoForm): tipo_readonly = forms.CharField( label=TipoProposicao._meta.verbose_name, required=False, widget=widgets.TextInput( attrs={'readonly': 'readonly'})) autor_readonly = forms.CharField( label=Autor._meta.verbose_name, required=False, widget=widgets.TextInput( attrs={'readonly': 'readonly'})) regime_tramitacao = forms.ModelChoiceField(label="Regime de tramitação", required=False, queryset=RegimeTramitacao.objects.all()) gerar_protocolo = forms.ChoiceField( required=False, label=_( 'Gerar Protocolo na incorporação?'), 
choices=YES_NO_CHOICES, widget=widgets.RadioSelect()) numero_de_paginas = forms.IntegerField(required=False, min_value=0, label=_('Número de Páginas'),) class Meta: model = Proposicao fields = [ 'data_envio', 'descricao', 'observacao', 'gerar_protocolo', 'numero_de_paginas', 'numero_materia_futuro' ] widgets = { 'descricao': widgets.Textarea( attrs={'readonly': 'readonly', 'rows': 4}), 'data_envio': widgets.DateTimeInput( attrs={'readonly': 'readonly'}), } def __init__(self, *args, **kwargs): self.proposicao_incorporacao_obrigatoria = \ AppConfig.attr('proposicao_incorporacao_obrigatoria') if self.proposicao_incorporacao_obrigatoria != 'C': if 'gerar_protocolo' in self._meta.fields: self._meta.fields.remove('gerar_protocolo') else: if 'gerar_protocolo' not in self._meta.fields: self._meta.fields.append('gerar_protocolo') if self.proposicao_incorporacao_obrigatoria == 'N': if 'numero_de_paginas' in self._meta.fields: self._meta.fields.remove('numero_de_paginas') else: if 'numero_de_paginas' not in self._meta.fields: self._meta.fields.append('numero_de_paginas') self.instance = kwargs.get('instance', None) if not self.instance: self.logger.error("Erro na Busca por proposição a incorporar") raise ValueError(_('Erro na Busca por proposição a incorporar')) if self.instance.tipo.content_type.model_class() == TipoDocumento: if 'numero_de_paginas' in self._meta.fields: self._meta.fields.remove('numero_de_paginas') if 'gerar_protocolo' in self._meta.fields: self._meta.fields.remove('gerar_protocolo') if 'regime_tramitacao' in self._meta.fields: self._meta.fields.remove('regime_tramitacao') super(ProposicaoForm, self).__init__(*args, **kwargs) if self.instance.tipo.content_type.model_class() == \ TipoMateriaLegislativa: self.fields['regime_tramitacao'].required = True self.fields['especie'].required = False self.fields['tipo'].required = False fields = [ Fieldset( _('Dados Básicos'), to_row( [ ('tipo_readonly', 5), ('data_envio', 3), ('autor_readonly', 4), 
('numero_materia_futuro', 3), ('descricao', 12), ('observacao', 12) ] ) ) ] if not AppConfig.objects.last().escolher_numero_materia_proposicao or \ not self.instance.numero_materia_futuro: if 'numero_materia_futuro' in self._meta.fields: del fields[0][0][3] fields.append( Div( to_row([ (Fieldset(_('Víncular a uma Matéria Legislativa')), 12), ( HTML( '''<small class="form-text text-muted"> O Autor da proposição vinculou esta proposição a uma Matéria Legislativa. Verifique se está correto para prosseguir com a Incorporação.</small> ''' if self.instance.materia_de_vinculo else ''' <small class="form-text text-muted"> Você pode fazer a anexação diretamente aqui na incorporaçao, basta informar a qual matéria legislativa deseja incorporar.</small> ''' ), 12), ( Div( to_row( [ ('tipo_materia', 6), ('numero_materia', 3), ('ano_materia', 3), ( Alert( '', css_class="ementa_materia hidden alert-info", dismiss=False ), 12 ) ] ), ), 12 ), ]), css_id="vinculo_materia" ) ) itens_incorporacao = [] if self.instance.tipo.content_type.model_class() == \ TipoMateriaLegislativa: itens_incorporacao = [to_column(('regime_tramitacao', 4))] if self.proposicao_incorporacao_obrigatoria == 'C': itens_incorporacao.append(to_column((InlineRadios( 'gerar_protocolo'), 4))) if self.proposicao_incorporacao_obrigatoria != 'N': itens_incorporacao.append(to_column(('numero_de_paginas', 4))) itens_incorporacao.append( to_column( (form_actions(label=_('Incorporar'), name='incorporar'), 12) ) ) fields.append( Fieldset(_('Registro de Incorporação'), Row(*itens_incorporacao))) self.helper = SaplFormHelper() self.helper.layout = Layout(*fields) self.fields['tipo_readonly'].initial = self.instance.tipo.descricao self.fields['autor_readonly'].initial = str(self.instance.autor) if self.instance.numero_materia_futuro: self.fields['numero_materia_futuro'].initial = self.instance.numero_materia_futuro if self.instance.materia_de_vinculo: self.fields[ 'tipo_materia' ].initial = 
self.instance.materia_de_vinculo.tipo self.fields[ 'numero_materia' ].initial = self.instance.materia_de_vinculo.numero self.fields[ 'ano_materia' ].initial = self.instance.materia_de_vinculo.ano if self.instance.proposicao_vinculada: self.fields[ 'vinculo_numero' ].initial = self.instance.proposicao_vinculada.numero_proposicao self.fields[ 'vinculo_ano' ].initial = self.instance.proposicao_vinculada.ano if self.proposicao_incorporacao_obrigatoria == 'C': self.fields['gerar_protocolo'].initial = True def clean(self): super(ConfirmarProposicaoForm, self).clean() if not self.is_valid(): return self.cleaned_data numeracao = AppConfig.attr('sequencia_numeracao_proposicao') if not numeracao: self.logger.error("A sequência de numeração (por ano ou geral)" " não foi configurada para a aplicação em " "tabelas auxiliares") raise ValidationError("A sequência de numeração (por ano ou geral)" " não foi configurada para a aplicação em " "tabelas auxiliares") cd = ProposicaoForm.clean(self) if self.instance.tipo.content_type.model_class() == \ TipoMateriaLegislativa: if 'regime_tramitacao' not in cd or\ not cd['regime_tramitacao']: self.logger.error("Regime de Tramitação deve ser informado.") raise ValidationError( _('Regime de Tramitação deve ser informado.')) elif self.instance.tipo.content_type.model_class( ) == TipoDocumento and not cd['materia_de_vinculo']: self.logger.error("Documentos não podem ser incorporados sem definir " "para qual Matéria Legislativa ele se destina.") raise ValidationError( _('Documentos não podem ser incorporados sem definir ' 'para qual Matéria Legislativa ele se destina.')) return cd @transaction.atomic def save(self, commit=False): # TODO Implementar workflow entre protocolo e autores cd = self.cleaned_data self.instance.justificativa_devolucao = '' self.instance.data_devolucao = None self.instance.data_recebimento = timezone.now() self.instance.materia_de_vinculo = cd['materia_de_vinculo'] if self.instance.texto_articulado.exists(): ta = 
self.instance.texto_articulado.first() ta.privacidade = STATUS_TA_IMMUTABLE_PUBLIC ta.editing_locked = True ta.save() self.instance.save() """ TipoProposicao possui conteúdo genérico para a modelegam de tipos relacionados e, a esta modelagem, qual o objeto que está associado. Porem, cada registro a ser gerado pode possuir uma estrutura diferente, é os casos básicos já implementados, TipoDocumento e TipoMateriaLegislativa, que são modelos utilizados em DocumentoAcessorio e MateriaLegislativa geradas, por sua vez a partir de uma Proposição. Portanto para estas duas e para outras implementações que possam surgir possuindo com matéria prima uma Proposição, dada sua estrutura, deverá contar também com uma implementação particular aqui no código abaixo. """ self.instance.results = { 'messages': { 'success': [_('Proposição incorporada com sucesso'), ] }, 'url': reverse('sapl.materia:receber-proposicao') } proposicao = self.instance conteudo_gerado = None if self.instance.tipo.content_type.model_class( ) == TipoMateriaLegislativa: numeracao = None try: self.logger.debug( "Tentando obter modelo de sequência de numeração.") numeracao = AppConfig.objects.last( ).sequencia_numeracao_protocolo except AttributeError as e: self.logger.error("Erro ao obter modelo. 
" + str(e)) pass tipo = self.instance.tipo.tipo_conteudo_related if tipo.sequencia_numeracao: numeracao = tipo.sequencia_numeracao ano = timezone.now().year if numeracao == 'A': numero = MateriaLegislativa.objects.filter( ano=ano, tipo=tipo).aggregate(Max('numero')) elif numeracao == 'L': legislatura = Legislatura.objects.filter( data_inicio__year__lte=ano, data_fim__year__gte=ano).first() data_inicio = legislatura.data_inicio data_fim = legislatura.data_fim numero = MateriaLegislativa.objects.filter( data_apresentacao__gte=data_inicio, data_apresentacao__lte=data_fim, tipo=tipo).aggregate( Max('numero')) elif numeracao == 'U': numero = MateriaLegislativa.objects.filter( tipo=tipo).aggregate(Max('numero')) if numeracao is None: numero['numero__max'] = 0 if cd['numero_materia_futuro'] and not MateriaLegislativa.objects.filter(tipo=tipo, ano=ano, numero=cd['numero_materia_futuro']): max_numero = cd['numero_materia_futuro'] else: max_numero = numero['numero__max'] + \ 1 if numero['numero__max'] else 1 # dados básicos materia = MateriaLegislativa() materia.numero = max_numero materia.tipo = tipo materia.ementa = proposicao.descricao materia.ano = ano materia.data_apresentacao = timezone.now() materia.em_tramitacao = True materia.regime_tramitacao = cd['regime_tramitacao'] if proposicao.texto_original: materia.texto_original = File( proposicao.texto_original, os.path.basename(proposicao.texto_original.path)) materia.save() materia.save() conteudo_gerado = materia if proposicao.texto_articulado.exists(): ta = proposicao.texto_articulado.first() ta_materia = ta.clone_for(materia) ta_materia.editing_locked = True ta_materia.privacidade = STATUS_TA_IMMUTABLE_PUBLIC ta_materia.save() self.instance.results['messages']['success'].append(_( 'Matéria Legislativa registrada com sucesso (%s)' ) % str(materia)) # autoria autoria = Autoria() autoria.autor = proposicao.autor autoria.materia = materia autoria.primeiro_autor = True autoria.save() try: if 
isinstance(autoria.autor.autor_related, Parlamentar): signs = list( map(lambda s: s[0], self.instance.metadata['signs']['texto_original']['signs'] ) ) parlamentares = Parlamentar.objects.filter( nome_completo__in=signs ).exclude( pk=autoria.autor.autor_related.id ) for p in parlamentares: autoria = Autoria() autoria.autor = p.autor.first() autoria.materia = materia autoria.primeiro_autor = True autoria.save() except Exception as e: self.logger.debug( f"Erro no Registro de multiplas autorias. Proposicao id={proposicao.id}") autores = materia.autores.all() self.instance.results['messages']['success'].append(_( 'Autoria registrada para (%s)' ) % ', '.join(map(lambda a: a.nome, autores))) # Matéria de vinlculo if proposicao.materia_de_vinculo: anexada = Anexada() anexada.materia_principal = proposicao.materia_de_vinculo anexada.materia_anexada = materia anexada.data_anexacao = timezone.now() anexada.save() self.instance.results['messages']['success'].append(_( 'Matéria anexada a (%s)' ) % str(anexada.materia_principal)) self.instance.results['url'] = reverse( 'sapl.materia:materialegislativa_detail', kwargs={'pk': materia.pk}) if proposicao.proposicao_vinculada_set.exists(): vinculos = proposicao.proposicao_vinculada_set.all() for v in vinculos: v.materia_de_vinculo = materia v.proposicao_vinculada = None v.save() pass elif self.instance.tipo.content_type.model_class() == TipoDocumento: # dados básicos doc = DocumentoAcessorio() doc.materia = proposicao.materia_de_vinculo doc.autor = str(proposicao.autor) doc.tipo = proposicao.tipo.tipo_conteudo_related doc.ementa = proposicao.descricao """ FIXME verificar questão de nome e data de documento, doc acessório. Possivelmente pode possuir data anterior a data de envio e/ou recebimento dada a incorporação. 
""" doc.nome = str(proposicao.tipo.tipo_conteudo_related)[:30] doc.data = proposicao.data_envio doc.arquivo = proposicao.texto_original = File( proposicao.texto_original, os.path.basename(proposicao.texto_original.path)) doc.save() doc.save() conteudo_gerado = doc self.instance.results['messages']['success'].append(_( 'Documento Acessório registrado com sucesso e anexado (%s)' ) % str(doc.materia)) self.instance.results['url'] = reverse( 'sapl.materia:documentoacessorio_detail', kwargs={'pk': doc.pk}) if proposicao.proposicao_vinculada_set.exists(): vinculos = proposicao.proposicao_vinculada_set.all() for v in vinculos: v.materia_de_vinculo = doc.materia v.proposicao_vinculada = None v.save() proposicao.conteudo_gerado_related = conteudo_gerado proposicao.save() proposicao.save() # if self.instance.tipo.content_type.model_class() == TipoDocumento: # return self.instance # Nunca gerar protocolo if self.proposicao_incorporacao_obrigatoria == 'N': return self.instance # ocorre se proposicao_incorporacao_obrigatoria == 'C' (condicional) # and gerar_protocolo == False if 'gerar_protocolo' not in cd or cd['gerar_protocolo'] == 'False': return self.instance # resta a opção proposicao_incorporacao_obrigatoria == 'C' # and gerar_protocolo == True # ou, proposicao_incorporacao_obrigatoria == 'O' # que são idênticas. 
""" apesar de TipoProposicao estar com conteudo e tipo conteudo genérico, aqui na incorporação de proposições, para gerar protocolo, cada caso possível de conteudo em tipo de proposição deverá ser tratado isoladamente justamente por Protocolo não estar generalizado com GenericForeignKey """ numeracao = AppConfig.attr('sequencia_numeracao_protocolo') if numeracao == 'A': nm = Protocolo.objects.filter( ano=timezone.now().year).aggregate(Max('numero')) elif numeracao == 'L': legislatura = Legislatura.objects.filter( data_inicio__year__lte=timezone.now().year, data_fim__year__gte=timezone.now().year).first() data_inicio = legislatura.data_inicio data_fim = legislatura.data_fim nm = MateriaLegislativa.objects.filter( data_apresentacao__gte=data_inicio, data_apresentacao__lte=data_fim, tipo=tipo).aggregate(Max('numero')) else: # numeracao == 'U' ou não informada nm = Protocolo.objects.all().aggregate(Max('numero')) protocolo = Protocolo() protocolo.numero = (nm['numero__max'] + 1) if nm['numero__max'] else 1 protocolo.ano = timezone.now().year protocolo.tipo_protocolo = '1' protocolo.interessado = str(proposicao.autor)[ :200] # tamanho máximo 200 protocolo.autor = proposicao.autor protocolo.assunto_ementa = proposicao.descricao protocolo.numero_paginas = cd['numero_de_paginas'] protocolo.anulado = False protocolo.tipo_conteudo_protocolado = proposicao.tipo.tipo_conteudo_related protocolo.conteudo_protocolado = conteudo_gerado protocolo.tipo_processo = '0' if self.instance.tipo.content_type.model_class( ) in (TipoMateriaLegislativa, TipoDocumento): # protocolo.tipo_materia = proposicao.tipo.tipo_conteudo_related protocolo.tipo_processo = '1' protocolo.save() self.instance.results['messages']['success'].append(_( 'Protocolo realizado com sucesso')) self.instance.results['url'] = reverse( 'sapl.protocoloadm:protocolo_mostrar', kwargs={'pk': protocolo.pk}) conteudo_gerado.numero_protocolo = protocolo.numero conteudo_gerado.save() return self.instance class 
MateriaAssuntoForm(ModelForm): class Meta: model = MateriaAssunto fields = ['materia', 'assunto'] widgets = {'materia': forms.HiddenInput()} class EtiquetaPesquisaForm(forms.Form): logger = logging.getLogger(__name__) tipo_materia = forms.ModelChoiceField( label=TipoMateriaLegislativa._meta.verbose_name, queryset=TipoMateriaLegislativa.objects.all(), required=False, empty_label='Selecione') data_inicial = forms.DateField( label='Data Inicial', required=False, widget=forms.DateInput(format='%d/%m/%Y') ) data_final = forms.DateField( label='Data Final', required=False, widget=forms.DateInput(format='%d/%m/%Y') ) processo_inicial = forms.IntegerField( label='Processo Inicial', required=False) processo_final = forms.IntegerField( label='Processo Final', required=False) def __init__(self, *args, **kwargs): super(EtiquetaPesquisaForm, self).__init__(*args, **kwargs) row1 = to_row( [('tipo_materia', 6), ('data_inicial', 3), ('data_final', 3)]) row2 = to_row( [('processo_inicial', 6), ('processo_final', 6)]) self.helper = SaplFormHelper() self.helper.layout = Layout( Fieldset( ('Formulário de Etiqueta'), row1, row2, form_actions(label='Pesquisar') ) ) def clean(self): super(EtiquetaPesquisaForm, self).clean() if not self.is_valid(): return self.cleaned_data cleaned_data = self.cleaned_data # Verifica se algum campo de data foi preenchido if cleaned_data['data_inicial'] or cleaned_data['data_final']: # Então verifica se o usuário preencheu o Incial e mas não # preencheu o Final, ou vice-versa if (not cleaned_data['data_inicial'] or not cleaned_data['data_final']): self.logger.error("Caso pesquise por data, os campos de Data Incial e " "Data Final devem ser preenchidos obrigatoriamente") raise ValidationError(_( 'Caso pesquise por data, os campos de Data Incial e ' + 'Data Final devem ser preenchidos obrigatoriamente')) # Caso tenha preenchido, verifica se a data final é maior que # a inicial elif cleaned_data['data_final'] < cleaned_data['data_inicial']: 
self.logger.error("A Data Final ({}) não pode ser menor que a Data Inicial({})." .format(cleaned_data['data_final'], cleaned_data['data_inicial'])) raise ValidationError(_( 'A Data Final não pode ser menor que a Data Inicial')) # O mesmo processo anterior é feito com o processo if (cleaned_data['processo_inicial'] or cleaned_data['processo_final']): if (not cleaned_data['processo_inicial'] or not cleaned_data['processo_final']): self.logger.error("Caso pesquise por número de processo, os campos de " "Processo Inicial e Processo Final " "devem ser preenchidos obrigatoriamente") raise ValidationError(_( 'Caso pesquise por número de processo, os campos de ' + 'Processo Inicial e Processo Final ' + 'devem ser preenchidos obrigatoriamente')) elif (cleaned_data['processo_final'] < cleaned_data['processo_inicial']): self.logger.error("O processo final ({}) não pode ser menor que o inicial ({})." .format(cleaned_data['processo_final'], cleaned_data['processo_inicial'])) raise ValidationError(_( 'O processo final não pode ser menor que o inicial')) return cleaned_data class FichaPesquisaForm(forms.Form): logger = logging.getLogger(__name__) tipo_materia = forms.ModelChoiceField( label=TipoMateriaLegislativa._meta.verbose_name, queryset=TipoMateriaLegislativa.objects.all(), empty_label='Selecione') data_inicial = forms.DateField( label='Data Inicial', widget=forms.DateInput(format='%d/%m/%Y') ) data_final = forms.DateField( label='Data Final', widget=forms.DateInput(format='%d/%m/%Y') ) def __init__(self, *args, **kwargs): super(FichaPesquisaForm, self).__init__(*args, **kwargs) row1 = to_row( [('tipo_materia', 6), ('data_inicial', 3), ('data_final', 3)]) self.helper = SaplFormHelper() self.helper.layout = Layout( Fieldset( ('Formulário de Ficha'), row1, form_actions(label='Pesquisar') ) ) def clean(self): super(FichaPesquisaForm, self).clean() if not self.is_valid(): return self.cleaned_data cleaned_data = self.cleaned_data if not self.is_valid(): return cleaned_data if 
cleaned_data['data_final'] < cleaned_data['data_inicial']: self.logger.error("A Data Final ({}) não pode ser menor que a Data Inicial ({})." .format(cleaned_data['data_final'], cleaned_data['data_inicial'])) raise ValidationError(_( 'A Data Final não pode ser menor que a Data Inicial')) return cleaned_data class FichaSelecionaForm(forms.Form): materia = forms.ModelChoiceField( widget=forms.RadioSelect, queryset=MateriaLegislativa.objects.all(), label='') def __init__(self, *args, **kwargs): super(FichaSelecionaForm, self).__init__(*args, **kwargs) row1 = to_row( [('materia', 12)]) self.helper = SaplFormHelper() self.helper.layout = Layout( Fieldset( ('Selecione a ficha que deseja imprimir'), row1, form_actions(label='Gerar Impresso') ) ) class ExcluirTramitacaoEmLote(forms.Form): logger = logging.getLogger(__name__) data_tramitacao = forms.DateField(required=True, label=_('Data da Tramitação')) unidade_tramitacao_local = forms.ModelChoiceField(label=_('Unidade Local'), required=True, queryset=UnidadeTramitacao.objects.all(), empty_label='------') unidade_tramitacao_destino = forms.ModelChoiceField(label=_('Unidade Destino'), required=True, queryset=UnidadeTramitacao.objects.all(), empty_label='------') status = forms.ModelChoiceField(label=_('Status'), required=True, queryset=StatusTramitacao.objects.all(), empty_label='------') def clean(self): super(ExcluirTramitacaoEmLote, self).clean() cleaned_data = self.cleaned_data if not self.is_valid(): return cleaned_data data_tramitacao = cleaned_data['data_tramitacao'] unidade_tramitacao_local = cleaned_data['unidade_tramitacao_local'] unidade_tramitacao_destino = cleaned_data['unidade_tramitacao_destino'] status = cleaned_data['status'] tramitacao_set = Tramitacao.objects.filter(data_tramitacao=data_tramitacao, unidade_tramitacao_local=unidade_tramitacao_local, unidade_tramitacao_destino=unidade_tramitacao_destino, status=status) if not tramitacao_set.exists(): self.logger.error("Não existem tramitações com os dados 
informados " " (data_tramitacao={}, unidade_tramitacao_local={})." "unidade_tramitacao_destino={}, status={})." .format(data_tramitacao, unidade_tramitacao_local, unidade_tramitacao_destino, status)) raise forms.ValidationError( _("Não existem tramitações com os dados informados.")) return cleaned_data def __init__(self, *args, **kwargs): super(ExcluirTramitacaoEmLote, self).__init__(*args, **kwargs) row1 = to_row( [('data_tramitacao', 6), ('status', 6), ]) row2 = to_row( [('unidade_tramitacao_local', 6), ('unidade_tramitacao_destino', 6)]) self.helper = SaplFormHelper() self.helper.layout = Layout( Fieldset(_('Dados das Tramitações'), row1, row2, HTML("&nbsp;"), form_actions(label='Excluir') ) ) class MateriaPesquisaSimplesForm(forms.Form): tipo_materia = forms.ModelChoiceField( label=TipoMateriaLegislativa._meta.verbose_name, queryset=TipoMateriaLegislativa.objects.all(), required=False, empty_label='Selecione') data_inicial = forms.DateField( label='Data Inicial', required=False, widget=forms.DateInput(format='%d/%m/%Y') ) data_final = forms.DateField( label='Data Final', required=False, widget=forms.DateInput(format='%d/%m/%Y') ) titulo = forms.CharField( label='Título do Relatório', required=False, max_length=150) logger = logging.getLogger(__name__) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) row1 = to_row( [('tipo_materia', 6), ('data_inicial', 3), ('data_final', 3)]) row2 = to_row( [('titulo', 12)]) self.helper = SaplFormHelper() self.helper.layout = Layout( Fieldset( 'Índice de Materias', row1, row2, form_actions(label='Pesquisar') ) ) def clean(self): super().clean() if not self.is_valid(): return self.cleaned_data cleaned_data = self.cleaned_data data_inicial = cleaned_data['data_inicial'] data_final = cleaned_data['data_final'] if data_inicial or data_final: if not (data_inicial and data_final): self.logger.error("Caso pesquise por data, os campos de Data Inicial e " "Data Final devem ser preenchidos obrigatoriamente") raise 
ValidationError(_('Caso pesquise por data, os campos de Data Inicial e ' 'Data Final devem ser preenchidos obrigatoriamente')) elif data_inicial > data_final: self.logger.error("Data Final ({}) menor que a Data Inicial ({}).".format( data_final, data_inicial)) raise ValidationError( _('A Data Final não pode ser menor que a Data Inicial')) return cleaned_data
# --- codeparrot/github-code-clean: dataset separator marker ---
# Everything above this line (Django SAPL forms) and everything below it
# (SymPy integral transforms) are two unrelated source files concatenated
# by the scraper. As a bare expression the original marker line would raise
# NameError at import time; it is commented out here.
""" Integral Transforms """ from __future__ import print_function, division from sympy.core import S from sympy.core.compatibility import reduce from sympy.core.function import Function from sympy.core.numbers import oo from sympy.core.symbol import Dummy from sympy.integrals import integrate, Integral from sympy.integrals.meijerint import _dummy from sympy.logic.boolalg import to_cnf, conjuncts, disjuncts, Or, And from sympy.simplify import simplify from sympy.utilities import default_sort_key ########################################################################## # Helpers / Utilities ########################################################################## class IntegralTransformError(NotImplementedError): """ Exception raised in relation to problems computing transforms. This class is mostly used internally; if integrals cannot be computed objects representing unevaluated transforms are usually returned. The hint ``needeval=True`` can be used to disable returning transform objects, and instead raise this exception if an integral cannot be computed. """ def __init__(self, transform, function, msg): super(IntegralTransformError, self).__init__( "%s Transform could not be computed: %s." % (transform, msg)) self.function = function class IntegralTransform(Function): """ Base class for integral transforms. This class represents unevaluated transforms. To implement a concrete transform, derive from this class and implement the _compute_transform(f, x, s, **hints) and _as_integral(f, x, s) functions. If the transform cannot be computed, raise IntegralTransformError. Also set cls._name. Implement self._collapse_extra if your function returns more than just a number and possibly a convergence condition. """ @property def function(self): """ The function to be transformed. """ return self.args[0] @property def function_variable(self): """ The dependent variable of the function to be transformed. 
""" return self.args[1] @property def transform_variable(self): """ The independent transform variable. """ return self.args[2] @property def free_symbols(self): """ This method returns the symbols that will exist when the transform is evaluated. """ return self.function.free_symbols.union(set([self.transform_variable])) \ - set([self.function_variable]) def _compute_transform(self, f, x, s, **hints): raise NotImplementedError def _as_integral(self, f, x, s): raise NotImplementedError def _collapse_extra(self, extra): from sympy import And cond = And(*extra) if cond == False: raise IntegralTransformError(self.__class__.name, None, '') def doit(self, **hints): """ Try to evaluate the transform in closed form. This general function handles linearity, but apart from that leaves pretty much everything to _compute_transform. Standard hints are the following: - ``simplify``: whether or not to simplify the result - ``noconds``: if True, don't return convergence conditions - ``needeval``: if True, raise IntegralTransformError instead of returning IntegralTransform objects The default values of these hints depend on the concrete transform, usually the default is ``(simplify, noconds, needeval) = (True, False, False)``. 
""" from sympy import Add, expand_mul, Mul from sympy.core.function import AppliedUndef needeval = hints.pop('needeval', False) try_directly = not any(func.has(self.function_variable) for func in self.function.atoms(AppliedUndef)) if try_directly: try: return self._compute_transform(self.function, self.function_variable, self.transform_variable, **hints) except IntegralTransformError: pass fn = self.function if not fn.is_Add: fn = expand_mul(fn) if fn.is_Add: hints['needeval'] = needeval res = [self.__class__(*([x] + list(self.args[1:]))).doit(**hints) for x in fn.args] extra = [] ress = [] for x in res: if not isinstance(x, tuple): x = [x] ress.append(x[0]) if len(x) > 1: extra += [x[1:]] res = Add(*ress) if not extra: return res try: extra = self._collapse_extra(extra) return tuple([res]) + tuple(extra) except IntegralTransformError: pass if needeval: raise IntegralTransformError( self.__class__._name, self.function, 'needeval') # TODO handle derivatives etc # pull out constant coefficients coeff, rest = fn.as_coeff_mul(self.function_variable) return coeff*self.__class__(*([Mul(*rest)] + list(self.args[1:]))) @property def as_integral(self): return self._as_integral(self.function, self.function_variable, self.transform_variable) def _eval_rewrite_as_Integral(self, *args): return self.as_integral from sympy.solvers.inequalities import _solve_inequality def _simplify(expr, doit): from sympy import powdenest, piecewise_fold if doit: return simplify(powdenest(piecewise_fold(expr), polar=True)) return expr def _noconds_(default): """ This is a decorator generator for dropping convergence conditions. Suppose you define a function ``transform(*args)`` which returns a tuple of the form ``(result, cond1, cond2, ...)``. Decorating it ``@_noconds_(default)`` will add a new keyword argument ``noconds`` to it. If ``noconds=True``, the return value will be altered to be only ``result``, whereas if ``noconds=False`` the return value will not be altered. 
The default value of the ``noconds`` keyword will be ``default`` (i.e. the argument of this function). """ def make_wrapper(func): from sympy.core.decorators import wraps @wraps(func) def wrapper(*args, **kwargs): noconds = kwargs.pop('noconds', default) res = func(*args, **kwargs) if noconds: return res[0] return res return wrapper return make_wrapper _noconds = _noconds_(False) ########################################################################## # Mellin Transform ########################################################################## def _default_integrator(f, x): return integrate(f, (x, 0, oo)) @_noconds def _mellin_transform(f, x, s_, integrator=_default_integrator, simplify=True): """ Backend function to compute Mellin transforms. """ from sympy import re, Max, Min, count_ops # We use a fresh dummy, because assumptions on s might drop conditions on # convergence of the integral. s = _dummy('s', 'mellin-transform', f) F = integrator(x**(s - 1) * f, x) if not F.has(Integral): return _simplify(F.subs(s, s_), simplify), (-oo, oo), True if not F.is_Piecewise: raise IntegralTransformError('Mellin', f, 'could not compute integral') F, cond = F.args[0] if F.has(Integral): raise IntegralTransformError( 'Mellin', f, 'integral in unexpected form') def process_conds(cond): """ Turn ``cond`` into a strip (a, b), and auxiliary conditions. 
""" a = -oo b = oo aux = True conds = conjuncts(to_cnf(cond)) t = Dummy('t', real=True) for c in conds: a_ = oo b_ = -oo aux_ = [] for d in disjuncts(c): d_ = d.replace( re, lambda x: x.as_real_imag()[0]).subs(re(s), t) if not d.is_Relational or \ d.rel_op not in ('>', '>=', '<', '<=') \ or d_.has(s) or not d_.has(t): aux_ += [d] continue soln = _solve_inequality(d_, t) if not soln.is_Relational or \ soln.rel_op not in ('>', '>=', '<', '<='): aux_ += [d] continue if soln.lts == t: b_ = Max(soln.gts, b_) else: a_ = Min(soln.lts, a_) if a_ != oo and a_ != b: a = Max(a_, a) elif b_ != -oo and b_ != a: b = Min(b_, b) else: aux = And(aux, Or(*aux_)) return a, b, aux conds = [process_conds(c) for c in disjuncts(cond)] conds = [x for x in conds if x[2] != False] conds.sort(key=lambda x: (x[0] - x[1], count_ops(x[2]))) if not conds: raise IntegralTransformError('Mellin', f, 'no convergence found') a, b, aux = conds[0] return _simplify(F.subs(s, s_), simplify), (a, b), aux class MellinTransform(IntegralTransform): """ Class representing unevaluated Mellin transforms. For usage of this class, see the :class:`IntegralTransform` docstring. For how to compute Mellin transforms, see the :func:`mellin_transform` docstring. """ _name = 'Mellin' def _compute_transform(self, f, x, s, **hints): return _mellin_transform(f, x, s, **hints) def _as_integral(self, f, x, s): from sympy import Integral return Integral(f*x**(s - 1), (x, 0, oo)) def _collapse_extra(self, extra): from sympy import And, Max, Min a = [] b = [] cond = [] for (sa, sb), c in extra: a += [sa] b += [sb] cond += [c] res = (Max(*a), Min(*b)), And(*cond) if (res[0][0] >= res[0][1]) == True or res[1] == False: raise IntegralTransformError( 'Mellin', None, 'no combined convergence.') return res def mellin_transform(f, x, s, **hints): r""" Compute the Mellin transform `F(s)` of `f(x)`, .. math :: F(s) = \int_0^\infty x^{s-1} f(x) \mathrm{d}x. 
For all "sensible" functions, this converges absolutely in a strip `a < \operatorname{Re}(s) < b`. The Mellin transform is related via change of variables to the Fourier transform, and also to the (bilateral) Laplace transform. This function returns ``(F, (a, b), cond)`` where ``F`` is the Mellin transform of ``f``, ``(a, b)`` is the fundamental strip (as above), and ``cond`` are auxiliary convergence conditions. If the integral cannot be computed in closed form, this function returns an unevaluated :class:`MellinTransform` object. For a description of possible hints, refer to the docstring of :func:`sympy.integrals.transforms.IntegralTransform.doit`. If ``noconds=False``, then only `F` will be returned (i.e. not ``cond``, and also not the strip ``(a, b)``). >>> from sympy.integrals.transforms import mellin_transform >>> from sympy import exp >>> from sympy.abc import x, s >>> mellin_transform(exp(-x), x, s) (gamma(s), (0, oo), True) See Also ======== inverse_mellin_transform, laplace_transform, fourier_transform hankel_transform, inverse_hankel_transform """ return MellinTransform(f, x, s).doit(**hints) def _rewrite_sin(m_n, s, a, b): """ Re-write the sine function ``sin(m*s + n)`` as gamma functions, compatible with the strip (a, b). Return ``(gamma1, gamma2, fac)`` so that ``f == fac/(gamma1 * gamma2)``. 
    >>> from sympy.integrals.transforms import _rewrite_sin
    >>> from sympy import pi, S
    >>> from sympy.abc import s
    >>> _rewrite_sin((pi, 0), s, 0, 1)
    (gamma(s), gamma(-s + 1), pi)
    >>> _rewrite_sin((pi, 0), s, 1, 0)
    (gamma(s - 1), gamma(-s + 2), -pi)
    >>> _rewrite_sin((pi, 0), s, -1, 0)
    (gamma(s + 1), gamma(-s), -pi)
    >>> _rewrite_sin((pi, pi/2), s, S(1)/2, S(3)/2)
    (gamma(s - 1/2), gamma(-s + 3/2), -pi)
    >>> _rewrite_sin((pi, pi), s, 0, 1)
    (gamma(s), gamma(-s + 1), -pi)
    >>> _rewrite_sin((2*pi, 0), s, 0, S(1)/2)
    (gamma(2*s), gamma(-2*s + 1), pi)
    >>> _rewrite_sin((2*pi, 0), s, S(1)/2, 1)
    (gamma(2*s - 1), gamma(-2*s + 2), -pi)
    """
    # (This is a separate function because it is moderately complicated,
    # and I want to doctest it.)
    # We want to use pi/sin(pi*x) = gamma(x)*gamma(1-x).
    # But there is one complication: the gamma functions determine the
    # integration contour in the definition of the G-function. Usually
    # it would not matter if this is slightly shifted, unless this way
    # we create an undefined function!
    # So we try to write this in such a way that the gammas are
    # eminently on the right side of the strip.
    from sympy import expand_mul, pi, ceiling, gamma, re
    m, n = m_n

    m = expand_mul(m/pi)
    n = expand_mul(n/pi)
    # The shift r moves the gamma arguments so their poles avoid the strip.
    r = ceiling(-m*a - n.as_real_imag()[0])  # Don't use re(n), does not expand
    return gamma(m*s + n + r), gamma(1 - n - r - m*s), (-1)**r*pi


class MellinTransformStripError(ValueError):
    """
    Exception raised by _rewrite_gamma. Mainly for internal use.
    """
    pass


def _rewrite_gamma(f, s, a, b):
    """
    Try to rewrite the product f(s) as a product of gamma functions,
    so that the inverse Mellin transform of f can be expressed as a meijer
    G function.

    Return (an, ap), (bm, bq), arg, exp, fac such that
    G((an, ap), (bm, bq), arg/z**exp)*fac is the inverse Mellin transform
    of f(s).

    Raises IntegralTransformError or MellinTransformStripError on failure.

    It is asserted that f has no poles in the fundamental strip designated by
    (a, b). One of a and b is allowed to be None.

The fundamental strip is important, because it determines the inversion contour. This function can handle exponentials, linear factors, trigonometric functions. This is a helper function for inverse_mellin_transform that will not attempt any transformations on f. >>> from sympy.integrals.transforms import _rewrite_gamma >>> from sympy.abc import s >>> from sympy import oo >>> _rewrite_gamma(s*(s+3)*(s-1), s, -oo, oo) (([], [-3, 0, 1]), ([-2, 1, 2], []), 1, 1, -1) >>> _rewrite_gamma((s-1)**2, s, -oo, oo) (([], [1, 1]), ([2, 2], []), 1, 1, 1) Importance of the fundamental strip: >>> _rewrite_gamma(1/s, s, 0, oo) (([1], []), ([], [0]), 1, 1, 1) >>> _rewrite_gamma(1/s, s, None, oo) (([1], []), ([], [0]), 1, 1, 1) >>> _rewrite_gamma(1/s, s, 0, None) (([1], []), ([], [0]), 1, 1, 1) >>> _rewrite_gamma(1/s, s, -oo, 0) (([], [1]), ([0], []), 1, 1, -1) >>> _rewrite_gamma(1/s, s, None, 0) (([], [1]), ([0], []), 1, 1, -1) >>> _rewrite_gamma(1/s, s, -oo, None) (([], [1]), ([0], []), 1, 1, -1) >>> _rewrite_gamma(2**(-s+3), s, -oo, oo) (([], []), ([], []), 1/2, 1, 8) """ from itertools import repeat from sympy import (Poly, gamma, Mul, re, RootOf, exp as exp_, E, expand, roots, ilcm, pi, sin, cos, tan, cot, igcd, exp_polar) # Our strategy will be as follows: # 1) Guess a constant c such that the inversion integral should be # performed wrt s'=c*s (instead of plain s). Write s for s'. # 2) Process all factors, rewrite them independently as gamma functions in # argument s, or exponentials of s. # 3) Try to transform all gamma functions s.t. they have argument # a+s or a-s. # 4) Check that the resulting G function parameters are valid. # 5) Combine all the exponentials. a_, b_ = S([a, b]) def left(c, is_numer): """ Decide whether pole at c lies to the left of the fundamental strip. 
""" # heuristically, this is the best chance for us to solve the inequalities c = expand(re(c)) if a_ is None: return c < b_ if b_ is None: return c <= a_ if (c >= b_) is True: return False if (c <= a_) is True: return True if is_numer: return None if a_.free_symbols or b_.free_symbols or c.free_symbols: return None # XXX #raise IntegralTransformError('Inverse Mellin', f, # 'Could not determine position of singularity %s' # ' relative to fundamental strip' % c) raise MellinTransformStripError('Pole inside critical strip?') # 1) s_multipliers = [] for g in f.atoms(gamma): if not g.has(s): continue arg = g.args[0] if arg.is_Add: arg = arg.as_independent(s)[1] coeff, _ = arg.as_coeff_mul(s) s_multipliers += [coeff] for g in f.atoms(sin, cos, tan, cot): if not g.has(s): continue arg = g.args[0] if arg.is_Add: arg = arg.as_independent(s)[1] coeff, _ = arg.as_coeff_mul(s) s_multipliers += [coeff/pi] s_multipliers = [abs(x) for x in s_multipliers if x.is_real] common_coefficient = S(1) for x in s_multipliers: if not x.is_Rational: common_coefficient = x break s_multipliers = [x/common_coefficient for x in s_multipliers] if any(not x.is_Rational for x in s_multipliers): raise NotImplementedError s_multiplier = common_coefficient/reduce(ilcm, [S(x.q) for x in s_multipliers], S(1)) if s_multiplier == common_coefficient: if len(s_multipliers) == 0: s_multiplier = common_coefficient else: s_multiplier = common_coefficient \ *reduce(igcd, [S(x.p) for x in s_multipliers]) exponent = S(1) fac = S(1) f = f.subs(s, s/s_multiplier) fac /= s_multiplier exponent = 1/s_multiplier if a_ is not None: a_ *= s_multiplier if b_ is not None: b_ *= s_multiplier # 2) numer, denom = f.as_numer_denom() numer = Mul.make_args(numer) denom = Mul.make_args(denom) args = list(zip(numer, repeat(True))) + list(zip(denom, repeat(False))) facs = [] dfacs = [] # *_gammas will contain pairs (a, c) representing Gamma(a*s + c) numer_gammas = [] denom_gammas = [] # exponentials will contain bases for 
exponentials of s exponentials = [] def exception(fact): return IntegralTransformError("Inverse Mellin", f, "Unrecognised form '%s'." % fact) while args: fact, is_numer = args.pop() if is_numer: ugammas, lgammas = numer_gammas, denom_gammas ufacs, lfacs = facs, dfacs else: ugammas, lgammas = denom_gammas, numer_gammas ufacs, lfacs = dfacs, facs def linear_arg(arg): """ Test if arg is of form a*s+b, raise exception if not. """ if not arg.is_polynomial(s): raise exception(fact) p = Poly(arg, s) if p.degree() != 1: raise exception(fact) return p.all_coeffs() # constants if not fact.has(s): ufacs += [fact] # exponentials elif fact.is_Pow or isinstance(fact, exp_): if fact.is_Pow: base = fact.base exp = fact.exp else: base = exp_polar(1) exp = fact.args[0] if exp.is_Integer: cond = is_numer if exp < 0: cond = not cond args += [(base, cond)]*abs(exp) continue elif not base.has(s): a, b = linear_arg(exp) if not is_numer: base = 1/base exponentials += [base**a] facs += [base**b] else: raise exception(fact) # linear factors elif fact.is_polynomial(s): p = Poly(fact, s) if p.degree() != 1: # We completely factor the poly. For this we need the roots. # Now roots() only works in some cases (low degree), and RootOf # only works without parameters. So try both... coeff = p.LT()[1] rs = roots(p, s) if len(rs) != p.degree(): rs = RootOf.all_roots(p) ufacs += [coeff] args += [(s - c, is_numer) for c in rs] continue a, c = p.all_coeffs() ufacs += [a] c /= -a # Now need to convert s - c if left(c, is_numer): ugammas += [(S(1), -c + 1)] lgammas += [(S(1), -c)] else: ufacs += [-1] ugammas += [(S(-1), c + 1)] lgammas += [(S(-1), c)] elif isinstance(fact, gamma): a, b = linear_arg(fact.args[0]) if is_numer: if (a > 0 and (left(-b/a, is_numer) is False)) or \ (a < 0 and (left(-b/a, is_numer) is True)): raise NotImplementedError( 'Gammas partially over the strip.') ugammas += [(a, b)] elif isinstance(fact, sin): # We try to re-write all trigs as gammas. 
This is not in # general the best strategy, since sometimes this is impossible, # but rewriting as exponentials would work. However trig functions # in inverse mellin transforms usually all come from simplifying # gamma terms, so this should work. a = fact.args[0] if is_numer: # No problem with the poles. gamma1, gamma2, fac_ = gamma(a/pi), gamma(1 - a/pi), pi else: gamma1, gamma2, fac_ = _rewrite_sin(linear_arg(a), s, a_, b_) args += [(gamma1, not is_numer), (gamma2, not is_numer)] ufacs += [fac_] elif isinstance(fact, tan): a = fact.args[0] args += [(sin(a, evaluate=False), is_numer), (sin(pi/2 - a, evaluate=False), not is_numer)] elif isinstance(fact, cos): a = fact.args[0] args += [(sin(pi/2 - a, evaluate=False), is_numer)] elif isinstance(fact, cot): a = fact.args[0] args += [(sin(pi/2 - a, evaluate=False), is_numer), (sin(a, evaluate=False), not is_numer)] else: raise exception(fact) fac *= Mul(*facs)/Mul(*dfacs) # 3) an, ap, bm, bq = [], [], [], [] for gammas, plus, minus, is_numer in [(numer_gammas, an, bm, True), (denom_gammas, bq, ap, False)]: while gammas: a, c = gammas.pop() if a != -1 and a != +1: # We use the gamma function multiplication theorem. p = abs(S(a)) newa = a/p newc = c/p if not a.is_Integer: raise TypeError("a is not an integer") for k in range(p): gammas += [(newa, newc + k/p)] if is_numer: fac *= (2*pi)**((1 - p)/2) * p**(c - S(1)/2) exponentials += [p**a] else: fac /= (2*pi)**((1 - p)/2) * p**(c - S(1)/2) exponentials += [p**(-a)] continue if a == +1: plus.append(1 - c) else: minus.append(c) # 4) # TODO # 5) arg = Mul(*exponentials) # for testability, sort the arguments an.sort(key=default_sort_key) ap.sort(key=default_sort_key) bm.sort(key=default_sort_key) bq.sort(key=default_sort_key) return (an, ap), (bm, bq), arg, exponent, fac @_noconds_(True) def _inverse_mellin_transform(F, s, x_, strip, as_meijerg=False): """ A helper for the real inverse_mellin_transform function, this one here assumes x to be real and positive. 
""" from sympy import (expand, expand_mul, hyperexpand, meijerg, And, Or, arg, pi, re, factor, Heaviside, gamma, Add) x = _dummy('t', 'inverse-mellin-transform', F, positive=True) # Actually, we won't try integration at all. Instead we use the definition # of the Meijer G function as a fairly general inverse mellin transform. F = F.rewrite(gamma) for g in [factor(F), expand_mul(F), expand(F)]: if g.is_Add: # do all terms separately ress = [_inverse_mellin_transform(G, s, x, strip, as_meijerg, noconds=False) for G in g.args] conds = [p[1] for p in ress] ress = [p[0] for p in ress] res = Add(*ress) if not as_meijerg: res = factor(res, gens=res.atoms(Heaviside)) return res.subs(x, x_), And(*conds) try: a, b, C, e, fac = _rewrite_gamma(g, s, strip[0], strip[1]) except IntegralTransformError: continue G = meijerg(a, b, C/x**e) if as_meijerg: h = G else: try: h = hyperexpand(G) except NotImplementedError as detail: raise IntegralTransformError( 'Inverse Mellin', F, 'Could not calculate integral') if h.is_Piecewise and len(h.args) == 3: # XXX we break modularity here! h = Heaviside(x - abs(C))*h.args[0].args[0] \ + Heaviside(abs(C) - x)*h.args[1].args[0] # We must ensure that the intgral along the line we want converges, # and return that value. # See [L], 5.2 cond = [abs(arg(G.argument)) < G.delta*pi] # Note: we allow ">=" here, this corresponds to convergence if we let # limits go to oo symetrically. ">" corresponds to absolute convergence. cond += [And(Or(len(G.ap) != len(G.bq), 0 >= re(G.nu) + 1), abs(arg(G.argument)) == G.delta*pi)] cond = Or(*cond) if cond == False: raise IntegralTransformError( 'Inverse Mellin', F, 'does not converge') return (h*fac).subs(x, x_), cond raise IntegralTransformError('Inverse Mellin', F, '') _allowed = None class InverseMellinTransform(IntegralTransform): """ Class representing unevaluated inverse Mellin transforms. For usage of this class, see the :class:`IntegralTransform` docstring. 
    For how to compute inverse Mellin transforms, see the
    :func:`inverse_mellin_transform` docstring.
    """

    _name = 'Inverse Mellin'
    # Dummy used to stand in for a ``None`` strip endpoint, since sympy
    # expression args cannot store None directly.
    _none_sentinel = Dummy('None')
    _c = Dummy('c')

    def __new__(cls, F, s, x, a, b, **opts):
        # Replace None endpoints by the sentinel so they survive as args.
        if a is None:
            a = InverseMellinTransform._none_sentinel
        if b is None:
            b = InverseMellinTransform._none_sentinel
        return IntegralTransform.__new__(cls, F, s, x, a, b, **opts)

    @property
    def fundamental_strip(self):
        # Undo the sentinel substitution done in __new__.
        a, b = self.args[3], self.args[4]
        if a is InverseMellinTransform._none_sentinel:
            a = None
        if b is InverseMellinTransform._none_sentinel:
            b = None
        return a, b

    def _compute_transform(self, F, s, x, **hints):
        from sympy import postorder_traversal
        global _allowed
        if _allowed is None:
            # Build the whitelist of function heads that _rewrite_gamma can
            # handle, once, on first use.
            from sympy import (
                exp, gamma, sin, cos, tan, cot, cosh, sinh, tanh,
                coth, factorial, rf)
            _allowed = set(
                [exp, gamma, sin, cos, tan, cot, cosh, sinh, tanh, coth,
                 factorial, rf])
        for f in postorder_traversal(F):
            # Reject early anything the backend cannot rewrite as gammas.
            if f.is_Function and f.has(s) and f.func not in _allowed:
                raise IntegralTransformError('Inverse Mellin', F,
                                     'Component %s not recognised.' % f)
        strip = self.fundamental_strip
        return _inverse_mellin_transform(F, s, x, strip, **hints)

    def _as_integral(self, F, s, x):
        from sympy import Integral, I, oo
        c = self.__class__._c
        return Integral(F*x**(-s), (s, c - I*oo, c + I*oo))


def inverse_mellin_transform(F, s, x, strip, **hints):
    r"""
    Compute the inverse Mellin transform of `F(s)` over the fundamental
    strip given by ``strip=(a, b)``.

    This can be defined as

    .. math:: f(x) = \int_{c - i\infty}^{c + i\infty} x^{-s} F(s) \mathrm{d}s,

    for any `c` in the fundamental strip. Under certain regularity
    conditions on `F` and/or `f`,
    this recovers `f` from its Mellin transform `F`
    (and vice versa), for positive real `x`.

    One of `a` or `b` may be passed as ``None``; a suitable `c` will be
    inferred.

    If the integral cannot be computed in closed form, this function returns
    an unevaluated :class:`InverseMellinTransform` object.

Note that this function will assume x to be positive and real, regardless of the sympy assumptions! For a description of possible hints, refer to the docstring of :func:`sympy.integrals.transforms.IntegralTransform.doit`. >>> from sympy.integrals.transforms import inverse_mellin_transform >>> from sympy import oo, gamma >>> from sympy.abc import x, s >>> inverse_mellin_transform(gamma(s), s, x, (0, oo)) exp(-x) The fundamental strip matters: >>> f = 1/(s**2 - 1) >>> inverse_mellin_transform(f, s, x, (-oo, -1)) (x/2 - 1/(2*x))*Heaviside(x - 1) >>> inverse_mellin_transform(f, s, x, (-1, 1)) -x*Heaviside(-x + 1)/2 - Heaviside(x - 1)/(2*x) >>> inverse_mellin_transform(f, s, x, (1, oo)) (-x/2 + 1/(2*x))*Heaviside(-x + 1) See Also ======== mellin_transform hankel_transform, inverse_hankel_transform """ return InverseMellinTransform(F, s, x, strip[0], strip[1]).doit(**hints) ########################################################################## # Laplace Transform ########################################################################## def _simplifyconds(expr, s, a): r""" Naively simplify some conditions occuring in ``expr``, given that `\operatorname{Re}(s) > a`. >>> from sympy.integrals.transforms import _simplifyconds as simp >>> from sympy.abc import x >>> from sympy import sympify as S >>> simp(abs(x**2) < 1, x, 1) False >>> simp(abs(x**2) < 1, x, 2) False >>> simp(abs(x**2) < 1, x, 0) Abs(x**2) < 1 >>> simp(abs(1/x**2) < 1, x, 1) True >>> simp(S(1) < abs(x), x, 1) True >>> simp(S(1) < abs(1/x), x, 1) False >>> from sympy import Ne >>> simp(Ne(1, x**3), x, 1) True >>> simp(Ne(1, x**3), x, 2) True >>> simp(Ne(1, x**3), x, 0) 1 != x**3 """ from sympy.core.relational import ( StrictGreaterThan, StrictLessThan, Unequality ) from sympy import Abs def power(ex): if ex == s: return 1 if ex.is_Pow and ex.base == s: return ex.exp return None def bigger(ex1, ex2): """ Return True only if |ex1| > |ex2|, False only if |ex1| < |ex2|. Else return None. 
""" if ex1.has(s) and ex2.has(s): return None if ex1.func is Abs: ex1 = ex1.args[0] if ex2.func is Abs: ex2 = ex2.args[0] if ex1.has(s): return bigger(1/ex2, 1/ex1) n = power(ex2) if n is None: return None if n > 0 and (abs(ex1) <= abs(a)**n) is True: return False if n < 0 and (abs(ex1) >= abs(a)**n) is True: return True def replie(x, y): """ simplify x < y """ if not (x.is_positive or x.func is Abs) \ or not (y.is_positive or y.func is Abs): return (x < y) r = bigger(x, y) if r is not None: return not r return (x < y) def replue(x, y): if bigger(x, y) in (True, False): return True return Unequality(x, y) def repl(ex, *args): if isinstance(ex, bool): return ex return ex.replace(*args) expr = repl(expr, StrictLessThan, replie) expr = repl(expr, StrictGreaterThan, lambda x, y: replie(y, x)) expr = repl(expr, Unequality, replue) return expr @_noconds def _laplace_transform(f, t, s_, simplify=True): """ The backend function for Laplace transforms. """ from sympy import (re, Max, exp, pi, Abs, Min, periodic_argument as arg, cos, Wild, symbols, polar_lift) s = Dummy('s') F = integrate(exp(-s*t) * f, (t, 0, oo)) if not F.has(Integral): return _simplify(F.subs(s, s_), simplify), -oo, True if not F.is_Piecewise: raise IntegralTransformError( 'Laplace', f, 'could not compute integral') F, cond = F.args[0] if F.has(Integral): raise IntegralTransformError( 'Laplace', f, 'integral in unexpected form') def process_conds(conds): """ Turn ``conds`` into a strip and auxiliary conditions. 
""" a = -oo aux = True conds = conjuncts(to_cnf(conds)) u = Dummy('u', real=True) p, q, w1, w2, w3, w4, w5 = symbols( 'p q w1 w2 w3 w4 w5', cls=Wild, exclude=[s]) for c in conds: a_ = oo aux_ = [] for d in disjuncts(c): m = d.match(abs(arg((s + w3)**p*q, w1)) < w2) if not m: m = d.match(abs(arg((s + w3)**p*q, w1)) <= w2) if not m: m = d.match(abs(arg((polar_lift(s + w3))**p*q, w1)) < w2) if not m: m = d.match(abs(arg((polar_lift(s + w3))**p*q, w1)) <= w2) if m: if m[q] > 0 and m[w2]/m[p] == pi/2: d = re(s + m[w3]) > 0 m = d.match( 0 < cos(abs(arg(s**w1*w5, q))*w2)*abs(s**w3)**w4 - p) if not m: m = d.match(0 < cos(abs( arg(polar_lift(s)**w1*w5, q))*w2)*abs(s**w3)**w4 - p) if m and all(m[wild] > 0 for wild in [w1, w2, w3, w4, w5]): d = re(s) > m[p] d_ = d.replace( re, lambda x: x.expand().as_real_imag()[0]).subs(re(s), t) if not d.is_Relational or \ d.rel_op not in ('>', '>=', '<', '<=') \ or d_.has(s) or not d_.has(t): aux_ += [d] continue soln = _solve_inequality(d_, t) if not soln.is_Relational or \ soln.rel_op not in ('>', '>=', '<', '<='): aux_ += [d] continue if soln.lts == t: raise IntegralTransformError('Laplace', f, 'convergence not in half-plane?') else: a_ = Min(soln.lts, a_) if a_ != oo: a = Max(a_, a) else: aux = And(aux, Or(*aux_)) return a, aux conds = [process_conds(c) for c in disjuncts(cond)] conds2 = [x for x in conds if x[1] != False and x[0] != -oo] if not conds2: conds2 = [x for x in conds if x[1] != False] conds = conds2 def cnt(expr): if isinstance(expr, bool): return 0 return expr.count_ops() conds.sort(key=lambda x: (-x[0], cnt(x[1]))) if not conds: raise IntegralTransformError('Laplace', f, 'no convergence found') a, aux = conds[0] def sbs(expr): if isinstance(expr, bool): return expr return expr.subs(s, s_) if simplify: F = _simplifyconds(F, s, a) aux = _simplifyconds(aux, s, a) return _simplify(F.subs(s, s_), simplify), sbs(a), sbs(aux) class LaplaceTransform(IntegralTransform): """ Class representing unevaluated Laplace transforms. 
For usage of this class, see the :class:`IntegralTransform` docstring. For how to compute Laplace transforms, see the :func:`laplace_transform` docstring. """ _name = 'Laplace' def _compute_transform(self, f, t, s, **hints): return _laplace_transform(f, t, s, **hints) def _as_integral(self, f, t, s): from sympy import Integral, exp return Integral(f*exp(-s*t), (t, 0, oo)) def _collapse_extra(self, extra): from sympy import And, Max conds = [] planes = [] for plane, cond in extra: conds.append(cond) planes.append(plane) cond = And(*conds) plane = Max(*planes) if cond == False: raise IntegralTransformError( 'Laplace', None, 'No combined convergence.') return plane, cond def laplace_transform(f, t, s, **hints): r""" Compute the Laplace Transform `F(s)` of `f(t)`, .. math :: F(s) = \int_0^\infty e^{-st} f(t) \mathrm{d}t. For all "sensible" functions, this converges absolutely in a half plane `a < \operatorname{Re}(s)`. This function returns ``(F, a, cond)`` where ``F`` is the Laplace transform of ``f``, `\operatorname{Re}(s) > a` is the half-plane of convergence, and ``cond`` are auxiliary convergence conditions. If the integral cannot be computed in closed form, this function returns an unevaluated :class:`LaplaceTransform` object. For a description of possible hints, refer to the docstring of :func:`sympy.integrals.transforms.IntegralTransform.doit`. If ``noconds=True``, only `F` will be returned (i.e. not ``cond``, and also not the plane ``a``). >>> from sympy.integrals import laplace_transform >>> from sympy.abc import t, s, a >>> laplace_transform(t**a, t, s) (s**(-a)*gamma(a + 1)/s, 0, -re(a) < 1) See Also ======== inverse_laplace_transform, mellin_transform, fourier_transform hankel_transform, inverse_hankel_transform """ return LaplaceTransform(f, t, s).doit(**hints) @_noconds_(True) def _inverse_laplace_transform(F, s, t_, plane, simplify=True): """ The backend function for inverse Laplace transforms. 
""" from sympy import exp, Heaviside, log, expand_complex, Integral, Piecewise from sympy.integrals.meijerint import meijerint_inversion, _get_coeff_exp # There are two strategies we can try: # 1) Use inverse mellin transforms - related by a simple change of variables. # 2) Use the inversion integral. t = Dummy('t', real=True) def pw_simp(*args): """ Simplify a piecewise expression from hyperexpand. """ # XXX we break modularity here! if len(args) != 3: return Piecewise(*args) arg = args[2].args[0].argument coeff, exponent = _get_coeff_exp(arg, t) e1 = args[0].args[0] e2 = args[1].args[0] return Heaviside(1/abs(coeff) - t**exponent)*e1 \ + Heaviside(t**exponent - 1/abs(coeff))*e2 try: f, cond = inverse_mellin_transform(F, s, exp(-t), (None, oo), needeval=True, noconds=False) except IntegralTransformError: f = None if f is None: f = meijerint_inversion(F, s, t) if f is None: raise IntegralTransformError('Inverse Laplace', f, '') if f.is_Piecewise: f, cond = f.args[0] if f.has(Integral): raise IntegralTransformError('Inverse Laplace', f, 'inversion integral of unrecognised form.') else: cond = True f = f.replace(Piecewise, pw_simp) if f.is_Piecewise: # many of the functions called below can't work with piecewise # (b/c it has a bool in args) return f.subs(t, t_), cond u = Dummy('u') def simp_heaviside(arg): a = arg.subs(exp(-t), u) if a.has(t): return Heaviside(arg) rel = _solve_inequality(a > 0, u) if rel.lts == u: k = log(rel.gts) return Heaviside(t + k) else: k = log(rel.lts) return Heaviside(-(t + k)) f = f.replace(Heaviside, simp_heaviside) def simp_exp(arg): return expand_complex(exp(arg)) f = f.replace(exp, simp_exp) # TODO it would be nice to fix cosh and sinh ... simplify messes these # exponentials up return _simplify(f.subs(t, t_), simplify), cond class InverseLaplaceTransform(IntegralTransform): """ Class representing unevaluated inverse Laplace transforms. For usage of this class, see the :class:`IntegralTransform` docstring. 
For how to compute inverse Laplace transforms, see the :func:`inverse_laplace_transform` docstring. """ _name = 'Inverse Laplace' _none_sentinel = Dummy('None') _c = Dummy('c') def __new__(cls, F, s, x, plane, **opts): if plane is None: plane = InverseLaplaceTransform._none_sentinel return IntegralTransform.__new__(cls, F, s, x, plane, **opts) @property def fundamental_plane(self): plane = self.args[3] if plane is InverseLaplaceTransform._none_sentinel: plane = None return plane def _compute_transform(self, F, s, t, **hints): return _inverse_laplace_transform(F, s, t, self.fundamental_plane, **hints) def _as_integral(self, F, s, t): from sympy import I, Integral, exp c = self.__class__._c return Integral(exp(s*t)*F, (s, c - I*oo, c + I*oo)) def inverse_laplace_transform(F, s, t, plane=None, **hints): r""" Compute the inverse Laplace transform of `F(s)`, defined as .. math :: f(t) = \int_{c-i\infty}^{c+i\infty} e^{st} F(s) \mathrm{d}s, for `c` so large that `F(s)` has no singularites in the half-plane `\operatorname{Re}(s) > c-\epsilon`. The plane can be specified by argument ``plane``, but will be inferred if passed as None. Under certain regularity conditions, this recovers `f(t)` from its Laplace Transform `F(s)`, for non-negative `t`, and vice versa. If the integral cannot be computed in closed form, this function returns an unevaluated :class:`InverseLaplaceTransform` object. Note that this function will always assume `t` to be real, regardless of the sympy assumption on `t`. For a description of possible hints, refer to the docstring of :func:`sympy.integrals.transforms.IntegralTransform.doit`. 
    >>> from sympy.integrals.transforms import inverse_laplace_transform
    >>> from sympy import exp, Symbol
    >>> from sympy.abc import s, t
    >>> a = Symbol('a', positive=True)
    >>> inverse_laplace_transform(exp(-a*s)/s, s, t)
    Heaviside(-a + t)

    See Also
    ========

    laplace_transform
    hankel_transform, inverse_hankel_transform
    """
    return InverseLaplaceTransform(F, s, t, plane).doit(**hints)


##########################################################################
# Fourier Transform
##########################################################################


@_noconds_(True)
def _fourier_transform(f, x, k, a, b, name, simplify=True):
    """
    Compute a general Fourier-type transform
        F(k) = a int_-oo^oo exp(b*I*x*k) f(x) dx.

    For suitable choice of a and b, this reduces to the standard Fourier
    and inverse Fourier transforms.
    """
    from sympy import exp, I, oo
    F = integrate(a*f*exp(b*I*x*k), (x, -oo, oo))

    if not F.has(Integral):
        # Integral evaluated unconditionally: no convergence condition.
        return _simplify(F, simplify), True

    if not F.is_Piecewise:
        raise IntegralTransformError(name, f, 'could not compute integral')

    # Take the first (expr, cond) piece of the Piecewise result, as the
    # other backend transforms in this module do.
    F, cond = F.args[0]
    if F.has(Integral):
        raise IntegralTransformError(name, f, 'integral in unexpected form')

    return _simplify(F, simplify), cond


class FourierTypeTransform(IntegralTransform):
    """ Base class for Fourier transforms.
        Specify cls._a and cls._b.
    """

    def _compute_transform(self, f, x, k, **hints):
        # _a and _b select the particular convention (forward/inverse).
        return _fourier_transform(f, x, k,
                                  self.__class__._a, self.__class__._b,
                                  self.__class__._name, **hints)

    def _as_integral(self, f, x, k):
        from sympy import Integral, exp, I
        a = self.__class__._a
        b = self.__class__._b
        return Integral(a*f*exp(b*I*x*k), (x, -oo, oo))


class FourierTransform(FourierTypeTransform):
    """
    Class representing unevaluated Fourier transforms.

    For usage of this class, see the :class:`IntegralTransform` docstring.

    For how to compute Fourier transforms, see the :func:`fourier_transform`
    docstring.

""" _name = 'Fourier' _a = 1 _b = -2*S.Pi def fourier_transform(f, x, k, **hints): r""" Compute the unitary, ordinary-frequency Fourier transform of `f`, defined as .. math:: F(k) = \int_{-\infty}^\infty f(x) e^{-2\pi i x k} \mathrm{d} x. If the transform cannot be computed in closed form, this function returns an unevaluated :class:`FourierTransform` object. For other Fourier transform conventions, see the function :func:`sympy.integrals.transforms._fourier_transform`. For a description of possible hints, refer to the docstring of :func:`sympy.integrals.transforms.IntegralTransform.doit`. Note that for this transform, by default ``noconds=True``. >>> from sympy import fourier_transform, exp >>> from sympy.abc import x, k >>> fourier_transform(exp(-x**2), x, k) sqrt(pi)*exp(-pi**2*k**2) >>> fourier_transform(exp(-x**2), x, k, noconds=False) (sqrt(pi)*exp(-pi**2*k**2), True) See Also ======== inverse_fourier_transform sine_transform, inverse_sine_transform cosine_transform, inverse_cosine_transform hankel_transform, inverse_hankel_transform mellin_transform, laplace_transform """ return FourierTransform(f, x, k).doit(**hints) class InverseFourierTransform(FourierTypeTransform): """ Class representing unevaluated inverse Fourier transforms. For usage of this class, see the :class:`IntegralTransform` docstring. For how to compute inverse Fourier transforms, see the :func:`inverse_fourier_transform` docstring. """ _name = 'Inverse Fourier' _a = 1 _b = 2*S.Pi def inverse_fourier_transform(F, k, x, **hints): r""" Compute the unitary, ordinary-frequency inverse Fourier transform of `F`, defined as .. math:: f(x) = \int_{-\infty}^\infty F(k) e^{2\pi i x k} \mathrm{d} k. If the transform cannot be computed in closed form, this function returns an unevaluated :class:`InverseFourierTransform` object. For other Fourier transform conventions, see the function :func:`sympy.integrals.transforms._fourier_transform`. 
For a description of possible hints, refer to the docstring of :func:`sympy.integrals.transforms.IntegralTransform.doit`. Note that for this transform, by default ``noconds=True``. >>> from sympy import inverse_fourier_transform, exp, sqrt, pi >>> from sympy.abc import x, k >>> inverse_fourier_transform(sqrt(pi)*exp(-(pi*k)**2), k, x) exp(-x**2) >>> inverse_fourier_transform(sqrt(pi)*exp(-(pi*k)**2), k, x, noconds=False) (exp(-x**2), True) See Also ======== fourier_transform sine_transform, inverse_sine_transform cosine_transform, inverse_cosine_transform hankel_transform, inverse_hankel_transform mellin_transform, laplace_transform """ return InverseFourierTransform(F, k, x).doit(**hints) ########################################################################## # Fourier Sine and Cosine Transform ########################################################################## from sympy import sin, cos, sqrt, pi, I, oo @_noconds_(True) def _sine_cosine_transform(f, x, k, a, b, K, name, simplify=True): """ Compute a general sine or cosine-type transform F(k) = a int_0^oo b*sin(x*k) f(x) dx. F(k) = a int_0^oo b*cos(x*k) f(x) dx. For suitable choice of a and b, this reduces to the standard sine/cosine and inverse sine/cosine transforms. """ F = integrate(a*f*K(b*x*k), (x, 0, oo)) if not F.has(Integral): return _simplify(F, simplify), True if not F.is_Piecewise: raise IntegralTransformError(name, f, 'could not compute integral') F, cond = F.args[0] if F.has(Integral): raise IntegralTransformError(name, f, 'integral in unexpected form') return _simplify(F, simplify), cond class SineCosineTypeTransform(IntegralTransform): """ Base class for sine and cosine transforms. Specify cls._a and cls._b and cls._kern. 
""" def _compute_transform(self, f, x, k, **hints): return _sine_cosine_transform(f, x, k, self.__class__._a, self.__class__._b, self.__class__._kern, self.__class__._name, **hints) def _as_integral(self, f, x, k): from sympy import Integral, exp, I a = self.__class__._a b = self.__class__._b K = self.__class__._kern return Integral(a*f*K(b*x*k), (x, 0, oo)) class SineTransform(SineCosineTypeTransform): """ Class representing unevaluated sine transforms. For usage of this class, see the :class:`IntegralTransform` docstring. For how to compute sine transforms, see the :func:`sine_transform` docstring. """ _name = 'Sine' _kern = sin _a = sqrt(2)/sqrt(pi) _b = 1 def sine_transform(f, x, k, **hints): r""" Compute the unitary, ordinary-frequency sine transform of `f`, defined as .. math:: F(k) = \sqrt{\frac{2}{\pi}} \int_{0}^\infty f(x) \sin(2\pi x k) \mathrm{d} x. If the transform cannot be computed in closed form, this function returns an unevaluated :class:`SineTransform` object. For a description of possible hints, refer to the docstring of :func:`sympy.integrals.transforms.IntegralTransform.doit`. Note that for this transform, by default ``noconds=True``. >>> from sympy import sine_transform, exp >>> from sympy.abc import x, k, a >>> sine_transform(x*exp(-a*x**2), x, k) sqrt(2)*k*exp(-k**2/(4*a))/(4*a**(3/2)) >>> sine_transform(x**(-a), x, k) 2**(-a + 1/2)*k**(a - 1)*gamma(-a/2 + 1)/gamma(a/2 + 1/2) See Also ======== fourier_transform, inverse_fourier_transform inverse_sine_transform cosine_transform, inverse_cosine_transform hankel_transform, inverse_hankel_transform mellin_transform, laplace_transform """ return SineTransform(f, x, k).doit(**hints) class InverseSineTransform(SineCosineTypeTransform): """ Class representing unevaluated inverse sine transforms. For usage of this class, see the :class:`IntegralTransform` docstring. For how to compute inverse sine transforms, see the :func:`inverse_sine_transform` docstring. 
""" _name = 'Inverse Sine' _kern = sin _a = sqrt(2)/sqrt(pi) _b = 1 def inverse_sine_transform(F, k, x, **hints): r""" Compute the unitary, ordinary-frequency inverse sine transform of `F`, defined as .. math:: f(x) = \sqrt{\frac{2}{\pi}} \int_{0}^\infty F(k) \sin(2\pi x k) \mathrm{d} k. If the transform cannot be computed in closed form, this function returns an unevaluated :class:`InverseSineTransform` object. For a description of possible hints, refer to the docstring of :func:`sympy.integrals.transforms.IntegralTransform.doit`. Note that for this transform, by default ``noconds=True``. >>> from sympy import inverse_sine_transform, exp, sqrt, gamma, pi >>> from sympy.abc import x, k, a >>> inverse_sine_transform(2**((1-2*a)/2)*k**(a - 1)* ... gamma(-a/2 + 1)/gamma((a+1)/2), k, x) x**(-a) >>> inverse_sine_transform(sqrt(2)*k*exp(-k**2/(4*a))/(4*sqrt(a)**3), k, x) x*exp(-a*x**2) See Also ======== fourier_transform, inverse_fourier_transform sine_transform cosine_transform, inverse_cosine_transform hankel_transform, inverse_hankel_transform mellin_transform, laplace_transform """ return InverseSineTransform(F, k, x).doit(**hints) class CosineTransform(SineCosineTypeTransform): """ Class representing unevaluated cosine transforms. For usage of this class, see the :class:`IntegralTransform` docstring. For how to compute cosine transforms, see the :func:`cosine_transform` docstring. """ _name = 'Cosine' _kern = cos _a = sqrt(2)/sqrt(pi) _b = 1 def cosine_transform(f, x, k, **hints): r""" Compute the unitary, ordinary-frequency cosine transform of `f`, defined as .. math:: F(k) = \sqrt{\frac{2}{\pi}} \int_{0}^\infty f(x) \cos(2\pi x k) \mathrm{d} x. If the transform cannot be computed in closed form, this function returns an unevaluated :class:`CosineTransform` object. For a description of possible hints, refer to the docstring of :func:`sympy.integrals.transforms.IntegralTransform.doit`. Note that for this transform, by default ``noconds=True``. 
>>> from sympy import cosine_transform, exp, sqrt, cos >>> from sympy.abc import x, k, a >>> cosine_transform(exp(-a*x), x, k) sqrt(2)*a/(sqrt(pi)*(a**2 + k**2)) >>> cosine_transform(exp(-a*sqrt(x))*cos(a*sqrt(x)), x, k) a*exp(-a**2/(2*k))/(2*k**(3/2)) See Also ======== fourier_transform, inverse_fourier_transform, sine_transform, inverse_sine_transform inverse_cosine_transform hankel_transform, inverse_hankel_transform mellin_transform, laplace_transform """ return CosineTransform(f, x, k).doit(**hints) class InverseCosineTransform(SineCosineTypeTransform): """ Class representing unevaluated inverse cosine transforms. For usage of this class, see the :class:`IntegralTransform` docstring. For how to compute inverse cosine transforms, see the :func:`inverse_cosine_transform` docstring. """ _name = 'Inverse Cosine' _kern = cos _a = sqrt(2)/sqrt(pi) _b = 1 def inverse_cosine_transform(F, k, x, **hints): r""" Compute the unitary, ordinary-frequency inverse cosine transform of `F`, defined as .. math:: f(x) = \sqrt{\frac{2}{\pi}} \int_{0}^\infty F(k) \cos(2\pi x k) \mathrm{d} k. If the transform cannot be computed in closed form, this function returns an unevaluated :class:`InverseCosineTransform` object. For a description of possible hints, refer to the docstring of :func:`sympy.integrals.transforms.IntegralTransform.doit`. Note that for this transform, by default ``noconds=True``. 
>>> from sympy import inverse_cosine_transform, exp, sqrt, pi >>> from sympy.abc import x, k, a >>> inverse_cosine_transform(sqrt(2)*a/(sqrt(pi)*(a**2 + k**2)), k, x) exp(-a*x) >>> inverse_cosine_transform(1/sqrt(k), k, x) 1/sqrt(x) See Also ======== fourier_transform, inverse_fourier_transform, sine_transform, inverse_sine_transform cosine_transform hankel_transform, inverse_hankel_transform mellin_transform, laplace_transform """ return InverseCosineTransform(F, k, x).doit(**hints) ########################################################################## # Hankel Transform ########################################################################## @_noconds_(True) def _hankel_transform(f, r, k, nu, name, simplify=True): """ Compute a general Hankel transform .. math:: F_\nu(k) = \int_{0}^\infty f(r) J_\nu(k r) r \mathrm{d} r. """ from sympy import besselj, oo F = integrate(f*besselj(nu, k*r)*r, (r, 0, oo)) if not F.has(Integral): return _simplify(F, simplify), True if not F.is_Piecewise: raise IntegralTransformError(name, f, 'could not compute integral') F, cond = F.args[0] if F.has(Integral): raise IntegralTransformError(name, f, 'integral in unexpected form') return _simplify(F, simplify), cond class HankelTypeTransform(IntegralTransform): """ Base class for Hankel transforms. """ def doit(self, **hints): return self._compute_transform(self.function, self.function_variable, self.transform_variable, self.args[3], **hints) def _compute_transform(self, f, r, k, nu, **hints): return _hankel_transform(f, r, k, nu, self._name, **hints) def _as_integral(self, f, r, k, nu): from sympy import Integral, besselj, oo return Integral(f*besselj(nu, k*r)*r, (r, 0, oo)) @property def as_integral(self): return self._as_integral(self.function, self.function_variable, self.transform_variable, self.args[3]) class HankelTransform(HankelTypeTransform): """ Class representing unevaluated Hankel transforms. For usage of this class, see the :class:`IntegralTransform` docstring. 
For how to compute Hankel transforms, see the :func:`hankel_transform` docstring. """ _name = 'Hankel' def hankel_transform(f, r, k, nu, **hints): r""" Compute the Hankel transform of `f`, defined as .. math:: F_\nu(k) = \int_{0}^\infty f(r) J_\nu(k r) r \mathrm{d} r. If the transform cannot be computed in closed form, this function returns an unevaluated :class:`HankelTransform` object. For a description of possible hints, refer to the docstring of :func:`sympy.integrals.transforms.IntegralTransform.doit`. Note that for this transform, by default ``noconds=True``. >>> from sympy import hankel_transform, inverse_hankel_transform >>> from sympy import gamma, exp, sinh, cosh >>> from sympy.abc import r, k, m, nu, a >>> ht = hankel_transform(1/r**m, r, k, nu) >>> ht 2*2**(-m)*k**(m - 2)*gamma(-m/2 + nu/2 + 1)/gamma(m/2 + nu/2) >>> inverse_hankel_transform(ht, k, r, nu) r**(-m) >>> ht = hankel_transform(exp(-a*r), r, k, 0) >>> ht a/(k**3*(a**2/k**2 + 1)**(3/2)) >>> inverse_hankel_transform(ht, k, r, 0) exp(-a*r) See Also ======== fourier_transform, inverse_fourier_transform sine_transform, inverse_sine_transform cosine_transform, inverse_cosine_transform inverse_hankel_transform mellin_transform, laplace_transform """ return HankelTransform(f, r, k, nu).doit(**hints) class InverseHankelTransform(HankelTypeTransform): """ Class representing unevaluated inverse Hankel transforms. For usage of this class, see the :class:`IntegralTransform` docstring. For how to compute inverse Hankel transforms, see the :func:`inverse_hankel_transform` docstring. """ _name = 'Inverse Hankel' def inverse_hankel_transform(F, k, r, nu, **hints): r""" Compute the inverse Hankel transform of `F` defined as .. math:: f(r) = \int_{0}^\infty F_\nu(k) J_\nu(k r) k \mathrm{d} k. If the transform cannot be computed in closed form, this function returns an unevaluated :class:`InverseHankelTransform` object. 
For a description of possible hints, refer to the docstring of :func:`sympy.integrals.transforms.IntegralTransform.doit`. Note that for this transform, by default ``noconds=True``. >>> from sympy import hankel_transform, inverse_hankel_transform, gamma >>> from sympy import gamma, exp, sinh, cosh >>> from sympy.abc import r, k, m, nu, a >>> ht = hankel_transform(1/r**m, r, k, nu) >>> ht 2*2**(-m)*k**(m - 2)*gamma(-m/2 + nu/2 + 1)/gamma(m/2 + nu/2) >>> inverse_hankel_transform(ht, k, r, nu) r**(-m) >>> ht = hankel_transform(exp(-a*r), r, k, 0) >>> ht a/(k**3*(a**2/k**2 + 1)**(3/2)) >>> inverse_hankel_transform(ht, k, r, 0) exp(-a*r) See Also ======== fourier_transform, inverse_fourier_transform sine_transform, inverse_sine_transform cosine_transform, inverse_cosine_transform hankel_transform mellin_transform, laplace_transform """ return InverseHankelTransform(F, k, r, nu).doit(**hints)
codeparrot/github-code-clean
# -*- encoding: utf-8 -*- """ Factory object creation for all CLI methods """ import datetime import json import logging import os import random import time from fauxfactory import ( gen_alphanumeric, gen_choice, gen_integer, gen_ipaddr, gen_mac, gen_netmask, gen_string, ) from os import chmod from robottelo import manifests, ssh from robottelo.api.utils import enable_rhrepo_and_fetchid from robottelo.cli.activationkey import ActivationKey from robottelo.cli.architecture import Architecture from robottelo.cli.base import CLIReturnCodeError from robottelo.cli.computeresource import ComputeResource from robottelo.cli.contentview import ( ContentView, ContentViewFilter, ContentViewFilterRule, ) from robottelo.cli.discoveryrule import DiscoveryRule from robottelo.cli.docker import DockerContainer, DockerRegistry from robottelo.cli.domain import Domain from robottelo.cli.environment import Environment from robottelo.cli.filter import Filter from robottelo.cli.gpgkey import GPGKey from robottelo.cli.host import Host from robottelo.cli.hostcollection import HostCollection from robottelo.cli.hostgroup import HostGroup from robottelo.cli.job_invocation import JobInvocation from robottelo.cli.job_template import JobTemplate from robottelo.cli.ldapauthsource import LDAPAuthSource from robottelo.cli.lifecycleenvironment import LifecycleEnvironment from robottelo.cli.location import Location from robottelo.cli.medium import Medium from robottelo.cli.model import Model from robottelo.cli.operatingsys import OperatingSys from robottelo.cli.org import Org from robottelo.cli.partitiontable import PartitionTable from robottelo.cli.product import Product from robottelo.cli.proxy import CapsuleTunnelError, Proxy from robottelo.cli.realm import Realm from robottelo.cli.repository import Repository from robottelo.cli.repository_set import RepositorySet from robottelo.cli.role import Role from robottelo.cli.scapcontent import Scapcontent from robottelo.cli.subnet import Subnet from 
robottelo.cli.subscription import Subscription from robottelo.cli.syncplan import SyncPlan from robottelo.cli.scap_policy import Scappolicy from robottelo.cli.scap_tailoring_files import TailoringFiles from robottelo.cli.template import Template from robottelo.cli.user import User from robottelo.cli.usergroup import UserGroup, UserGroupExternal from robottelo.cli.smart_variable import SmartVariable from robottelo.cli.virt_who_config import VirtWhoConfig from robottelo.config import settings from robottelo.constants import ( DEFAULT_ARCHITECTURE, DEFAULT_LOC, DEFAULT_ORG, DEFAULT_PTABLE, DEFAULT_PXE_TEMPLATE, DEFAULT_SUBSCRIPTION_NAME, DEFAULT_TEMPLATE, DISTRO_RHEL7, DISTROS_MAJOR_VERSION, FAKE_1_YUM_REPO, FOREMAN_PROVIDERS, OPERATING_SYSTEMS, PRDS, REPOS, REPOSET, RHEL_6_MAJOR_VERSION, RHEL_7_MAJOR_VERSION, SYNC_INTERVAL, TEMPLATE_TYPES, ) from robottelo.datafactory import valid_cron_expressions from robottelo.decorators import bz_bug_is_open, cacheable from robottelo.helpers import ( update_dictionary, default_url_on_new_port, get_available_capsule_port ) from robottelo.ssh import download_file, upload_file from tempfile import mkstemp from time import sleep logger = logging.getLogger(__name__) ORG_KEYS = ['organization', 'organization-id', 'organization-label'] CONTENT_VIEW_KEYS = ['content-view', 'content-view-id'] LIFECYCLE_KEYS = ['lifecycle-environment', 'lifecycle-environment-id'] class CLIFactoryError(Exception): """Indicates an error occurred while creating an entity using hammer""" def create_object(cli_object, options, values): """ Creates <object> with dictionary of arguments. :param cli_object: A valid CLI object. :param dict options: The default options accepted by the cli_object create :param dict values: Custom values to override default ones. :raise robottelo.cli.factory.CLIFactoryError: Raise an exception if object cannot be created. :rtype: dict :return: A dictionary representing the newly created resource. 
""" if values: diff = set(values.keys()).difference(set(options.keys())) if diff: logger.debug( "Option(s) {0} not supported by CLI factory. Please check for " "a typo or update default options".format(diff) ) update_dictionary(options, values) try: result = cli_object.create(options) except CLIReturnCodeError as err: # If the object is not created, raise exception, stop the show. raise CLIFactoryError( u'Failed to create {0} with data:\n{1}\n{2}'.format( cli_object.__name__, json.dumps(options, indent=2, sort_keys=True), err.msg, ) ) # Sometimes we get a list with a dictionary and not # a dictionary. if type(result) is list and len(result) > 0: result = result[0] return result def _entity_with_credentials(credentials, cli_entity_cls): """Create entity class using credentials. If credentials is None will return cli_entity_cls itself :param credentials: tuple (login, password) :param cli_entity_cls: Cli Entity Class :return: Cli Entity Class """ if credentials is not None: cli_entity_cls = cli_entity_cls.with_user(*credentials) return cli_entity_cls @cacheable def make_activation_key(options=None): """ Usage:: hammer activation-key create [OPTIONS] Options:: --content-view CONTENT_VIEW_NAME Content view name to search by --content-view-id CONTENT_VIEW_ID content view numeric identifier --description DESCRIPTION description --lifecycle-environment LIFECYCLE_ENVIRONMENT_NAME Name to search by --lifecycle-environment-id LIFECYCLE_ENVIRONMENT_ID --max-hosts MAX_CONTENT_HOSTS maximum number of registered content hosts --name NAME name --organization ORGANIZATION_NAME Organization name to search by --organization-id ORGANIZATION_ID --organization-label ORGANIZATION_LABEL Organization label to search by --unlimited-hosts UNLIMITED_CONTENT_HOSTS can the activation key have unlimited content hosts """ # Organization Name, Label or ID is a required field. 
if ( not options or not options.get('organization') and not options.get('organization-label') and not options.get('organization-id')): raise CLIFactoryError('Please provide a valid Organization.') args = { u'content-view': None, u'content-view-id': None, u'description': None, u'lifecycle-environment': None, u'lifecycle-environment-id': None, u'max-hosts': None, u'name': gen_alphanumeric(), u'organization': None, u'organization-id': None, u'organization-label': None, u'unlimited-hosts': None, } return create_object(ActivationKey, args, options) @cacheable def make_architecture(options=None): """ Usage:: hammer architecture create [OPTIONS] Options:: --name NAME --operatingsystem-ids OPERATINGSYSTEM_IDS Operatingsystem ID’s Comma separated list of values. """ args = { u'name': gen_alphanumeric(), u'operatingsystem-ids': None, } return create_object(Architecture, args, options) def make_container(options=None): """Creates a docker container Usage:: hammer docker container create [OPTIONS] Options:: --attach-stderr ATTACH_STDERR One of true/false, yes/no, 1/0. --attach-stdin ATTACH_STDIN One of true/false, yes/no, 1/0. --attach-stdout ATTACH_STDOUT One of true/false, yes/no, 1/0. --capsule CAPSULE_NAME Name to search by --capsule-id CAPSULE_ID Id of the capsule --command COMMAND --compute-resource COMPUTE_RESOURCE_NAME Compute resource name --compute-resource-id COMPUTE_RESOURCE_ID --cpu-sets CPU_SETS --cpu-shares CPU_SHARES --entrypoint ENTRYPOINT --location-ids LOCATION_IDS REPLACE locations with given ids. Comma separated list of values. --locations LOCATION_NAMES Comma separated list of values. --memory MEMORY --name NAME --organization-ids ORGANIZATION_IDS REPLACE organizations with given ids. Comma separated list of values. --organizations ORGANIZATION_NAMES Comma separated list of values. --registry-id REGISTRY_ID Registry this container will have to use to get the image --repository-name REPOSITORY_NAME Name of the repository to use to create the container. 
e.g: centos --tag TAG Tag to use to create the container. e.g: latest --tty TTY One of true/false, yes/no, 1/0. """ # Compute resource ID is a required field. if (not options or ( u'compute-resource' not in options and u'compute-resource-id' not in options )): raise CLIFactoryError( 'Please provide at least compute-resource or compute-resource-id ' 'options.' ) args = { u'attach-stderr': None, u'attach-stdin': None, u'attach-stdout': None, u'capsule': None, u'capsule-id': None, u'command': 'top', u'compute-resource': None, u'compute-resource-id': None, u'cpu-sets': None, u'cpu-shares': None, u'entrypoint': None, u'location-ids': None, u'locations': None, u'memory': None, u'name': gen_string('alphanumeric'), u'organization-ids': None, u'organizations': None, u'registry-id': None, u'repository-name': 'busybox', u'tag': 'latest', u'tty': None, } return create_object(DockerContainer, args, options) @cacheable def make_content_view(options=None): """ Usage:: hammer content-view create [OPTIONS] Options:: --component-ids COMPONENT_IDS List of component content view version ids for composite views Comma separated list of values. --composite Create a composite content view --description DESCRIPTION Description for the content view --label LABEL Content view label --name NAME Name of the content view --organization ORGANIZATION_NAME Organization name to search by --organization-id ORGANIZATION_ID Organization identifier --organization-label ORGANIZATION_LABEL Organization label to search by --product PRODUCT_NAME Product name to search by --product-id PRODUCT_ID product numeric identifier --repositories REPOSITORY_NAMES Comma separated list of values. --repository-ids REPOSITORY_IDS List of repository ids Comma separated list of values. 
-h, --help print help """ return make_content_view_with_credentials(options) def make_content_view_with_credentials(options=None, credentials=None): """ Usage:: hammer content-view create [OPTIONS] Options:: --component-ids COMPONENT_IDS List of component content view version ids for composite views Comma separated list of values. --composite Create a composite content view --description DESCRIPTION Description for the content view --label LABEL Content view label --name NAME Name of the content view --organization ORGANIZATION_NAME Organization name to search by --organization-id ORGANIZATION_ID Organization identifier --organization-label ORGANIZATION_LABEL Organization label to search by --product PRODUCT_NAME Product name to search by --product-id PRODUCT_ID product numeric identifier --repositories REPOSITORY_NAMES Comma separated list of values. --repository-ids REPOSITORY_IDS List of repository ids Comma separated list of values. -h, --help print help If credentials is None default credentials present on robottelo.properties will be used. """ # Organization ID is a required field. 
if not options or not options.get('organization-id'): raise CLIFactoryError('Please provide a valid ORG ID.') args = { u'component-ids': None, u'composite': False, u'description': None, u'label': None, u'name': gen_string('alpha', 10), u'organization': None, u'organization-id': None, u'organization-label': None, u'product': None, u'product-id': None, u'repositories': None, u'repository-ids': None } cv_cls = _entity_with_credentials(credentials, ContentView) return create_object(cv_cls, args, options) @cacheable def make_content_view_filter(options=None): """ Usage:: content-view filter create [OPTIONS] Options:: --content-view CONTENT_VIEW_NAME Content view name to search by --content-view-id CONTENT_VIEW_ID content view numeric identifier --description DESCRIPTION description of the filter --inclusion INCLUSION specifies if content should be included or excluded, default: inclusion=false One of true/false, yes/no, 1/0. --name NAME name of the filter --organization ORGANIZATION_NAME Organization name to search by --organization-id ORGANIZATION_ID Organization ID to search by --organization-label ORGANIZATION_LABEL Organization label to search by --original-packages ORIGINAL_PACKAGES add all packages without errata to the included/ excluded list. (package filter only) One of true/false, yes/no, 1/0. --repositories REPOSITORY_NAMES Comma separated list of values. --repository-ids REPOSITORY_IDS list of repository ids Comma separated list of values. --type TYPE type of filter (e.g. 
rpm, package_group, erratum) -h, --help print help """ args = { u'content-view': None, u'content-view-id': None, u'description': None, u'inclusion': None, u'name': gen_string('alpha', 10), u'organization': None, u'organization-id': None, u'organization-label': None, u'original-packages': None, u'repositories': None, u'repository-ids': None, u'type': None, } return create_object(ContentViewFilter, args, options) @cacheable def make_content_view_filter_rule(options=None): """ Usage:: content-view filter rule create [OPTIONS] Options:: --content-view CONTENT_VIEW_NAME Content view name to search by --content-view-filter CONTENT_VIEW_FILTER_NAME Name to search by --content-view-filter-id CONTENT_VIEW_FILTER_ID filter identifier --content-view-id CONTENT_VIEW_ID content view numeric identifier --date-type DATE_TYPE erratum: search using the 'Issued On' or 'Updated On' column of the errata. Values are 'issued'/'updated' --end-date END_DATE erratum: end date (YYYY-MM-DD) --errata-id ERRATA_ID erratum: id --errata-ids ERRATA_IDS erratum: IDs or a select all object Comma separated list of values. --max-version MAX_VERSION package: maximum version --min-version MIN_VERSION package: minimum version --name NAME package and package group names Comma separated list of values. --names NAMES Package and package group names --start-date START_DATE erratum: start date (YYYY-MM-DD) --types TYPES erratum: types (enhancement, bugfix, security) Comma separated list of values. 
--version VERSION package: version -h, --help print help """ args = { u'content-view': None, u'content-view-filter': None, u'content-view-filter-id': None, u'content-view-id': None, u'date-type': None, u'end-date': None, u'errata-id': None, u'errata-ids': None, u'max-version': None, u'min-version': None, u'name': None, u'names': None, u'start-date': None, u'types': None, u'version': None, } return create_object(ContentViewFilterRule, args, options) @cacheable def make_discoveryrule(options=None): """ Usage:: hammer discovery_rule create [OPTIONS] Options:: --enabled ENABLED flag is used for temporary shutdown of rules One of true/false, yes/no, 1/0. --hostgroup HOSTGROUP_NAME Hostgroup name --hostgroup-id HOSTGROUP_ID --hostgroup-title HOSTGROUP_TITLE Hostgroup title --hostname HOSTNAME defines a pattern to assign human-readable hostnames to the matching hosts --hosts-limit HOSTS_LIMIT --location-ids LOCATION_IDS REPLACE locations with given ids Comma separated list of values. --locations LOCATION_NAMES Comma separated list of values. --max-count MAX_COUNT enables to limit maximum amount of provisioned hosts per rule --name NAME represents rule name shown to the users --organization-ids ORGANIZATION_IDS REPLACE organizations with given ids. Comma separated list of values. --organizations ORGANIZATION_NAMES Comma separated list of values. --priority PRIORITY puts the rules in order, low numbers go first. Must be greater then zero --search SEARCH query to match discovered hosts for the particular rule -h, --help print help """ # Organizations, Locations, search query, hostgroup are required fields. 
if not options: raise CLIFactoryError('Please provide required parameters') # Organizations fields is required if not any(options.get(key) for key in [ 'organizations', 'organization-ids' ]): raise CLIFactoryError('Please provide a valid organization field.') # Locations field is required if not any(options.get(key) for key in ['locations', 'location-ids']): raise CLIFactoryError('Please provide a valid location field.') # search query is required if not options.get('search'): raise CLIFactoryError('Please provider a valid search query') # hostgroup is required if not any(options.get(key) for key in ['hostgroup', 'hostgroup-id']): raise CLIFactoryError('Please provider a valid hostgroup') args = { u'enabled': None, u'hostgroup': None, u'hostgroup-id': None, u'hostgroup-title': None, u'hostname': None, u'hosts-limit': None, u'location-ids': None, u'locations': None, u'max-count': None, u'name': gen_alphanumeric(), u'organizations': None, u'organization-ids': None, u'priority': None, u'search': None, } return create_object(DiscoveryRule, args, options) @cacheable def make_gpg_key(options=None): """ Usage:: hammer gpg create [OPTIONS] Options:: --key GPG_KEY_FILE GPG Key file --name NAME identifier of the GPG Key --organization ORGANIZATION_NAME Organization name to search by --organization-id ORGANIZATION_ID organization identifier --organization-label ORGANIZATION_LABEL Organization label to search by -h, --help print help """ # Organization ID is a required field. 
if not options or not options.get('organization-id'): raise CLIFactoryError('Please provide a valid ORG ID.') # Create a fake gpg key file if none was provided if not options.get('key'): (_, key_filename) = mkstemp(text=True) os.chmod(key_filename, 0o700) with open(key_filename, 'w') as gpg_key_file: gpg_key_file.write(gen_alphanumeric(gen_integer(20, 50))) else: # If the key is provided get its local path and remove it from options # to not override the remote path key_filename = options.pop('key') args = { u'key': '/tmp/{0}'.format(gen_alphanumeric()), u'name': gen_alphanumeric(), u'organization': None, u'organization-id': None, u'organization-label': None, } # Upload file to server ssh.upload_file(local_file=key_filename, remote_file=args['key']) return create_object(GPGKey, args, options) @cacheable def make_location(options=None): """Location CLI factory Usage:: hammer location create [OPTIONS] Options:: --compute-resource-ids COMPUTE_RESOURCE_IDS Compute resource IDs Comma separated list of values. --compute-resources COMPUTE_RESOURCE_NAMES Compute resource names Comma separated list of values. --config-template-ids CONFIG_TEMPLATE_IDS Provisioning template IDs Comma separated list of values. --config-templates CONFIG_TEMPLATE_NAMES Provisioning template names Comma separated list of values. --description DESCRIPTION Location description --domain-ids DOMAIN_IDS Domain IDs Comma separated list of values. --domains DOMAIN_NAMES Domain names Comma separated list of values. --environment-ids ENVIRONMENT_IDS Environment IDs Comma separated list of values. --environments ENVIRONMENT_NAMES Environment names Comma separated list of values. --puppet-environment-ids ENVIRONMENT_IDS Environment IDs Comma separated list of values. --puppet-environments ENVIRONMENT_NAMES Environment names Comma separated list of values. --hostgroup-ids HOSTGROUP_IDS Host group IDs Comma separated list of values. --hostgroups HOSTGROUP_NAMES Host group names Comma separated list of values. 
--medium-ids MEDIUM_IDS Media IDs Comma separated list of values. --name NAME --realm-ids REALM_IDS Realm IDs Comma separated list of values. --realms REALM_NAMES Realm names Comma separated list of values. --smart-proxy-ids SMART_PROXY_IDS Smart proxy IDs Comma separated list of values. --smart-proxies SMART_PROXY_NAMES Smart proxy names Comma separated list of values. --subnet-ids SUBNET_IDS Subnet IDs Comma separated list of values. --subnets SUBNET_NAMES Subnet names Comma separated list of --user-ids USER_IDS User IDs Comma separated list of values. --users USER_LOGINS User names Comma separated list of values. """ args = { u'compute-resource-ids': None, u'compute-resources': None, u'config-template-ids': None, u'config-templates': None, u'description': None, u'domain-ids': None, u'domains': None, u'environment-ids': None, u'environments': None, u'puppet-environment-ids': None, u'puppet-environments': None, u'hostgroup-ids': None, u'hostgroups': None, u'medium-ids': None, u'name': gen_alphanumeric(), u'parent-id': None, u'realm-ids': None, u'realms': None, u'smart-proxy-ids': None, u'smart-proxies': None, u'subnet-ids': None, u'subnets': None, u'user-ids': None, u'users': None, } return create_object(Location, args, options) @cacheable def make_model(options=None): """ Usage:: hammer model create [OPTIONS] Options:: --hardware-model HARDWARE_MODEL --info INFO --name NAME --vendor-class VENDOR_CLASS """ args = { u'hardware-model': None, u'info': None, u'name': gen_alphanumeric(), u'vendor-class': None, } return create_object(Model, args, options) @cacheable def make_partition_table(options=None): """ Usage:: hammer partition-table create [OPTIONS] Options:: --file LAYOUT Path to a file that contains the partition layout --location-ids LOCATION_IDS REPLACE locations with given ids Comma separated list of values. --locations LOCATION_NAMES Comma separated list of values. 
--name NAME --operatingsystem-ids OPERATINGSYSTEM_IDS Array of operating system IDs to associate with the partition table Comma separated list of values. Values containing comma should be double quoted --operatingsystems OPERATINGSYSTEM_TITLES Comma separated list of values. Values containing comma should be double quoted --organization-ids ORGANIZATION_IDS REPLACE organizations with given ids. Comma separated list of values. --organizations ORGANIZATION_NAMES Comma separated list of values. --os-family OS_FAMILY """ if options is None: options = {} (_, layout) = mkstemp(text=True) os.chmod(layout, 0o700) with open(layout, 'w') as ptable: ptable.write(options.get('content', 'default ptable content')) args = { u'file': '/tmp/{0}'.format(gen_alphanumeric()), u'location-ids': None, u'locations': None, u'name': gen_alphanumeric(), u'operatingsystem-ids': None, u'operatingsystems': None, u'organization-ids': None, u'organizations': None, u'os-family': random.choice(OPERATING_SYSTEMS), } # Upload file to server ssh.upload_file(local_file=layout, remote_file=args['file']) return create_object(PartitionTable, args, options) @cacheable def make_product(options=None): return make_product_with_credentials(options) def make_product_with_credentials(options=None, credentials=None): """ Usage:: hammer product create [OPTIONS] Options:: --description DESCRIPTION Product description --gpg-key GPG_KEY_NAME Name to search by --gpg-key-id GPG_KEY_ID Identifier of the GPG key --label LABEL --name NAME --organization ORGANIZATION_NAME Organization name to search by --organization-id ORGANIZATION_ID ID of the organization --organization-label ORGANIZATION_LABEL Organization label to search by --sync-plan SYNC_PLAN_NAME Sync plan name to search by --sync-plan-id SYNC_PLAN_ID Plan numeric identifier -h, --help print help """ # Organization ID is a required field. 
if not options or not options.get('organization-id'): raise CLIFactoryError('Please provide a valid ORG ID.') args = { u'description': gen_string('alpha', 20), u'gpg-key': None, u'gpg-key-id': None, u'label': gen_string('alpha', 20), u'name': gen_string('alpha', 20), u'organization': None, u'organization-id': None, u'organization-label': None, u'sync-plan': None, u'sync-plan-id': None, } product_cls = _entity_with_credentials(credentials, Product) return create_object(product_cls, args, options) def make_product_wait(options=None, wait_for=5): """Wrapper function for make_product to make it wait before erroring out. This is a temporary workaround for BZ#1332650: Sometimes cli product create errors for no reason when there are multiple product creation requests at the sametime although the product entities are created. This workaround will attempt to wait for 5 seconds and query the product again to make sure it is actually created. If it is not found, it will fail and stop. Note: This wrapper method is created instead of patching make_product because this issue does not happen for all entities and this workaround should be removed once the root cause is identified/fixed. """ # Organization ID is a required field. if not options or not options.get('organization-id'): raise CLIFactoryError('Please provide a valid ORG ID.') options['name'] = options.get('name', gen_string('alpha')) try: product = make_product(options) except CLIFactoryError as err: if not bz_bug_is_open(1332650): raise err sleep(wait_for) try: product = Product.info({ 'name': options.get('name'), 'organization-id': options.get('organization-id'), }) except CLIReturnCodeError: raise err if not product: raise err return product @cacheable def make_proxy(options=None): """ Usage:: hammer proxy create [OPTIONS] Options:: --location-ids LOCATION_IDS REPLACE locations with given ids Comma separated list of values. --name NAME --organization-ids ORGANIZATION_IDS REPLACE organizations with given ids. 
                                      Comma separated list of values.
        -h, --help                    print help
    """
    args = {
        u'name': gen_alphanumeric(),
    }

    # When no URL is supplied, open an SSH tunnel to a free capsule port and
    # register the proxy against the tunnelled URL.  NOTE(review): the proxy
    # is created *inside* the ``with`` block, so the tunnel (and hence the
    # URL) presumably only needs to be reachable during creation -- confirm.
    if options is None or 'url' not in options:
        newport = get_available_capsule_port()
        try:
            with default_url_on_new_port(9090, newport) as url:
                args['url'] = url
                return create_object(Proxy, args, options)
        except CapsuleTunnelError as err:
            raise CLIFactoryError(
                'Failed to create ssh tunnel: {0}'.format(err))
    args['url'] = options['url']
    return create_object(Proxy, args, options)


def make_registry(options=None):
    """Creates a docker registry

    Usage::

        hammer docker registry create [OPTIONS]

    Options::

        --description DESCRIPTION
        --name NAME
        --password PASSWORD
        --url URL
        --username USERNAME
    """
    # Assigning default values for attributes
    args = {
        u'description': None,
        u'name': gen_string('alphanumeric'),
        u'password': None,
        u'url': settings.docker.external_registry_1,
        u'username': None,
    }

    return create_object(DockerRegistry, args, options)


@cacheable
def make_repository(options=None):
    # Thin cacheable wrapper: create a repository with the default
    # (admin) credentials.
    return make_repository_with_credentials(options)


def make_repository_with_credentials(options=None, credentials=None):
    """
    Usage::

        hammer repository create [OPTIONS]

    Options::

        --checksum-type CHECKSUM_TYPE   checksum of the repository, currently
                                        'sha1' & 'sha256' are supported.
        --content-type CONTENT_TYPE     type of repo (either 'yum', 'puppet',
                                        'docker' or 'ostree', defaults to
                                        'yum')
        --docker-tags-whitelist DOCKER_TAGS_WHITELIST   Comma separated list
                                        of tags to sync for Container Image
                                        repository
        --docker-upstream-name DOCKER_UPSTREAM_NAME     name of the upstream
                                        docker repository
        --download-policy DOWNLOAD_POLICY   download policy for yum repos
                                        (either 'immediate','on_demand' or
                                        'background')
        --gpg-key GPG_KEY_NAME          Name to search by
        --gpg-key-id GPG_KEY_ID         gpg key numeric identifier
        --ignorable-content IGNORABLE_CONTENT   List of content units to
                                        ignore while syncing a yum repository.
Subset of rpm, drpm, srpm, distribution, erratum --label LABEL --mirror-on-sync MIRROR_ON_SYNC true if this repository when synced has to be mirrored from the source and stale rpms removed. --name NAME --organization ORGANIZATION_NAME Organization name to search by --organization-id ORGANIZATION_ID organization ID --organization-label ORGANIZATION_LABEL Organization label to search by --ostree-upstream-sync-depth OSTREE_UPSTREAM_SYNC_DEPTH if a custom sync policy is chosen for ostree repositories then a 'depth' value must be provided. --ostree-upstream-sync-policy OSTREE_UPSTREAM_SYNC_POLICY policies for syncing upstream ostree repositories. Possible value(s): 'latest', 'all', 'custom' --product PRODUCT_NAME Product name to search by --product-id PRODUCT_ID product numeric identifier --publish-via-http ENABLE Publish Via HTTP One of true/false, yes/no, 1/0. --upstream-password UPSTREAM_PASSWORD Password of the upstream repository user used for authentication --upstream-username UPSTREAM_USERNAME Username of the upstream repository user used for authentication --url URL repository source url -h, --help print help """ # Product ID is a required field. 
    # Product ID is mandatory -- every repository must belong to a product.
    if not options or not options.get('product-id'):
        raise CLIFactoryError('Please provide a valid Product ID.')

    # Assigning default values for attributes; defaults to a yum repository
    # published via HTTP and pointed at a fake yum repo URL.
    args = {
        u'checksum-type': None,
        u'content-type': u'yum',
        u'docker-tags-whitelist': None,
        u'docker-upstream-name': None,
        u'download-policy': None,
        u'gpg-key': None,
        u'gpg-key-id': None,
        u'ignorable-content': None,
        u'label': None,
        u'mirror-on-sync': None,
        u'name': gen_string('alpha', 15),
        u'organization': None,
        u'organization-id': None,
        u'organization-label': None,
        u'product': None,
        u'product-id': None,
        u'publish-via-http': u'true',
        u'url': FAKE_1_YUM_REPO,
    }
    repo_cls = _entity_with_credentials(credentials, Repository)
    return create_object(repo_cls, args, options)


@cacheable
def make_role(options=None):
    """Usage::

        hammer role create [OPTIONS]

    Options::

        --name NAME
    """
    # Assigning default values for attributes
    args = {u'name': gen_alphanumeric(6)}

    return create_object(Role, args, options)


@cacheable
def make_filter(options=None):
    """
    Usage::

        hammer filter create [OPTIONS]

    Options::

        --location-ids LOCATION_IDS         Comma separated list of values.
        --locations LOCATION_NAMES          Comma separated list of values.
        --organization-ids ORGANIZATION_IDS Comma separated list of values.
        --organizations ORGANIZATION_NAMES  Comma separated list of values.
        --override OVERRIDE                 One of true/false, yes/no, 1/0.
        --permission-ids PERMISSION_IDS     Comma separated list of values.
        --permissions PERMISSION_NAMES      Comma separated list of values.
        --role ROLE_NAME                    User role name
        --role-id ROLE_ID
        --search SEARCH
        -h, --help                          print help
    """
    args = {
        u'location-ids': None,
        u'locations': None,
        u'organization-ids': None,
        u'organizations': None,
        u'override': None,
        u'permission-ids': None,
        u'permissions': None,
        u'role': None,
        u'role-id': None,
        u'search': None,
    }

    # Role and permissions are required fields.
    if not options:
        raise CLIFactoryError('Please provide required parameters')

    # Do we have at least one role field?
if not any(options.get(key) for key in ['role', 'role-id']): raise CLIFactoryError('Please provide a valid role field.') # Do we have at least one permissions field? if not any(options.get(key) for key in ['permissions', 'permission-ids']): raise CLIFactoryError('Please provide a valid permissions field.') return create_object(Filter, args, options) @cacheable def make_scap_policy(options=None): """ Usage:: policy create [OPTIONS] Options:: --cron-line CRON_LINE Policy schedule cron line --day-of-month DAY_OF_MONTH Policy schedule day of month (only if period == “monthly”) --deploy-by DEPLOY_BY How the policy should be deployed Possible value(s): 'puppet', 'ansible', 'manual' --description DESCRIPTION Policy description --hostgroup-ids HOSTGROUP_IDS Apply policy to host groups Comma separated Values list of values. containing comma or should be quoted escaped with backslash --hostgroups HOSTGROUP_NAMES Comma separated list of values. Values containing comma should be quoted or escaped with backslash --location-ids LOCATION_IDS REPLACE locations with given ids Comma separated list of values. containing comma should be quoted escaped with backslash --locations LOCATION_NAMES Comma separated list of values. containing comma should be quoted escaped with backslash --name NAME Policy name --organization-ids ORGANIZATION_IDS REPLACE organizations with given ids. Comma separated list of values. containing comma should be quoted escaped with backslash --organizations ORGANIZATION_NAMES Comma separated list of values. 
containing comma should be quoted escaped with backslash --period PERIOD Policy schedule period (weekly, monthly, custom) --scap-content SCAP_CONTENT_TITLE SCAP content title --scap-content-id SCAP_CONTENT_ID --scap-content-profile-id SCAP_CONTENT_PROFILE_ID Policy SCAP content profile ID --tailoring-file TAILORING_FILE_NAME Tailoring file name --tailoring-file-id TAILORING_FILE_ID --tailoring-file-profile-id TAILORING_FILE_PROFILE_ID Tailoring file profile ID --weekday WEEKDAY Policy schedule weekday (only if period == “weekly”) -h, --help print help """ # Assigning default values for attributes # SCAP ID and SCAP profile ID is a required field. if not options and not options.get('scap-content-id') and not options.get( 'scap-content-profile-id') and not options.get('period') and not options.get( 'deploy-by'): raise CLIFactoryError('Please provide a valid SCAP ID or' ' SCAP Profile ID or Period or Deploy by option') args = { u'description': None, u'scap-content-id': None, u'scap-content-profile-id': None, u'deploy-by': None, u'period': None, u'weekday': None, u'day-of-month': None, u'cron-line': None, u'hostgroup-ids': None, u'hostgroups': None, u'locations': None, u'organizations': None, u'tailoring-file': None, u'tailoring-file-id': None, u'tailoring-file-profile-id': None, u'location-ids': None, u'name': gen_alphanumeric().lower(), u'organization-ids': None, } return create_object(Scappolicy, args, options) @cacheable def make_subnet(options=None): """ Usage:: hammer subnet create [OPTIONS] Options:: --boot-mode BOOT_MODE Default boot mode for interfaces assigned to this subnet, valid values are "Static", "DHCP" --dhcp-id DHCP_ID DHCP Proxy to use within this subnet --dns-id DNS_ID DNS Proxy to use within this subnet --dns-primary DNS_PRIMARY Primary DNS for this subnet --dns-secondary DNS_SECONDARY Secondary DNS for this subnet --domain-ids DOMAIN_IDS Numerical ID or domain name --domains DOMAIN_NAMES Comma separated list of values. 
--from FROM Starting IP Address for IP auto suggestion --gateway GATEWAY Primary DNS for this subnet --ipam IPAM IP Address auto suggestion mode for this subnet, valid values are 'DHCP', 'Internal DB', 'None' --location-ids LOCATION_IDS --locations LOCATION_NAMES Comma separated list of values. --mask MASK Netmask for this subnet --name NAME Subnet name --network NETWORK Subnet network --organization-ids ORGANIZATION_IDS organization ID --organizations ORGANIZATION_NAMES Comma separated list of values. --tftp-id TFTP_ID TFTP Proxy to use within this subnet --to TO Ending IP Address for IP auto suggestion --vlanid VLANID VLAN ID for this subnet -h, --help print help """ args = { u'boot-mode': None, u'dhcp-id': None, u'dns-id': None, u'dns-primary': None, u'dns-secondary': None, u'domain-ids': None, u'domains': None, u'from': None, u'gateway': None, u'ipam': None, u'location-ids': None, u'locations': None, u'mask': gen_netmask(), u'name': gen_alphanumeric(8), u'network': gen_ipaddr(ip3=True), u'organization-ids': None, u'organizations': None, u'tftp-id': None, u'to': None, u'vlanid': None, } return create_object(Subnet, args, options) @cacheable def make_sync_plan(options=None): """ Usage:: hammer sync-plan create [OPTIONS] Options:: --description DESCRIPTION sync plan description --enabled ENABLED enables or disables synchronization. One of true/false, yes/no, 1/0. --interval INTERVAL how often synchronization should run. One of 'none', 'hourly', 'daily', 'weekly' 'custom cron'. 
                                      Default: ""none""
        --name NAME                   sync plan name
        --organization ORGANIZATION_NAME          Organization name to
                                                  search by
        --organization-id ORGANIZATION_ID         organization ID
        --organization-label ORGANIZATION_LABEL   Organization label to
                                                  search by
        --sync-date SYNC_DATE         start date and time of the
                                      synchronization defaults to now
                                      Date and time in YYYY-MM-DD HH:MM:SS
                                      or ISO 8601 format
                                      Default: "2014-10-07 08:50:35"
        --cron-expression CRON EXPRESSION   Set this when interval is
                                            custom cron
        -h, --help                    print help
    """
    # Organization ID is a required field.
    if not options or not options.get('organization-id'):
        raise CLIFactoryError('Please provide a valid ORG ID.')

    # Assigning default values for attributes: random interval, sync date of
    # "now", and a generated name/description.
    args = {
        u'description': gen_string('alpha', 20),
        u'enabled': 'true',
        u'interval': random.choice(list(SYNC_INTERVAL.values())),
        u'name': gen_string('alpha', 20),
        u'organization': None,
        u'organization-id': None,
        u'organization-label': None,
        u'sync-date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
        u'cron-expression': None,
    }
    # If the effective interval (caller-supplied, or the random default) is
    # the custom-cron one and no expression was given, generate a valid cron
    # expression so the server does not reject the plan.
    if (options.get('interval', args['interval']) == SYNC_INTERVAL['custom']
            and not options.get('cron-expression')):
        args['cron-expression'] = gen_choice(valid_cron_expressions())
    return create_object(SyncPlan, args, options)


@cacheable
def make_host(options=None):
    """
    Usage::

        hammer host create [OPTIONS]

    Options::

        --architecture ARCHITECTURE_NAME        Architecture name
        --architecture-id ARCHITECTURE_ID
        --ask-root-password ASK_ROOT_PW         One of true/false, yes/no,
                                                1/0.
        --autoheal AUTOHEAL           Sets whether the Host will autoheal
                                      subscriptions upon checkin
                                      One of true/false, yes/no, 1/0.
        --build BUILD                 One of true/false, yes/no, 1/0.
                                      Default: "true"
        --comment COMMENT             Additional information about this host
        --compute-attributes COMPUTE_ATTRS      Compute resource attributes
                                                Comma-separated list of
                                                key=value.
--compute-profile COMPUTE_PROFILE_NAME Name to search by --compute-profile-id COMPUTE_PROFILE_ID --compute-resource COMPUTE_RESOURCE_NAME Compute resource name --compute-resource-id COMPUTE_RESOURCE_ID --config-group-ids CONFIG_GROUP_IDS IDs of associated config groups. Comma separated list of values --config-groups CONFIG_GROUP_NAMES Comma separated list of values. --content-source-id CONTENT_SOURCE_ID --content-view CONTENT_VIEW_NAME Name to search by --content-view-id CONTENT_VIEW_ID content view numeric identifier --domain DOMAIN_NAME Domain name --domain-id DOMAIN_ID Numerical ID or domain name --enabled ENABLED One of true/false, yes/no, 1/0. Default: "true" --environment ENVIRONMENT_NAME Environment name --environment-id ENVIRONMENT_ID --hostgroup HOSTGROUP_NAME Hostgroup name --hostgroup-id HOSTGROUP_ID --hostgroup-title HOSTGROUP_TITLE Hostgroup title --hypervisor-guest-uuids HYPERVISOR_GUEST_UUIDS List of hypervisor guest uuids Comma separated list of values. --image IMAGE_NAME Name to search by --image-id IMAGE_ID --interface INTERFACE Interface parameters. Comma-separated list of key=value. Can be specified multiple times. --ip IP not required if using a subnet with DHCP Capsule --kickstart-repository-id KICKSTART_REPOSITORY_ID Repository Id associated with the kickstart repo used for provisioning --lifecycle-environment LIFECYCLE_ENVIRONMENT_NAME Name to search by --lifecycle-environment-id LIFECYCLE_ENVIRONMENT_ID ID of the environment --location LOCATION_NAME Location name --location-id LOCATION_ID --mac MAC required for managed host that is bare metal, not required if it's a virtual machine --managed MANAGED One of true/false, yes/no, 1/0. 
Default: "true" --medium MEDIUM_NAME Medium name --medium-id MEDIUM_ID --model MODEL_NAME Model name --model-id MODEL_ID --name NAME --operatingsystem OPERATINGSYSTEM_TITLE Operating system title --operatingsystem-id OPERATINGSYSTEM_ID --organization ORGANIZATION_NAME Organization name --organization-id ORGANIZATION_ID organization ID --overwrite OVERWRITE One of true/false, yes/no, 1/0. Default: "true" --owner OWNER_LOGIN Login of the owner --owner-id OWNER_ID ID of the owner --owner-type OWNER_TYPE Host's owner type Possible value(s): 'User', 'Usergroup' --parameters PARAMS Host parameters. Comma-separated list of key=value. --partition-table PARTITION_TABLE_NAME Partition table name --partition-table-id PARTITION_TABLE_ID --progress-report-id PROGRESS_REPORT_ID UUID to track orchestration tasks status, GET /api/orchestration/:UUID /tasks --provision-method METHOD Possible value(s): 'build', 'image' --puppet-ca-proxy PUPPET_CA_PROXY_NAME --puppet-ca-proxy-id PUPPET_CA_PROXY_ID --puppet-class-ids PUPPET_CLASS_IDS Comma separated list of values. --puppet-classes PUPPET_CLASS_NAMES Comma separated list of values. --puppet-proxy PUPPET_PROXY_NAME --puppet-proxy-id PUPPET_PROXY_ID --pxe-loader PXE_LOADER DHCP filename option (Grub2/PXELinux by default) Possible value(s): 'None', 'PXELinux BIOS', 'PXELinux UEFI', 'Grub UEFI', 'Grub UEFI SecureBoot', 'Grub2 UEFI', 'Grub2 UEFI SecureBoot' --realm REALM_NAME Name to search by --realm-id REALM_ID Numerical ID or realm name --release-version RELEASE_VERSION Release version for this Host to use (7Server, 7.1, etc) --root-password ROOT_PW required if host is managed and value is not inherited from host group or default password in settings --service-level SERVICE_LEVEL Service level to be used for autoheal. --subnet SUBNET_NAME Subnet name --subnet-id SUBNET_ID --volume VOLUME Volume parameters Comma-separated list of key=value. Can be specified multiple times. 
Available keys for --interface:: mac ip type Possible values: interface, bmc, bond, bridge name subnet_id domain_id identifier managed true/false primary true/false, each managed hosts needs to have one primary interface. provision true/false virtual true/false """ args = { u'architecture': None, u'architecture-id': None, u'ask-root-password': None, u'autoheal': None, u'build': None, u'comment': None, u'compute-attributes': None, u'compute-profile': None, u'compute-profile-id': None, u'compute-resource': None, u'compute-resource-id': None, u'content-source-id': None, u'content-view': None, u'content-view-id': None, u'domain': None, u'domain-id': None, u'enabled': None, u'environment': None, u'environment-id': None, u'hostgroup': None, u'hostgroup-id': None, u'hostgroup-title': None, u'hypervisor-guest-uuids': None, u'image': None, u'image-id': None, u'interface': None, u'ip': gen_ipaddr(), u'kickstart-repository-id': None, u'lifecycle-environment': None, u'lifecycle-environment-id': None, u'location': None, u'location-id': None, u'mac': gen_mac(multicast=False), u'managed': None, u'medium': None, u'medium-id': None, u'model': None, u'model-id': None, u'name': gen_string('alpha', 10), u'operatingsystem': None, u'operatingsystem-id': None, u'openscap-proxy-id': None, u'organization': None, u'organization-id': None, u'overwrite': None, u'owner': None, u'owner-id': None, u'owner-type': None, u'parameters': None, u'partition-table': None, u'partition-table-id': None, u'progress-report-id': None, u'provision-method': None, u'puppet-ca-proxy': None, u'puppet-ca-proxy-id': None, u'puppet-class-ids': None, u'puppet-classes': None, u'puppet-proxy': None, u'puppet-proxy-id': None, u'pxe-loader': None, u'realm': None, u'realm-id': None, u'root-password': gen_string('alpha', 8), u'service-level': None, u'subnet': None, u'subnet-id': None, u'volume': None, } return create_object(Host, args, options) @cacheable def make_fake_host(options=None): """Wrapper function for make_host 
to pass all required options for creation of a fake host """ if options is None: options = {} # Try to use default Satellite entities, otherwise create them if they were # not passed or defined previously if not options.get('organization') and not options.get('organization-id'): try: options['organization-id'] = Org.info({'name': DEFAULT_ORG})['id'] except CLIReturnCodeError: options['organization-id'] = make_org()['id'] if not options.get('location') and not options.get('location-id'): try: options['location-id'] = Location.info({'name': DEFAULT_LOC})['id'] except CLIReturnCodeError: options['location-id'] = make_location()['id'] if not options.get('domain') and not options.get('domain-id'): options['domain-id'] = make_domain({ 'location-ids': options.get('location-id'), 'locations': options.get('location'), 'organization-ids': options.get('organization-id'), 'organizations': options.get('organization'), })['id'] if not options.get('architecture') and not options.get('architecture-id'): try: options['architecture-id'] = Architecture.info({ 'name': DEFAULT_ARCHITECTURE})['id'] except CLIReturnCodeError: options['architecture-id'] = make_architecture()['id'] if (not options.get('operatingsystem') and not options.get('operatingsystem-id')): try: options['operatingsystem-id'] = OperatingSys.list({ 'search': 'name="RedHat" AND major="{0}" OR major="{1}"' .format( RHEL_6_MAJOR_VERSION, RHEL_7_MAJOR_VERSION ) })[0]['id'] except IndexError: options['operatingsystem-id'] = make_os({ 'architecture-ids': options.get('architecture-id'), 'architectures': options.get('architecture'), 'partition-table-ids': options.get('partition-table-id'), 'partition-tables': options.get('partition-table'), })['id'] if (not options.get('partition-table') and not options.get('partition-table-id')): try: options['partition-table-id'] = PartitionTable.list({ 'operatingsystem': options.get('operatingsystem'), 'operatingsystem-id': options.get('operatingsystem-id'), })[0]['id'] except IndexError: 
options['partition-table-id'] = make_partition_table({ 'location-ids': options.get('location-id'), 'locations': options.get('location'), 'operatingsystem-ids': options.get('operatingsystem-id'), 'organization-ids': options.get('organization-id'), 'organizations': options.get('organization'), })['id'] # Finally, create a new medium (if none was passed) if not options.get('medium') and not options.get('medium-id'): options['medium-id'] = make_medium({ 'location-ids': options.get('location-id'), 'locations': options.get('location'), 'operatingsystems': options.get('operatingsystem'), 'operatingsystem-ids': options.get('operatingsystem-id'), 'organization-ids': options.get('organization-id'), 'organizations': options.get('organization'), })['id'] return make_host(options) @cacheable def make_host_collection(options=None): """ Usage:: host-collection create [OPTIONS] Options:: --description DESCRIPTION --host-collection-ids HOST_COLLECTION_IDS Array of content host ids to replace the content hosts in host collection Comma separated list of values --hosts HOST_NAMES Comma separated list of values --max-hosts MAX_CONTENT_HOSTS Maximum number of content hosts in the host collection --name NAME Host Collection name --organization ORGANIZATION_NAME --organization-id ORGANIZATION_ID Organization identifier --organization-label ORGANIZATION_LABEL --unlimited-hosts UNLIMITED_CONTENT_HOSTS Whether or not the host collection may have unlimited content hosts One of true/false, yes/no, 1/0. 
-h, --help print help """ # Assigning default values for attributes args = { u'description': None, u'host-collection-ids': None, u'hosts': None, u'max-hosts': None, u'name': gen_string('alpha', 15), u'organization': None, u'organization-id': None, u'organization-label': None, u'unlimited-hosts': None, } return create_object(HostCollection, args, options) @cacheable def make_job_invocation(options=None): """ Usage:: hammer job-invocation create Options:: --async Do not wait for the task --bookmark BOOKMARK_NAME Name to search by --bookmark-id BOOKMARK_ID --concurrency-level CONCURRENCY_LEVEL Run at most N tasks at a time --cron-line CRONLINE Create a recurring execution --description-format DESCRIPTION_FORMAT Override the description format from the template for this invocation only --dynamic Dynamic search queries are evaluated at run time --effective-user EFFECTIVE_USER What user should be used to run the script (using sudo-like mechanisms). --end-time DATETIME Perform no more executions after this time, used with --cron-line (YYYY-MM-DD HH:MM:SS or ISO 8601 format) --input-files INPUT FILES Read input values from files Comma-separated list of key=file, where file is a path to a text file --inputs INPUTS Inputs from command line Comma-separated list of key=value. 
--job-template JOB_TEMPLATE_NAME Name to search by --job-template-id JOB_TEMPLATE_ID --max-iteration MAX_ITERATION Repeat a maximum of N times --search-query SEARCH_QUERY --start-at DATETIME Schedule the execution for a later time in YYYY-MM-DD HH:MM:SS or ISO 8601 --start-before DATETIME Execution should be cancelled if it cannot be started before specified datetime --time-span TIME_SPAN Distribute tasks over N seconds """ args = { u'async': None, u'bookmark': None, u'bookmark-id': None, u'concurrency-level': None, u'cron-line': None, u'description-format': None, u'dynamic': None, u'effective-user': None, u'end-time': None, u'input-files': None, u'inputs': None, u'job-template': None, u'job-template-id': None, u'max-iteration': None, u'search-query': None, u'start-at': None, u'start-before': None, u'time-span': None, } return create_object(JobInvocation, args, options) @cacheable def make_job_template(options=None): """ Usage:: hammer job-template create Options:: --audit-comment AUDIT_COMMENT --current-user CURRENT_USER Whether the current user login should be used as the effective user. --description-format DESCRIPTION_FORMAT This template is used to generate the description. --file TEMPLATE Path to a file that contains the template. --job-category JOB_CATEGORY Job category. --location-ids LOCATION_IDS Comma separated list of values. --locations LOCATION_NAMES Comma separated list of values. --locked LOCKED Whether or not the template is locked for editing. --name NAME Template name --organization-ids ORGANIZATION_IDS Comma separated list of values. --organizations ORGANIZATION_NAMES Comma separated list of values. --overridable OVERRIDABLE Whether it should be allowed to override the effective user from the invocation form. --provider-type PROVIDER_TYPE Possible value(s): 'SSH' --snippet SNIPPET One of true/false, yes/no, 1/0. --value VALUE What user should be used to run the script (using sudo-like mechanisms). 
""" args = { u'audit-comment': None, u'current-user': None, u'description-format': None, u'file': None, u'job-category': u'Miscellaneous', u'location-ids': None, u'locations': None, u'name': None, u'organization-ids': None, u'organizations': None, u'overridable': None, u'provider-type': u'SSH', u'snippet': None, u'value': None, } return create_object(JobTemplate, args, options) @cacheable def make_user(options=None): """ Usage:: hammer user create [OPTIONS] Options:: --admin ADMIN Is an admin account? --auth-source-id AUTH_SOURCE_ID --default-location-id DEFAULT_LOCATION_ID --default-organization-id DEFAULT_ORGANIZATION_ID --description DESCRIPTION --firstname FIRSTNAME --lastname LASTNAME --location-ids LOCATION_IDS REPLACE locations with given ids Comma separated list of values. --login LOGIN --mail MAIL --organization-ids ORGANIZATION_IDS REPLACE organizations with given ids. Comma separated list of values. --password PASSWORD -h, --help print help """ login = gen_alphanumeric(6) # Assigning default values for attributes args = { u'admin': None, u'auth-source-id': 1, u'default-location-id': None, u'default-organization-id': None, u'description': None, u'firstname': gen_alphanumeric(), u'lastname': gen_alphanumeric(), u'location-ids': None, u'login': login, u'mail': '{0}@example.com'.format(login), u'organization-ids': None, u'password': gen_alphanumeric(), } logger.debug( 'User "{0}" password not provided {1} was generated' .format(args['login'], args['password']) ) return create_object(User, args, options) @cacheable def make_usergroup(options=None): """ Usage: hammer user-group create [OPTIONS] Options: --name NAME --role-ids ROLE_IDS Comma separated list --roles ROLE_NAMES Comma separated list --user-group-ids, --usergroup-ids USER_GROUP_IDS Comma separated list --user-groups, --usergroups USER_GROUP_NAMES Comma separated list --user-ids USER_IDS Comma separated list --users USER_LOGINS Comma separated list """ # Assigning default values for attributes args = 
{ u'name': gen_alphanumeric(), u'role-ids': None, u'roles': None, u'user-group-ids': None, u'user-groups': None, u'user-ids': None, u'users': None, } return create_object(UserGroup, args, options) @cacheable def make_usergroup_external(options=None): """ Usage:: hammer user-group external create [OPTIONS] Options:: --auth-source-id AUTH_SOURCE_ID ID of linked auth source --name NAME External user group name --user-group, --usergroup USER_GROUP_NAME Name to search by --user-group-id, --usergroup-id USER_GROUP_ID """ # UserGroup Name or ID is a required field. if ( not options or not options.get('user-group') and not options.get('user-group-id') ): raise CLIFactoryError('Please provide a valid UserGroup.') # Assigning default values for attributes args = { u'auth-source-id': 1, u'name': gen_alphanumeric(8), u'user-group': None, u'user-group-id': None, } return create_object(UserGroupExternal, args, options) @cacheable def make_ldap_auth_source(options=None): """ Usage:: hammer auth-source ldap create [OPTIONS] Options:: --account ACCOUNT --account-password ACCOUNT_PASSWORD required if onthefly_register is true --attr-firstname ATTR_FIRSTNAME required if onthefly_register is true --attr-lastname ATTR_LASTNAME required if onthefly_register is true --attr-login ATTR_LOGIN required if onthefly_register is true --attr-mail ATTR_MAIL required if onthefly_register is true --attr-photo ATTR_PHOTO --base-dn BASE_DN --groups-base GROUPS_BASE groups base DN --host HOST --ldap-filter LDAP_FILTER LDAP filter --location-ids LOCATION_IDS REPLACE locations with given ids Comma separated list of values. Values containing comma should be double quoted --locations LOCATION_NAMES Comma separated list of values. Values containing comma should be double quoted --name NAME --onthefly-register ONTHEFLY_REGISTER One of true/false, yes/no, 1/0. --organization-ids ORGANIZATION_IDS REPLACE organizations with given ids. Comma separated list of values. 
Values containing comma should be double quoted --organizations ORGANIZATION_NAMES Comma separated list of values. Values containing comma should be double quoted --port PORT defaults to 389 --server-type SERVER_TYPE type of the LDAP server Possible value(s): 'free_ipa', 'active_directory', 'posix' --tls TLS One of true/false, yes/no, 1/0. --usergroup-sync USERGROUP_SYNC sync external user groups on login One of true/false, yes/no, 1/0. -h, --help print help """ # Assigning default values for attributes args = { u'account': None, u'account-password': None, u'attr-firstname': None, u'attr-lastname': None, u'attr-login': None, u'attr-mail': None, u'attr-photo': None, u'base-dn': None, u'groups-base': None, u'host': None, u'ldap-filter': None, u'location-ids': None, u'locations': None, u'name': gen_alphanumeric(), u'onthefly-register': None, u'organization-ids': None, u'organizations': None, u'port': None, u'server-type': None, u'tls': None, u'usergroup-sync': None, } return create_object(LDAPAuthSource, args, options) @cacheable def make_compute_resource(options=None): """ Usage:: hammer compute-resource create [OPTIONS] Options:: --caching-enabled CACHING_ENABLED Enable caching, for VMware only One of true/false, yes/no, 1/0. --datacenter DATACENTER For RHEV, VMware Datacenter --description DESCRIPTION --display-type DISPLAY_TYPE For Libvirt only Possible value(s): 'VNC', 'SPICE' --domain DOMAIN For RHEL OpenStack Platform (v3) only --location LOCATION_NAME Location name --location-id LOCATION_ID --location-ids LOCATION_IDS REPLACE locations with given ids Comma separated list of values. Values containing comma should be quoted or escaped with backslash. JSON is acceptable and preferred way for complex parameters --location-title LOCATION_TITLE Location title --location-titles LOCATION_TITLES Comma separated list of values. Values containing comma should be quoted or escaped with backslash. 
JSON is acceptable and preferred way for complex parameters --locations LOCATION_NAMES Comma separated list of values. Values containing comma should be quoted or escaped with backslash. JSON is acceptable and preferred way for complex parameters --name NAME --organization ORGANIZATION_NAME Organization name --organization-id ORGANIZATION_ID Organization ID --organization-ids ORGANIZATION_IDS REPLACE organizations with given ids. Comma separated list of values. Values containing comma should be quoted or escaped with backslash. JSON is acceptable and preferred way for complex parameters --organization-title ORGANIZATION_TITLE Organization title --organization-titles ORGANIZATION_TITLES Comma separated list of values. Values containing comma should be quoted or escaped with backslash. JSON is acceptable and preferred way for complex parameters --organizations ORGANIZATION_NAMES Comma separated list of values. Values containing comma should be quoted or escaped with backslash. JSON is acceptable and preferred way for complex parameters --ovirt-quota OVIRT_QUOTA For RHEV only, ID of quota to use --password PASSWORD Password for RHEV, EC2, VMware, RHEL OpenStack Platform. Secret key for EC2 --project-domain-id PROJECT_DOMAIN_ID For RHEL OpenStack Platform (v3) only --project-domain-name PROJECT_DOMAIN_NAME For RHEL OpenStack Platform (v3) only --provider PROVIDER Providers include Libvirt, Ovirt, EC2, Vmware, Openstack, Rackspace, GCE --public-key PUBLIC_KEY For RHEV only --public-key-path PUBLIC_KEY_PATH Path to a file that contains oVirt public key (For oVirt only) --region REGION For EC2 only, use 'us-gov-west-1' for GovCloud region --server SERVER For VMware --set-console-password SET_CONSOLE_PASSWORD For Libvirt and VMware only One of true/false, yes/no, 1/0. --tenant TENANT For RHEL OpenStack Platform only --url URL URL for Libvirt, RHEV, RHEL OpenStack Platform and Rackspace --use-v4 USE_V4 For RHEV only One of true/false, yes/no, 1/0. 
--user USER Username for RHEV, EC2, VMware, RHEL OpenStack Platform. Access Key for EC2. --uuid UUID Deprecated, please use datacenter -h, --help Print help """ args = { u'caching-enabled': None, u'datacenter': None, u'description': None, u'display-type': None, u'domain': None, u'location': None, u'location-id': None, u'location-ids': None, u'location-title': None, u'location-titles': None, u'locations': None, u'name': gen_alphanumeric(8), u'organization': None, u'organization-id': None, u'organization-ids': None, u'organization-title': None, u'organization-titles': None, u'organizations': None, u'ovirt-quota': None, u'password': None, u'project-domain-id': None, u'project-domain-name': None, u'provider': None, u'public-key': None, u'public-key-path': None, u'region': None, u'server': None, u'set-console-password': None, u'tenant': None, u'url': None, u'use-v4': None, u'user': None, u'uuid': None, } if options is None: options = {} if options.get('provider') is None: options['provider'] = FOREMAN_PROVIDERS['libvirt'] if options.get('url') is None: options['url'] = 'qemu+tcp://localhost:16509/system' return create_object(ComputeResource, args, options) @cacheable def make_org(options=None): return make_org_with_credentials(options) def make_org_with_credentials(options=None, credentials=None): """ Usage:: hammer organization create [OPTIONS] Options:: --compute-resource-ids COMPUTE_RESOURCE_IDS Compute resource IDs Comma separated list of values. --compute-resources COMPUTE_RESOURCE_NAMES Compute resource Names Comma separated list of values. --config-template-ids CONFIG_TEMPLATE_IDS Provisioning template IDs Comma separated list of values. --config-templates CONFIG_TEMPLATE_NAMES Provisioning template Names Comma separated list of values. --description DESCRIPTION description --domain-ids DOMAIN_IDS Domain IDs Comma separated list of values. --environment-ids ENVIRONMENT_IDS Environment IDs Comma separated list of values. 
--environments ENVIRONMENT_NAMES Environment Names Comma separated list of values. --hostgroup-ids HOSTGROUP_IDS Host group IDs Comma separated list of values. --hostgroups HOSTGROUP_NAMES Host group Names Comma separated list of values. --label LABEL unique label --media MEDIUM_NAMES Media Names Comma separated list of values. --media-ids MEDIA_IDS Media IDs Comma separated list of values. --name NAME name --realms REALM_NAMES Realm Names Comma separated list of values. --realm-ids REALM_IDS Realm IDs Comma separated list of values. --smart-proxies SMART_PROXY_NAMES Smart proxy Names Comma separated list of values. --smart-proxy-ids SMART_PROXY_IDS Smart proxy IDs Comma separated list of values. --subnet-ids SUBNET_IDS Subnet IDs Comma separated list of values. --subnets SUBNET_NAMES Subnet Names Comma separated list of values. --user-ids USER_IDS User IDs Comma separated list of values. --users USER_NAMES User Names Comma separated list of values. -h, --help print help """ # Assigning default values for attributes args = { u'compute-resource-ids': None, u'compute-resources': None, u'config-template-ids': None, u'config-templates': None, u'description': None, u'domain-ids': None, u'environment-ids': None, u'environments': None, u'hostgroup-ids': None, u'hostgroups': None, u'label': None, u'media-ids': None, u'media': None, u'name': gen_alphanumeric(6), u'realm-ids': None, u'realms': None, u'smart-proxy-ids': None, u'smart-proxies': None, u'subnet-ids': None, u'subnets': None, u'user-ids': None, u'users': None, } org_cls = _entity_with_credentials(credentials, Org) return create_object(org_cls, args, options) @cacheable def make_realm(options=None): """ Usage:: hammer realm create [OPTIONS] Options:: --location-ids LOCATION_IDS REPLACE locations with given ids Comma separated list of values. Values containing comma should be double quoted --locations LOCATION_NAMES Comma separated list of values. 
Values containing comma should be double quoted --name NAME The realm name, e.g. EXAMPLE.COM --organization-ids ORGANIZATION_IDS REPLACE organizations with given ids. Comma separated list of values. Values containing comma should be double quoted --organizations ORGANIZATION_NAMES Comma separated list of values. Values containing comma should be double quoted --realm-proxy-id REALM_PROXY_ID Capsule ID to use within this realm --realm-type REALM_TYPE Realm type, e.g. Red Hat Identity Management or Active Directory -h, --help print help """ # Assigning default values for attributes args = { u'location-ids': None, u'locations': None, u'name': gen_alphanumeric(6), u'organization-ids': None, u'organizations': None, u'realm-proxy-id': None, u'realm-type': None, } return create_object(Realm, args, options) @cacheable def make_os(options=None): """ Usage:: hammer os create [OPTIONS] Options:: --architecture-ids ARCHITECTURE_IDS IDs of associated architectures. Comma separated list of values. --architectures ARCHITECTURE_NAMES Comma separated list of values. --config-template-ids CONFIG_TEMPLATE_IDS IDs of associated provisioning templates. Comma separated list of values. --config-templates CONFIG_TEMPLATE_NAMES Comma separated list of values. --description DESCRIPTION --family FAMILY --major MAJOR --media MEDIUM_NAMES Comma separated list of values. --medium-ids MEDIUM_IDS IDs of associated media. Comma separated list of values. --minor MINOR --name NAME --partition-table-ids PARTITION_TABLE_IDS IDs of associated partition tables. Comma separated list of values. --partition-tables PARTITION_TABLE_NAMES Comma separated list of values. 
--password-hash PASSWORD_HASH Root password hash function to use, one of MD5, SHA256, SHA512 --release-name RELEASE_NAME -h, --help print help """ # Assigning default values for attributes args = { u'architecture-ids': None, u'architectures': None, u'config-template-ids': None, u'config-templates': None, u'description': None, u'family': None, u'major': random.randint(0, 10), u'media': None, u'medium-ids': None, u'minor': random.randint(0, 10), u'name': gen_alphanumeric(6), u'partition-table-ids': None, u'partition-tables': None, u'password-hash': None, u'release-name': None, } return create_object(OperatingSys, args, options) @cacheable def make_scapcontent(options=None): """ Usage:: scap-content create [OPTIONS] Options:: --location-ids LOCATION_IDS REPLACE locations with given ids Comma separated list of values. Values containing comma should be double quoted --locations LOCATION_NAMES Comma separated list of values. Values containing comma should be double quoted --organization-ids ORGANIZATION_IDS REPLACE organizations with given ids. Comma separated list of values. Values containing comma should be double quoted --organizations ORGANIZATION_NAMES Comma separated list of values. Values containing comma should be double quoted --original-filename ORIGINAL_FILENAME Original file name of the XML file --scap-file SCAP_FILE Scap content file --title TITLE SCAP content name -h, --help print help """ # Assigning default values for attributes args = { u'scap-file': None, u'original-filename': None, u'location-ids': None, u'locations': None, u'title': gen_alphanumeric().lower(), u'organization-ids': None, u'organizations': None, } return create_object(Scapcontent, args, options) @cacheable def make_domain(options=None): """ Usage:: hammer domain create [OPTIONS] Options:: --description DESC Full name describing the domain --dns-id DNS_ID DNS Proxy to use within this domain --location-ids LOCATION_IDS REPLACE locations with given ids Comma separated list of values. 
--locations LOCATION_NAMES Comma separated list of values. --name NAME The full DNS Domain name --organization-ids ORGANIZATION_IDS REPLACE organizations with given ids. Comma separated list of values. --organizations ORGANIZATION_NAMES Comma separated list of values. -h, --help print help """ # Assigning default values for attributes args = { u'description': None, u'dns-id': None, u'location-ids': None, u'locations': None, u'name': gen_alphanumeric().lower(), u'organization-ids': None, u'organizations': None, } return create_object(Domain, args, options) @cacheable def make_hostgroup(options=None): """ Usage:: hammer hostgroup create [OPTIONS] Options:: --architecture ARCHITECTURE_NAME Architecture name --architecture-id ARCHITECTURE_ID --ask-root-pass ASK_ROOT_PW One of true/false, yes/no, 1/0. --compute-profile COMPUTE_PROFILE_NAME Name to search by --compute-profile-id COMPUTE_PROFILE_ID --config-group-ids CONFIG_GROUP_IDS IDs of associated config groups --config-groups CONFIG_GROUP_NAMES --content-source-id CONTENT_SOURCE_ID --content-view CONTENT_VIEW_NAME Name to search by --content-view-id CONTENT_VIEW_ID content view numeric identifier --domain DOMAIN_NAME Domain name --domain-id DOMAIN_ID Numerical ID or domain name --environment ENVIRONMENT_NAME Environment name --environment-id ENVIRONMENT_ID --group-parameters-attributes GROUP_PARAMETERS_ATTRIBUTES Array of parameters --kickstart-repository-id KICKSTART_REPOSITORY_ID Kickstart repository ID --lifecycle-environment LIFECYCLE_ENVIRONMENT_NAME Name to search by --lifecycle-environment-id LIFECYCLE_ENVIRONMENT_ID ID of the environment --locations LOCATION_NAMES Comma separated list of values --location-titles LOCATION_TITLES --location-ids LOCATION_IDS REPLACE locations with given ids Comma separated list of values. 
--medium MEDIUM_NAME Medium name --medium-id MEDIUM_ID --name NAME --openscap-proxy-id OPENSCAP_PROXY_ID ID of OpenSCAP Capsule --operatingsystem OPERATINGSYSTEM_TITLE Operating system title --operatingsystem-id OPERATINGSYSTEM_ID --organizations ORGANIZATION_NAMES Comma separated list of values --organization-titles ORGANIZATION_TITLES --organization-ids ORGANIZATION_IDS REPLACE organizations with given ids. Comma separated list of values. --parent PARENT_NAME Name of parent hostgroup --parent-id PARENT_ID --partition-table PTABLE_NAME Partition table name --partition-table-id PTABLE_ID --puppet-ca-proxy PUPPET_CA_PROXY_NAME Name of puppet CA proxy --puppet-ca-proxy-id PUPPET_CA_PROXY_ID --puppet-class-ids PUPPETCLASS_IDS List of puppetclass ids Comma separated list of values. --puppet-classes PUPPET_CLASS_NAMES Comma separated list of values. --puppet-proxy PUPPET_CA_PROXY_NAME Name of puppet proxy --puppet-proxy-id PUPPET_PROXY_ID --pxe-loader PXE_LOADER DHCP filename option ( Grub2/PXELinux by default) --query-organization ORGANIZATION_NAME Organization name to search by --query-organization-id ORGANIZATION_ID Organization ID to search by --query-organization-label ORGANIZATION_LABEL Organization label to search by --realm REALM_NAME Name to search by --realm-id REALM_ID Numerical ID or realm name --root-pass ROOT_PASSWORD --subnet SUBNET_NAME Subnet name --subnet-id SUBNET_ID -h, --help print help """ # Assigning default values for attributes args = { u'architecture': None, u'architecture-id': None, u'compute-profile': None, u'compute-profile-id': None, u'config-group-ids': None, u'config-groups': None, u'content-source-id': None, u'content-source': None, u'content-view': None, u'content-view-id': None, u'domain': None, u'domain-id': None, u'environment': None, u'puppet-environment': None, u'environment-id': None, u'puppet-environment-id': None, u'locations': None, u'location-ids': None, u'kickstart-repository-id': None, u'lifecycle-environment': None, 
u'lifecycle-environment-id': None, u'lifecycle-environment-organization-id': None, u'medium': None, u'medium-id': None, u'name': gen_alphanumeric(6), u'operatingsystem': None, u'operatingsystem-id': None, u'organizations': None, u'organization-titles': None, u'organization-ids': None, u'parent': None, u'parent-id': None, u'partition-table': None, u'partition-table-id': None, u'puppet-ca-proxy': None, u'puppet-ca-proxy-id': None, u'puppet-class-ids': None, u'puppet-classes': None, u'puppet-proxy': None, u'puppet-proxy-id': None, u'pxe-loader': None, u'query-organization': None, u'query-organization-id': None, u'query-organization-label': None, u'realm': None, u'realm-id': None, u'subnet': None, u'subnet-id': None, } return create_object(HostGroup, args, options) @cacheable def make_medium(options=None): """ Usage:: hammer medium create [OPTIONS] Options:: --location-ids LOCATION_IDS REPLACE locations with given ids Comma separated list of values. --locations LOCATION_NAMES Comma separated list of values. --name NAME Name of media --operatingsystem-ids OPERATINGSYSTEM_IDS REPLACE organizations with given ids. Comma separated list of values. --operatingsystems OPERATINGSYSTEM_TITLES Comma separated list of values. --organization-ids ORGANIZATION_IDS Comma separated list of values. --organizations ORGANIZATION_NAMES Comma separated list of values. --os-family OS_FAMILY The family that the operating system belongs to. Available families: Archlinux Debian Gentoo Redhat Solaris Suse Windows --path PATH The path to the medium, can be a URL or a valid NFS server (exclusive of the architecture) for example http://mirror.centos.org/centos/ $version/os/$arch where $arch will be substituted for the host’s actual OS architecture and $version, $major and $minor will be substituted for the version of the operating system. Solaris and Debian media may also use $release. 
-h, --help print help """ # Assigning default values for attributes args = { u'location-ids': None, u'locations': None, u'name': gen_alphanumeric(6), u'operatingsystem-ids': None, u'operatingsystems': None, u'organization-ids': None, u'organizations': None, u'os-family': None, u'path': 'http://{0}'.format((gen_string('alpha', 6))), } return create_object(Medium, args, options) @cacheable def make_environment(options=None): """ Usage:: hammer environment create [OPTIONS] Options:: --location-ids LOCATION_IDS REPLACE locations with given ids Comma separated list of values. --locations LOCATION_NAMES Comma separated list of values. --name NAME --organization-ids ORGANIZATION_IDS REPLACE organizations with given ids. Comma separated list of values. --organizations ORGANIZATION_NAMES Comma separated list of values. """ # Assigning default values for attributes args = { u'location-ids': None, u'locations': None, u'name': gen_alphanumeric(6), u'organization-ids': None, u'organizations': None, } return create_object(Environment, args, options) @cacheable def make_lifecycle_environment(options=None): """ Usage:: hammer lifecycle-environment create [OPTIONS] Options:: --description DESCRIPTION description of the environment --label LABEL label of the environment --name NAME name of the environment --organization ORGANIZATION_NAME Organization name to search by --organization-id ORGANIZATION_ID organization ID --organization-label ORGANIZATION_LABEL Organization label to search by --prior PRIOR Name of an environment that is prior to the new environment in the chain. It has to be either ‘Library’ or an environment at the end of a chain. --registry-name-pattern REGISTRY_NAME_PATTERN Pattern for container image names --registry-unauthenticated-pull REGISTRY_UNAUTHENTICATED_PULL Allow unauthenticed pull of container images -h, --help print help """ # Organization Name, Label or ID is a required field. 
if ( not options or 'organization' not in options and 'organization-label' not in options and 'organization-id' not in options): raise CLIFactoryError('Please provide a valid Organization.') if not options.get('prior'): options['prior'] = 'Library' # Assigning default values for attributes args = { u'description': None, u'label': None, u'name': gen_alphanumeric(6), u'organization': None, u'organization-id': None, u'organization-label': None, u'prior': None, u'registry-name-pattern': None, u'registry-unauthenticated-pull': None, } return create_object(LifecycleEnvironment, args, options) @cacheable def make_tailoringfile(options=None): """ Usage:: tailoring-file create [OPTIONS] Options:: --location-ids LOCATION_IDS REPLACE locations with given ids Comma separated list of values. Values containing comma should be double quoted. --locations LOCATION_NAMES Comma separated list of values. Values containing comma should be double quoted --name NAME Tailoring file name --organization-ids ORGANIZATION_IDS REPLACE organizations with given ids. Comma separated list of values. Values containing comma should be double quoted --organizations ORGANIZATION_NAMES Comma separated list of values. Values containing comma should be double quoted --original-filename ORIGINAL_FILENAME Original file name of the XML file --scap-file SCAP_FILE Tailoring file content -h, --help print help """ # Assigning default values for attributes args = { u'scap-file': None, u'original-filename': None, u'location-ids': None, u'locations': None, u'name': gen_alphanumeric().lower(), u'organization-ids': None, u'organizations': None, } return create_object(TailoringFiles, args, options) @cacheable def make_template(options=None): """ Usage:: hammer template create [OPTIONS] Options:: --audit-comment AUDIT_COMMENT --file TEMPLATE Path to a file that contains the template --location-ids LOCATION_IDS REPLACE locations with given ids Comma separated list of values. 
--locked LOCKED Whether or not the template is locked for editing One of true/false, yes/no, 1/0. --name NAME template name --operatingsystem-ids OPERATINGSYSTEM_IDS Array of operating systems ID to associate the template with Comma separated list of values. --organization-ids ORGANIZATION_IDS REPLACE organizations with given ids. Comma separated list of values. --type TYPE Template type. Eg. snippet, script, provision -h, --help print help """ # Assigning default values for attribute args = { u'audit-comment': None, u'file': '/tmp/{0}'.format(gen_alphanumeric()), u'location-ids': None, u'locked': None, u'name': gen_alphanumeric(6), u'operatingsystem-ids': None, u'organization-ids': None, u'type': random.choice(TEMPLATE_TYPES), } # Write content to file or random text if options is not None and 'content' in options.keys(): content = options.pop('content') else: content = gen_alphanumeric() # Special handling for template factory (_, layout) = mkstemp(text=True) chmod(layout, 0o700) with open(layout, 'w') as ptable: ptable.write(content) # Upload file to server ssh.upload_file(local_file=layout, remote_file=args['file']) # End - Special handling for template factory return create_object(Template, args, options) @cacheable def make_smart_variable(options=None): """ Usage:: hammer smart-variable create [OPTIONS] Options:: --avoid-duplicates AVOID_DUPLICATES Remove duplicate values ( only array type) One of true/false, yes/no, 1/0. --default-value DEFAULT_VALUE Default value of variable --description DESCRIPTION Description of variable --hidden-value HIDDEN_VALUE When enabled the parameter is hidden in the UI One of true/false, yes/no, 1/0. --merge-default MERGE_DEFAULT Include default value when merging all matching values One of true/false, yes/no, 1/0. --merge-overrides MERGE_OVERRIDES Merge all matching values( only array/hash type) One of true/false, yes/no, 1/0. 
--override-value-order OVERRIDE_VALUE_ORDER The order in which values are resolved --puppet-class PUPPET_CLASS_NAME Puppet class name --puppet-class-id PUPPET_CLASS_ID ID of Puppet class --validator-rule VALIDATOR_RULE Used to enforce certain values for the parameter values --validator-type VALIDATOR_TYPE Type of the validator. Possible value(s): 'regexp', 'list', '' --variable VARIABLE Name of variable --variable-type VARIABLE_TYPE Type of the variable. Possible value(s): 'string', 'boolean', 'integer', 'real', 'array', 'hash', 'yaml', 'json' -h, --help print help """ # Puppet class name or ID is a required field. if ( not options or 'puppet-class' not in options and 'puppet-class-id' not in options): raise CLIFactoryError('Please provide a valid Puppet class') # Assigning default values for attributes args = { u'avoid-duplicates': None, u'default-value': None, u'description': None, u'hidden-value': None, u'merge-default': None, u'merge-overrides': None, u'override-value-order': None, u'puppet-class': None, u'puppet-class-id': None, u'validator-rule': None, u'validator-type': None, u'variable': gen_alphanumeric(), u'variable-type': None, } return create_object(SmartVariable, args, options) @cacheable def make_virt_who_config(options=None): """ Usage:: hammer virt-who-config create [OPTIONS] Options:: --blacklist BLACKLIST Hypervisor blacklist, applicable only when filtering mode is set to 2. Wildcards and regular expressions are supported, multiple records must be separated by comma. --debug DEBUG Enable debugging output One of true/false, yes/no, 1/0. --filtering-mode MODE Hypervisor filtering mode Possible value(s): 'none', 'whitelist', 'blacklist' --hypervisor-id HYPERVISOR_ID Specifies how the hypervisor will be identified. 
Possible value(s): 'hostname', 'uuid', 'hwuuid' --hypervisor-password HYPERVISOR_PASSWORD Hypervisor password, required for all hypervisor types except for libvirt --hypervisor-server HYPERVISOR_SERVER Fully qualified host name or IP address of the hypervisor --hypervisor-type HYPERVISOR_TYPE Hypervisor type Possible value(s): 'esx', 'rhevm', 'hyperv', 'xen', 'libvirt' --hypervisor-username HYPERVISOR_USERNAME Account name by which virt-who is to connect to the hypervisor. --interval INTERVAL Configuration interval in minutes Possible value(s): '60', '120', '240', '480', '720' --name NAME Configuration name --no-proxy NO_PROXY Ignore Proxy. A comma-separated list of hostnames or domains or ip addresses to ignore proxy settings for. Optionally this may be set to * to bypass proxy settings for all hostnames domains or ip addresses. --organization ORGANIZATION_NAME Organization name --organization-id ORGANIZATION_ID organization ID --organization-title ORGANIZATION_TITLE Organization title --proxy PROXY HTTP Proxy that should be used for communication between the server on which virt-who is running and the hypervisors and virtualization managers. --satellite-url SATELLITE_URL Satellite server FQDN --whitelist WHITELIST Hypervisor whitelist, applicable only when filtering mode is set to 1. Wildcards and regular expressions are supported, multiple records must be separated by comma. 
-h, --help print help """ args = { u'blacklist': None, u'debug': None, u'filtering-mode': 'none', u'hypervisor-id': 'hostname', u'hypervisor-password': None, u'hypervisor-server': None, u'hypervisor-type': None, u'hypervisor-username': None, u'interval': '60', u'name': gen_alphanumeric(6), u'no-proxy': None, u'organization': None, u'organization-id': None, u'organization-title': None, u'proxy': None, u'satellite-url': settings.server.hostname, u'whitelist': None } return create_object(VirtWhoConfig, args, options) def activationkey_add_subscription_to_repo(options=None): """ Adds subscription to activation key. Args:: organization-id - ID of organization activationkey-id - ID of activation key subscription - subscription name """ if( not options or not options.get('organization-id') or not options.get('activationkey-id') or not options.get('subscription')): raise CLIFactoryError( 'Please provide valid organization, activation key and ' 'subscription.' ) # List the subscriptions in given org subscriptions = Subscription.list( {u'organization-id': options['organization-id']}, per_page=False ) # Add subscription to activation-key if options['subscription'] not in (sub['name'] for sub in subscriptions): raise CLIFactoryError( u'Subscription {0} not found in the given org' .format(options['subscription']) ) for subscription in subscriptions: if subscription['name'] == options['subscription']: if ( subscription['quantity'] != 'Unlimited' and int(subscription['quantity']) == 0): raise CLIFactoryError( 'All the subscriptions are already consumed') try: ActivationKey.add_subscription({ u'id': options['activationkey-id'], u'subscription-id': subscription['id'], u'quantity': 1, }) except CLIReturnCodeError as err: raise CLIFactoryError( u'Failed to add subscription to activation key\n{0}' .format(err.msg) ) def setup_org_for_a_custom_repo(options=None): """Sets up Org for the given custom repo by: 1. 
Checks if organization and lifecycle environment were given, otherwise creates new ones. 2. Creates a new product with the custom repo. Synchronizes the repo. 3. Checks if content view was given, otherwise creates a new one and - adds the RH repo - publishes - promotes to the lifecycle environment 4. Checks if activation key was given, otherwise creates a new one and associates it with the content view. 5. Adds the custom repo subscription to the activation key Options:: url - URL to custom repository organization-id (optional) - ID of organization to use (or create a new one if empty) lifecycle-environment-id (optional) - ID of lifecycle environment to use (or create a new one if empty) content-view-id (optional) - ID of content view to use (or create a new one if empty) activationkey-id (optional) - ID of activation key (or create a new one if empty) :return: A dictionary with the entity ids of Activation key, Content view, Lifecycle Environment, Organization, Product and Repository """ if( not options or not options.get('url')): raise CLIFactoryError('Please provide valid custom repo URL.') # Create new organization and lifecycle environment if needed if options.get('organization-id') is None: org_id = make_org()['id'] else: org_id = options['organization-id'] if options.get('lifecycle-environment-id') is None: env_id = make_lifecycle_environment({u'organization-id': org_id})['id'] else: env_id = options['lifecycle-environment-id'] # Create custom product and repository custom_product = make_product({u'organization-id': org_id}) custom_repo = make_repository({ u'content-type': 'yum', u'product-id': custom_product['id'], u'url': options.get('url'), }) # Synchronize custom repository try: Repository.synchronize({'id': custom_repo['id']}) except CLIReturnCodeError as err: raise CLIFactoryError( u'Failed to synchronize repository\n{0}'.format(err.msg)) # Create CV if needed and associate repo with it if options.get('content-view-id') is None: cv_id = 
make_content_view({u'organization-id': org_id})['id'] else: cv_id = options['content-view-id'] try: ContentView.add_repository({ u'id': cv_id, u'organization-id': org_id, u'repository-id': custom_repo['id'], }) except CLIReturnCodeError as err: raise CLIFactoryError( u'Failed to add repository to content view\n{0}'.format(err.msg)) # Publish a new version of CV try: ContentView.publish({u'id': cv_id}) except CLIReturnCodeError as err: raise CLIFactoryError( u'Failed to publish new version of content view\n{0}' .format(err.msg) ) # Get the version id cvv = ContentView.info({u'id': cv_id})['versions'][-1] # Promote version to next env try: ContentView.version_promote({ u'id': cvv['id'], u'organization-id': org_id, u'to-lifecycle-environment-id': env_id, }) except CLIReturnCodeError as err: raise CLIFactoryError( u'Failed to promote version to next environment\n{0}' .format(err.msg) ) # Create activation key if needed and associate content view with it if options.get('activationkey-id') is None: activationkey_id = make_activation_key({ u'content-view-id': cv_id, u'lifecycle-environment-id': env_id, u'organization-id': org_id, })['id'] else: activationkey_id = options['activationkey-id'] # Given activation key may have no (or different) CV associated. 
# Associate activation key with CV just to be sure try: ActivationKey.update({ u'content-view-id': cv_id, u'id': activationkey_id, u'organization-id': org_id, }) except CLIReturnCodeError as err: raise CLIFactoryError( u'Failed to associate activation-key with CV\n{0}' .format(err.msg) ) # Add subscription to activation-key activationkey_add_subscription_to_repo({ u'activationkey-id': activationkey_id, u'organization-id': org_id, u'subscription': custom_product['name'], }) return { u'activationkey-id': activationkey_id, u'content-view-id': cv_id, u'lifecycle-environment-id': env_id, u'organization-id': org_id, u'product-id': custom_product['id'], u'repository-id': custom_repo['id'], } def _setup_org_for_a_rh_repo(options=None): """Sets up Org for the given Red Hat repository by: 1. Checks if organization and lifecycle environment were given, otherwise creates new ones. 2. Clones and uploads manifest. 3. Enables RH repo and synchronizes it. 4. Checks if content view was given, otherwise creates a new one and - adds the RH repo - publishes - promotes to the lifecycle environment 5. Checks if activation key was given, otherwise creates a new one and associates it with the content view. 6. Adds the RH repo subscription to the activation key Note that in most cases you should use ``setup_org_for_a_rh_repo`` instead as it's more flexible. Options:: product - RH product name repository-set - RH repository set name repository - RH repository name releasever (optional) - Repository set release version, don't specify it if enabling the Satellite 6 Tools repo. 
organization-id (optional) - ID of organization to use (or create a new one if empty) lifecycle-environment-id (optional) - ID of lifecycle environment to use (or create a new one if empty) content-view-id (optional) - ID of content view to use (or create a new one if empty) activationkey-id (optional) - ID of activation key (or create a new one if empty) subscription (optional) - subscription name (or use the default one if empty) :return: A dictionary with the entity ids of Activation key, Content view, Lifecycle Environment, Organization and Repository """ if ( not options or not options.get('product') or not options.get('repository-set') or not options.get('repository')): raise CLIFactoryError( 'Please provide valid product, repository-set and repo.') # Create new organization and lifecycle environment if needed if options.get('organization-id') is None: org_id = make_org()['id'] else: org_id = options['organization-id'] if options.get('lifecycle-environment-id') is None: env_id = make_lifecycle_environment({u'organization-id': org_id})['id'] else: env_id = options['lifecycle-environment-id'] # Clone manifest and upload it with manifests.clone() as manifest: upload_file(manifest.content, manifest.filename) try: Subscription.upload({ u'file': manifest.filename, u'organization-id': org_id, }) except CLIReturnCodeError as err: raise CLIFactoryError( u'Failed to upload manifest\n{0}'.format(err.msg)) # Enable repo from Repository Set try: RepositorySet.enable({ u'basearch': 'x86_64', u'name': options['repository-set'], u'organization-id': org_id, u'product': options['product'], u'releasever': options.get('releasever'), }) except CLIReturnCodeError as err: raise CLIFactoryError( u'Failed to enable repository set\n{0}'.format(err.msg)) # Fetch repository info try: rhel_repo = Repository.info({ u'name': options['repository'], u'organization-id': org_id, u'product': options['product'], }) except CLIReturnCodeError as err: raise CLIFactoryError( u'Failed to fetch 
repository info\n{0}'.format(err.msg)) # Synchronize the RH repository try: Repository.synchronize({ u'name': options['repository'], u'organization-id': org_id, u'product': options['product'], }) except CLIReturnCodeError as err: raise CLIFactoryError( u'Failed to synchronize repository\n{0}'.format(err.msg)) # Create CV if needed and associate repo with it if options.get('content-view-id') is None: cv_id = make_content_view({u'organization-id': org_id})['id'] else: cv_id = options['content-view-id'] try: ContentView.add_repository({ u'id': cv_id, u'organization-id': org_id, u'repository-id': rhel_repo['id'], }) except CLIReturnCodeError as err: raise CLIFactoryError( u'Failed to add repository to content view\n{0}'.format(err.msg)) # Publish a new version of CV try: ContentView.publish({u'id': cv_id}) except CLIReturnCodeError as err: raise CLIFactoryError( u'Failed to publish new version of content view\n{0}' .format(err.msg) ) # Get the version id try: cvv = ContentView.info({u'id': cv_id})['versions'][-1] except CLIReturnCodeError as err: raise CLIFactoryError( u'Failed to fetch content view info\n{0}'.format(err.msg)) # Promote version1 to next env try: ContentView.version_promote({ u'id': cvv['id'], u'organization-id': org_id, u'to-lifecycle-environment-id': env_id, }) except CLIReturnCodeError as err: raise CLIFactoryError( u'Failed to promote version to next environment\n{0}' .format(err.msg) ) # Create activation key if needed and associate content view with it if options.get('activationkey-id') is None: activationkey_id = make_activation_key({ u'content-view-id': cv_id, u'lifecycle-environment-id': env_id, u'organization-id': org_id, })['id'] else: activationkey_id = options['activationkey-id'] # Given activation key may have no (or different) CV associated. 
# Associate activation key with CV just to be sure try: ActivationKey.update({ u'id': activationkey_id, u'organization-id': org_id, u'content-view-id': cv_id, }) except CLIReturnCodeError as err: raise CLIFactoryError( u'Failed to associate activation-key with CV\n{0}' .format(err.msg) ) # Add subscription to activation-key activationkey_add_subscription_to_repo({ u'organization-id': org_id, u'activationkey-id': activationkey_id, u'subscription': options.get( u'subscription', DEFAULT_SUBSCRIPTION_NAME), }) return { u'activationkey-id': activationkey_id, u'content-view-id': cv_id, u'lifecycle-environment-id': env_id, u'organization-id': org_id, u'repository-id': rhel_repo['id'], } def setup_org_for_a_rh_repo(options=None, force_manifest_upload=False, force_use_cdn=False): """Wrapper above ``_setup_org_for_a_rh_repo`` to use custom downstream repo instead of CDN's 'Satellite Capsule' and 'Satellite Tools' if ``settings.cdn == 0`` and URL for custom repositories is set in properties. :param options: a dict with options to pass to function ``_setup_org_for_a_rh_repo``. See its docstring for more details :param force_use_cdn: bool flag whether to use CDN even if there's downstream repo available and ``settings.cdn == 0``. :param force_manifest_upload: bool flag whether to upload a manifest to organization even if downstream custom repo is used instead of CDN. Useful when test relies on organization with manifest (e.g. uses some other RH repo afterwards). Defaults to False. :return: a dict with entity ids (see ``_setup_org_for_a_rh_repo`` and ``setup_org_for_a_custom_repo``). 
""" custom_repo_url = None if options.get('repository') == REPOS['rhst6']['name']: custom_repo_url = settings.sattools_repo['rhel6'] elif options.get('repository') == REPOS['rhst7']['name']: custom_repo_url = settings.sattools_repo['rhel7'] elif 'Satellite Capsule' in options.get('repository'): custom_repo_url = settings.capsule_repo if force_use_cdn or settings.cdn or not custom_repo_url: return _setup_org_for_a_rh_repo(options) else: options['url'] = custom_repo_url result = setup_org_for_a_custom_repo(options) if force_manifest_upload: with manifests.clone() as manifest: upload_file(manifest.content, manifest.filename) try: Subscription.upload({ u'file': manifest.filename, u'organization-id': result.get('organization-id'), }) except CLIReturnCodeError as err: raise CLIFactoryError( u'Failed to upload manifest\n{0}'.format(err.msg)) # attach the default subscription to activation key activationkey_add_subscription_to_repo({ 'activationkey-id': result[u'activationkey-id'], 'organization-id': result[u'organization-id'], 'subscription': DEFAULT_SUBSCRIPTION_NAME, }) return result def configure_env_for_provision(org=None, loc=None): """Create and configure org, loc, product, repo, env. Update proxy, domain, subnet, compute resource, provision templates and medium with previously created entities and create a hostgroup using all mentioned entities. :param org: Default Organization that should be used in both host discovering and host provisioning procedures :param loc: Default Location that should be used in both host discovering and host provisioning procedures :return: List of created entities that can be re-used further in provisioning or validation procedure (e.g. 
hostgroup or subnet) """ # Create new organization and location in case they were not passed if org is None: org = make_org() if loc is None: loc = make_location() # Get a Library Lifecycle environment and the default CV for the org lce = LifecycleEnvironment.info( {u'name': u'Library', 'organization-id': org['id']} ) cv = ContentView.info( {u'name': u'Default Organization View', u'organization-id': org['id']} ) # Create puppet environment and associate organization and location env = make_environment({ 'location-ids': loc['id'], 'organization-ids': org['id'], }) # get default capsule and associate location puppet_proxy = Proxy.info({'id': Proxy.list({ u'search': settings.server.hostname })[0]['id']}) Proxy.update({ 'id': puppet_proxy['id'], 'locations': list( set(puppet_proxy.get('locations') or []) | {loc['name']}), }) # Network # Search for existing domain or create new otherwise. Associate org, # location and dns to it _, _, domain_name = settings.server.hostname.partition('.') domain = Domain.list({'search': 'name={0}'.format(domain_name)}) if len(domain) == 1: domain = Domain.info({'id': domain[0]['id']}) Domain.update({ 'name': domain_name, 'locations': list( set(domain.get('locations') or []) | {loc['name']}), 'organizations': list( set(domain.get('organizations') or []) | {org['name']}), 'dns-id': puppet_proxy['id'], }) else: # Create new domain domain = make_domain({ 'name': domain_name, 'location-ids': loc['id'], 'organization-ids': org['id'], 'dns-id': puppet_proxy['id'], }) # Search if subnet is defined with given network. 
If so, just update its # relevant fields otherwise create new subnet network = settings.vlan_networking.subnet subnet = Subnet.list({'search': 'network={0}'.format(network)}) if len(subnet) >= 1: subnet = Subnet.info({'id': subnet[0]['id']}) Subnet.update({ 'name': subnet['name'], 'domains': list( set(subnet.get('domains') or []) | {domain['name']}), 'locations': list( set(subnet.get('locations') or []) | {loc['name']}), 'organizations': list( set(subnet.get('organizations') or []) | {org['name']}), 'dhcp-id': puppet_proxy['id'], 'dns-id': puppet_proxy['id'], 'tftp-id': puppet_proxy['id'], }) else: # Create new subnet subnet = make_subnet({ 'name': gen_string('alpha'), 'network': network, 'mask': settings.vlan_networking.netmask, 'domain-ids': domain['id'], 'location-ids': loc['id'], 'organization-ids': org['id'], 'dhcp-id': puppet_proxy['id'], 'dns-id': puppet_proxy['id'], 'tftp-id': puppet_proxy['id'], }) # Get the Partition table entity ptable = PartitionTable.info({'name': DEFAULT_PTABLE}) # Get the OS entity os = OperatingSys.list({ 'search': 'name="RedHat" AND major="{0}" OR major="{1}"'.format( RHEL_6_MAJOR_VERSION, RHEL_7_MAJOR_VERSION) })[0] # Get proper Provisioning templates and update with OS, Org, Location provisioning_template = Template.info({'name': DEFAULT_TEMPLATE}) pxe_template = Template.info({'name': DEFAULT_PXE_TEMPLATE}) for template in provisioning_template, pxe_template: if os['title'] not in template['operating-systems']: Template.update({ 'id': template['id'], 'locations': list( set(template.get('locations') or []) | {loc['name']}), 'operatingsystems': list(set( template.get('operating-systems') or []) | {os['title']}), 'organizations': list( set(template.get('organizations') or []) | {org['name']}), }) # Get the architecture entity arch = Architecture.list( {'search': 'name={0}'.format(DEFAULT_ARCHITECTURE)})[0] os = OperatingSys.info({'id': os['id']}) # Get the media and update its location medium = Medium.list({'search': 
'path={0}'.format(settings.rhel7_os)}) if medium: media = Medium.info({'id': medium[0]['id']}) Medium.update({ 'id': media['id'], 'operatingsystems': list( set(media.get('operating-systems') or []) | {os['title']}), 'locations': list( set(media.get('locations') or []) | {loc['name']}), 'organizations': list( set(media.get('organizations') or []) | {org['name']}), }) else: media = make_medium({ 'location-ids': loc['id'], 'operatingsystem-ids': os['id'], 'organization-ids': org['id'], 'path': settings.rhel7_os }) # Update the OS with found arch, ptable, templates and media OperatingSys.update({ 'id': os['id'], 'architectures': list( set(os.get('architectures') or []) | {arch['name']}), 'media': list( set(os.get('installation-media') or []) | {media['name']}), 'partition-tables': list( set(os.get('partition-tables') or []) | {ptable['name']}), }) for template in (provisioning_template, pxe_template): if '{} ({})'.format(template['name'], template['type']) not in os[ 'templates']: OperatingSys.update({ 'id': os['id'], 'config-templates': list( set(os['templates']) | {template['name']}), }) # Create new hostgroup using proper entities hostgroup = make_hostgroup({ 'location-ids': loc['id'], 'environment-id': env['id'], 'lifecycle-environment-id': lce['id'], 'puppet-proxy-id': puppet_proxy['id'], 'puppet-ca-proxy-id': puppet_proxy['id'], 'content-view-id': cv['id'], 'domain-id': domain['id'], 'subnet-id': subnet['id'], 'organization-ids': org['id'], 'architecture-id': arch['id'], 'partition-table-id': ptable['id'], 'medium-id': media['id'], 'operatingsystem-id': os['id'], 'content-source-id': puppet_proxy['id'], }) return { 'hostgroup': hostgroup, 'subnet': subnet, 'domain': domain, 'ptable': ptable, 'os': os } def publish_puppet_module(puppet_modules, repo_url, organization_id=None): """Creates puppet repo, sync it via provided url and publish using Content View publishing mechanism. 
It makes puppet class available via Puppet Environment created by Content View and returns Content View entity. :param puppet_modules: List of dictionaries with module 'author' and module 'name' fields. :param str repo_url: Url of the repo that can be synced using pulp: pulp repo or puppet forge. :param organization_id: Organization id that is shared between created entities. :return: Content View entity. """ if not organization_id: organization_id = make_org()['id'] product = make_product({u'organization-id': organization_id}) repo = make_repository({ u'product-id': product['id'], u'content-type': 'puppet', u'url': repo_url, }) # Synchronize repo via provided URL Repository.synchronize({'id': repo['id']}) # Add selected module to Content View cv = make_content_view({u'organization-id': organization_id}) for module in puppet_modules: ContentView.puppet_module_add({ u'author': module['author'], u'name': module['name'], u'content-view-id': cv['id'], }) # CV publishing will automatically create Environment and # Puppet Class entities ContentView.publish({u'id': cv['id']}) return ContentView.info({u'id': cv['id']}) def setup_virtual_machine( vm, org_label, rh_repos_id=None, repos_label=None, product_label=None, lce=None, activation_key=None, patch_os_release_distro=None, install_katello_agent=True): """ Setup a Virtual machine with basic components and tasks. :param robottelo.vm.VirtualMachine vm: The Virtual machine to setup. :param str org_label: The Organization label. :param list rh_repos_id: a list of RH repositories ids to enable. :param list repos_label: a list of custom repositories labels to enable. :param str product_label: product label if repos_label is applicable. :param str lce: Lifecycle environment label if applicable. :param str activation_key: Activation key name if applicable. :param str patch_os_release_distro: distro name, to patch the VM with os version. :param bool install_katello_agent: whether to install katello agent. 
""" if rh_repos_id is None: rh_repos_id = [] if repos_label is None: repos_label = [] vm.install_katello_ca() vm.register_contenthost(org_label, activation_key=activation_key, lce=lce) if not vm.subscribed: raise CLIFactoryError('Virtual machine failed subscription') if patch_os_release_distro: vm.patch_os_release_version(distro=patch_os_release_distro) # Enable RH repositories for repo_id in rh_repos_id: vm.enable_repo(repo_id, force=True) if product_label: # Enable custom repositories for repo_label in repos_label: result = vm.run( 'yum-config-manager --enable {0}_{1}_{2}'.format( org_label, product_label, repo_label, ) ) if result.return_code != 0: raise CLIFactoryError( 'Failed to enable custom repository "{0}"\n{1}'.format( repos_label, result.stderr) ) if install_katello_agent: vm.install_katello_agent() def _get_capsule_vm_distro_repos(distro): """Return the right RH repos info for the capsule setup""" rh_repos = [] if distro == DISTRO_RHEL7: # Red Hat Enterprise Linux 7 Server rh_product_arch = REPOS['rhel7']['arch'] rh_product_releasever = REPOS['rhel7']['releasever'] rh_repos.append({ 'product': PRDS['rhel'], 'repository-set': REPOSET['rhel7'], 'repository': REPOS['rhel7']['name'], 'repository-id': REPOS['rhel7']['id'], 'releasever': rh_product_releasever, 'arch': rh_product_arch, 'cdn': True, }) # Red Hat Software Collections (for 7 Server) rh_repos.append({ 'product': PRDS['rhscl'], 'repository-set': REPOSET['rhscl7'], 'repository': REPOS['rhscl7']['name'], 'repository-id': REPOS['rhscl7']['id'], 'releasever': rh_product_releasever, 'arch': rh_product_arch, 'cdn': True, }) # Red Hat Satellite Capsule 6.2 (for RHEL 7 Server) rh_repos.append({ 'product': PRDS['rhsc'], 'repository-set': REPOSET['rhsc7'], 'repository': REPOS['rhsc7']['name'], 'repository-id': REPOS['rhsc7']['id'], 'url': settings.capsule_repo, 'cdn': bool(settings.cdn or not settings.capsule_repo), }) else: raise CLIFactoryError('distro "{}" not supported'.format(distro)) return 
rh_product_arch, rh_product_releasever, rh_repos def add_role_permissions(role_id, resource_permissions): """Create role permissions found in resource permissions dict :param role_id: The role id :param resource_permissions: a dict containing resources with permission names and other Filter options Usage:: role = make_role({'organization-id': org['id']}) resource_permissions = { 'Katello::ActivationKey': { 'permissions': [ 'view_activation_keys', 'create_activation_keys', 'edit_activation_keys', 'destroy_activation_keys' ], 'search': "name ~ {}".format(ak_name_like) }, } add_role_permissions(role['id'], resource_permissions) """ available_permissions = Filter.available_permissions() # group the available permissions by resource type available_rc_permissions = {} for permission in available_permissions: permission_resource = permission['resource'] if permission_resource not in available_rc_permissions: available_rc_permissions[permission_resource] = [] available_rc_permissions[permission_resource].append(permission) # create only the required role permissions per resource type for resource_type, permission_data in resource_permissions.items(): permission_names = permission_data.get('permissions') if permission_names is None: raise CLIFactoryError( 'Permissions not provided for resource: {0}' .format(resource_type) ) # ensure that the required resource type is available if resource_type not in available_rc_permissions: raise CLIFactoryError( 'Resource "{0}" not in the list of available resources' .format(resource_type) ) available_permission_names = [ permission['name'] for permission in available_rc_permissions[resource_type] if permission['name'] in permission_names ] # ensure that all the required permissions are available missing_permissions = set( permission_names).difference(set(available_permission_names)) if missing_permissions: raise CLIFactoryError( 'Permissions "{0}" are not available in Resource "{1}"' .format(list(missing_permissions), resource_type) ) # 
Create the current resource type role permissions options = {'role-id': role_id} options.update(permission_data) make_filter(options=options) def setup_cdn_and_custom_repositories( org_id, repos, download_policy='on_demand', synchronize=True): """Setup cdn and custom repositories :param int org_id: The organization id :param list repos: a list of dict repositories options :param str download_policy: update the repositories with this download policy :param bool synchronize: Whether to synchronize the repositories. :return: a dict containing the content view and repos info """ custom_product = None repos_info = [] for repo in repos: custom_repo_url = repo.get('url') cdn = repo.get('cdn', False) if not cdn and not custom_repo_url: raise CLIFactoryError(u'Custom repository with url not supplied') if cdn: if bz_bug_is_open(1655239): rh_repo_id = enable_rhrepo_and_fetchid( repo.get('arch', DEFAULT_ARCHITECTURE), org_id, repo['product'], repo['repository'], repo['repository-set'], repo.get('releasever') ) repo_info = Repository.info({'id': rh_repo_id}) else: RepositorySet.enable({ u'organization-id': org_id, u'product': repo['product'], u'name': repo['repository-set'], u'basearch': repo.get('arch', DEFAULT_ARCHITECTURE), u'releasever': repo.get('releasever'), }) repo_info = Repository.info({ u'organization-id': org_id, u'name': repo['repository'], u'product': repo['product'], }) else: if custom_product is None: custom_product = make_product_wait({ 'organization-id': org_id, }) repo_info = make_repository({ 'product-id': custom_product['id'], 'organization-id': org_id, 'url': custom_repo_url, }) if download_policy: # Set download policy Repository.update({ 'download-policy': download_policy, 'id': repo_info['id'], }) repos_info.append(repo_info) if synchronize: # Synchronize the repositories for repo_info in repos_info: Repository.synchronize({'id': repo_info['id']}, timeout=4800) return custom_product, repos_info def setup_cdn_and_custom_repos_content( org_id, 
lce_id=None, repos=None, upload_manifest=True, download_policy='on_demand', rh_subscriptions=None, default_cv=False): """Setup cdn and custom repositories, content view and activations key :param int org_id: The organization id :param int lce_id: the lifecycle environment id :param list repos: a list of dict repositories options :param bool default_cv: whether to use the Default Organization CV :param bool upload_manifest: whether to upload the organization manifest :param str download_policy: update the repositories with this download policy :param list rh_subscriptions: a list of RH subscription to attach to activation key :return: a dict containing the activation key, content view and repos info """ if lce_id is None and not default_cv: raise TypeError(u'lce_id must be specified') if repos is None: repos = [] if rh_subscriptions is None: rh_subscriptions = [] if upload_manifest: # Upload the organization manifest try: manifests.upload_manifest_locked(org_id, manifests.clone(), interface=manifests.INTERFACE_CLI) except CLIReturnCodeError as err: raise CLIFactoryError( u'Failed to upload manifest\n{0}'.format(err.msg)) custom_product, repos_info = setup_cdn_and_custom_repositories( org_id=org_id, repos=repos, download_policy=download_policy ) if default_cv: activation_key = make_activation_key({ u'organization-id': org_id, u'lifecycle-environment': 'Library', }) content_view = ContentView.info({ u'organization-id': org_id, u'name': u'Default Organization View' }) else: # Create a content view content_view = make_content_view({u'organization-id': org_id}) # Add repositories to content view for repo_info in repos_info: ContentView.add_repository({ u'id': content_view['id'], u'organization-id': org_id, u'repository-id': repo_info['id'], }) # Publish the content view ContentView.publish({u'id': content_view['id']}) # Get the latest content view version id content_view_version = ContentView.info({ u'id': content_view['id'] })['versions'][-1] # Promote content view 
version to lifecycle environment ContentView.version_promote({ u'id': content_view_version['id'], u'organization-id': org_id, u'to-lifecycle-environment-id': lce_id, }) content_view = ContentView.info({u'id': content_view['id']}) activation_key = make_activation_key({ u'organization-id': org_id, u'lifecycle-environment-id': lce_id, u'content-view-id': content_view['id'], }) # Get organization subscriptions subscriptions = Subscription.list({ u'organization-id': org_id}, per_page=False ) # Add subscriptions to activation-key needed_subscription_names = list(rh_subscriptions) if custom_product: needed_subscription_names.append(custom_product['name']) added_subscription_names = [] for subscription in subscriptions: if (subscription['name'] in needed_subscription_names and subscription['name'] not in added_subscription_names): ActivationKey.add_subscription({ u'id': activation_key['id'], u'subscription-id': subscription['id'], u'quantity': 1, }) added_subscription_names.append(subscription['name']) if (len(added_subscription_names) == len(needed_subscription_names)): break missing_subscription_names = set( needed_subscription_names).difference(set(added_subscription_names)) if missing_subscription_names: raise CLIFactoryError( u'Missing subscriptions: {0}'.format(missing_subscription_names)) data = dict( activation_key=activation_key, content_view=content_view, product=custom_product, repos=repos_info, ) if lce_id: lce = LifecycleEnvironment.info({ 'id': lce_id, 'organization-id': org_id, }) data['lce'] = lce return data def vm_setup_ssh_config(vm, ssh_key_name, host, user=None): """Create host entry in vm ssh config and know_hosts files to allow vm to access host via ssh without password prompt :param robottelo.vm.VirtualMachine vm: Virtual machine instance :param str ssh_key_name: The ssh key file name to use to access host, the file must already exist in /root/.ssh directory :param str host: the hostname to setup that will be accessed from vm :param str user: the 
user that will access the host """ if user is None: user = 'root' ssh_path = '/root/.ssh' ssh_key_file_path = '{0}/{1}'.format(ssh_path, ssh_key_name) # setup the config file ssh_config_file_path = '{0}/config'.format(ssh_path) result = vm.run('touch {0}'.format(ssh_config_file_path)) if result.return_code != 0: raise CLIFactoryError( u'Failed to create ssh config file:\n{}' .format(result.stderr) ) result = vm.run( 'echo "\nHost {0}\n\tHostname {0}\n\tUser {1}\n' '\tIdentityFile {2}\n" >> {3}' .format(host, user, ssh_key_file_path, ssh_config_file_path) ) if result.return_code != 0: raise CLIFactoryError( u'Failed to write to ssh config file:\n{}'.format(result.stderr)) # add host entry to ssh known_hosts result = vm.run( 'ssh-keyscan {0} >> {1}/known_hosts'.format(host, ssh_path)) if result.return_code != 0: raise CLIFactoryError( u'Failed to put hostname in ssh known_hosts files:\n{}' .format(result.stderr) ) def vm_upload_ssh_key(vm, source_key_path, destination_key_name): """Copy ssh key to virtual machine ssh path and ensure proper permission is set :param robottelo.vm.VirtualMachine vm: Virtual machine instance :param source_key_path: The ssh key file path to copy to vm :param destination_key_name: The ssh key file name when copied to vm """ destination_key_path = '/root/.ssh/{0}'.format(destination_key_name) upload_file( local_file=source_key_path, remote_file=destination_key_path, hostname=vm.ip_addr ) result = vm.run('chmod 600 {0}'.format(destination_key_path)) if result.return_code != 0: raise CLIFactoryError( u'Failed to chmod ssh key file:\n{}'.format(result.stderr)) def virt_who_hypervisor_config( config_id, virt_who_vm, org_id=None, lce_id=None, hypervisor_hostname=None, configure_ssh=False, hypervisor_user=None, subscription_name=None, exec_one_shot=False, upload_manifest=True, extra_repos=None): """ Configure virtual machine as hypervisor virt-who service :param int config_id: virt-who config id :param robottelo.vm.VirtualMachine virt_who_vm: the 
Virtual machine instance to use for configuration :param int org_id: the organization id :param int lce_id: the lifecycle environment id to use :param str hypervisor_hostname: the hypervisor hostname :param str hypervisor_user: hypervisor user that connect with the ssh key :param bool configure_ssh: whether to configure the ssh key to allow this virtual machine to connect to hypervisor :param str subscription_name: the subscription name to assign to virt-who hypervisor guests :param bool exec_one_shot: whether to run the virt-who one-shot command after startup :param bool upload_manifest: whether to upload the organization manifest :param list extra_repos: (Optional) a list of repositories dict options to setup additionally. """ if org_id is None: org = make_org() else: org = Org.info({'id': org_id}) if lce_id is None: lce = make_lifecycle_environment({'organization-id': org['id']}) else: lce = LifecycleEnvironment.info({ 'id': lce_id, 'organization-id': org['id'] }) if extra_repos is None: extra_repos = [] repos = [ # Red Hat Satellite Tools { 'product': PRDS['rhel'], 'repository-set': REPOSET['rhst7'], 'repository': REPOS['rhst7']['name'], 'repository-id': REPOS['rhst7']['id'], 'url': settings.sattools_repo['rhel7'], 'cdn': bool(settings.cdn or not settings.sattools_repo['rhel7']), }, ] repos.extend(extra_repos) content_setup_data = setup_cdn_and_custom_repos_content( org['id'], lce['id'], repos, upload_manifest=upload_manifest, rh_subscriptions=[DEFAULT_SUBSCRIPTION_NAME], ) activation_key = content_setup_data['activation_key'] content_view = content_setup_data['content_view'] setup_virtual_machine( virt_who_vm, org['label'], activation_key=activation_key['name'], patch_os_release_distro=DISTRO_RHEL7, rh_repos_id=[ repo['repository-id'] for repo in repos if repo['cdn'] ], install_katello_agent=False, ) # configure manually RHEL custom repo url as sync time is very big # (more than 2 hours for RHEL 7Server) and not critical in this context. 
rhel_repo_option_name = 'rhel{0}_repo'.format(DISTROS_MAJOR_VERSION[DISTRO_RHEL
codeparrot/github-code-clean
# -*- coding: utf-8 -*- """ Created on Mon Apr 02 11:54:33 2012 @author: a1185872 """ import os import numpy as np import matplotlib.pyplot as plt import matplotlib.pylab as pylab from matplotlib.ticker import MultipleLocator, FormatStrFormatter from matplotlib.patches import Ellipse, Rectangle, Arrow from matplotlib.colors import LinearSegmentedColormap, Normalize import matplotlib.colorbar as mcb import matplotlib.gridspec as gridspec import mtpy1.core.z as Z import mtpy1.utils.latlongutmconversion as ll2utm # tolerance to find frequencies ptol = .15 # error of data in percentage zerr = .05 # errormap values which is multiplied by zerr to get a total error zxxerrmap = 10 zxyerrmap = 1 zyxerrmap = 1 zyyerrmap = 10 zerrmap = [zxxerrmap, zxyerrmap, zyxerrmap, zyyerrmap] #============================================================================== # Colormaps for plots #============================================================================== # phase tensor map ptcmapdict = {'red': ((0.0, 1.0, 1.0), (1.0, 1.0, 1.0)), 'green': ((0.0, 0.0, 1.0), (1.0, 0.0, 1.0)), 'blue': ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0))} ptcmap = LinearSegmentedColormap('ptcmap', ptcmapdict, 256) # phase tensor map for difference (reverse) ptcmapdictr = {'red': ((0.0, 1.0, 1.0), (1.0, 1.0, 1.0)), 'green': ((0.0, 1.0, 0.0), (1.0, 1.0, 0.0)), 'blue': ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0))} ptcmapr = LinearSegmentedColormap('ptcmapr', ptcmapdictr, 256) # resistivity tensor map for calculating delta ptcmapdict2 = {'red': ((0.0, 1.0, 0.0), (1.0, 1.0, 0.0)), 'green': ((0.0, 0.5, 0.5), (1.0, 0.5, 0.5)), 'blue': ((0.0, 0.5, 0.5), (1.0, 0.5, 0.5))} ptcmap2 = LinearSegmentedColormap('ptcmap2', ptcmapdict2, 256) # resistivity tensor map for calcluating resistivity difference rtcmapdict = {'red': ((0.0, 0.0, 0.0), (0.5, 1.0, 1.0), (1.0, 1.0, 0.0)), 'green': ((0.0, 0.0, 0.0), (0.5, 1.0, 1.0), (1.0, 0.0, 0.0)), 'blue': ((0.0, 0.0, 1.0), (0.5, 1.0, 1.0), (1.0, 0.0, 0.0))} rtcmap = 
LinearSegmentedColormap('rtcmap', rtcmapdict, 256) # resistivity tensor map for calcluating apparent resistivity rtcmapdictr = {'red': ((0.0, 1.0, 1.0), (0.5, 1.0, 1.0), (1.0, 0.0, 0.0)), 'green': ((0.0, 0.0, 0.0), (0.5, 1.0, 1.0), (1.0, 0.0, 0.0)), 'blue': ((0.0, 0.0, 0.0), (0.5, 1.0, 1.0), (1.0, 1.0, 1.0))} rtcmapr = LinearSegmentedColormap('rtcmapr', rtcmapdictr, 256) #============================================================================== # define some helping functions #============================================================================== # make a class to pick periods class ListPeriods: def __init__(self, fig): self.plst = [] self.fig = fig self.count = 1 def connect(self): self.cid = self.fig.canvas.mpl_connect('button_press_event', self.onclick) def onclick(self, event): print '{0} Period: {1:.5g}'.format(self.count, event.xdata) self.plst.append(event.xdata) self.count += 1 def disconnect(self): self.fig.canvas.mpl_disconnect(self.cid) def readWLOutFile(outfn, ncol=5): """ read .out file from winglink Inputs: outfn = full path to .out file from winglink Outputs: dx,dy,dz = cell nodes in x,y,z directions (note x is to the East here and y is to the north.) """ wingLinkDataFH = file(outfn, 'r') raw_data = wingLinkDataFH.read().strip().split() nx = int(raw_data[0]) ny = int(raw_data[1]) nz = int(raw_data[2]) dx = np.zeros(nx) dy = np.zeros(ny) dz = np.zeros(nz) for x_idx in range(nx): dx[x_idx] = raw_data[x_idx + 5] for y_idx in range(ny): dy[y_idx] = raw_data[y_idx + 5 + nx] for z_idx in range(nz): dz[z_idx] = raw_data[z_idx + 5 + nx + ny] # dx[0:nx/2]=-dx[0:nx/2] # dy[0:ny/2]=-dy[0:ny/2] return dx, dy, dz def readSitesFile(sitesfn): """ read sites_ file output from winglink Input: sitesfn = full path to the sites file output by winglink Output: slst = list of dictionaries for each station. 
Keys include: station = station name dx = number of blocks from center of grid in East-West direction dy = number of blocks from center of grid in North-South direction dz = number of blocks from center of grid vertically number = block number in the grid sitelst = list of station names """ sfid = file(sitesfn, 'r') slines = sfid.readlines() slst = [] sitelst = [] for ss in slines: sdict = {} sline = ss.strip().split() sdict['station'] = sline[0][0:-4] sdict['dx'] = int(sline[1]) - 1 sdict['dy'] = int(sline[2]) - 1 sdict['dz'] = int(sline[3]) - 1 sdict['something'] = int(sline[4]) sdict['number'] = int(sline[5]) slst.append(sdict) sitelst.append(sline[0][0:-4]) return slst, sitelst def getXY(sitesfn, outfn, ncol=5): """ get x (e-w) and y (n-s) position of station and put in middle of cell Input: sitesfn = full path to sites file output from winglink outfn = full path to .out file output from winglink ncol = number of columns the data is in Outputs: xarr = array of relative distance for each station from center of the grid. Note this is E-W direction yarr = array of relative distance for each station from center of the grid. Note this is N-S direction """ slst, sitelst = readSitesFile(sitesfn) dx, dy, dz = readWLOutFile(outfn, ncol=ncol) ns = len(slst) nxh = len(dx) / 2 nyh = len(dy) / 2 xarr = np.zeros(ns) yarr = np.zeros(ns) for ii, sdict in enumerate(slst): xx = sdict['dx'] yy = sdict['dy'] if xx < nxh: xarr[ii] = dx[xx:nxh].sum() - dx[xx] / 2 else: xarr[ii] = dx[nxh:xx].sum() + dx[xx] / 2 if yy < nyh: yarr[ii] = -1 * (dy[yy:nyh].sum() - dy[yy] / 2) else: yarr[ii] = -1 * (dy[nyh:yy].sum() + dy[yy] / 2) return xarr, yarr def getPeriods(edilst, errthresh=10): """ Plots periods for all stations in edipath and the plot is interactive, just click on the period you want to select and it will appear in the console, it will also be saved to lp.plst. To sort this list type lp.plst.sort() The x's mark a conformation that the station contains that period. 
So when looking for the best periods to invert for look for a dense line of x's Inputs: edipath = path to where all your edi files are. Note that only the impedance components are supported so if you have spectra data, export them from wingling to have impedance information. errthresh = threshold on the error in impedance estimation, this just gives an indication on where bad stations and bad periods are, anything above this level will be colored in red. Outputs: periodlst = list of periods for each station errorlst = error in the impedance determinant for each station at each period. lp = data type lp has attributes: plst = period list of chosen periods, again to sort this list type lp.plst.sort(). this will then be the input to make the data file later. """ plt.rcParams['font.size'] = 10 plt.rcParams['figure.subplot.left'] = .13 plt.rcParams['figure.subplot.right'] = .98 plt.rcParams['figure.subplot.bottom'] = .1 plt.rcParams['figure.subplot.top'] = .95 plt.rcParams['figure.subplot.wspace'] = .25 plt.rcParams['figure.subplot.hspace'] = .05 periodlst = [] errorlst = [] fig1 = plt.figure(5) ax = fig1.add_subplot(1, 1, 1) for edi in edilst: if not os.path.isfile(edi): print 'Could not find ' + edi else: z1 = Z.Z(edi) periodlst.append(z1.period) zdet = np.array([np.sqrt(abs(np.linalg.det(zz))) for zz in z1.z]) error = np.array([np.sqrt(abs(np.linalg.det(zz))) for zz in z1.zvar]) perror = (error / zdet) * 100 errorlst.append(perror) # make a plot to pick frequencies from showing period and percent # error ax.scatter(z1.period, perror, marker='x', picker=5) pfind = np.where(perror > errthresh)[0] if len(pfind) > 0: print 'Error greater than {0:.3f} for '.format(errthresh) + z1.station for jj in pfind: ax.scatter( z1.period[jj], perror[jj], marker='x', color='r') ax.text(z1.period[jj], perror[jj] * 1.05, z1.station, horizontalalignment='center', verticalalignment='baseline', fontdict={'size': 8, 'color': 'red'}) print jj, z1.period[jj] ax.set_xscale('log') 
ax.set_xlim(10**np.floor(np.log10(z1.period[0])), 10**np.ceil(np.log10(z1.period[-1]))) ax.set_ylim(0, 3 * errthresh) ax.set_yscale('log') ax.set_xlabel('Period (s)', fontdict={'size': 12, 'weight': 'bold'}) ax.set_ylabel('Percent Error', fontdict={'size': 12, 'weight': 'bold'}) ax.grid('on', which='both') lp = ListPeriods(fig1) lp.connect() plt.show() return periodlst, errorlst, lp def make3DGrid(edilst, xspacing=500, yspacing=500, z1layer=10, xpad=5, ypad=5, zpad=5, xpadroot=5, ypadroot=5, zpadroot=2, zpadpow=(5, 15), nz=30, plotyn='y', plotxlimits=None, plotylimits=None, plotzlimits=None): """ makes a grid from the edifiles to go into wsinv3d. The defaults usually work relatively well, but it might take some effort to get a desired grid. Inputs: -------- **edilst** : list list of full paths to the .edi files to be included in the inversion. **xspacing** : float spacing of cells in the east-west direction in meters. *default* is 500 (m) **yspacing** : float spacing of cells in the north-south direction in meters. *default* is 500 (m) **z1layer** : float the depth of the first layer in the model in meters. This is usually about 1/10th of your shallowest skin depth. *default* is 10 (m) **xpad** : int number of cells to pad on either side in the east-west direction. The width of these cells grows exponentially to the edge. *default* is 5 **ypad** : int number of cells to pad on either side in the north-south direction. The width of these cells grows exponentially to the edge. *default* is 5 **zpad** : int number of cells to pad on either side in the vertical direction. This is to pad beneath the depth of investigation and grows faster exponentially than the zone of study. The purpose is to decrease the number of cells in the model. *default* is 5 **xpadroot** : float the root number that is multiplied to itself for calculating the width of the padding cells in the east-west direction. 
*default* is 5 **ypadroot** : float the root number that is multiplied to itself for calculating the width of the padding cells in the north-south direction. *default* is 5 **zpadroot** : float the root number that is multiplied to itself for calculating the width of the padding cells in the vertical direction. *default* is 2 **zpadpow** : tuple (min,max) the power to which zpadroot is raised for the padding cells in the vertical direction. Input as a tuple with minimum power and maximum power. *default* is (5,15) **nz** : int number of layers in the vertical direction. Remember that the inversion code automatically adds 7 air layers to the model which need to be used when estimating the memory that it is going to take to run the model. *default* is 30 **plotyn** : [ 'y' | 'n' ] if plotyn=='y' then a plot showing map view (east:north) and a cross sectional view (east:vertical) plane * 'y' to plot the grid with station locations * 'n' to suppress the plotting. **plotxlimits** : tuple (xmin,xmax) plot min and max distances in meters for the east-west direction. If not input, the xlimits will be set to the furthest stations east and west. *default* is None **plotylimits** : tuple (ymin,ymax) plot min and max distances in meters for the east-west direction. If not input, the ylimits will be set to the furthest stations north and south. *default* is None **plotzlimits** : tuple (zmin,zmax) plot min and max distances in meters for the east-west direction. If not input, the zlimits will be set to the nz layer and 0. *default* is None Returns: -------- xgrid,ygrid,zgrid,locations,slst **xgrid** : np.array array of the east-west cell locations **ygrid** : np.array array of the north-south cell locations **zgrid** : np.array array of the vertical cell locations **locations** : np.array (ns,2) array of station locations placed in the center of the cells. 
* column 1 is for east-west locations * column 2 is for the north-south location **slst** : list list of dictionaries for each station with keys: * *'station'* for the station name * *'east'* for easting in model coordinates * *'east_c'* for easting in model coordinates to place the station at the center of the cell * *'north'* for northing in model coordinates * *'north_c'* for northing in model coordinates to place the station at the center of the cell :Example: :: >>> import mtpy.modeling.ws3dtools as ws >>> import os >>> edipath=r"/home/edifiles" >>> edilst=[os.path.join(edipath,edi) for os.listdir(edipath)] >>> xg,yg,zg,loc,statlst=ws.make3DGrid(edilst,plotzlimits=(-2000,200)) """ ns = len(edilst) locations = np.zeros((ns, 2)) slst = [] for ii, edi in enumerate(edilst): zz = Z.Z(edi) zone, east, north = ll2utm.LLtoUTM(23, zz.lat, zz.lon) locations[ii, 0] = east locations[ii, 1] = north slst.append({'station': zz.station, 'east': east, 'north': north}) # estimate the mean distance to get into relative coordinates xmean = locations[:, 0].mean() ymean = locations[:, 1].mean() # remove the average distance to get coordinates in a relative space locations[:, 0] -= xmean locations[:, 1] -= ymean for sdict in slst: sdict['east'] -= xmean sdict['north'] -= ymean # translate the stations so they are relative to 0,0 xcenter = (locations[:, 0].max() - np.abs(locations[:, 0].min())) / 2 ycenter = (locations[:, 1].max() - np.abs(locations[:, 1].min())) / 2 # remove the average distance to get coordinates in a relative space locations[:, 0] -= xcenter locations[:, 1] -= ycenter for sdict in slst: sdict['east'] -= xcenter sdict['north'] -= ycenter # pickout the furtherst south and west locations # and put that station as the bottom left corner of the main grid xleft = locations[:, 0].min() - xspacing / 2 xright = locations[:, 0].max() + xspacing / 2 ybottom = locations[:, 1].min() - yspacing / 2 ytop = locations[:, 1].max() + yspacing / 2 #---make a grid around the stations 
from the parameters above--- # make grid in east-west direction midxgrid = np.arange(start=xleft, stop=xright + xspacing, step=xspacing) xpadleft = np.round(-xspacing * 5**np.arange(start=.5, stop=3, step=3. / xpad)) +\ xleft xpadright = np.round(xspacing * 5**np.arange(start=.5, stop=3, step=3. / xpad)) +\ xright xgridr = np.append(np.append(xpadleft[::-1], midxgrid), xpadright) # make grid in north-south direction midygrid = np.arange(start=ybottom, stop=ytop + yspacing, step=yspacing) ypadbottom = np.round(-yspacing * 5**np.arange(start=.5, stop=3, step=3. / xpad)) +\ ybottom ypadtop = np.round(yspacing * 5**np.arange(start=.5, stop=3, step=3. / xpad)) +\ ytop ygridr = np.append(np.append(ypadbottom[::-1], midygrid), ypadtop) # make depth grid zgrid1 = z1layer * \ 2**np.round(np.arange(0, zpadpow[0], zpadpow[0] / (nz - zpad))) zgrid2 = z1layer * 2**np.round(np.arange(zpadpow[0], zpadpow[1], (zpadpow[1] - zpadpow[0]) / (zpad))) zgrid = np.append(zgrid1, zgrid2) #--Need to make an array of the individual cell dimensions for the wsinv3d xnodes = xgridr.copy() nx = xgridr.shape[0] xnodes[:nx / 2] = np.array([abs(xgridr[ii] - xgridr[ii + 1]) for ii in range(int(nx / 2))]) xnodes[nx / 2:] = np.array([abs(xgridr[ii] - xgridr[ii + 1]) for ii in range(int(nx / 2) - 1, nx - 1)]) ynodes = ygridr.copy() ny = ygridr.shape[0] ynodes[:ny / 2] = np.array([abs(ygridr[ii] - ygridr[ii + 1]) for ii in range(int(ny / 2))]) ynodes[ny / 2:] = np.array([abs(ygridr[ii] - ygridr[ii + 1]) for ii in range(int(ny / 2) - 1, ny - 1)]) #--put the grids into coordinates relative to the center of the grid xgrid = xnodes.copy() xgrid[:int(nx / 2)] = -np.array([xnodes[ii:int(nx / 2)].sum() for ii in range(int(nx / 2))]) xgrid[int(nx / 2):] = np.array([xnodes[int(nx / 2):ii + 1].sum() for ii in range(int(nx / 2), nx)]) - xnodes[int(nx / 2)] ygrid = ynodes.copy() ygrid[:int(ny / 2)] = -np.array([ynodes[ii:int(ny / 2)].sum() for ii in range(int(ny / 2))]) ygrid[int(ny / 2):] = np.array([ynodes[int(ny 
/ 2):ii + 1].sum() for ii in range(int(ny / 2), ny)]) - ynodes[int(ny / 2)] # make sure that the stations are in the center of the cell as requested by # the code. for sdict in slst: # look for the closest grid line xx = [nn for nn, xf in enumerate(xgrid) if xf > (sdict['east'] - xspacing) and xf < (sdict['east'] + xspacing)] # shift the station to the center in the east-west direction if xgrid[xx[0]] < sdict['east']: sdict['east_c'] = xgrid[xx[0]] + xspacing / 2 elif xgrid[xx[0]] > sdict['east']: sdict['east_c'] = xgrid[xx[0]] - xspacing / 2 # look for closest grid line yy = [mm for mm, yf in enumerate(ygrid) if yf > (sdict['north'] - yspacing) and yf < (sdict['north'] + yspacing)] # shift station to center of cell in north-south direction if ygrid[yy[0]] < sdict['north']: sdict['north_c'] = ygrid[yy[0]] + yspacing / 2 elif ygrid[yy[0]] > sdict['north']: sdict['north_c'] = ygrid[yy[0]] - yspacing / 2 #=Plot the data if desired========================= if plotyn == 'y': fig = plt.figure(1, figsize=[10, 10], dpi=300) #---plot map view ax1 = fig.add_subplot(1, 2, 1, aspect='equal') for sdict in slst: # make sure the station is in the center of the cell ax1.scatter(sdict['east_c'], sdict['north_c'], marker='v') for xp in xgrid: ax1.plot([xp, xp], [ygrid.min(), ygrid.max()], color='k') for yp in ygrid: ax1.plot([xgrid.min(), xgrid.max()], [yp, yp], color='k') if plotxlimits is None: ax1.set_xlim(locations[:, 0].min() - 10 * xspacing, locations[:, 0].max() + 10 * xspacing) else: ax1.set_xlim(plotxlimits) if plotylimits is None: ax1.set_ylim(locations[:, 1].min() - 50 * yspacing, locations[:, 1].max() + 50 * yspacing) else: ax1.set_ylim(plotylimits) ax1.set_ylabel('Northing (m)', fontdict={'size': 10, 'weight': 'bold'}) ax1.set_xlabel('Easting (m)', fontdict={'size': 10, 'weight': 'bold'}) # ----plot depth view ax2 = fig.add_subplot(1, 2, 2, aspect='auto') for xp in xgrid: ax2.plot([xp, xp], [-zgrid.sum(), 0], color='k') for sdict in slst: ax2.scatter(sdict['east_c'], 0, 
marker='v') for zz, zp in enumerate(zgrid): ax2.plot([xgrid.min(), xgrid.max()], [-zgrid[0:zz].sum(), -zgrid[0:zz].sum()], color='k') if plotzlimits is None: ax2.set_ylim(-zgrid1.max(), 200) else: ax2.set_ylim(plotzlimits) if plotxlimits is None: ax2.set_xlim(locations[:, 0].min() - xspacing, locations[:, 0].max() + xspacing) else: ax2.set_xlim(plotxlimits) ax2.set_ylabel('Depth (m)', fontdict={'size': 10, 'weight': 'bold'}) ax2.set_xlabel('Easting (m)', fontdict={'size': 10, 'weight': 'bold'}) plt.show() print '-' * 15 print ' Number of stations = {0}'.format(len(slst)) print ' Dimensions: ' print ' e-w = {0}'.format(xgrid.shape[0]) print ' n-s = {0}'.format(ygrid.shape[0]) print ' z = {0}'.format(zgrid.shape[0]) print ' Extensions: ' print ' e-w = {0:.1f} (m)'.format(xgrid.__abs__().sum()) print ' n-s = {0:.1f} (m)'.format(ygrid.__abs__().sum()) print ' 0-z = {0:.1f} (m)'.format(zgrid.__abs__().sum()) print '-' * 15 return ynodes, xnodes, zgrid, locations, slst def writeWSDataFile(periodlst, edilst, sitesfn=None, outfn=None, sitelocations=None, zerr=.05, ptol=.15, zerrmap=[10, 1, 1, 10], savepath=None, ncol=5, units='mv'): """ writes a data file for WSINV3D from winglink outputs Inputs: -------- **periodlst** :list periods to extract from edifiles, can get them from using the function getPeriods. **edilst** : list list of full paths to .edi files to use for inversion **sitelocations** : np.array (ns,2) array of station locations where [:,0] corresponds to the east-west location and [:,1] corresponds to the north-south location. This can be found from Make3DGrid. Locations are in meters in grid coordinates. **sitesfn** : string if you used Winglink to make the model then you need to input the sites filename (full path) **outfn** : string if you used Winglink to make the model need to input the winglink .out file (full path) **savepath** : string directory or full path to save data file to, default path is dirname sitesfn. 
saves as: savepath/WSDataFile.dat *Need to input if you did not use Winglink* **zerr** : float percent error to give to impedance tensor components in decimal form --> 10% = 0.10 *default* is .05 **ptol** : float percent tolerance to locate frequencies in case edi files don't have the same frequencies. Need to add interpolation. *default* is 0.15 **zerrmap** : tuple (zxx,zxy,zyx,zyy) multiple to multiply err of zxx,zxy,zyx,zyy by. Note the total error is zerr*zerrmap[ii] **ncol** : int number of columns in outfn, sometimes it outputs different number of columns. Returns: -------- **datafn** : full path to data file, saved in dirname(sitesfn) or savepath where savepath can be a directory or full filename """ ns = len(edilst) # get units correctly if units == 'mv': zconv = 1. / 796. # create the output filename if savepath is None: ofile = os.path.join(os.path.dirname(sitesfn), 'WSDataFile.dat') elif savepath.find('.') == -1: ofile = os.path.join(savepath, 'WSDataFile.dat') else: ofile = savepath # if there is a site file from someone who naively used winglink if sitesfn is not None: # read in stations from sites file sitelst, slst = readSitesFile(sitesfn) # get x and y locations on a relative grid xlst, ylst = getXY(sitesfn, outfn, ncol=ncol) # if the user made a grid in python or some other fashion if sitelocations is not None: if isinstance(sitelocations[0], dict): xlst = np.zeros(ns) ylst = np.zeros(ns) slst = [] for dd, sd in enumerate(sitelocations): xlst[dd] = sd['east_c'] ylst[dd] = sd['north_c'] slst.append(sd['station']) else: xlst = sitelocations[:, 0] ylst = sitelocations[:, 1] # define some lengths nperiod = len(periodlst) # make an array to put data into for easy writing zarr = np.zeros((ns, nperiod, 4), dtype='complex') #--------find frequencies------------------------------------------------- linelst = [] for ss, edi in enumerate(edilst): if not os.path.isfile(edi): raise IOError('Could not find ' + edi) z1 = Z.Z(edi) sdict = {} fspot = {} for ff, f1 
in enumerate(periodlst): for kk, f2 in enumerate(z1.period): if f2 >= (1 - ptol) * f1 and f2 <= (1 + ptol) * f1: zderr = np.array([abs(z1.zvar[kk, nn, mm]) / abs(z1.z[kk, nn, mm]) * 100 for nn in range(2) for mm in range(2)]) fspot['{0:.6g}'.format(f1)] = (kk, f2, zderr[0], zderr[1], zderr[2], zderr[3]) zarr[ss, ff, :] = z1.z[kk].reshape(4,) print z1.station, len(fspot) sdict['fspot'] = fspot sdict['station'] = z1.station linelst.append(sdict) #-----Write data file----------------------------------------------------- ofid = file(ofile, 'w') ofid.write('{0:d} {1:d} {2:d}\n'.format(ns, nperiod, 8)) # write N-S locations ofid.write('Station_Location: N-S \n') for ii in range(ns / 8 + 1): for ll in range(8): try: ofid.write('{0:+.4e} '.format(ylst[ii * 8 + ll])) except IndexError: pass ofid.write('\n') # write E-W locations ofid.write('Station_Location: E-W \n') for ii in range(ns / 8 + 1): for ll in range(8): try: ofid.write('{0:+.4e} '.format(xlst[ii * 8 + ll])) except IndexError: pass ofid.write('\n') # write impedance tensor components for ii, p1 in enumerate(periodlst): ofid.write('DATA_Period: {0:3.6f}\n'.format(p1)) for ss in range(ns): zline = zarr[ss, ii, :] for jj in range(4): ofid.write('{0:+.4e} '.format(zline[jj].real * zconv)) ofid.write('{0:+.4e} '.format(-zline[jj].imag * zconv)) ofid.write('\n') # write error as a percentage of Z for ii, p1 in enumerate(periodlst): ofid.write('ERROR_Period: {0:3.6f}\n'.format(p1)) for ss in range(ns): zline = zarr[ss, ii, :] for jj in range(4): ofid.write('{0:+.4e} '.format(zline[jj].real * zerr * zconv)) ofid.write('{0:+.4e} '.format(zline[jj].imag * zerr * zconv)) ofid.write('\n') # write error maps for ii, p1 in enumerate(periodlst): ofid.write('ERMAP_Period: {0:3.6f}\n'.format(p1)) for ss in range(ns): zline = zarr[ss, ii, :] for jj in range(4): ofid.write('{0:.5e} '.format(zerrmap[jj])) ofid.write('{0:.5e} '.format(zerrmap[jj])) ofid.write('\n') ofid.close() print 'Wrote file to: ' + ofile # write out places where 
errors are larger than error tolerance errfid = file(os.path.join(os.path.dirname(ofile), 'DataErrorLocations.txt'), 'w') errfid.write('Errors larger than error tolerance of: \n') errfid.write('Zxx={0} Zxy={1} Zyx={2} Zyy={3} \n'.format(zerrmap[0] * zerr, zerrmap[1] * zerr, zerrmap[2] * zerr, zerrmap[3] * zerr)) errfid.write('-' * 20 + '\n') errfid.write('station T=period(s) Zij err=percentage \n') for pfdict in linelst: for kk, ff in enumerate(pfdict['fspot']): if pfdict['fspot'][ff][2] > zerr * 100 * zerrmap[0]: errfid.write(pfdict['station'] + ' T=' + ff + ' Zxx err={0:.3f} \n'.format(pfdict['fspot'][ff][2])) if pfdict['fspot'][ff][3] > zerr * 100 * zerrmap[1]: errfid.write(pfdict['station'] + ' T=' + ff + ' Zxy err={0:.3f} \n'.format(pfdict['fspot'][ff][3])) if pfdict['fspot'][ff][4] > zerr * 100 * zerrmap[2]: errfid.write(pfdict['station'] + ' T=' + ff + ' Zyx err={0:.3f} \n'.format(pfdict['fspot'][ff][4])) if pfdict['fspot'][ff][5] > zerr * 100 * zerrmap[3]: errfid.write(pfdict['station'] + ' T=' + ff + ' Zyy err={0:.3f} \n'.format(pfdict['fspot'][ff][5])) errfid.close() print 'Wrote errors lager than tolerance to: ' print os.path.join(os.path.dirname(ofile), 'DataErrorLocations.txt') return ofile, linelst def writeInit3DFile_wl(outfn, rhostart=100, ncol=5, savepath=None): """ Makes an init3d file for WSINV3D Inputs: outfn = full path to .out file from winglink rhostart = starting homogeneous half space in Ohm-m ncol = number of columns for data to be written in savepath = full path to save the init file Output: ifile = full path to init file """ # create the output filename if savepath is None: ifile = os.path.join(os.path.dirname(outfn), 'init3d') elif savepath.find('.') == -1: ifile = os.path.join(savepath, 'init3d') else: ifile = savepath dx, dy, dz = readWLOutFile(outfn, ncol=ncol) nx = len(dx) ny = len(dy) nz = len(dz) init_modelFH = open(ifile, 'w') init_modelFH.write('#Initial model \n') init_modelFH.write('%i %i %i 1 \n' % (ny, nx, nz)) # write y 
locations y_string = '' y_counter = 0 for y_idx in range(ny): y_string += '%.3e ' % (dy[y_idx]) y_counter += 1 if y_counter == 8: y_string += '\n' y_counter = 0 if ny % 8: y_string += '\n' init_modelFH.write(y_string) # write x locations x_string = '' x_counter = 0 for x_idx in range(nx): x_string += '%.3e ' % (dx[x_idx]) x_counter += 1 if x_counter == 8: x_string += '\n' x_counter = 0 if nx % 8: x_string += '\n' init_modelFH.write(x_string) # write z locations z_string = '' z_counter = 0 for z_idx in range(nz): z_string += '%.3e ' % (dz[z_idx]) z_counter += 1 if z_counter == 8: z_string += '\n' z_counter = 0 if nz % 8: z_string += '\n' init_modelFH.write(z_string) init_modelFH.write('%i \n' % int(rhostart)) init_modelFH.close() print 'Wrote init file to: ' + ifile return ifile def writeInit3DFile(xgrid, ygrid, zgrid, savepath, reslst=100, title='Initial File for WSINV3D', resmodel=None): """ will write an initial file for wsinv3d. At the moment can only make a layered model that can then be manipulated later. Input for a layered model is in layers which is [(layer1,layer2,resistivity index for reslst)] Note that x is assumed to be S --> N, y is assumed to be W --> E and z is positive downwards. Also, the xgrid, ygrid and zgrid are assumed to be the relative distance between neighboring nodes. This is needed because wsinv3d builds the model from the bottom NW corner assuming the cell width from the init file. Therefore the first line or index=0 is the southern most row of cells, so if you build a model by hand the the layer block will look upside down if you were to picture it in map view. Confusing, perhaps, but that is the way it is. Argumens: ---------- **xgrid** : np.array(nx) block dimensions (m) in the N-S direction. **Note** that the code reads the grid assuming that index=0 is the southern most point. **ygrid** : np.array(ny) block dimensions (m) in the E-W direction. **Note** that the code reads in the grid assuming that index=0 is the western most point. 
**zgrid** : np.array(nz) block dimensions (m) in the vertical direction. This is positive downwards. **savepath** : string Path to the director where the initial file will be saved as savepath/init3d **reslst** : float or list The start resistivity as a float or a list of resistivities that coorespond to the starting resistivity model **resmodel**. This must be input if you input **resmodel** **title** : string Title that goes into the first line of savepath/init3d **resmodel** : np.array((nx,ny,nz)) Starting resistivity model. Each cell is allocated an integer value that cooresponds to the index value of **reslst**. **Note** again that the modeling code assumes that the first row it reads in is the southern most row and the first column it reads in is the western most column. Similarly, the first plane it reads in is the Earth's surface. Returns: -------- **initfn** : full path to initial file """ if not isinstance(reslst, list): reslst = [reslst] if os.path.isdir(savepath) == True: ifn = os.path.join(savepath, "init3d") else: ifn = os.path.join(savepath) ifid = file(ifn, 'w') ifid.write('# ' + title + '\n'.upper()) ifid.write('{0} {1} {2} {3}\n'.format(xgrid.shape[0], ygrid.shape[0], zgrid.shape[0], len(reslst))) # write S --> N node block for ii, xx in enumerate(xgrid): ifid.write('{0:>12}'.format('{:.1f}'.format(abs(xx)))) if ii != 0 and np.remainder(ii + 1, 5) == 0: ifid.write('\n') elif ii == xgrid.shape[0] - 1: ifid.write('\n') # write W --> E node block for jj, yy in enumerate(ygrid): ifid.write('{0:>12}'.format('{:.1f}'.format(abs(yy)))) if jj != 0 and np.remainder(jj + 1, 5) == 0: ifid.write('\n') elif jj == ygrid.shape[0] - 1: ifid.write('\n') # write top --> bottom node block for kk, zz in enumerate(zgrid): ifid.write('{0:>12}'.format('{:.1f}'.format(abs(zz)))) if kk != 0 and np.remainder(kk + 1, 5) == 0: ifid.write('\n') elif kk == zgrid.shape[0] - 1: ifid.write('\n') # write the resistivity list for ff in reslst: ifid.write('{0:.1f} '.format(ff)) 
ifid.write('\n') # else: if resmodel is None: ifid.close() else: # get similar layers l1 = 0 layers = [] for zz in range(zgrid.shape[0] - 1): if (resmodel[:, :, zz] == resmodel[:, :, zz + 1]).all() == False: layers.append((l1, zz)) l1 = zz + 1 # need to add on the bottom layers layers.append((l1, zgrid.shape[0] - 1)) # write out the layers from resmodel for ll in layers: ifid.write('{0} {1}\n'.format(ll[0] + 1, ll[1] + 1)) for xx in range(xgrid.shape[0]): for yy in range(ygrid.shape[0]): ifid.write('{0:.0f} '.format(resmodel[xx, yy, ll[0]])) ifid.write('\n') print 'Wrote file to: ' + ifn return ifn def readInit3D(initfn): """ read an initial file and return the pertinent information including grid positions in coordinates relative to the center point (0,0) and starting model. Arguments: ---------- **initfn** : full path to initializing file. Returns: -------- **xgrid** : np.array(nx) array of nodes in S --> N direction **ygrid** : np.array(ny) array of nodes in the W --> E direction **zgrid** : np.array(nz) array of nodes in vertical direction positive downwards **resistivitivityModel** : dictionary dictionary of the starting model with keys as layers **reslst** : list list of resistivity values in the model **titlestr** : string title string """ ifid = file(initfn, 'r') ilines = ifid.readlines() ifid.close() titlestr = ilines[0] # get size of dimensions, remembering that x is N-S, y is E-W, z is + down nsize = ilines[1].strip().split() nx = int(nsize[0]) ny = int(nsize[1]) nz = int(nsize[2]) # initialize empy arrays to put things into xnodes = np.zeros(nx) ynodes = np.zeros(ny) znodes = np.zeros(nz) resmodel = np.zeros((nx, ny, nz)) # get the grid line locations nn = 2 xx = 0 while xx < nx: iline = ilines[nn].strip().split() for xg in iline: xnodes[xx] = float(xg) xx += 1 nn += 1 yy = 0 while yy < ny: iline = ilines[nn].strip().split() for yg in iline: ynodes[yy] = float(yg) yy += 1 nn += 1 zz = 0 while zz < nz: iline = ilines[nn].strip().split() for zg in iline: 
znodes[zz] = float(zg) zz += 1 nn += 1 # put the grids into coordinates relative to the center of the grid xgrid = xnodes.copy() xgrid[:int(nx / 2)] = -np.array([xnodes[ii:int(nx / 2)].sum() for ii in range(int(nx / 2))]) xgrid[int(nx / 2):] = np.array([xnodes[int(nx / 2):ii + 1].sum() for ii in range(int(nx / 2), nx)]) - xnodes[int(nx / 2)] ygrid = ynodes.copy() ygrid[:int(ny / 2)] = -np.array([ynodes[ii:int(ny / 2)].sum() for ii in range(int(ny / 2))]) ygrid[int(ny / 2):] = np.array([ynodes[int(ny / 2):ii + 1].sum() for ii in range(int(ny / 2), ny)]) - ynodes[int(ny / 2)] zgrid = np.array([znodes[:ii + 1].sum() for ii in range(nz)]) # get the resistivity values reslst = [float(rr) for rr in ilines[nn].strip().split()] nn += 1 # get model iline = ilines[nn].strip().split() if len(iline) == 0 or len(iline) == 1: return xgrid, ygrid, zgrid, reslst, titlestr, resmodel else: while nn < len(ilines): iline = ilines[nn].strip().split() if len(iline) == 2: l1 = int(iline[0]) - 1 l2 = int(iline[1]) nn += 1 xx = 0 elif len(iline) == 0: break else: yy = 0 while yy < ny: resmodel[xx, yy, l1:l2] = int(iline[yy]) # if l1==20: # print nn,xx,yy,l1,l2,iline[yy] yy += 1 xx += 1 nn += 1 return xgrid, ygrid, zgrid, reslst, titlestr, resmodel, xnodes, ynodes, znodes def writeStartupFile(datafn, initialfn=None, outputfn=None, savepath=None, apriorfn=None, modells=[5, 0.3, 0.3, 0.3], targetrms=1.0, control=None, maxiter=10, errortol=None, staticfn=None, lagrange=None): """ makes a startup file for WSINV3D t. Most of these parameters are not input Inputs: datafn = full path to the data file written for inversion initialfn = full path to init file outputfn = output stem to which the _model and _resp will be written savepath = full path to save the startup file to apriorfn = full path to apriori model modells = smoothing parameters targetrms = target rms control = something maxiter = maximum number of iterations errotol = error tolerance for the computer? 
staticfn = full path to static shift file name lagrange = starting lagrange multiplier Outputs: sfile = full path to startup file """ # create the output filename if savepath is None: sfile = os.path.join(os.path.dirname(datafn), 'startup') elif savepath.find('.') == -1: sfile = os.path.join(savepath, 'startup') else: sfile = savepath sfid = file(sfile, 'w') sfid.write( 'DATA_FILE' + ' ' * 11 + '../' + os.path.basename(datafn) + '\n') if outputfn is None: sfid.write('OUTPUT_FILE' + ' ' * 9 + 'Iter_ \n') else: sfid.write('OUTPUT_FILE' + ' ' * 9 + outputfn + ' \n') if initialfn is None: sfid.write('INITIAL_MODEL_FILE' + ' ' * 2 + '../init3d \n') else: sfid.write('INITIAL_MODEL_FILE' + ' ' * 2 + initialfn + ' \n') if apriorfn is None: sfid.write('PRIOR_MODEL_FILE' + ' ' * 4 + 'default \n') else: sfid.write('PRIOR_MODEL_FILE' + ' ' * 4 + apriorfn + ' \n') if control is None: sfid.write('CONTROL_MODEL_INDEX' + ' ' + 'default \n') else: sfid.write('CONTROL_MODEL_INDEX' + ' ' + control + ' \n') sfid.write('TARGET_RMS' + ' ' * 10 + '{0} \n'.format(targetrms)) sfid.write('MAX_NO_ITERATION' + ' ' * 4 + '{0} \n'.format(maxiter)) sfid.write('MODEL_LENGTH_SCALE' + ' ' * 2 + '{0} {1:.1f} {1:.1f} {1:.1f} \n'.format(modells[0], modells[1], modells[2], modells[3])) if lagrange is None: sfid.write('LAGRANGE_INFO' + ' ' * 7 + 'default \n') else: sfid.write('LAGRANGE_INFO' + ' ' * 7 + lagrange + ' \n') if errortol is None: sfid.write('ERROR_TOL_LEVEL' + ' ' * 5 + 'default \n') else: sfid.write('ERROR_TOL_LEVEL' + ' ' * 5 + errortol + ' \n') if staticfn is None: sfid.write('STATIC_FILE' + ' ' * 9 + 'default \n') else: sfid.write('STATIC_FILE' + ' ' * 9 + staticfn + ' \n') sfid.close() print 'Wrote startup file to: ' + sfile return sfile def readDataFile(datafn, sitesfn=None, units='mv'): """ read in data file Inputs: datafn = full path to data file sitesfn = full path to sites file output by winglink units = 'mv' always Outputs: period = list of periods used for the inversion zarr = 
array of impedance values (number of staitons x number of periods x 2 x 2) zerr = array of errors in impedance component nsarr = station locations relative distance from center of grid in N-S ewarr = station locations relative distance from center of grid in E-W sitelst = list of sites used in data """ if units == 'mv': zconv = 796. else: zconv = 1 dfid = file(datafn, 'r') dlines = dfid.readlines() # get size number of stations, number of frequencies, number of Z # components ns, nf, nz = np.array(dlines[0].strip().split(), dtype='int') nsstart = 2 findlst = [] for ii, dline in enumerate(dlines[1:50], 1): if dline.find('Station_Location: N-S') == 0: findlst.append(ii) elif dline.find('Station_Location: E-W') == 0: findlst.append(ii) elif dline.find('DATA_Period:') == 0: findlst.append(ii) ncol = len(dlines[nsstart].strip().split()) # print ncol # nsstop=nsstart+ns/ncol+1 # ewstart=nsstop+1 # ewstop=ewstart+ns/ncol+1 # zstart=ewstop # print nsstop,ewstart,ewstop,zstart # get site names if entered a sites file if sitesfn is not None: slst, sitelst = readSitesFile(sitesfn) else: sitelst = np.arange(ns) # get N-S locations nsarr = np.zeros(ns) for ii, dline in enumerate(dlines[findlst[0] + 1:findlst[1]], 0): dline = dline.strip().split() for jj in range(ncol): try: nsarr[ii * ncol + jj] = float(dline[jj]) except IndexError: pass except ValueError: break # get E-W locations ewarr = np.zeros(ns) for ii, dline in enumerate(dlines[findlst[1] + 1:findlst[2]], 0): dline = dline.strip().split() for jj in range(8): try: ewarr[ii * ncol + jj] = float(dline[jj]) except IndexError: pass except ValueError: break # make some empty array to put stuff into period = np.zeros(nf) zarr = np.zeros((ns, nf, 2, 2), dtype=np.complex) zerr = np.zeros_like(zarr) zerrmap = np.zeros_like(zarr) # get data pcount = 0 zcount = 0 for ii, dl in enumerate(dlines[findlst[2]:findlst[2] + nf * (ns + 1)]): if dl.find('DATA_Period') == 0: period[pcount] = float(dl.strip().split()[1]) kk = 0 pcount += 1 if 
ii == 0: pass else: zcount += 1 else: zline = np.array(dl.strip().split(), dtype=np.float) * zconv zarr[kk, zcount, :, :] = np.array([[zline[0] - 1j * zline[1], zline[2] - 1j * zline[3]], [zline[4] - 1j * zline[5], zline[6] - 1j * zline[7]]]) kk += 1 # if the data file is made from this program or is the input data file than # get the errors from that file if len(dlines) > 2 * nf * ns: print 'Getting Error' pecount = 0 zecount = 0 for ii, dl in enumerate( dlines[findlst[2] + nf * (ns + 1):findlst[2] + 2 * nf * (ns + 1)]): if dl.find('ERROR_Period') == 0: kk = 0 pecount += 1 if ii == 0: pass else: zecount += 1 else: zline = np.array(dl.strip().split(), dtype=np.float) * zconv zerr[kk, zecount, :, :] = np.array([[zline[0] - 1j * zline[1], zline[2] - 1j * zline[3]], [zline[4] - 1j * zline[5], zline[6] - 1j * zline[7]]]) kk += 1 # get errormap values if len(dlines) > 3 * nf * ns: print 'Getting Error Map' pmcount = 0 zmcount = 0 for ii, dl in enumerate( dlines[findlst[2] + 2 * nf * (ns + 1):findlst[2] + 3 * nf * (ns + 1)]): if dl.find('ERMAP_Period') == 0: kk = 0 pmcount += 1 if ii == 0: pass else: zmcount += 1 else: # account for end of file empty lines if len(dl.split()) > 2: zline = np.array(dl.strip().split(), dtype=np.float) zerrmap[kk, zmcount, :, :] = np.array([[zline[0] - 1j * zline[1], zline[2] - 1j * zline[3]], [zline[4] - 1j * zline[5], zline[6] - 1j * zline[7]]]) kk += 1 # multiply errmap and error and convert from Ohm to mv/km nT zerr = zerr * zerrmap return period, zarr, zerr, nsarr, ewarr, sitelst def plotDataResPhase(datafn, respfn=None, sitesfn=None, plottype='1', plotnum=1, dpi=150, units='mv', colormode='color'): """ plot responses from the data file and if there is a response file Inputs: datafn = fullpath to data file respfn = full path to respsonse file, if not input, just the data is plotted. 
Can be a list of response files from the same inversion plottype= '1' to plot each station in a different window [stations] for list of stations to plot (stations are numbers) plotnum = 1 for just xy,yx 2 for all components """ # plot in color mode or black and white if colormode == 'color': # color for data cted = (0, 0, 1) ctmd = (1, 0, 0) mted = '*' mtmd = '*' # color for occam model ctem = (0, .3, 1.0) ctmm = (1, .3, 0) mtem = '+' mtmm = '+' elif colormode == 'bw': # color for data cted = (0, 0, 0) ctmd = (0, 0, 0) mted = '*' mtmd = 'v' # color for occam model ctem = (0.6, .6, .6) ctmm = (.6, .6, .6) mtem = '+' mtmm = 'x' # load the data file period, dz, dzerr, north, east, slst = readDataFile(datafn, sitesfn=sitesfn, units=units) # get shape of impedance tensors ns, nf = dz.shape[0], dz.shape[1] # read in response files if respfn is not None: rzlst = [] rzerrlst = [] if not isinstance(respfn, list): respfn = [respfn] for rfile in respfn: period, rz, rzerr, north, east, slst = readDataFile(rfile, sitesfn=sitesfn, units=units) rzlst.append(rz) rzerrlst.append(rzerr) else: rzlst = [] # get number of response files nr = len(rzlst) if isinstance(plottype, list): ns = len(plottype) plt.rcParams['font.size'] = 10 plt.rcParams['figure.subplot.left'] = .13 plt.rcParams['figure.subplot.right'] = .98 plt.rcParams['figure.subplot.bottom'] = .1 plt.rcParams['figure.subplot.top'] = .92 plt.rcParams['figure.subplot.wspace'] = .25 plt.rcParams['figure.subplot.hspace'] = .05 fontdict = {'size': 12, 'weight': 'bold'} gs = gridspec.GridSpec(2, 2, height_ratios=[2, 1.5], hspace=.1) if plottype != '1': pstationlst = [] if not isinstance(plottype, list): plottype = [plottype] for ii, station in enumerate(slst): if isinstance(station, str): for pstation in plottype: if station.find(str(pstation)) >= 0: pstationlst.append(ii) else: for pstation in plottype: if station == int(pstation): pstationlst.append(ii) else: pstationlst = np.arange(ns) for jj in pstationlst: print 'Plotting: ' 
+ str(slst[jj]) # check for masked points dz[jj][np.where(dz[jj] == 7.95204E5 - 7.95204E5j)] = 0.0 + 0.0j dzerr[jj][np.where(dz[jj] == 7.95204E5 - 7.95204E5j)] = 1.0 + 1.0j # convert to apparent resistivity and phase rp = Z.ResPhase(dz[jj], period, zvar=dzerr[jj]) # find locations where points have been masked nzxx = np.where(rp.resxx != 0)[0] nzxy = np.where(rp.resxy != 0)[0] nzyx = np.where(rp.resyx != 0)[0] nzyy = np.where(rp.resyy != 0)[0] if respfn is not None: plotr = True else: plotr = False # make figure for xy,yx components if plotnum == 1: fig = plt.figure(jj, [10, 12], dpi=dpi) gs.update(hspace=.1, wspace=.15, left=.1) elif plotnum == 2: fig = plt.figure(jj, [12, 12], dpi=dpi) gs.update(hspace=.1, wspace=.15, left=.07) #---------plot the apparent resistivity-------------------------------- if plotnum == 1: ax = fig.add_subplot(gs[0, :]) ax2 = fig.add_subplot(gs[1, :], sharex=ax) ax.yaxis.set_label_coords(-.055, 0.5) ax2.yaxis.set_label_coords(-.055, 0.5) elif plotnum == 2: ax = fig.add_subplot(gs[0, 0]) ax2 = fig.add_subplot(gs[1, 0], sharex=ax) ax.yaxis.set_label_coords(-.075, 0.5) ax2.yaxis.set_label_coords(-.075, 0.5) fig.suptitle(str(slst[jj]), fontdict={'size': 15, 'weight': 'bold'}) erxy = ax.errorbar(period[nzxy], rp.resxy[nzxy], marker=mted, ms=4, mfc='None', mec=cted, mew=1, ls=':', yerr=rp.resxyerr[nzxy], ecolor=cted, color=cted) eryx = ax.errorbar(period[nzyx], rp.resyx[nzyx], marker=mtmd, ms=4, mfc='None', mec=ctmd, mew=1, ls=':', yerr=rp.resyxerr[nzyx], ecolor=ctmd, color=ctmd) if plotr == True: for rr in range(nr): if colormode == 'color': cxy = (0, .4 + float(rr) / (3 * nr), 0) cyx = (.7 + float(rr) / (4 * nr), .13, .63 - float(rr) / (4 * nr)) elif colormode == 'bw': cxy = (1 - 1.25 / (rr + 2.), 1 - 1.25 / (rr + 2.), 1 - 1.25 / (rr + 2.)) cyx = (1 - 1.25 / (rr + 2.), 1 - 1.25 / (rr + 2.), 1 - 1.25 / (rr + 2.)) rpr = Z.ResPhase(rzlst[rr][jj], period, zvar=rzerrlst[rr][jj]) # rms=np.sqrt(np.sum([abs(np.linalg.det(rp.z[ll])- # 
np.linalg.det(rpr.z[ll]))**2 # for ll in range(len(rp.period))])/len(rp.period)) rms = np.sqrt(np.mean([(np.sqrt(abs(np.linalg.det(rp.z[ll]))) - np.sqrt(abs(np.linalg.det(rpr.z[ll]))))**2 for ll in range(len(rp.period))])) print 'RMS = {:.2f}'.format(rms) erxyr = ax.errorbar(period[nzxy], rpr.resxy[nzxy], marker=mtem, ms=8, mfc='None', mec=cxy, mew=1, ls='--', yerr=rpr.resxyerr[nzxy], ecolor=cxy, color=cxy) eryxr = ax.errorbar(period[nzyx], rpr.resyx[nzyx], marker=mtmm, ms=8, mfc='None', mec=cyx, mew=1, ls='--', yerr=rpr.resyxerr[nzyx], ecolor=cyx, color=cyx) #ax.set_xlabel('Period (s)',fontdict=fontdict) pylab.setp(ax.get_xticklabels(), visible=False) ax.set_ylabel('App. Res. ($\mathbf{\Omega \cdot m}$)', fontdict=fontdict) ax.set_yscale('log') ax.set_xscale('log') ax.set_xlim(xmin=10**(np.floor(np.log10(period[0]))), xmax=10**(np.ceil(np.log10(period[-1])))) ax.grid(True, alpha=.25) if plotr == True: ax.legend((erxy[0], eryx[0], erxyr[0], eryxr[0]), ('Data $E_x/B_y$', 'Data $E_y/B_x$', 'Mod $E_x/B_y$', 'Mod $E_y/B_x$'), loc=0, markerscale=1, borderaxespad=.01, labelspacing=.07, handletextpad=.2, borderpad=.02) else: ax.legend((erxy[0], eryx[0]), ('$E_x/B_y$', '$E_y/B_x$'), loc=0, markerscale=1, borderaxespad=.01, labelspacing=.07, handletextpad=.2, borderpad=.02) #-----Plot the phase--------------------------------------------------- ax2.errorbar(period[nzxy], rp.phasexy[nzxy], marker=mted, ms=4, mfc='None', mec=cted, mew=1, ls=':', yerr=rp.phasexyerr[nzxy], ecolor=cted, color=cted) ax2.errorbar(period[nzyx], np.array(rp.phaseyx[nzyx]) + 180, marker=mtmd, ms=4, mfc='None', mec=ctmd, mew=1, ls=':', yerr=rp.phaseyxerr[nzyx], ecolor=ctmd, color=ctmd) if plotr == True: for rr in range(nr): if colormode == 'color': cxy = (0, .4 + float(rr) / (3 * nr), 0) cyx = (.7 + float(rr) / (4 * nr), .13, .63 - float(rr) / (4 * nr)) elif colormode == 'bw': cxy = (1 - 1.25 / (rr + 2.), 1 - 1.25 / (rr + 2.), 1 - 1.25 / (rr + 2.)) cyx = (1 - 1.25 / (rr + 2.), 1 - 1.25 / (rr + 2.), 1 
- 1.25 / (rr + 2.)) rpr = Z.ResPhase(rzlst[rr][jj], period, zvar=rzerrlst[rr][jj]) ax2.errorbar(period[nzxy], rpr.phasexy[nzxy], marker=mtem, ms=8, mfc='None', mec=cxy, mew=1, ls='--', yerr=rp.phasexyerr[nzxy], ecolor=cxy, color=cxy) ax2.errorbar(period[nzyx], np.array(rpr.phaseyx[nzyx]) + 180, marker=mtmm, ms=8, mfc='None', mec=cyx, mew=1, ls='--', yerr=rp.phaseyxerr[nzyx], ecolor=cyx, color=cyx) ax2.set_xlabel('Period (s)', fontdict) ax2.set_ylabel('Phase (deg)', fontdict) ax2.set_xscale('log') # ax2.set_xlim(xmin=10**(np.floor(np.log10(period[0]))), # xmax=10**(np.ceil(np.log10(period[-1])))) # check the phase to see if any point are outside of [0:90] if min(rp.phasexy) < 0 or min(rp.phaseyx + 180) < 0: pymin = min([min(rp.phasexy), min(rp.phaseyx + 180)]) if pymin > 0: pymin = 0 else: pymin = 0 if max(rp.phasexy) > 90 or max(rp.phaseyx + 180) > 90: pymax = min([max(rp.phasexy), max(rp.phaseyx + 180)]) if pymax < 91: pymax = 90 else: pymax = 90 ax2.set_ylim(ymin=pymin, ymax=pymax) ax2.yaxis.set_major_locator(MultipleLocator(30)) ax2.yaxis.set_minor_locator(MultipleLocator(1)) ax2.grid(True, alpha=.25) if plotnum == 2: #---------plot the apparent resistivity---------------------------- ax3 = plt.subplot(gs[0, 1]) ax3.yaxis.set_label_coords(-.1, 0.5) erxx = ax3.errorbar(period[nzxx], rp.resxx[nzxx], marker=mted, ms=4, mfc='None', mec=cted, mew=1, ls=':', yerr=rp.resxxerr[nzxx], ecolor=cted, color=cted) eryy = ax3.errorbar(period[nzyy], rp.resyy[nzyy], marker=mtmd, ms=4, mfc='None', mec=ctmd, mew=1, ls=':', yerr=rp.resyyerr[nzyy], ecolor=ctmd, color=ctmd) if plotr == True: for rr in range(nr): if colormode == 'color': cxy = (0, .4 + float(rr) / (3 * nr), 0) cyx = (.7 + float(rr) / (4 * nr), .13, .63 - float(rr) / (4 * nr)) elif colormode == 'bw': cxy = (1 - 1.25 / (rr + 2.), 1 - 1.25 / (rr + 2.), 1 - 1.25 / (rr + 2.)) cyx = (1 - 1.25 / (rr + 2.), 1 - 1.25 / (rr + 2.), 1 - 1.25 / (rr + 2.)) rpr = Z.ResPhase( rzlst[rr][jj], period, zvar=rzerrlst[rr][jj]) erxxr = 
ax3.errorbar(period[nzxx], rpr.resxx[nzxx], marker=mtem, ms=8, mfc='None', mec=cxy, mew=1, ls='--', yerr=rpr.resxxerr[nzxx], ecolor=cxy, color=cxy) eryyr = ax3.errorbar(period[nzyy], rpr.resyy[nzyy], marker=mtmm, ms=8, mfc='None', mec=cyx, mew=1, ls='--', yerr=rpr.resyyerr[nzyy], ecolor=cyx, color=cyx) ax3.set_yscale('log') ax3.set_xscale('log') pylab.setp(ax3.get_xticklabels(), visible=False) ax3.set_xlim(xmin=10**(np.floor(np.log10(period[0]))), xmax=10**(np.ceil(np.log10(period[-1])))) ax3.grid(True, alpha=.25) if plotr == True: ax3.legend((erxx[0], eryy[0], erxxr[0], eryyr[0]), ('Data $E_x/B_x$', 'Data $E_y/B_y$', 'Mod $E_x/B_x$', 'Mod $E_y/B_y$'), loc=0, markerscale=1, borderaxespad=.01, labelspacing=.07, handletextpad=.2, borderpad=.02) else: ax3.legend((erxx[0], eryy[0]), ('$E_x/B_x$', '$E_y/B_y$'), loc=0, markerscale=1, borderaxespad=.01, labelspacing=.07, handletextpad=.2, borderpad=.02) #-----Plot the phase----------------------------------------------- ax4 = plt.subplot(gs[1, 1], sharex=ax3) ax4.yaxis.set_label_coords(-.1, 0.5) ax4.errorbar(period[nzxx], rp.phasexx[nzxx], marker=mted, ms=4, mfc='None', mec=cted, mew=1, ls=':', yerr=rp.phasexxerr[nzxx], ecolor=cted, color=cted) ax4.errorbar(period[nzyy], np.array(rp.phaseyy[nzyy]), marker=mtmd, ms=4, mfc='None', mec=ctmd, mew=1, ls=':', yerr=rp.phaseyyerr[nzyy], ecolor=ctmd, color=ctmd) if plotr == True: for rr in range(nr): if colormode == 'color': cxy = (0, .4 + float(rr) / (3 * nr), 0) cyx = (.7 + float(rr) / (4 * nr), .13, .63 - float(rr) / (4 * nr)) elif colormode == 'bw': cxy = (1 - 1.25 / (rr + 2.), 1 - 1.25 / (rr + 2.), 1 - 1.25 / (rr + 2.)) cyx = (1 - 1.25 / (rr + 2.), 1 - 1.25 / (rr + 2.), 1 - 1.25 / (rr + 2.)) rpr = Z.ResPhase( rzlst[rr][jj], period, zvar=rzerrlst[rr][jj]) ax4.errorbar(period[nzxx], rpr.phasexx[nzxx], marker=mtem, ms=8, mfc='None', mec=cxy, mew=1, ls='--', yerr=rp.phasexxerr[nzxx], ecolor=cxy, color=cxy) ax4.errorbar(period[nzyy], np.array(rpr.phaseyy[nzyy]), marker=mtmm, ms=8, 
mfc='None', mec=cyx, mew=1, ls='--', yerr=rp.phaseyyerr[nzyy], ecolor=cyx, color=cyx) ax4.set_xlabel('Period (s)', fontdict) #ax4.set_ylabel('Imepdance Phase (deg)',fontdict) ax4.set_xscale('log') # ax2.set_xlim(xmin=10**(np.floor(np.log10(period[0]))), # xmax=10**(np.ceil(np.log10(period[-1])))) ax4.set_ylim(ymin=-180, ymax=180) ax4.yaxis.set_major_locator(MultipleLocator(30)) ax4.yaxis.set_minor_locator(MultipleLocator(5)) ax4.grid(True, alpha=.25) def plotTensorMaps(datafn, respfn=None, sitesfn=None, periodlst=None, esize=(1, 1, 5, 5), ecolor='phimin', colormm=[(0, 90), (0, 1), (0, 4), (-2, 2)], xpad=.500, units='mv', dpi=150): """ plot phase tensor maps for data and or response, each figure is of a different period. If response is input a third column is added which is the residual phase tensor showing where the model is not fitting the data well. The data is plotted in km in units of ohm-m. Inputs: datafn = full path to data file respfn = full path to response file, if none just plots data sitesfn = full path to sites file periodlst = indicies of periods you want to plot esize = size of ellipses as: 0 = phase tensor ellipse 1 = phase tensor residual 2 = resistivity tensor ellipse 3 = resistivity tensor residual ecolor = 'phimin' for coloring with phimin or 'beta' for beta coloring colormm = list of min and max coloring for plot, list as follows: 0 = phase tensor min and max for ecolor in degrees 1 = phase tensor residual min and max [0,1] 2 = resistivity tensor coloring as resistivity on log scale 3 = resistivity tensor residual coloring as resistivity on linear scale xpad = padding of map from stations at extremities (km) units = 'mv' to convert to Ohm-m dpi = dots per inch of figure """ period, zd, zderr, nsarr, ewarr, sitelst = readDataFile(datafn, sitesfn=sitesfn, units=units) if respfn is not None: period, zr, zrerr, nsarr, ewarr, sitelst = readDataFile(respfn, sitesfn=sitesfn, units=units) if periodlst is None: periodlst = range(len(period)) # put 
locations into an logical coordinate system in km nsarr = -nsarr / 1000 ewarr = -ewarr / 1000 # get coloring min's and max's if colormm is not None: ptmin, ptmax = (colormm[0][0] * np.pi / 180, colormm[0][1] * np.pi / 180) ptrmin, ptrmax = colormm[1] rtmin, rtmax = colormm[2] rtrmin, rtrmax = colormm[3] else: pass # get ellipse sizes ptsize = esize[0] ptrsize = esize[1] rtsize = esize[2] rtrsize = esize[3] plt.rcParams['font.size'] = 10 plt.rcParams['figure.subplot.left'] = .03 plt.rcParams['figure.subplot.right'] = .98 plt.rcParams['figure.subplot.bottom'] = .1 plt.rcParams['figure.subplot.top'] = .90 plt.rcParams['figure.subplot.wspace'] = .005 plt.rcParams['figure.subplot.hspace'] = .005 ns = zd.shape[0] for ff, per in enumerate(periodlst): print 'Plotting Period: {0:.5g}'.format(period[per]) fig = plt.figure(per + 1, dpi=dpi) # get phase tensor pt = Z.PhaseTensor(zd[:, per]) # get resistivity tensor rt = Z.ResistivityTensor(zd[:, per], np.repeat(1. / period[per], ns)) if respfn is not None: # get phase tensor and residual phase tensor ptr = Z.PhaseTensor(zr[:, per]) ptd = Z.PhaseTensorResidual(zd[:, per], zr[:, per]) # get resistivity tensor and residual rtr = Z.ResistivityTensor( zr[:, per], np.repeat(1. / period[per], ns)) rtd = Z.ResistivityTensorResidual(zd[:, per], zr[:, per], np.repeat(1. 
/ period[per], ns)) if colormm is None: if ecolor == 'phimin': ptmin, ptmax = (ptr.phimin.min() / (np.pi / 2), ptr.phimin.max() / (np.pi / 2)) elif ecolor == 'beta': ptmin, ptmax = (ptr.beta.min(), ptr.beta.max()) ptrmin, ptrmax = (ptd.ecolor.min(), ptd.ecolor.max()) rtmin, rtmax = (np.log10(rtr.rhodet.min()), np.log10(rtr.rhodet.max())) rtrmin, rtrmax = rtd.rhodet.min(), rtd.rhodet.max() # make subplots ax1 = fig.add_subplot(2, 3, 1, aspect='equal') ax2 = fig.add_subplot(2, 3, 2, aspect='equal') ax3 = fig.add_subplot(2, 3, 3, aspect='equal') ax4 = fig.add_subplot(2, 3, 4, aspect='equal') ax5 = fig.add_subplot(2, 3, 5, aspect='equal') ax6 = fig.add_subplot(2, 3, 6, aspect='equal') for jj in range(ns): #-----------plot data phase tensors--------------- eheightd = pt.phimin[jj] / ptr.phimax.max() * ptsize ewidthd = pt.phimax[jj] / ptr.phimax.max() * ptsize ellipd = Ellipse((ewarr[jj], nsarr[jj]), width=ewidthd, height=eheightd, angle=pt.azimuth[jj]) # color ellipse: if ecolor == 'phimin': cvar = (pt.phimin[jj] / (np.pi / 2) - ptmin) / (ptmax - ptmin) if abs(cvar) > 1: ellipd.set_facecolor((1, 0, .1)) else: ellipd.set_facecolor((1, 1 - abs(cvar), .1)) if ecolor == 'beta': cvar = (abs(pt.beta[jj]) - ptmin) / (ptmax - ptmin) if abs(cvar) > 1: ellipd.set_facecolor((1, 1, .1)) else: ellipd.set_facecolor((1 - cvars, 1 - cvars, 1)) ax1.add_artist(ellipd) #----------plot response phase tensors--------------------- eheightr = ptr.phimin[jj] / ptr.phimax.max() * ptsize ewidthr = ptr.phimax[jj] / ptr.phimax.max() * ptsize ellipr = Ellipse((ewarr[jj], nsarr[jj]), width=ewidthr, height=eheightr, angle=ptr.azimuth[jj]) # color ellipse: if ecolor == 'phimin': cvar = (ptr.phimin[jj] / (np.pi / 2) - ptmin) / (ptmax - ptmin) if abs(cvar) > 1: ellipr.set_facecolor((1, 0, .1)) else: ellipr.set_facecolor((1, 1 - abs(cvar), .1)) if ecolor == 'beta': cvar = (abs(ptr.beta[jj]) - ptmin) / (ptmax - ptmin) if abs(cvar) > 1: ellipr.set_facecolor((1, 1, .1)) else: ellipr.set_facecolor((1 - 
cvars, 1 - cvars, 1)) ax2.add_artist(ellipr) #--------plot residual phase tensors------------- eheight = ptd.phimin[jj] / ptd.phimax.max() * ptrsize ewidth = ptd.phimax[jj] / ptd.phimax.max() * ptrsize ellip = Ellipse((ewarr[jj], nsarr[jj]), width=ewidth, height=eheight, angle=ptd.azimuth[jj] - 90) # color ellipse: cvar = (ptd.ecolor[jj] - ptrmin) / (ptrmax - ptrmin) if abs(cvar) > 1: ellip.set_facecolor((0, 0, 0)) else: ellip.set_facecolor((abs(cvar), .5, .5)) ax3.add_artist(ellip) #-----------plot data resistivity tensors--------------- rheightd = rt.rhomin[jj] / rtr.rhomax.max() * rtsize rwidthd = rt.rhomax[jj] / rtr.rhomax.max() * rtsize rellipd = Ellipse((ewarr[jj], nsarr[jj]), width=rwidthd, height=rheightd, angle=rt.rhoazimuth[jj]) # color ellipse: cvar = (np.log10(rt.rhodet[jj]) - rtmin) / (rtmax - rtmin) if cvar > .5: if cvar > 1: rellipd.set_facecolor((0, 0, 1)) else: rellipd.set_facecolor( (1 - abs(cvar), 1 - abs(cvar), 1)) else: if cvar < -1: rellipd.set_facecolor((1, 0, 0)) else: rellipd.set_facecolor( (1, 1 - abs(cvar), 1 - abs(cvar))) ax4.add_artist(rellipd) #----------plot response resistivity tensors------------------- rheightr = rtr.rhomin[jj] / rtr.rhomax.max() * rtsize rwidthr = rtr.rhomax[jj] / rtr.rhomax.max() * rtsize rellipr = Ellipse((ewarr[jj], nsarr[jj]), width=rwidthr, height=rheightr, angle=rtr.rhoazimuth[jj]) # color ellipse: cvar = (np.log10(rtr.rhodet[jj]) - rtmin) / (rtmax - rtmin) if cvar > .5: if cvar > 1: rellipr.set_facecolor((0, 0, 1)) else: rellipr.set_facecolor( (1 - abs(cvar), 1 - abs(cvar), 1)) else: if cvar < -1: rellipr.set_facecolor((1, 0, 0)) else: rellipr.set_facecolor( (1, 1 - abs(cvar), 1 - abs(cvar))) ax5.add_artist(rellipr) #--------plot residual resistivity tensors------------- rheight = rtd.rhomin[jj] / rtd.rhomax.max() * rtrsize rwidth = rtd.rhomax[jj] / rtd.rhomax.max() * rtrsize rellip = Ellipse((ewarr[jj], nsarr[jj]), width=rwidth, height=rheight, angle=rtd.azimuth[jj] - 90) # color ellipse: cvar = 
(rtd.rhodet[jj] - rtrmin) / (rtrmax - rtrmin) if cvar < 0: if cvar < -1: rellip.set_facecolor((0, 0, 1)) else: rellip.set_facecolor((1 - abs(cvar), 1 - abs(cvar), 1)) else: if cvar > 1: rellip.set_facecolor((1, 0, 0)) else: rellip.set_facecolor((1, 1 - abs(cvar), 1 - abs(cvar))) ax6.add_artist(rellip) for aa, ax in enumerate([ax1, ax2, ax3, ax4, ax5, ax6]): ax.set_xlim(ewarr.min() - xpad, ewarr.max() + xpad) ax.set_ylim(nsarr.min() - xpad, nsarr.max() + xpad) ax.grid('on') if aa < 3: pylab.setp(ax.get_xticklabels(), visible=False) if aa == 0 or aa == 3: pass else: pylab.setp(ax.get_yticklabels(), visible=False) cbax = mcb.make_axes( ax, shrink=.9, pad=.05, orientation='vertical') if aa == 0 or aa == 1: cbx = mcb.ColorbarBase(cbax[0], cmap=ptcmap, norm=Normalize(vmin=ptmin * 180 / np.pi, vmax=ptmax * 180 / np.pi), orientation='vertical', format='%.2g') cbx.set_label('Phase (deg)', fontdict={'size': 7, 'weight': 'bold'}) if aa == 2: cbx = mcb.ColorbarBase(cbax[0], cmap=ptcmap2, norm=Normalize(vmin=ptrmin, vmax=ptrmax), orientation='vertical', format='%.2g') cbx.set_label('$\Delta_{\Phi}$', fontdict={'size': 7, 'weight': 'bold'}) if aa == 3 or aa == 4: cbx = mcb.ColorbarBase(cbax[0], cmap=rtcmapr, norm=Normalize(vmin=10**rtmin, vmax=10**rtmax), orientation='vertical', format='%.2g') cbx.set_label('App. Res. 
($\Omega \cdot$m)', fontdict={'size': 7, 'weight': 'bold'}) if aa == 5: cbx = mcb.ColorbarBase(cbax[0], cmap=rtcmap, norm=Normalize(vmin=rtrmin, vmax=rtrmax), orientation='vertical', format='%.2g') cbx.set_label('$\Delta_{rho}$', fontdict={'size': 7, 'weight': 'bold'}) plt.show() #----Plot Just the data------------------ else: if colormm is None: if ecolor == 'phimin': ptmin, ptmax = (pt.phimin.min() / (np.pi / 2), pt.phimin.max() / (np.pi / 2)) elif ecolor == 'beta': ptmin, ptmax = (pt.beta.min(), pt.beta.max()) rtmin, rtmax = (np.log10(rt.rhodet.min()), np.log10(rt.rhodet.max())) ax1 = fig.add_subplot(1, 2, 1, aspect='equal') ax2 = fig.add_subplot(1, 2, 2, aspect='equal') for jj in range(ns): #-----------plot data phase tensors--------------- # check for nan in the array cause it messes with the max pt.phimax = np.nan_to_num(pt.phimax) # scale the ellipse eheightd = pt.phimin[jj] / pt.phimax.max() * ptsize ewidthd = pt.phimax[jj] / pt.phimax.max() * ptsize # make the ellipse ellipd = Ellipse((ewarr[jj], nsarr[jj]), width=ewidthd, height=eheightd, angle=pt.azimuth[jj]) # color ellipse: if ecolor == 'phimin': cvar = (pt.phimin[jj] / (np.pi / 2) - ptmin) / (ptmax - ptmin) if abs(cvar) > 1: ellipd.set_facecolor((1, 0, .1)) else: ellipd.set_facecolor((1, 1 - abs(cvar), .1)) if ecolor == 'beta': cvar = (abs(pt.beta[jj]) - ptmin) / (ptmax - ptmin) if abs(cvar) > 1: ellipd.set_facecolor((1, 1, .1)) else: ellipd.set_facecolor((1 - cvars, 1 - cvars, 1)) ax1.add_artist(ellipd) #-----------plot data resistivity tensors--------------- rt.rhomax = np.nan_to_num(rt.rhomax) rheightd = rt.rhomin[jj] / rt.rhomax.max() * rtsize rwidthd = rt.rhomax[jj] / rt.rhomax.max() * rtsize rellipd = Ellipse((ewarr[jj], nsarr[jj]), width=rwidthd, height=rheightd, angle=rt.rhoazimuth[jj]) # color ellipse: cvar = (np.log10(rt.rhodet[jj]) - rtmin) / (rtmax - rtmin) if cvar > .5: if cvar > 1: rellipd.set_facecolor((0, 0, 1)) else: rellipd.set_facecolor( (1 - abs(cvar), 1 - abs(cvar), 1)) else: if 
cvar < -1: rellipd.set_facecolor((1, 0, 0)) else: rellipd.set_facecolor( (1, 1 - abs(cvar), 1 - abs(cvar))) ax2.add_artist(rellipd) for aa, ax in enumerate([ax1, ax2]): ax.set_xlim(ewarr.min() - xpad, ewarr.max() + xpad) ax.set_ylim(nsarr.min() - xpad, nsarr.max() + xpad) ax.grid('on') ax.set_xlabel('easting (km)', fontdict={'size': 10, 'weight': 'bold'}) if aa == 1: pylab.setp(ax.get_yticklabels(), visible=False) else: ax.set_ylabel('northing (km)', fontdict={'size': 10, 'weight': 'bold'}) # cbax=mcb.make_axes(ax,shrink=.8,pad=.15,orientation='horizontal', # anchor=(.5,1)) # l,b,w,h # cbax=fig.add_axes([.1,.95,.35,.05]) if aa == 0: cbax = fig.add_axes([.12, .97, .31, .02]) cbx = mcb.ColorbarBase(cbax, cmap=ptcmap, norm=Normalize(vmin=ptmin * 180 / np.pi, vmax=ptmax * 180 / np.pi), orientation='horizontal', format='%.2g') cbx.set_label('Phase (deg)', fontdict={'size': 7, 'weight': 'bold'}) if aa == 1: cbax = fig.add_axes([.59, .97, .31, .02]) cbx = mcb.ColorbarBase(cbax, cmap=rtcmapr, norm=Normalize(vmin=10**rtmin, vmax=10**rtmax), orientation='horizontal', format='%.2g') cbx.set_label('App. Res. 
($\Omega \cdot$m)', fontdict={'size': 7, 'weight': 'bold'}) cbx.set_ticks((10**rtmin, 10**rtmax)) plt.show() def readModelFile(mfile, ncol=7): """ read in a model file as x-north, y-east, z-positive down """ mfid = file(mfile, 'r') mlines = mfid.readlines() # get info at the beggining of file info = mlines[0].strip().split() infodict = dict( [(info[0][1:], info[1]), (info[2], info[3]), (info[4], info[5])]) # get lengths of things nx, ny, nz, nn = np.array(mlines[1].strip().split(), dtype=np.int) # make empty arrays to put stuff into xarr = np.zeros(nx) yarr = np.zeros(ny) zarr = np.zeros(nz) resarr = np.zeros((nx, ny, nz)) mm = 0 nn = 2 while mm < nx: xline = mlines[nn].strip().split() for xx in xline: xarr[mm] = float(xx) mm += 1 nn += 1 mm = 0 while mm < ny: yline = mlines[nn].strip().split() for yy in yline: yarr[mm] = float(yy) mm += 1 nn += 1 mm = 0 while mm < nz: zline = mlines[nn].strip().split() for zz in zline: zarr[mm] = float(zz) mm += 1 nn += 1 # put the grids into coordinates relative to the center of the grid nsarr = xarr.copy() nsarr[:int(nx / 2)] = -np.array([xarr[ii:int(nx / 2)].sum() for ii in range(int(nx / 2))]) nsarr[int(nx / 2):] = np.array([xarr[int(nx / 2):ii + 1].sum() for ii in range(int(nx / 2), nx)]) - xarr[int(nx / 2)] ewarr = yarr.copy() ewarr[:int(ny / 2)] = -np.array([yarr[ii:int(ny / 2)].sum() for ii in range(int(ny / 2))]) ewarr[int(ny / 2):] = np.array([yarr[int(ny / 2):ii + 1].sum() for ii in range(int(ny / 2), ny)]) - yarr[int(ny / 2)] zdepth = np.array([zarr[0:ii + 1].sum() - zarr[0] for ii in range(nz)]) mm = 0 for kk in range(nz): for jj in range(ny): for ii in range(nx): resarr[(nx - 1) - ii, jj, kk] = float(mlines[nn + mm].strip()) mm += 1 return nsarr, ewarr, zdepth, resarr, infodict def plotDepthSlice(datafn, modelfn, savepath=None, map_scale='km', ew_limits=None, ns_limits=None, depth_index=None, fig_dimensions=[4, 4], dpi=300, font_size=7, climits=(0, 4), cmap='jet_r', plot_grid='n', cb_dict={}): """ plot depth slices 
""" # create a path to save figure to if it doesn't already exist if savepath is not None: if not os.path.exists(savepath): os.mkdir(savepath) # make map scale if map_scale == 'km': dscale = 1000. elif map_scale == 'm': dscale = 1. # read in data file to station locations period, zz, zzerr, ns, ew, slst = readDataFile(datafn) # scale the station locations to the desired units ns /= dscale ew /= dscale # read in model file x, y, z, resarr, idict = readModelFile(modelfn) # scale the model grid to desired units x /= dscale y /= dscale z /= dscale # create an list of depth slices to plot if depth_index is None: zrange = range(z.shape[0]) elif isinstance(depth_index, int): zrange = [depth_index] elif isinstance(depth_index, list): zrange = depth_index # set the limits of the plot if ew_limits is None: xlimits = (np.floor(ew.min()), np.ceil(ew.max())) else: xlimits = ew_limits if ns_limits is None: ylimits = (np.floor(ns.min()), np.ceil(ns.max())) else: ylimits = ns_limits # make a mesh grid of north and east north1, east1 = np.meshgrid(x, y) fdict = {'size': font_size + 2, 'weight': 'bold'} cblabeldict = {-2: '$10^{-3}$', -1: '$10^{-1}$', 0: '$10^{0}$', 1: '$10^{1}$', 2: '$10^{2}$', 3: '$10^{3}$', 4: '$10^{4}$', 5: '$10^{5}$', 6: '$10^{6}$', 7: '$10^{7}$', 8: '$10^{8}$'} plt.rcParams['font.size'] = font_size for ii in zrange: fig = plt.figure(ii, figsize=fig_dimensions, dpi=dpi) plt.clf() ax1 = fig.add_subplot(1, 1, 1, aspect='equal') ax1.pcolormesh(east1, north1, np.log10(np.rot90(resarr[:, :, ii], 3)), cmap=cmap, vmin=climits[0], vmax=climits[1]) # plot the stations for ee, nn in zip(ew, ns): ax1.text(ee, nn, '*', verticalalignment='center', horizontalalignment='center', fontdict={'size': 5, 'weight': 'bold'}) # set axis properties ax1.set_xlim(xlimits) ax1.set_ylim(ylimits) ax1.xaxis.set_minor_locator(MultipleLocator(100 * 1. / dscale)) ax1.yaxis.set_minor_locator(MultipleLocator(100 * 1. 
/ dscale)) ax1.set_ylabel('Northing (' + map_scale + ')', fontdict=fdict) ax1.set_xlabel('Easting (' + map_scale + ')', fontdict=fdict) ax1.set_title('Depth = {:.3f} '.format(z[ii]) + '(' + map_scale + ')', fontdict=fdict) # plot the grid if desired if plot_grid == 'y': for xx in x: ax1.plot([y.min(), y.max()], [xx, xx], lw=.1, color='k') for yy in y: ax1.plot([yy, yy], [x.min(), x.max()], lw=.1, color='k') # plot the colorbar try: cb_dict['orientation'] except KeyError: cb_dict['orientation'] = 'horizontal' if cb_dict['orientation'] == 'horizontal': try: ax2 = fig.add_axes(cb_dict['position']) except KeyError: ax2 = fig.add_axes((ax1.axes.figbox.bounds[3] - .225, ax1.axes.figbox.bounds[1] + .05, .3, .025)) elif cb_dict['orientation'] == 'vertical': try: ax2 = fig.add_axes(cb_dict['position']) except KeyError: ax2 = fig.add_axes((ax1.axes.figbox.bounds[2] - .15, ax1.axes.figbox.bounds[3] - .21, .025, .3)) cb = mcb.ColorbarBase(ax2, cmap=cmap, norm=Normalize(vmin=climits[0], vmax=climits[1]), orientation=cb_dict['orientation']) if cb_dict['orientation'] == 'horizontal': cb.ax.xaxis.set_label_position('top') cb.ax.xaxis.set_label_coords(.5, 1.3) elif cb_dict['orientation'] == 'vertical': cb.ax.yaxis.set_label_position('right') cb.ax.yaxis.set_label_coords(1.25, .5) cb.ax.yaxis.tick_left() cb.ax.tick_params(axis='y', direction='in') cb.set_label('Resistivity ($\Omega \cdot$m)', fontdict={'size': font_size}) cb.set_ticks(np.arange(climits[0], climits[1] + 1)) cb.set_ticklabels([cblabeldict[cc] for cc in np.arange(climits[0], climits[1] + 1)]) if savepath is not None: fig.savefig(os.path.join(savepath, "Depth_{}_{:.4f}.png".format(ii, z[ii])), dpi=dpi) fig.clear() plt.close() else: pass def computeMemoryUsage(nx, ny, nz, n_stations, n_zelements, n_period): """ compute the memory usage of a model Arguments: ---------- **nx** : int number of cells in N-S direction **ny** : int number of cells in E-W direction **nz** : int number of cells in vertical direction including 
air layers (7) **n_stations** : int number of stations **n_zelements** : int number of impedence tensor elements either 4 or 8 **n_period** : int number of periods to invert for Returns: -------- **mem_req** : float approximate memory useage in GB """ mem_req = 1.2 * (8 * (n_stations * n_period * n_zelements)**2 + 8 * (nx * ny * nz * n_stations * n_period * n_zelements)) return mem_req * 1E-9
codeparrot/github-code-clean
from __future__ import unicode_literals import calendar import json import logging import os import pycountry import pytz import random import regex import stripe import traceback from datetime import datetime, timedelta from dateutil.relativedelta import relativedelta from decimal import Decimal from django.core.files.storage import default_storage from django.core.urlresolvers import reverse from django.db import models, transaction, connection from django.db.models import Sum, F, Q from django.utils import timezone from django.conf import settings from django.core.exceptions import ValidationError from django.utils.translation import ugettext_lazy as _ from django.utils.text import slugify from django.contrib.auth.models import User, Group from enum import Enum from redis_cache import get_redis_connection from smartmin.models import SmartModel from temba.locations.models import AdminBoundary, BoundaryAlias from temba.nexmo import NexmoClient from temba.utils import analytics, str_to_datetime, get_datetime_format, datetime_to_str, random_string from temba.utils import timezone_to_country_code, languages from temba.utils.cache import get_cacheable_result, get_cacheable_attr, incrby_existing from temba.utils.email import send_template_email from temba.utils.currencies import currency_for_country from twilio.rest import TwilioRestClient from urlparse import urlparse from uuid import uuid4 from .bundles import BUNDLE_MAP, WELCOME_TOPUP_SIZE UNREAD_INBOX_MSGS = 'unread_inbox_msgs' UNREAD_FLOW_MSGS = 'unread_flow_msgs' CURRENT_EXPORT_VERSION = 10 EARLIEST_IMPORT_VERSION = 3 MT_SMS_EVENTS = 1 << 0 MO_SMS_EVENTS = 1 << 1 MT_CALL_EVENTS = 1 << 2 MO_CALL_EVENTS = 1 << 3 ALARM_EVENTS = 1 << 4 ALL_EVENTS = MT_SMS_EVENTS | MO_SMS_EVENTS | MT_CALL_EVENTS | MO_CALL_EVENTS | ALARM_EVENTS FREE_PLAN = 'FREE' TRIAL_PLAN = 'TRIAL' TIER1_PLAN = 'TIER1' TIER2_PLAN = 'TIER2' TIER3_PLAN = 'TIER3' TIER_39_PLAN = 'TIER_39' TIER_249_PLAN = 'TIER_249' TIER_449_PLAN = 'TIER_449' DAYFIRST = 
'D' MONTHFIRST = 'M' PLANS = ((FREE_PLAN, _("Free Plan")), (TRIAL_PLAN, _("Trial")), (TIER_39_PLAN, _("Bronze")), (TIER1_PLAN, _("Silver")), (TIER2_PLAN, _("Gold (Legacy)")), (TIER3_PLAN, _("Platinum (Legacy)")), (TIER_249_PLAN, _("Gold")), (TIER_449_PLAN, _("Platinum"))) DATE_PARSING = ((DAYFIRST, "DD-MM-YYYY"), (MONTHFIRST, "MM-DD-YYYY")) APPLICATION_SID = 'APPLICATION_SID' ACCOUNT_SID = 'ACCOUNT_SID' ACCOUNT_TOKEN = 'ACCOUNT_TOKEN' NEXMO_KEY = 'NEXMO_KEY' NEXMO_SECRET = 'NEXMO_SECRET' NEXMO_UUID = 'NEXMO_UUID' TRANSFERTO_ACCOUNT_LOGIN = 'TRANSFERTO_ACCOUNT_LOGIN' TRANSFERTO_AIRTIME_API_TOKEN = 'TRANSFERTO_AIRTIME_API_TOKEN' ORG_STATUS = 'STATUS' SUSPENDED = 'suspended' RESTORED = 'restored' WHITELISTED = 'whitelisted' ORG_LOW_CREDIT_THRESHOLD = 500 ORG_CREDIT_OVER = 'O' ORG_CREDIT_LOW = 'L' ORG_CREDIT_EXPIRING = 'E' # cache keys and TTLs ORG_LOCK_KEY = 'org:%d:lock:%s' ORG_CREDITS_TOTAL_CACHE_KEY = 'org:%d:cache:credits_total' ORG_CREDITS_PURCHASED_CACHE_KEY = 'org:%d:cache:credits_purchased' ORG_CREDITS_USED_CACHE_KEY = 'org:%d:cache:credits_used' ORG_ACTIVE_TOPUP_KEY = 'org:%d:cache:active_topup' ORG_ACTIVE_TOPUP_REMAINING = 'org:%d:cache:credits_remaining:%d' ORG_CREDIT_EXPIRING_CACHE_KEY = 'org:%d:cache:credits_expiring_soon' ORG_LOW_CREDIT_THRESHOLD_CACHE_KEY = 'org:%d:cache:low_credits_threshold' ORG_LOCK_TTL = 60 # 1 minute ORG_CREDITS_CACHE_TTL = 7 * 24 * 60 * 60 # 1 week class OrgEvent(Enum): """ Represents an internal org event """ topup_new = 16 topup_updated = 17 class OrgLock(Enum): """ Org-level lock types """ contacts = 1 channels = 2 credits = 3 field = 4 class OrgCache(Enum): """ Org-level cache types """ display = 1 credits = 2 class Org(SmartModel): """ An Org can have several users and is the main component that holds all Flows, Messages, Contacts, etc. Orgs know their country so they can deal with locally formatted numbers (numbers provided without a country code). As such, each org can only add phone channels from one country. 
Users will create new Org for Flows that should be kept separate (say for distinct projects), or for each country where they are deploying messaging applications. """ name = models.CharField(verbose_name=_("Name"), max_length=128) plan = models.CharField(verbose_name=_("Plan"), max_length=16, choices=PLANS, default=FREE_PLAN, help_text=_("What plan your organization is on")) plan_start = models.DateTimeField(verbose_name=_("Plan Start"), auto_now_add=True, help_text=_("When the user switched to this plan")) stripe_customer = models.CharField(verbose_name=_("Stripe Customer"), max_length=32, null=True, blank=True, help_text=_("Our Stripe customer id for your organization")) administrators = models.ManyToManyField(User, verbose_name=_("Administrators"), related_name="org_admins", help_text=_("The administrators in your organization")) viewers = models.ManyToManyField(User, verbose_name=_("Viewers"), related_name="org_viewers", help_text=_("The viewers in your organization")) editors = models.ManyToManyField(User, verbose_name=_("Editors"), related_name="org_editors", help_text=_("The editors in your organization")) surveyors = models.ManyToManyField(User, verbose_name=_("Surveyors"), related_name="org_surveyors", help_text=_("The users can login via Android for your organization")) language = models.CharField(verbose_name=_("Language"), max_length=64, null=True, blank=True, choices=settings.LANGUAGES, help_text=_("The main language used by this organization")) timezone = models.CharField(verbose_name=_("Timezone"), max_length=64) date_format = models.CharField(verbose_name=_("Date Format"), max_length=1, choices=DATE_PARSING, default=DAYFIRST, help_text=_("Whether day comes first or month comes first in dates")) webhook = models.TextField(null=True, verbose_name=_("Webhook"), help_text=_("Webhook endpoint and configuration")) webhook_events = models.IntegerField(default=0, verbose_name=_("Webhook Events"), help_text=_("Which type of actions will trigger webhook 
events.")) country = models.ForeignKey('locations.AdminBoundary', null=True, blank=True, on_delete=models.SET_NULL, help_text="The country this organization should map results for.") msg_last_viewed = models.DateTimeField(verbose_name=_("Message Last Viewed"), auto_now_add=True) flows_last_viewed = models.DateTimeField(verbose_name=_("Flows Last Viewed"), auto_now_add=True) config = models.TextField(null=True, verbose_name=_("Configuration"), help_text=_("More Organization specific configuration")) slug = models.SlugField(verbose_name=_("Slug"), max_length=255, null=True, blank=True, unique=True, error_messages=dict(unique=_("This slug is not available"))) is_anon = models.BooleanField(default=False, help_text=_("Whether this organization anonymizes the phone numbers of contacts within it")) primary_language = models.ForeignKey('orgs.Language', null=True, blank=True, related_name='orgs', help_text=_('The primary language will be used for contacts with no language preference.'), on_delete=models.SET_NULL) brand = models.CharField(max_length=128, default=settings.DEFAULT_BRAND, verbose_name=_("Brand"), help_text=_("The brand used in emails")) surveyor_password = models.CharField(null=True, max_length=128, default=None, help_text=_('A password that allows users to register as surveyors')) parent = models.ForeignKey('orgs.Org', null=True, blank=True, help_text=_('The parent org that manages this org')) multi_org = models.BooleanField(default=False, help_text=_('Put this org on the multi org level')) @classmethod def get_unique_slug(cls, name): slug = slugify(name) unique_slug = slug if unique_slug: existing = Org.objects.filter(slug=unique_slug).exists() count = 2 while existing: unique_slug = "%s-%d" % (slug, count) existing = Org.objects.filter(slug=unique_slug).exists() count += 1 return unique_slug def create_sub_org(self, name, timezone=None, created_by=None): if self.is_multi_org_level() and not self.parent: if not timezone: timezone = self.timezone if not 
created_by: created_by = self.created_by # generate a unique slug slug = Org.get_unique_slug(name) org = Org.objects.create(name=name, timezone=timezone, brand=self.brand, parent=self, slug=slug, created_by=created_by, modified_by=created_by) org.administrators.add(created_by) # initialize our org, but without any credits org.initialize(brand=org.get_branding(), topup_size=0) return org def get_branding(self): from temba.middleware import BrandingMiddleware return BrandingMiddleware.get_branding_for_host(self.brand) def lock_on(self, lock, qualifier=None): """ Creates the requested type of org-level lock """ r = get_redis_connection() lock_key = ORG_LOCK_KEY % (self.pk, lock.name) if qualifier: lock_key += (":%s" % qualifier) return r.lock(lock_key, ORG_LOCK_TTL) def has_contacts(self): """ Gets whether this org has any contacts """ from temba.contacts.models import ContactGroup counts = ContactGroup.get_system_group_counts(self, (ContactGroup.TYPE_ALL, ContactGroup.TYPE_BLOCKED)) return (counts[ContactGroup.TYPE_ALL] + counts[ContactGroup.TYPE_BLOCKED]) > 0 def has_messages(self): """ Gets whether this org has any messages (or calls) """ from temba.msgs.models import SystemLabel msg_counts = SystemLabel.get_counts(self, (SystemLabel.TYPE_INBOX, SystemLabel.TYPE_OUTBOX, SystemLabel.TYPE_CALLS)) return (msg_counts[SystemLabel.TYPE_INBOX] + msg_counts[SystemLabel.TYPE_OUTBOX] + msg_counts[SystemLabel.TYPE_CALLS]) > 0 def update_caches(self, event, entity): """ Update org-level caches in response to an event """ r = get_redis_connection() if event in [OrgEvent.topup_new, OrgEvent.topup_updated]: r.delete(ORG_CREDITS_TOTAL_CACHE_KEY % self.pk) r.delete(ORG_CREDITS_PURCHASED_CACHE_KEY % self.pk) r.delete(ORG_ACTIVE_TOPUP_KEY % self.pk) r.delete(ORG_CREDIT_EXPIRING_CACHE_KEY % self.pk) r.delete(ORG_LOW_CREDIT_THRESHOLD_CACHE_KEY % self.pk) for topup in self.topups.all(): r.delete(ORG_ACTIVE_TOPUP_REMAINING % (self.pk, topup.pk)) def clear_caches(self, caches): """ Clears 
the given cache types (currently just credits) for this org. Returns number of keys actually deleted """ if OrgCache.credits in caches: r = get_redis_connection() active_topup_keys = [ORG_ACTIVE_TOPUP_REMAINING % (self.pk, topup.pk) for topup in self.topups.all()] return r.delete(ORG_CREDITS_TOTAL_CACHE_KEY % self.pk, ORG_CREDITS_USED_CACHE_KEY % self.pk, ORG_CREDITS_PURCHASED_CACHE_KEY % self.pk, ORG_ACTIVE_TOPUP_KEY % self.pk, **active_topup_keys) else: return 0 def set_status(self, status): config = self.config_json() config[ORG_STATUS] = status self.config = json.dumps(config) self.save(update_fields=['config']) def set_suspended(self): self.set_status(SUSPENDED) def set_whitelisted(self): self.set_status(WHITELISTED) def set_restored(self): self.set_status(RESTORED) def is_suspended(self): return self.config_json().get(ORG_STATUS, None) == SUSPENDED def is_whitelisted(self): return self.config_json().get(ORG_STATUS, None) == WHITELISTED @transaction.atomic def import_app(self, data, user, site=None): from temba.flows.models import Flow from temba.campaigns.models import Campaign from temba.triggers.models import Trigger # determine if this app is being imported from the same site data_site = data.get('site', None) same_site = False # compare the hosts of the sites to see if they are the same if data_site and site: same_site = urlparse(data_site).netloc == urlparse(site).netloc # see if our export needs to be updated export_version = data.get('version', 0) from temba.orgs.models import EARLIEST_IMPORT_VERSION, CURRENT_EXPORT_VERSION if export_version < EARLIEST_IMPORT_VERSION: raise ValueError(_("Unknown version (%s)" % data.get('version', 0))) if export_version < CURRENT_EXPORT_VERSION: from temba.flows.models import FlowRevision data = FlowRevision.migrate_export(self, data, same_site, export_version) # we need to import flows first, they will resolve to # the appropriate ids and update our definition accordingly Flow.import_flows(data, self, user, same_site) 
Campaign.import_campaigns(data, self, user, same_site) Trigger.import_triggers(data, self, user, same_site) @classmethod def export_definitions(cls, site_link, flows=[], campaigns=[], triggers=[]): # remove any triggers that aren't included in our flows flow_uuids = set([f.uuid for f in flows]) filtered_triggers = [] for trigger in triggers: if trigger.flow.uuid in flow_uuids: filtered_triggers.append(trigger) triggers = filtered_triggers exported_flows = [] for flow in flows: # only export current versions flow.ensure_current_version() exported_flows.append(flow.as_json(expand_contacts=True)) exported_campaigns = [] for campaign in campaigns: for flow in campaign.get_flows(): flows.add(flow) exported_campaigns.append(campaign.as_json()) exported_triggers = [] for trigger in triggers: exported_triggers.append(trigger.as_json()) return dict(version=CURRENT_EXPORT_VERSION, site=site_link, flows=exported_flows, campaigns=exported_campaigns, triggers=exported_triggers) def config_json(self): if self.config: return json.loads(self.config) else: return dict() def can_add_sender(self): """ If an org's telephone send channel is an Android device, let them add a bulk sender """ from temba.contacts.models import TEL_SCHEME from temba.channels.models import ANDROID send_channel = self.get_send_channel(TEL_SCHEME) return send_channel and send_channel.channel_type == ANDROID def can_add_caller(self): return not self.supports_ivr() and self.is_connected_to_twilio() def supports_ivr(self): return self.get_call_channel() or self.get_answer_channel() def get_channel(self, scheme, country_code, role): """ Gets a channel for this org which supports the given scheme and role """ from temba.channels.models import SEND, CALL channel = self.channels.filter(is_active=True, scheme=scheme, role__contains=role).order_by('-pk') if country_code: channel = channel.filter(country=country_code) channel = channel.first() # no channel? 
try without country if not channel and country_code: channel = self.channels.filter(is_active=True, scheme=scheme, role__contains=role).order_by('-pk').first() if channel and (role == SEND or role == CALL): return channel.get_delegate(role) else: return channel def get_channel_for_role(self, role, scheme=None, contact_urn=None, country_code=None): from temba.contacts.models import TEL_SCHEME from temba.channels.models import SEND from temba.contacts.models import ContactURN if not scheme and not contact_urn: raise ValueError("Must specify scheme or contact URN") if contact_urn: if contact_urn: scheme = contact_urn.scheme # if URN has a previously used channel that is still active, use that if contact_urn.channel and contact_urn.channel.is_active and role == SEND: previous_sender = self.get_channel_delegate(contact_urn.channel, role) if previous_sender: return previous_sender if scheme == TEL_SCHEME: path = contact_urn.path # we don't have a channel for this contact yet, let's try to pick one from the same carrier # we need at least one digit to overlap to infer a channel contact_number = path.strip('+') prefix = 1 channel = None # try to use only a channel in the same country if not country_code: country_code = ContactURN.derive_country_from_tel(path) channels = [] if country_code: for c in self.channels.all(): if c.country == country_code: channels.append(c) # no country specific channel, try to find any channel at all if not channels: channels = [c for c in self.channels.all()] # filter based on role and activity (we do this in python as channels can be prefetched so it is quicker in those cases) senders = [] for c in channels: if c.is_active and c.address and role in c.role and not c.parent_id: senders.append(c) senders.sort(key=lambda chan: chan.id) # if we have more than one match, find the one with the highest overlap if len(senders) > 1: for sender in senders: channel_number = sender.address.strip('+') for idx in range(prefix, len(channel_number)): if idx >= 
prefix and channel_number[0:idx] == contact_number[0:idx]: prefix = idx channel = sender else: break elif senders: channel = senders[0] if channel: if role == SEND: return self.get_channel_delegate(channel, SEND) else: return channel # get any send channel without any country or URN hints return self.get_channel(scheme, country_code, role) def get_send_channel(self, scheme=None, contact_urn=None, country_code=None): from temba.channels.models import SEND return self.get_channel_for_role(SEND, scheme=scheme, contact_urn=contact_urn, country_code=country_code) def get_receive_channel(self, scheme, contact_urn=None, country_code=None): from temba.channels.models import RECEIVE return self.get_channel_for_role(RECEIVE, scheme=scheme, contact_urn=contact_urn, country_code=country_code) def get_call_channel(self, contact_urn=None, country_code=None): from temba.contacts.models import TEL_SCHEME from temba.channels.models import CALL return self.get_channel_for_role(CALL, scheme=TEL_SCHEME, contact_urn=contact_urn, country_code=country_code) def get_answer_channel(self, contact_urn=None, country_code=None): from temba.contacts.models import TEL_SCHEME from temba.channels.models import ANSWER return self.get_channel_for_role(ANSWER, scheme=TEL_SCHEME, contact_urn=contact_urn, country_code=country_code) def get_channel_delegate(self, channel, role): """ Gets a channel's delegate for the given role with caching on the org object """ cache_attr = '__%d__delegate_%s' % (channel.id, role) if hasattr(self, cache_attr): return getattr(self, cache_attr) delegate = channel.get_delegate(role) setattr(self, cache_attr, delegate) return delegate def get_schemes(self, role): """ Gets all URN schemes which this org has org has channels configured for """ cache_attr = '__schemes__%s' % role if hasattr(self, cache_attr): return getattr(self, cache_attr) schemes = set() for channel in self.channels.filter(is_active=True, role__contains=role): schemes.add(channel.scheme) setattr(self, 
cache_attr, schemes) return schemes def normalize_contact_tels(self): """ Attempts to normalize any contacts which don't have full e164 phone numbers """ from temba.contacts.models import ContactURN, TEL_SCHEME # do we have an org-level country code? if so, try to normalize any numbers not starting with + country_code = self.get_country_code() if country_code: urns = ContactURN.objects.filter(org=self, scheme=TEL_SCHEME).exclude(path__startswith="+") for urn in urns: urn.ensure_number_normalization(country_code) def get_resthooks(self): """ Returns the resthooks configured on this Org """ return self.resthooks.filter(is_active=True).order_by('slug') def get_webhook_url(self): """ Returns a string with webhook url. """ return json.loads(self.webhook).get('url') if self.webhook else None def get_webhook_headers(self): """ Returns a dictionary of any webhook headers, e.g.: {'Authorization': 'Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==', 'X-My-Special-Header': 'woo'} """ return json.loads(self.webhook).get('headers', dict()) if self.webhook else dict() def get_channel_countries(self): channel_countries = [] if not self.is_connected_to_transferto(): return channel_countries channel_country_codes = self.channels.filter(is_active=True).exclude(country=None) channel_country_codes = channel_country_codes.values_list('country', flat=True).distinct() for country_code in channel_country_codes: country_obj = pycountry.countries.get(alpha2=country_code) country_name = country_obj.name currency = currency_for_country(country_code) channel_countries.append(dict(code=country_code, name=country_name, currency_code=currency.letter, currency_name=currency.name)) return sorted(channel_countries, key=lambda k: k['name']) @classmethod def get_possible_countries(cls): return AdminBoundary.objects.filter(level=0).order_by('name') def trigger_send(self, msgs=None): """ Triggers either our Android channels to sync, or for all our pending messages to be queued to send. 
""" from temba.msgs.models import Msg from temba.channels.models import Channel, ANDROID # if we have msgs, then send just those if msgs: ids = [m.id for m in msgs] # trigger syncs for our android channels for channel in self.channels.filter(is_active=True, channel_type=ANDROID, msgs__id__in=ids): channel.trigger_sync() # and send those messages Msg.send_messages(msgs) # otherwise, sync all pending messages and channels else: for channel in self.channels.filter(is_active=True, channel_type=ANDROID): channel.trigger_sync() # otherwise, send any pending messages on our channels r = get_redis_connection() key = 'trigger_send_%d' % self.pk # only try to send all pending messages if nobody is doing so already if not r.get(key): with r.lock(key, timeout=900): pending = Channel.get_pending_messages(self) Msg.send_messages(pending) def has_airtime_transfers(self): from temba.airtime.models import AirtimeTransfer return AirtimeTransfer.objects.filter(org=self).exists() def connect_transferto(self, account_login, airtime_api_token, user): transferto_config = {TRANSFERTO_ACCOUNT_LOGIN: account_login.strip(), TRANSFERTO_AIRTIME_API_TOKEN: airtime_api_token.strip()} config = self.config_json() config.update(transferto_config) self.config = json.dumps(config) self.modified_by = user self.save() def is_connected_to_transferto(self): if self.config: config = self.config_json() transferto_account_login = config.get(TRANSFERTO_ACCOUNT_LOGIN, None) transferto_airtime_api_token = config.get(TRANSFERTO_AIRTIME_API_TOKEN, None) return transferto_account_login and transferto_airtime_api_token else: return False def remove_transferto_account(self, user): if self.config: config = self.config_json() config[TRANSFERTO_ACCOUNT_LOGIN] = '' config[TRANSFERTO_AIRTIME_API_TOKEN] = '' self.config = json.dumps(config) self.modified_by = user self.save() def connect_nexmo(self, api_key, api_secret, user): nexmo_uuid = str(uuid4()) nexmo_config = {NEXMO_KEY: api_key.strip(), NEXMO_SECRET: 
api_secret.strip(), NEXMO_UUID: nexmo_uuid} config = self.config_json() config.update(nexmo_config) self.config = json.dumps(config) self.modified_by = user self.save() # clear all our channel configurations self.clear_channel_caches() def nexmo_uuid(self): config = self.config_json() return config.get(NEXMO_UUID, None) def connect_twilio(self, account_sid, account_token, user): client = TwilioRestClient(account_sid, account_token) app_name = "%s/%d" % (settings.TEMBA_HOST.lower(), self.pk) apps = client.applications.list(friendly_name=app_name) if apps: temba_app = apps[0] else: app_url = "https://" + settings.TEMBA_HOST + "%s" % reverse('handlers.twilio_handler') # the the twiml to run when the voice app fails fallback_url = "https://" + settings.AWS_BUCKET_DOMAIN + "/voice_unavailable.xml" temba_app = client.applications.create(friendly_name=app_name, voice_url=app_url, voice_fallback_url=fallback_url, voice_fallback_method='GET', sms_url=app_url, sms_method="POST") application_sid = temba_app.sid twilio_config = {ACCOUNT_SID: account_sid, ACCOUNT_TOKEN: account_token, APPLICATION_SID: application_sid} config = self.config_json() config.update(twilio_config) self.config = json.dumps(config) self.modified_by = user self.save() # clear all our channel configurations self.clear_channel_caches() def is_connected_to_nexmo(self): if self.config: config = self.config_json() nexmo_key = config.get(NEXMO_KEY, None) nexmo_secret = config.get(NEXMO_SECRET, None) nexmo_uuid = config.get(NEXMO_UUID, None) return nexmo_key and nexmo_secret and nexmo_uuid else: return False def is_connected_to_twilio(self): if self.config: config = self.config_json() account_sid = config.get(ACCOUNT_SID, None) account_token = config.get(ACCOUNT_TOKEN, None) application_sid = config.get(APPLICATION_SID, None) if account_sid and account_token and application_sid: return True return False def remove_nexmo_account(self, user): if self.config: config = self.config_json() config[NEXMO_KEY] = '' 
config[NEXMO_SECRET] = '' self.config = json.dumps(config) self.modified_by = user self.save() # release any nexmo channels from temba.channels.models import NEXMO channels = self.channels.filter(is_active=True, channel_type=NEXMO) for channel in channels: channel.release() # clear all our channel configurations self.clear_channel_caches() def remove_twilio_account(self, user): if self.config: config = self.config_json() config[ACCOUNT_SID] = '' config[ACCOUNT_TOKEN] = '' config[APPLICATION_SID] = '' self.config = json.dumps(config) self.modified_by = user self.save() # release any twilio channels from temba.channels.models import TWILIO channels = self.channels.filter(is_active=True, channel_type=TWILIO) for channel in channels: channel.release() # clear all our channel configurations self.clear_channel_caches() def get_verboice_client(self): from temba.ivr.clients import VerboiceClient channel = self.get_call_channel() from temba.channels.models import VERBOICE if channel.channel_type == VERBOICE: return VerboiceClient(channel) return None def get_twilio_client(self): config = self.config_json() from temba.ivr.clients import TwilioClient if config: account_sid = config.get(ACCOUNT_SID, None) auth_token = config.get(ACCOUNT_TOKEN, None) if account_sid and auth_token: return TwilioClient(account_sid, auth_token, org=self) return None def get_nexmo_client(self): config = self.config_json() if config: api_key = config.get(NEXMO_KEY, None) api_secret = config.get(NEXMO_SECRET, None) if api_key and api_secret: return NexmoClient(api_key, api_secret) return None def clear_channel_caches(self): """ Clears any cached configurations we have for any of our channels. """ from temba.channels.models import Channel for channel in self.channels.exclude(channel_type='A'): Channel.clear_cached_channel(channel.pk) def get_country_code(self): """ Gets the 2-digit country code, e.g. 
RW, US """ # first try the actual country field if self.country: try: country = pycountry.countries.get(name=self.country.name) if country: return country.alpha2 except KeyError: # pycountry blows up if we pass it a country name it doesn't know pass # if that isn't set and we only have have one country set for our channels, use that countries = self.channels.filter(is_active=True).exclude(country=None).order_by('country') countries = countries.distinct('country').values_list('country', flat=True) if len(countries) == 1: return countries[0] return None def get_language_codes(self): return get_cacheable_attr(self, '_language_codes', lambda: {l.iso_code for l in self.languages.all()}) def set_languages(self, user, iso_codes, primary): """ Sets languages for this org, creating and deleting language objects as necessary """ for iso_code in iso_codes: name = languages.get_language_name(iso_code) language = self.languages.filter(iso_code=iso_code).first() # if it's valid and doesn't exist yet, create it if name and not language: language = self.languages.create(iso_code=iso_code, name=name, created_by=user, modified_by=user) if iso_code == primary: self.primary_language = language self.save(update_fields=('primary_language',)) # unset the primary language if not in the new list of codes if self.primary_language and self.primary_language.iso_code not in iso_codes: self.primary_language = None self.save(update_fields=('primary_language',)) # remove any languages that are not in the new list self.languages.exclude(iso_code__in=iso_codes).delete() if hasattr(self, '_language_codes'): # invalidate language cache if set delattr(self, '_language_codes') def get_dayfirst(self): return self.date_format == DAYFIRST def get_tzinfo(self): return pytz.timezone(self.timezone) def format_date(self, datetime, show_time=True): """ Formats a datetime with or without time using this org's date format """ formats = get_datetime_format(self.get_dayfirst()) format = formats[1] if show_time 
else formats[0] return datetime_to_str(datetime, format, False, self.get_tzinfo()) def parse_date(self, date_string): if isinstance(date_string, datetime): return date_string return str_to_datetime(date_string, self.get_tzinfo(), self.get_dayfirst()) def parse_decimal(self, decimal_string): parsed = None try: parsed = Decimal(decimal_string) if not parsed.is_finite() or parsed > Decimal('999999999999999999999999'): parsed = None except Exception: pass return parsed def generate_location_query(self, name, level, is_alias=False): if is_alias: query = dict(name__iexact=name, boundary__level=level) query['__'.join(['boundary'] + ['parent'] * level)] = self.country else: query = dict(name__iexact=name, level=level) query['__'.join(['parent'] * level)] = self.country return query def find_boundary_by_name(self, name, level, parent): """ Finds the boundary with the passed in name or alias on this organization at the stated level. @returns Iterable of matching boundaries """ # first check if we have a direct name match if parent: boundary = parent.children.filter(name__iexact=name, level=level) else: query = self.generate_location_query(name, level) boundary = AdminBoundary.objects.filter(**query) # not found by name, try looking up by alias if not boundary: if parent: alias = BoundaryAlias.objects.filter(name__iexact=name, boundary__level=level, boundary__parent=parent).first() else: query = self.generate_location_query(name, level, True) alias = BoundaryAlias.objects.filter(**query).first() if alias: boundary = [alias.boundary] return boundary def parse_location(self, location_string, level, parent=None): """ Attempts to parse the passed in location string at the passed in level. This does various tokenizing of the string to try to find the best possible match. @returns Iterable of matching boundaries """ # no country? 
bail if not self.country or not isinstance(location_string, basestring): return [] # now look up the boundary by full name boundary = self.find_boundary_by_name(location_string, level, parent) if not boundary: # try removing punctuation and try that bare_name = regex.sub(r"\W+", " ", location_string, flags=regex.UNICODE | regex.V0).strip() boundary = self.find_boundary_by_name(bare_name, level, parent) # if we didn't find it, tokenize it if not boundary: words = regex.split(r"\W+", location_string.lower(), flags=regex.UNICODE | regex.V0) if len(words) > 1: for word in words: boundary = self.find_boundary_by_name(word, level, parent) if not boundary: break if not boundary: # still no boundary? try n-gram of 2 for i in range(0, len(words) - 1): bigram = " ".join(words[i:i + 2]) boundary = self.find_boundary_by_name(bigram, level, parent) if boundary: break return boundary def get_org_admins(self): return self.administrators.all() def get_org_editors(self): return self.editors.all() def get_org_viewers(self): return self.viewers.all() def get_org_surveyors(self): return self.surveyors.all() def get_org_users(self): org_users = self.get_org_admins() | self.get_org_editors() | self.get_org_viewers() | self.get_org_surveyors() return org_users.distinct().order_by('email') def latest_admin(self): admin = self.get_org_admins().last() # no admins? try editors if not admin: admin = self.get_org_editors().last() # no editors? 
try viewers if not admin: admin = self.get_org_viewers().last() return admin def is_free_plan(self): return self.plan == FREE_PLAN or self.plan == TRIAL_PLAN def is_multi_user_level(self): return self.get_purchased_credits() >= settings.MULTI_USER_THRESHOLD def is_multi_org_level(self): return not self.parent and (self.multi_org or self.get_purchased_credits() >= settings.MULTI_ORG_THRESHOLD) def has_added_credits(self): return self.get_credits_total() > WELCOME_TOPUP_SIZE def get_user_org_group(self, user): if user in self.get_org_admins(): user._org_group = Group.objects.get(name="Administrators") elif user in self.get_org_editors(): user._org_group = Group.objects.get(name="Editors") elif user in self.get_org_viewers(): user._org_group = Group.objects.get(name="Viewers") elif user in self.get_org_surveyors(): user._org_group = Group.objects.get(name="Surveyors") elif user.is_staff: user._org_group = Group.objects.get(name="Administrators") else: user._org_group = None return getattr(user, '_org_group', None) def has_twilio_number(self): from temba.channels.models import TWILIO return self.channels.filter(channel_type=TWILIO) def has_nexmo_number(self): from temba.channels.models import NEXMO return self.channels.filter(channel_type=NEXMO) def create_welcome_topup(self, topup_size=WELCOME_TOPUP_SIZE): if topup_size: return TopUp.create(self.created_by, price=0, credits=topup_size, org=self) return None def create_system_labels_and_groups(self): """ Creates our system labels and groups for this organization so that we can keep track of counts etc.. 
""" from temba.contacts.models import ContactGroup from temba.msgs.models import SystemLabel SystemLabel.create_all(self) self.all_groups.create(name='All Contacts', group_type=ContactGroup.TYPE_ALL, created_by=self.created_by, modified_by=self.modified_by) self.all_groups.create(name='Blocked Contacts', group_type=ContactGroup.TYPE_BLOCKED, created_by=self.created_by, modified_by=self.modified_by) self.all_groups.create(name='Failed Contacts', group_type=ContactGroup.TYPE_STOPPED, created_by=self.created_by, modified_by=self.modified_by) def create_sample_flows(self, api_url): import json # get our sample dir filename = os.path.join(settings.STATICFILES_DIRS[0], 'examples', 'sample_flows.json') # for each of our samples with open(filename, 'r') as example_file: example = example_file.read() user = self.get_user() if user: # some some substitutions org_example = example.replace("{{EMAIL}}", user.username) org_example = org_example.replace("{{API_URL}}", api_url) try: self.import_app(json.loads(org_example), user) except Exception: import traceback logger = logging.getLogger(__name__) msg = 'Failed creating sample flows' logger.error(msg, exc_info=True, extra=dict(definition=json.loads(org_example))) traceback.print_exc() def is_notified_of_mt_sms(self): return self.webhook_events & MT_SMS_EVENTS > 0 def is_notified_of_mo_sms(self): return self.webhook_events & MO_SMS_EVENTS > 0 def is_notified_of_mt_call(self): return self.webhook_events & MT_CALL_EVENTS > 0 def is_notified_of_mo_call(self): return self.webhook_events & MO_CALL_EVENTS > 0 def is_notified_of_alarms(self): return self.webhook_events & ALARM_EVENTS > 0 def get_user(self): return self.administrators.filter(is_active=True).first() def get_credits_expiring_soon(self): """ Get the number of credits expiring in less than a month. 
""" return get_cacheable_result(ORG_CREDIT_EXPIRING_CACHE_KEY % self.pk, ORG_CREDITS_CACHE_TTL, self._calculate_credits_expiring_soon) def _calculate_credits_expiring_soon(self): now = timezone.now() one_month_period = now + timedelta(days=30) expiring_topups_qs = self.topups.filter(is_active=True, expires_on__lte=one_month_period).exclude(expires_on__lte=now) used_credits = TopUpCredits.objects.filter(topup__in=expiring_topups_qs).aggregate(Sum('used')).get('used__sum') expiring_topups_credits = expiring_topups_qs.aggregate(Sum('credits')).get('credits__sum') more_valid_credits_qs = self.topups.filter(is_active=True, expires_on__gt=one_month_period) more_valid_credits = more_valid_credits_qs.aggregate(Sum('credits')).get('credits__sum') if more_valid_credits or not expiring_topups_credits: return 0 return expiring_topups_credits - used_credits def has_low_credits(self): return self.get_credits_remaining() <= self.get_low_credits_threshold() def get_low_credits_threshold(self): """ Get the credits number to consider as low threshold to this org """ return get_cacheable_result(ORG_LOW_CREDIT_THRESHOLD_CACHE_KEY % self.pk, ORG_CREDITS_CACHE_TTL, self._calculate_low_credits_threshold) def _calculate_low_credits_threshold(self): now = timezone.now() last_topup_credits = self.topups.filter(is_active=True, expires_on__gte=now).aggregate(Sum('credits')).get('credits__sum') return int(last_topup_credits * 0.15) if last_topup_credits else 0 def get_credits_total(self, force_dirty=False): """ Gets the total number of credits purchased or assigned to this org """ return get_cacheable_result(ORG_CREDITS_TOTAL_CACHE_KEY % self.pk, ORG_CREDITS_CACHE_TTL, self._calculate_credits_total, force_dirty=force_dirty) def get_purchased_credits(self): """ Returns the total number of credits purchased :return: """ return get_cacheable_result(ORG_CREDITS_PURCHASED_CACHE_KEY % self.pk, ORG_CREDITS_CACHE_TTL, self._calculate_purchased_credits) def _calculate_purchased_credits(self): 
purchased_credits = self.topups.filter(is_active=True, price__gt=0).aggregate(Sum('credits')).get('credits__sum') return purchased_credits if purchased_credits else 0 def _calculate_credits_total(self): active_credits = self.topups.filter(is_active=True, expires_on__gte=timezone.now()).aggregate(Sum('credits')).get('credits__sum') active_credits = active_credits if active_credits else 0 # these are the credits that have been used in expired topups expired_credits = TopUpCredits.objects.filter( topup__org=self, topup__is_active=True, topup__expires_on__lte=timezone.now() ).aggregate(Sum('used')).get('used__sum') expired_credits = expired_credits if expired_credits else 0 return active_credits + expired_credits def get_credits_used(self): """ Gets the number of credits used by this org """ return get_cacheable_result(ORG_CREDITS_USED_CACHE_KEY % self.pk, ORG_CREDITS_CACHE_TTL, self._calculate_credits_used) def _calculate_credits_used(self): used_credits_sum = TopUpCredits.objects.filter(topup__org=self, topup__is_active=True) used_credits_sum = used_credits_sum.aggregate(Sum('used')).get('used__sum') used_credits_sum = used_credits_sum if used_credits_sum else 0 unassigned_sum = self.msgs.filter(contact__is_test=False, topup=None, purged=False).count() return used_credits_sum + unassigned_sum def _calculate_credit_caches(self): """ Calculates both our total as well as our active topup """ get_cacheable_result(ORG_CREDITS_TOTAL_CACHE_KEY % self.pk, ORG_CREDITS_CACHE_TTL, self._calculate_credits_total, force_dirty=True) get_cacheable_result(ORG_CREDITS_USED_CACHE_KEY % self.pk, ORG_CREDITS_CACHE_TTL, self._calculate_credits_used, force_dirty=True) def get_credits_remaining(self): """ Gets the number of credits remaining for this org """ return self.get_credits_total() - self.get_credits_used() def allocate_credits(self, user, org, amount): """ Allocates credits to a sub org of the current org, but only if it belongs to us and we have enough credits to do so. 
        """
        # allow transfers between orgs in the same family tree (parent->child, sibling, child->parent)
        if org.parent == self or self.parent == org.parent or self.parent == org:
            if self.get_credits_remaining() >= amount:
                with self.lock_on(OrgLock.credits):
                    # now debit our account
                    debited = None
                    # loop because one decrement may only partially satisfy the request
                    # (it is capped at the remaining balance of a single topup)
                    while amount or debited == 0:
                        # remove the credits from ourselves
                        (topup_id, debited) = self.decrement_credit(amount)

                        if topup_id:
                            topup = TopUp.objects.get(id=topup_id)

                            # create the topup for our child, expiring on the same date
                            new_topup = TopUp.create(user, credits=debited, org=org, expires_on=topup.expires_on, price=None)

                            # create a debit for transaction history
                            Debit.objects.create(topup_id=topup_id, amount=debited, beneficiary=new_topup,
                                                 debit_type=Debit.TYPE_ALLOCATION,
                                                 created_by=user, modified_by=user)

                            # decrease the amount of credits we need
                            amount -= debited
                        else:
                            break

                    # recalculate our caches
                    self._calculate_credit_caches()
                    org._calculate_credit_caches()

                return True

        # couldn't allocate credits
        return False

    def decrement_credit(self, amount=1):
        """
        Decrements this orgs credit by amount.

        Determines the active topup and returns that along with how many credits we were able
        to decrement it by. Amount decremented is not guaranteed to be the full amount requested.
        """
        # bump the org-wide used counter (only if the key already exists in redis)
        total_used_key = ORG_CREDITS_USED_CACHE_KEY % self.pk
        incrby_existing(total_used_key, amount)

        r = get_redis_connection()
        active_topup_key = ORG_ACTIVE_TOPUP_KEY % self.pk
        active_topup_pk = r.get(active_topup_key)
        if active_topup_pk:
            remaining_key = ORG_ACTIVE_TOPUP_REMAINING % (self.pk, int(active_topup_pk))

            # decrement our active # of credits
            remaining = r.decr(remaining_key, amount)

            # near the edge? calculate our active topup from scratch
            if not remaining or int(remaining) < 100:
                active_topup_pk = None

        # calculate our active topup if we need to
        if active_topup_pk is None:
            active_topup = self._calculate_active_topup()
            if active_topup:
                active_topup_pk = active_topup.pk
                # NOTE(review): r.set(key, value, ttl) assumes a redis client where the third
                # positional arg is the expiry — confirm against the installed redis-py version
                r.set(active_topup_key, active_topup_pk, ORG_CREDITS_CACHE_TTL)

                # can only reduce as much as we have available
                if active_topup.get_remaining() < amount:
                    amount = active_topup.get_remaining()

                remaining_key = ORG_ACTIVE_TOPUP_REMAINING % (self.pk, active_topup_pk)
                r.set(remaining_key, active_topup.get_remaining() - amount, ORG_CREDITS_CACHE_TTL)

        # active_topup_pk may be None here, meaning no topup could be debited
        return (active_topup_pk, amount)

    def _calculate_active_topup(self):
        """
        Calculates the oldest non-expired topup that still has credits
        """
        non_expired_topups = self.topups.filter(is_active=True, expires_on__gte=timezone.now()).order_by('expires_on', 'id')
        # used_credits is None when a topup has never been used; treat that as "has credit"
        active_topups = non_expired_topups.annotate(used_credits=Sum('topupcredits__used'))\
                                          .filter(credits__gt=0)\
                                          .filter(Q(used_credits__lt=F('credits')) | Q(used_credits=None))
        return active_topups.first()

    def apply_topups(self):
        """
        We allow users to receive messages even if they're out of credit. Once they re-add credit,
        this function retro-actively applies topups to any messages or IVR actions that don't
        have a topup
        """
        from temba.msgs.models import Msg

        with self.lock_on(OrgLock.credits):
            # get all items that haven't been credited
            msg_uncredited = self.msgs.filter(topup=None, contact__is_test=False, purged=False).order_by('created_on')
            all_uncredited = list(msg_uncredited)

            # get all topups that haven't expired
            unexpired_topups = list(self.topups.filter(is_active=True, expires_on__gte=timezone.now()).order_by('-expires_on'))

            # dict of topups to lists of their newly assigned items
            new_topup_items = {topup: [] for topup in unexpired_topups}

            # assign topup with credits to items...
            current_topup = None
            current_topup_remaining = 0

            for item in all_uncredited:
                # find a topup with remaining credit
                # (topups are sorted by -expires_on, so pop() yields the soonest-expiring first)
                while current_topup_remaining <= 0:
                    if not unexpired_topups:
                        break
                    current_topup = unexpired_topups.pop()
                    current_topup_remaining = current_topup.credits - current_topup.get_used()

                if current_topup_remaining:
                    # if we found some credit, assign the item to the current topup
                    new_topup_items[current_topup].append(item)
                    current_topup_remaining -= 1
                else:
                    # if not, then stop processing items
                    break

            # update items in the database with their new topups
            for topup, items in new_topup_items.iteritems():
                Msg.all_messages.filter(id__in=[item.pk for item in items if isinstance(item, Msg)]).update(topup=topup)

        # deactivate all our credit alerts
        CreditAlert.reset_for_org(self)

    def current_plan_start(self):
        # anchor the billing cycle to the day-of-month the plan started
        today = timezone.now().date()

        # move it to the same day our plan started (taking into account short months)
        plan_start = today.replace(day=min(self.plan_start.day, calendar.monthrange(today.year, today.month)[1]))

        if plan_start > today:
            plan_start -= relativedelta(months=1)

        return plan_start

    def current_plan_end(self):
        # exclusive end of the current monthly billing cycle
        plan_start = self.current_plan_start()
        plan_end = plan_start + relativedelta(months=1)
        return plan_end

    def get_stripe_customer(self):  # pragma: no cover
        # We can't test stripe in unit tests since it requires javascript tokens to be generated
        if not self.stripe_customer:
            return None

        try:
            stripe.api_key = get_stripe_credentials()[1]
            customer = stripe.Customer.retrieve(self.stripe_customer)
            return customer
        except Exception:
            # best-effort: a failed Stripe lookup is treated as "no customer"
            traceback.print_exc()
            return None

    def add_credits(self, bundle, token, user):
        # look up our bundle
        if bundle not in BUNDLE_MAP:
            raise ValidationError(_("Invalid bundle: %s, cannot upgrade.") % bundle)

        bundle = BUNDLE_MAP[bundle]

        # adds credits to this org
        stripe.api_key = get_stripe_credentials()[1]

        # our actual customer object
        customer = self.get_stripe_customer()

        # 3 possible cases
        # 1.
we already have a stripe customer and the token matches it # 2. we already have a stripe customer, but they have just added a new card, we need to use that one # 3. we don't have a customer, so we need to create a new customer and use that card # for our purposes, #1 and #2 are treated the same, we just always update the default card try: if not customer: # then go create a customer object for this user customer = stripe.Customer.create(card=token, email=user, description="{ org: %d }" % self.pk) stripe_customer = customer.id self.stripe_customer = stripe_customer self.save() # update the stripe card to the one they just entered else: # remove existing cards # TODO: this is all a bit wonky because we are using the Stripe JS widget.. # if we instead used on our mechanism to display / edit cards we could be a bit smarter existing_cards = [c for c in customer.cards.all().data] for card in existing_cards: card.delete() card = customer.cards.create(card=token) customer.default_card = card.id customer.save() stripe_customer = customer.id charge = stripe.Charge.create(amount=bundle['cents'], currency='usd', customer=stripe_customer, description=bundle['description']) remaining = self.get_credits_remaining() # create our top up topup = TopUp.create(user, price=bundle['cents'], credits=bundle['credits'], stripe_charge=charge.id, org=self) context = dict(description=bundle['description'], charge_id=charge.id, charge_date=timezone.now().strftime("%b %e, %Y"), amount=bundle['dollars'], credits=bundle['credits'], remaining=remaining, org=self.name, cc_last4=charge.card.last4, cc_type=charge.card.type, cc_name=charge.card.name) from temba.middleware import BrandingMiddleware branding = BrandingMiddleware.get_branding_for_host(self.brand) subject = _("%(name)s Receipt") % branding template = "orgs/email/receipt_email" to_email = user.email context['customer'] = user context['branding'] = branding context['subject'] = subject send_template_email(to_email, subject, template, 
context, branding) # apply our new topups self.apply_topups() return topup except Exception as e: traceback.print_exc(e) raise ValidationError(_("Sorry, we were unable to charge your card, please try again later or contact us.")) def account_value(self): """ How much has this org paid to date in dollars? """ paid = TopUp.objects.filter(org=self).aggregate(paid=Sum('price'))['paid'] if not paid: paid = 0 return paid / 100 def update_plan(self, new_plan, token, user): # pragma: no cover # We can't test stripe in unit tests since it requires javascript tokens to be generated stripe.api_key = get_stripe_credentials()[1] # no plan change? do nothing if new_plan == self.plan: return None # this is our stripe customer id stripe_customer = None # our actual customer object customer = self.get_stripe_customer() if customer: stripe_customer = customer.id # cancel our plan on our stripe customer if new_plan == FREE_PLAN: if customer: analytics.track(user.username, 'temba.plan_cancelled', dict(cancelledPlan=self.plan)) try: subscription = customer.cancel_subscription(at_period_end=True) except Exception as e: traceback.print_exc(e) raise ValidationError(_("Sorry, we are unable to cancel your plan at this time. Please contact us.")) else: raise ValidationError(_("Sorry, we are unable to cancel your plan at this time. 
Please contact us.")) else: # we have a customer, try to upgrade them if customer: try: subscription = customer.update_subscription(plan=new_plan) analytics.track(user.username, 'temba.plan_upgraded', dict(previousPlan=self.plan, plan=new_plan)) except Exception as e: # can't load it, oh well, we'll try to create one dynamically below traceback.print_exc(e) customer = None # if we don't have a customer, go create one if not customer: try: # then go create a customer object for this user customer = stripe.Customer.create(card=token, plan=new_plan, email=user, description="{ org: %d }" % self.pk) stripe_customer = customer.id subscription = customer['subscription'] analytics.track(user.username, 'temba.plan_upgraded', dict(previousPlan=self.plan, plan=new_plan)) except Exception as e: traceback.print_exc(e) raise ValidationError(_("Sorry, we were unable to charge your card, please try again later or contact us.")) # update our org self.stripe_customer = stripe_customer if subscription['status'] != 'active': self.plan = FREE_PLAN else: self.plan = new_plan self.plan_start = datetime.fromtimestamp(subscription['start']) self.save() return subscription def get_export_flows(self, include_archived=False): from temba.flows.models import Flow flows = self.flows.all().exclude(is_active=False).exclude(flow_type=Flow.MESSAGE).order_by('-modified_on') if not include_archived: flows = flows.filter(is_archived=False) return flows def get_recommended_channel(self): from temba.channels.views import TWILIO_SEARCH_COUNTRIES NEXMO_RECOMMEND_COUNTRIES = ['US', 'CA', 'GB', 'AU', 'AT', 'FI', 'DE', 'HK', 'HU', 'LT', 'NL', 'NO', 'PL', 'SE', 'CH', 'BE', 'ES', 'ZA'] countrycode = timezone_to_country_code(self.timezone) recommended = 'android' if countrycode in [country[0] for country in TWILIO_SEARCH_COUNTRIES]: recommended = 'twilio' elif countrycode in NEXMO_RECOMMEND_COUNTRIES: recommended = 'nexmo' elif countrycode == 'KE': recommended = 'africastalking' elif countrycode == 'ID': 
            recommended = 'hub9'
        elif countrycode == 'SO':
            recommended = 'shaqodoon'
        elif countrycode == 'NP':
            recommended = 'blackmyna'
        elif countrycode == 'UG':
            recommended = 'yo'
        elif countrycode == 'PH':
            recommended = 'globe'

        return recommended

    def increment_unread_msg_count(self, type):
        """
        Increments our redis cache of how many unread messages exist for this org and type.
        @param type: either UNREAD_INBOX_MSGS or UNREAD_FLOW_MSGS
        """
        # counts are kept in a redis hash keyed by org id
        r = get_redis_connection()
        r.hincrby(type, self.id, 1)

    def get_unread_msg_count(self, msg_type):
        """
        Gets the value of our redis cache of how many unread messages exist for this org and type.
        @param msg_type: either UNREAD_INBOX_MSGS or UNREAD_FLOW_MSGS
        """
        r = get_redis_connection()
        count = r.hget(msg_type, self.id)
        # a missing hash field means no unread messages
        return 0 if count is None else int(count)

    def clear_unread_msg_count(self, msg_type):
        """
        Clears our redis cache of how many unread messages exist for this org and type.
        @param msg_type: either UNREAD_INBOX_MSGS or UNREAD_FLOW_MSGS
        """
        r = get_redis_connection()
        r.hdel(msg_type, self.id)

    def initialize(self, brand=None, topup_size=WELCOME_TOPUP_SIZE):
        """
        Initializes an organization, creating all the dependent objects we need for it to work properly.
        """
        from temba.middleware import BrandingMiddleware

        if not brand:
            brand = BrandingMiddleware.get_branding_for_host('')

        self.create_system_labels_and_groups()
        self.create_sample_flows(brand.get('api_link', ""))
        self.create_welcome_topup(topup_size)

    def save_media(self, file, extension):
        """
        Saves the given file data with the extension and returns an absolute url to the result
        """
        # files are sharded into directories by the first 4 chars of a random UUID
        random_file = str(uuid4())
        random_dir = random_file[0:4]

        filename = '%s/%s' % (random_dir, random_file)
        if extension:
            filename = '%s.%s' % (filename, extension)

        path = '%s/%d/media/%s' % (settings.STORAGE_ROOT_DIR, self.pk, filename)

        location = default_storage.save(path, file)
        return "https://%s/%s" % (settings.AWS_BUCKET_DOMAIN, location)

    @classmethod
    def create_user(cls, email, password):
        # email doubles as the username
        user = User.objects.create_user(username=email, email=email, password=password)
        return user

    @classmethod
    def get_org(cls, user):
        # returns the first active org this user administers, memoized on the user object
        if not user:
            return None

        if not hasattr(user, '_org'):
            org = Org.objects.filter(administrators=user, is_active=True).first()
            if org:
                user._org = org

        return getattr(user, '_org', None)

    def __unicode__(self):
        return self.name


# ===================== monkey patch User class with a few extra functions ========================

def get_user_orgs(user, brand=None):
    # orgs this user belongs to in any role, filtered to the current brand
    org = user.get_org()
    if not brand:
        brand = org.brand if org else settings.DEFAULT_BRAND

    if user.is_superuser:
        return Org.objects.all()

    user_orgs = user.org_admins.all() | user.org_editors.all() | user.org_viewers.all() | user.org_surveyors.all()
    return user_orgs.filter(brand=brand).distinct().order_by('name')


def get_org(obj):
    return getattr(obj, '_org', None)


def is_alpha_user(user):
    # truthy queryset when the user is in the 'Alpha' group
    return user.groups.filter(name='Alpha')


def is_beta_user(user):
    # truthy queryset when the user is in the 'Beta' group
    return user.groups.filter(name='Beta')


def get_settings(user):
    # lazily creates a UserSettings row for the user on first access
    if not user:
        return None

    settings = UserSettings.objects.filter(user=user).first()

    if not settings:
        settings = UserSettings.objects.create(user=user)

    return settings


def set_org(obj, org):
    obj._org = org


def get_org_group(obj):
    # the permission group this user holds within their current org, or None
    org_group = None
    org = obj.get_org()
    if org:
        org_group = org.get_user_org_group(obj)
    return org_group


def _user_has_org_perm(user, org, permission):
    """
    Determines if a user has the given permission in this org
    """
    if user.is_superuser:
        return True

    if user.is_anonymous():
        return False

    # has it innately? (customer support)
    if user.has_perm(permission):
        return True

    org_group = org.get_user_org_group(user)

    if not org_group:
        return False

    # permission is in "app_label.codename" form
    (app_label, codename) = permission.split(".")

    return org_group.permissions.filter(content_type__app_label=app_label, codename=codename).exists()


# graft our helpers onto the auth User model
User.get_org = get_org
User.set_org = set_org
User.is_alpha = is_alpha_user
User.is_beta = is_beta_user
User.get_settings = get_settings
User.get_user_orgs = get_user_orgs
User.get_org_group = get_org_group
User.has_org_perm = _user_has_org_perm

USER_GROUPS = (('A', _("Administrator")),
               ('E', _("Editor")),
               ('V', _("Viewer")),
               ('S', _("Surveyor")))


def get_stripe_credentials():
    # environment variables take precedence over settings; placeholders signal misconfiguration
    public_key = os.environ.get('STRIPE_PUBLIC_KEY', getattr(settings, 'STRIPE_PUBLIC_KEY', 'MISSING_STRIPE_PUBLIC_KEY'))
    private_key = os.environ.get('STRIPE_PRIVATE_KEY', getattr(settings, 'STRIPE_PRIVATE_KEY', 'MISSING_STRIPE_PRIVATE_KEY'))
    return (public_key, private_key)


class Language(SmartModel):
    """
    A Language that has been added to the org. In the end and language is just an iso_code and name
    and it is not really restricted to real-world languages at this level. Instead we restrict the
    language selection options to real-world languages.
    """
    name = models.CharField(max_length=128)
    iso_code = models.CharField(max_length=4)
    org = models.ForeignKey(Org, verbose_name=_("Org"), related_name="languages")

    @classmethod
    def create(cls, org, user, name, iso_code):
        return cls.objects.create(org=org, name=name, iso_code=iso_code, created_by=user, modified_by=user)

    def as_json(self):
        return dict(name=self.name, iso_code=self.iso_code)

    @classmethod
    def get_localized_text(cls, text_translations, preferred_languages, default_text):
        """
        Returns the appropriate translation to use.
        :param text_translations: A dictionary (or plain text) which contains our message indexed by language iso code
        :param preferred_languages: The prioritized list of language preferences (list of iso codes)
        :param default_text: default text to use if no match is found
        """
        # No translations, return our default text
        if not text_translations:
            return default_text

        # If we are handed raw text without translations, just return that
        if not isinstance(text_translations, dict):
            return text_translations

        # otherwise, find the first preferred language
        for lang in preferred_languages:
            localized = text_translations.get(lang, None)
            if localized:
                return localized

        return default_text

    def __unicode__(self):
        return '%s' % self.name


class Invitation(SmartModel):
    """
    An Invitation to an e-mail address to join an Org with specific roles.
    """
    org = models.ForeignKey(Org, verbose_name=_("Org"), related_name="invitations",
                            help_text=_("The organization to which the account is invited to view"))

    email = models.EmailField(verbose_name=_("Email"),
                              help_text=_("The email to which we send the invitation of the viewer"))

    secret = models.CharField(verbose_name=_("Secret"), max_length=64, unique=True,
                              help_text=_("a unique code associated with this invitation"))

    host = models.CharField(max_length=32, help_text=_("The host this invitation was created on"))

    user_group = models.CharField(max_length=1, choices=USER_GROUPS, default='V', verbose_name=_("User Role"))

    @classmethod
    def create(cls, org, user, email, user_group, host):
        return cls.objects.create(org=org, email=email, user_group=user_group, host=host,
                                  created_by=user, modified_by=user)

    def save(self, *args, **kwargs):
        # generate a unique secret on first save, retrying on (unlikely) collisions
        if not self.secret:
            secret = random_string(64)

            while Invitation.objects.filter(secret=secret):
                secret = random_string(64)

            self.secret = secret

        return super(Invitation, self).save(*args, **kwargs)

    @classmethod
    def generate_random_string(cls, length):
        """
        Generates a [length] characters alpha numeric secret
        """
        letters = "23456789ABCDEFGHJKLMNPQRSTUVWXYZ"  # avoid things that could be mistaken ex: 'I' and '1'
        return ''.join([random.choice(letters) for _ in range(length)])

    def send_invitation(self):
        from .tasks import send_invitation_email_task
        send_invitation_email_task(self.id)

    def send_email(self):
        # no-op if we do not know the email
        if not self.email:
            return

        from temba.middleware import BrandingMiddleware
        branding = BrandingMiddleware.get_branding_for_host(self.host)

        subject = _("%(name)s Invitation") % branding
        template = "orgs/email/invitation_email"
        to_email = self.email

        context = dict(org=self.org, now=timezone.now(), branding=branding, invitation=self)
        context['subject'] = subject

        send_template_email(to_email, subject, template, context, branding)


class UserSettings(models.Model):
    """
    User specific configuration
    """
    user = models.ForeignKey(User, related_name='settings')
    language = models.CharField(max_length=8, choices=settings.LANGUAGES, default="en-us",
                                help_text=_('Your preferred language'))
    tel = models.CharField(verbose_name=_("Phone Number"), max_length=16, null=True, blank=True,
                           help_text=_("Phone number for testing and recording voice flows"))

    def get_tel_formatted(self):
        # returns None when no tel is set
        if self.tel:
            import phonenumbers
            normalized = phonenumbers.parse(self.tel, None)
            return phonenumbers.format_number(normalized, phonenumbers.PhoneNumberFormat.INTERNATIONAL)


class TopUp(SmartModel):
    """
    TopUps are used to track usage across the platform. Each TopUp represents a certain number of
    credits that can be consumed by messages.
    """
    org = models.ForeignKey(Org, related_name='topups',
                            help_text="The organization that was toppped up")
    price = models.IntegerField(null=True, blank=True, verbose_name=_("Price Paid"),
                                help_text=_("The price paid for the messages in this top up (in cents)"))
    credits = models.IntegerField(verbose_name=_("Number of Credits"),
                                  help_text=_("The number of credits bought in this top up"))
    expires_on = models.DateTimeField(verbose_name=_("Expiration Date"),
                                      help_text=_("The date that this top up will expire"))
    stripe_charge = models.CharField(verbose_name=_("Stripe Charge Id"), max_length=32, null=True, blank=True,
                                     help_text=_("The Stripe charge id for this charge"))
    comment = models.CharField(max_length=255, null=True, blank=True,
                               help_text="Any comment associated with this topup, used when we credit accounts")

    @classmethod
    def create(cls, user, price, credits, stripe_charge=None, org=None, expires_on=None):
        """
        Creates a new topup
        """
        if not org:
            org = user.get_org()

        if not expires_on:
            expires_on = timezone.now() + timedelta(days=365)  # credits last 1 year

        topup = TopUp.objects.create(org=org, price=price, credits=credits, expires_on=expires_on,
                                     stripe_charge=stripe_charge, created_by=user, modified_by=user)

        org.update_caches(OrgEvent.topup_new, topup)
        return topup

    def get_ledger(self):
        # builds a running-balance history of allocations, usage and expiry for this topup
        # NOTE(review): order_by('-created_by') orders by creator, not '-created_on' — confirm intended
        debits = self.debits.filter(debit_type=Debit.TYPE_ALLOCATION).order_by('-created_by')

        balance = self.credits
        ledger = []

        for debit in debits:
            balance -= debit.amount
            ledger.append(dict(date=debit.created_on,
                               comment=_('Transfer to %(org)s') % dict(org=debit.beneficiary.org.name),
                               amount=-debit.amount,
                               balance=balance))

        now = timezone.now()
        expired = self.expires_on < now

        # add a line for used message credits
        if self.get_remaining() < balance:
            ledger.append(dict(date=self.expires_on if expired else now,
                               comment=_('Messaging credits used'),
                               amount=self.get_remaining() - balance,
                               balance=self.get_remaining()))

        # add a line for expired credits
        if expired and self.get_remaining() > 0:
            ledger.append(dict(date=self.expires_on,
                               comment=_('Expired credits'),
                               amount=-self.get_remaining(),
                               balance=0))

        return ledger

    def get_price_display(self):
        if self.price is None:
            return ""
        elif self.price == 0:
            return _("Free")

        return "$%.2f" % self.dollars()

    def dollars(self):
        if self.price == 0:
            return 0
        else:
            return Decimal(self.price) / Decimal(100)

    def revert_topup(self):
        # unwind any items that were assigned to this topup
        self.msgs.update(topup=None)

        # mark this topup as inactive
        self.is_active = False
        self.save()

    def get_stripe_charge(self):
        # best-effort lookup; any Stripe failure yields None
        try:
            stripe.api_key = get_stripe_credentials()[1]
            return stripe.Charge.retrieve(self.stripe_charge)
        except Exception:
            traceback.print_exc()
            return None

    def get_used(self):
        """
        Calculates how many topups have actually been used
        """
        used = TopUpCredits.objects.filter(topup=self).aggregate(used=Sum('used'))
        return 0 if not used['used'] else used['used']

    def get_remaining(self):
        """
        Returns how many credits remain on this topup
        """
        return self.credits - self.get_used()

    def __unicode__(self):
        return "%s Credits" % self.credits


class Debit(SmartModel):
    """
    Transactional history of credits allocated to other topups or chunks of archived messages
    """
    TYPE_ALLOCATION = 'A'
    TYPE_PURGE = 'P'

    DEBIT_TYPES = ((TYPE_ALLOCATION, 'Allocation'),
                   (TYPE_PURGE, 'Purge'))

    topup = models.ForeignKey(TopUp, related_name="debits",
                              help_text=_("The topup these credits are applied against"))

    amount = models.IntegerField(help_text=_('How many credits were debited'))

    beneficiary = models.ForeignKey(TopUp, null=True, related_name="allocations",
                                    help_text=_('Optional topup that was allocated with these credits'))

    debit_type = models.CharField(max_length=1, choices=DEBIT_TYPES, null=False,
                                  help_text=_('What caused this debit'))


class TopUpCredits(models.Model):
    """
    Used to track number of credits used on a topup, mostly maintained by triggers on Msg insertion.
    """
    topup = models.ForeignKey(TopUp,
                              help_text=_("The topup these credits are being used against"))
    used = models.IntegerField(help_text=_("How many credits were used, can be negative"))

    LAST_SQUASH_KEY = 'last_topupcredits_squash'

    @classmethod
    def squash_credits(cls):
        # collapses many per-message credit rows into one row per topup, resuming from
        # the last squashed id stored in redis
        # get the id of the last count we squashed
        r = get_redis_connection()
        last_squash = r.get(TopUpCredits.LAST_SQUASH_KEY)
        if not last_squash:
            last_squash = 0

        # get the unique flow ids for all new ones
        squash_count = 0
        for credits in TopUpCredits.objects.filter(id__gt=last_squash).order_by('topup_id').distinct('topup_id'):
            # perform our atomic squash in SQL by calling our squash method
            with connection.cursor() as c:
                c.execute("SELECT temba_squash_topupcredits(%s);", (credits.topup_id,))

            squash_count += 1

        # insert our new top squashed id
        max_id = TopUpCredits.objects.all().order_by('-id').first()
        if max_id:
            r.set(TopUpCredits.LAST_SQUASH_KEY, max_id.id)


class CreditAlert(SmartModel):
    """
    Tracks when we have sent alerts to organization admins about low credits.
    """
    ALERT_TYPES_CHOICES = ((ORG_CREDIT_OVER, _("Credits Over")),
                           (ORG_CREDIT_LOW, _("Low Credits")),
                           (ORG_CREDIT_EXPIRING, _("Credits expiring soon")))

    org = models.ForeignKey(Org, help_text="The organization this alert was triggered for")
    alert_type = models.CharField(max_length=1, choices=ALERT_TYPES_CHOICES,
                                  help_text="The type of this alert")

    @classmethod
    def trigger_credit_alert(cls, org, alert_type):
        # is there already an active alert at this threshold? if so, exit
        if CreditAlert.objects.filter(is_active=True, org=org, alert_type=alert_type):
            return None

        print "triggering %s credits alert type for %s" % (alert_type, org.name)

        admin = org.get_org_admins().first()

        if admin:
            # Otherwise, create our alert objects and trigger our event
            alert = CreditAlert.objects.create(org=org, alert_type=alert_type,
                                               created_by=admin, modified_by=admin)

            alert.send_alert()

    def send_alert(self):
        from .tasks import send_alert_email_task
        send_alert_email_task(self.id)

    def send_email(self):
        # the alert's creator (an org admin) is the recipient
        email = self.created_by.email
        if not email:
            return

        from temba.middleware import BrandingMiddleware
        branding = BrandingMiddleware.get_branding_for_host(self.org.brand)

        subject = _("%(name)s Credits Alert") % branding
        template = "orgs/email/alert_email"
        to_email = email

        context = dict(org=self.org, now=timezone.now(), branding=branding, alert=self, customer=self.created_by)
        context['subject'] = subject

        send_template_email(to_email, subject, template, context, branding)

    @classmethod
    def reset_for_org(cls, org):
        CreditAlert.objects.filter(org=org).update(is_active=False)

    @classmethod
    def check_org_credits(cls):
        from temba.msgs.models import Msg

        # all active orgs in the last hour
        active_orgs = Msg.current_messages.filter(created_on__gte=timezone.now() - timedelta(hours=1))
        active_orgs = active_orgs.order_by('org').distinct('org')

        for msg in active_orgs:
            org = msg.org

            # does this org have less than 0 messages?
            org_remaining_credits = org.get_credits_remaining()
            org_low_credits = org.has_low_credits()
            org_credits_expiring = org.get_credits_expiring_soon()

            # over-limit takes precedence over low credits, which takes precedence over expiry
            if org_remaining_credits <= 0:
                CreditAlert.trigger_credit_alert(org, ORG_CREDIT_OVER)
            elif org_low_credits:
                CreditAlert.trigger_credit_alert(org, ORG_CREDIT_LOW)
            elif org_credits_expiring > 0:
                CreditAlert.trigger_credit_alert(org, ORG_CREDIT_EXPIRING)
codeparrot/github-code-clean
# Copyright (c) 2015 Mirantis inc.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import copy
import datetime

import ddt
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
import six
import webob

from manila.api import common
from manila.api.openstack import api_version_request as api_version
from manila.api.v2 import share_replicas
from manila.api.v2 import shares
from manila.common import constants
from manila import context
from manila import db
from manila import exception
from manila import policy
from manila.share import api as share_api
from manila.share import share_types
from manila import test
from manila.tests.api.contrib import stubs
from manila.tests.api import fakes
from manila.tests import db_utils
from manila import utils

CONF = cfg.CONF


@ddt.ddt
class ShareAPITest(test.TestCase):
    """Share API Test."""

    def setUp(self):
        super(self.__class__, self).setUp()
        self.controller = shares.ShareController()

        # stub out the share API so controller tests never touch the real service layer
        self.mock_object(db, 'availability_zone_get')
        self.mock_object(share_api.API, 'get_all',
                         stubs.stub_get_all_shares)
        self.mock_object(share_api.API, 'get', stubs.stub_share_get)
        self.mock_object(share_api.API, 'update', stubs.stub_share_update)
        self.mock_object(share_api.API, 'delete', stubs.stub_share_delete)
        self.mock_object(share_api.API, 'get_snapshot',
                         stubs.stub_snapshot_get)
        self.maxDiff = None
        # canonical request body used by the create tests below
        self.share = {
            "size": 100,
            "display_name": "Share Test Name",
            "display_description": "Share Test Desc",
            "share_proto": "fakeproto",
            "availability_zone": "zone1:host1",
            "is_public": False,
        }
        self.create_mock = mock.Mock(
            return_value=stubs.stub_share(
                '1',
                display_name=self.share['display_name'],
                display_description=self.share['display_description'],
                size=100,
                share_proto=self.share['share_proto'].upper(),
                instance={
                    'availability_zone': self.share['availability_zone'],
                })
        )
        self.vt = {
            'id': 'fake_volume_type_id',
            'name': 'fake_volume_type_name',
        }

        CONF.set_default("default_share_type", None)

    def _get_expected_share_detailed_response(self, values=None, admin=False):
        # baseline detailed-view body; individual tests pop/override keys to
        # match the fields exposed by the microversion under test
        share = {
            'id': '1',
            'name': 'displayname',
            'availability_zone': 'fakeaz',
            'description': 'displaydesc',
            'export_location': 'fake_location',
            'export_locations': ['fake_location', 'fake_location2'],
            'project_id': 'fakeproject',
            'host': 'fakehost',
            'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
            'share_proto': 'FAKEPROTO',
            'metadata': {},
            'size': 1,
            'snapshot_id': '2',
            'share_network_id': None,
            'status': 'fakestatus',
            'share_type': '1',
            'volume_type': '1',
            'snapshot_support': True,
            'is_public': False,
            'consistency_group_id': None,
            'source_cgsnapshot_member_id': None,
            'task_state': None,
            'share_type_name': None,
            'links': [
                {
                    'href': 'http://localhost/v1/fake/shares/1',
                    'rel': 'self'
                },
                {
                    'href': 'http://localhost/fake/shares/1',
                    'rel': 'bookmark'
                }
            ],
        }
        if values:
            # API responses expose 'name'/'description' rather than display_* keys
            if 'display_name' in values:
                values['name'] = values.pop('display_name')
            if 'display_description' in values:
                values['description'] = values.pop('display_description')
            share.update(values)
        if share.get('share_proto'):
            share['share_proto'] = share['share_proto'].upper()
        if admin:
            share['share_server_id'] = 'fake_share_server_id'
        return {'share': share}

    @ddt.data("2.0", "2.1")
    def test_share_create_original(self, microversion):
        self.mock_object(share_api.API, 'create', self.create_mock)

        body = {"share": copy.deepcopy(self.share)}
        req = fakes.HTTPRequest.blank('/shares', version=microversion)
        res_dict = self.controller.create(req, body)

        expected =
 self._get_expected_share_detailed_response(self.share)
        # these fields were introduced after 2.0/2.1, so they must be absent here
        expected['share'].pop('snapshot_support')
        expected['share'].pop('share_type_name')
        expected['share'].pop('task_state')
        expected['share'].pop('consistency_group_id')
        expected['share'].pop('source_cgsnapshot_member_id')
        self.assertEqual(expected, res_dict)

    @ddt.data("2.2", "2.3")
    def test_share_create_with_snapshot_support_without_cg(self, microversion):
        self.mock_object(share_api.API, 'create', self.create_mock)

        body = {"share": copy.deepcopy(self.share)}
        req = fakes.HTTPRequest.blank('/shares', version=microversion)
        res_dict = self.controller.create(req, body)

        expected = self._get_expected_share_detailed_response(self.share)
        expected['share'].pop('share_type_name')
        expected['share'].pop('task_state')
        expected['share'].pop('consistency_group_id')
        expected['share'].pop('source_cgsnapshot_member_id')
        self.assertEqual(expected, res_dict)

    @ddt.data("2.4", "2.5")
    def test_share_create_with_consistency_group(self, microversion):
        self.mock_object(share_api.API, 'create', self.create_mock)

        body = {"share": copy.deepcopy(self.share)}
        req = fakes.HTTPRequest.blank('/shares', version=microversion)
        res_dict = self.controller.create(req, body)

        expected = self._get_expected_share_detailed_response(self.share)
        expected['share'].pop('share_type_name')
        # task_state only appears from 2.5 on
        if (api_version.APIVersionRequest(microversion) ==
                api_version.APIVersionRequest('2.4')):
            expected['share'].pop('task_state')
        self.assertEqual(expected, res_dict)

    def test_share_create_with_valid_default_share_type(self):
        self.mock_object(share_types, 'get_share_type_by_name',
                         mock.Mock(return_value=self.vt))
        CONF.set_default("default_share_type", self.vt['name'])
        self.mock_object(share_api.API, 'create', self.create_mock)

        body = {"share": copy.deepcopy(self.share)}
        req = fakes.HTTPRequest.blank('/shares', version='2.7')
        res_dict = self.controller.create(req, body)

        expected = self._get_expected_share_detailed_response(self.share)
        share_types.get_share_type_by_name.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), self.vt['name'])
        self.assertEqual(expected, res_dict)

    def test_share_create_with_invalid_default_share_type(self):
        # configured default type doesn't exist -> create must propagate the lookup error
        self.mock_object(
            share_types, 'get_default_share_type',
            mock.Mock(side_effect=exception.ShareTypeNotFoundByName(
                self.vt['name'])),
        )
        CONF.set_default("default_share_type", self.vt['name'])
        req = fakes.HTTPRequest.blank('/shares', version='2.7')
        self.assertRaises(exception.ShareTypeNotFoundByName,
                          self.controller.create, req, {'share': self.share})
        share_types.get_default_share_type.assert_called_once_with()

    def test_share_create_with_replication(self):
        self.mock_object(share_api.API, 'create', self.create_mock)

        body = {"share": copy.deepcopy(self.share)}
        req = fakes.HTTPRequest.blank(
            '/shares', version=share_replicas.MIN_SUPPORTED_API_VERSION)
        res_dict = self.controller.create(req, body)

        expected = self._get_expected_share_detailed_response(self.share)
        expected['share']['task_state'] = None
        expected['share']['consistency_group_id'] = None
        expected['share']['source_cgsnapshot_member_id'] = None
        expected['share']['replication_type'] = None
        expected['share']['share_type_name'] = None
        expected['share']['has_replicas'] = False
        expected['share']['access_rules_status'] = 'active'
        # export locations are no longer inlined at this microversion
        expected['share'].pop('export_location')
        expected['share'].pop('export_locations')
        self.assertEqual(expected, res_dict)

    def test_share_create_with_share_net(self):
        shr = {
            "size": 100,
            "name": "Share Test Name",
            "description": "Share Test Desc",
            "share_proto": "fakeproto",
            "availability_zone": "zone1:host1",
            "share_network_id": "fakenetid"
        }
        create_mock = mock.Mock(return_value=stubs.stub_share('1',
                                display_name=shr['name'],
                                display_description=shr['description'],
                                size=shr['size'],
                                share_proto=shr['share_proto'].upper(),
                                availability_zone=shr['availability_zone'],
                                share_network_id=shr['share_network_id']))
        self.mock_object(share_api.API, 'create', create_mock)
self.mock_object(share_api.API, 'get_share_network', mock.Mock( return_value={'id': 'fakenetid'})) body = {"share": copy.deepcopy(shr)} req = fakes.HTTPRequest.blank('/shares', version='2.7') res_dict = self.controller.create(req, body) expected = self._get_expected_share_detailed_response(shr) self.assertEqual(expected, res_dict) self.assertEqual("fakenetid", create_mock.call_args[1]['share_network_id']) @ddt.data("2.15", "2.16") def test_share_create_original_with_user_id(self, microversion): self.mock_object(share_api.API, 'create', self.create_mock) body = {"share": copy.deepcopy(self.share)} req = fakes.HTTPRequest.blank('/shares', version=microversion) res_dict = self.controller.create(req, body) expected = self._get_expected_share_detailed_response(self.share) if api_version.APIVersionRequest(microversion) >= ( api_version.APIVersionRequest("2.16")): expected['share']['user_id'] = 'fakeuser' else: self.assertNotIn('user_id', expected['share']) expected['share']['task_state'] = None expected['share']['consistency_group_id'] = None expected['share']['source_cgsnapshot_member_id'] = None expected['share']['replication_type'] = None expected['share']['share_type_name'] = None expected['share']['has_replicas'] = False expected['share']['access_rules_status'] = 'active' expected['share'].pop('export_location') expected['share'].pop('export_locations') self.assertEqual(expected, res_dict) def test_migration_start(self): share = db_utils.create_share() share_network = db_utils.create_share_network() share_type = {'share_type_id': 'fake_type_id'} req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'], use_admin_context=True, version='2.22') req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True context = req.environ['manila.context'] self.mock_object(db, 'share_network_get', mock.Mock( return_value=share_network)) self.mock_object(db, 'share_type_get', mock.Mock( return_value=share_type)) body = { 
'migration_start': { 'host': 'fake_host', 'new_share_network_id': 'fake_net_id', 'new_share_type_id': 'fake_type_id', } } method = 'migration_start' self.mock_object(share_api.API, 'migration_start') self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) response = getattr(self.controller, method)(req, share['id'], body) self.assertEqual(202, response.status_int) share_api.API.get.assert_called_once_with(context, share['id']) share_api.API.migration_start.assert_called_once_with( context, share, 'fake_host', False, True, True, False, new_share_network=share_network, new_share_type=share_type) db.share_network_get.assert_called_once_with( context, 'fake_net_id') db.share_type_get.assert_called_once_with( context, 'fake_type_id') def test_migration_start_has_replicas(self): share = db_utils.create_share() req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'], use_admin_context=True) req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request = api_version.APIVersionRequest('2.22') req.api_version_request.experimental = True body = {'migration_start': {'host': 'fake_host'}} self.mock_object(share_api.API, 'migration_start', mock.Mock(side_effect=exception.Conflict(err='err'))) self.assertRaises(webob.exc.HTTPConflict, self.controller.migration_start, req, share['id'], body) def test_migration_start_no_share_id(self): req = fakes.HTTPRequest.blank('/shares/%s/action' % 'fake_id', use_admin_context=True, version='2.22') req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True body = {'migration_start': {'host': 'fake_host'}} method = 'migration_start' self.mock_object(share_api.API, 'get', mock.Mock(side_effect=[exception.NotFound])) self.assertRaises(webob.exc.HTTPNotFound, getattr(self.controller, method), req, 'fake_id', body) def test_migration_start_no_host(self): share = db_utils.create_share() req = fakes.HTTPRequest.blank('/shares/%s/action' % 
share['id'], use_admin_context=True, version='2.22') req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True body = {'migration_start': {}} method = 'migration_start' self.assertRaises(webob.exc.HTTPBadRequest, getattr(self.controller, method), req, share['id'], body) def test_migration_start_new_share_network_not_found(self): share = db_utils.create_share() req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'], use_admin_context=True, version='2.22') context = req.environ['manila.context'] req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True body = {'migration_start': {'host': 'fake_host', 'new_share_network_id': 'nonexistent'}} self.mock_object(db, 'share_network_get', mock.Mock(side_effect=exception.NotFound())) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.migration_start, req, share['id'], body) db.share_network_get.assert_called_once_with(context, 'nonexistent') def test_migration_start_new_share_type_not_found(self): share = db_utils.create_share() req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'], use_admin_context=True, version='2.22') context = req.environ['manila.context'] req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True body = {'migration_start': {'host': 'fake_host', 'new_share_type_id': 'nonexistent'}} self.mock_object(db, 'share_type_get', mock.Mock(side_effect=exception.NotFound())) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.migration_start, req, share['id'], body) db.share_type_get.assert_called_once_with(context, 'nonexistent') def test_migration_start_invalid_force_host_assisted_migration(self): share = db_utils.create_share() req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'], use_admin_context=True, version='2.22') req.method = 'POST' req.headers['content-type'] = 'application/json' 
req.api_version_request.experimental = True body = {'migration_start': {'host': 'fake_host', 'force_host_assisted_migration': 'fake'}} method = 'migration_start' self.assertRaises(webob.exc.HTTPBadRequest, getattr(self.controller, method), req, share['id'], body) @ddt.data('writable', 'preserve_metadata') def test_migration_start_invalid_writable_preserve_metadata( self, parameter): share = db_utils.create_share() req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'], use_admin_context=True, version='2.22') req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True body = {'migration_start': {'host': 'fake_host', parameter: 'invalid'}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.migration_start, req, share['id'], body) @ddt.data(constants.TASK_STATE_MIGRATION_ERROR, None) def test_reset_task_state(self, task_state): share = db_utils.create_share() req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'], use_admin_context=True, version='2.22') req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True update = {'task_state': task_state} body = {'reset_task_state': update} self.mock_object(db, 'share_update') response = self.controller.reset_task_state(req, share['id'], body) self.assertEqual(202, response.status_int) db.share_update.assert_called_once_with(utils.IsAMatcher( context.RequestContext), share['id'], update) def test_reset_task_state_error_body(self): share = db_utils.create_share() req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'], use_admin_context=True, version='2.22') req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True update = {'error': 'error'} body = {'reset_task_state': update} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.reset_task_state, req, share['id'], body) def test_reset_task_state_error_invalid(self): share = 
        db_utils.create_share()
        req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
                                      use_admin_context=True, version='2.22')
        req.method = 'POST'
        req.headers['content-type'] = 'application/json'
        req.api_version_request.experimental = True
        # 'error' is not one of the recognized task_state constants, so the
        # controller must reject the reset with 400.
        update = {'task_state': 'error'}
        body = {'reset_task_state': update}
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.reset_task_state, req, share['id'],
                          body)

    def test_reset_task_state_not_found(self):
        # A valid task_state reset against a share the DB layer cannot find
        # must surface as 404, and the DB update must still have been
        # attempted exactly once with the requested state.
        share = db_utils.create_share()
        req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
                                      use_admin_context=True, version='2.22')
        req.method = 'POST'
        req.headers['content-type'] = 'application/json'
        req.api_version_request.experimental = True
        update = {'task_state': constants.TASK_STATE_MIGRATION_ERROR}
        body = {'reset_task_state': update}
        self.mock_object(db, 'share_update',
                         mock.Mock(side_effect=exception.NotFound()))
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.reset_task_state, req, share['id'],
                          body)
        db.share_update.assert_called_once_with(utils.IsAMatcher(
            context.RequestContext), share['id'], update)

    def test_migration_complete(self):
        # Happy path: migration_complete returns 202 and delegates to the
        # share API with the share looked up from the request context.
        share = db_utils.create_share()
        req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
                                      use_admin_context=True, version='2.22')
        req.method = 'POST'
        req.headers['content-type'] = 'application/json'
        req.api_version_request.experimental = True
        body = {'migration_complete': None}
        self.mock_object(share_api.API, 'get',
                         mock.Mock(return_value=share))
        self.mock_object(share_api.API, 'migration_complete')
        response = self.controller.migration_complete(req, share['id'], body)
        self.assertEqual(202, response.status_int)
        share_api.API.migration_complete.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), share)

    def test_migration_complete_not_found(self):
        # Share lookup failing with NotFound must translate to HTTP 404.
        share = db_utils.create_share()
        req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
                                      use_admin_context=True, version='2.22')
        req.method = 'POST'
        req.headers['content-type'] =
        'application/json'
        req.api_version_request.experimental = True
        body = {'migration_complete': None}
        self.mock_object(share_api.API, 'get',
                         mock.Mock(side_effect=exception.NotFound()))
        self.mock_object(share_api.API, 'migration_complete')
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.migration_complete, req,
                          share['id'], body)

    def test_migration_cancel(self):
        # Happy path: migration_cancel returns 202 and delegates to the
        # share API with the share fetched via share_api.API.get.
        share = db_utils.create_share()
        req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
                                      use_admin_context=True, version='2.22')
        req.method = 'POST'
        req.headers['content-type'] = 'application/json'
        req.api_version_request.experimental = True
        body = {'migration_cancel': None}
        self.mock_object(share_api.API, 'get',
                         mock.Mock(return_value=share))
        self.mock_object(share_api.API, 'migration_cancel')
        response = self.controller.migration_cancel(req, share['id'], body)
        self.assertEqual(202, response.status_int)
        share_api.API.migration_cancel.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), share)

    def test_migration_cancel_not_found(self):
        # Share lookup failing with NotFound must translate to HTTP 404.
        share = db_utils.create_share()
        req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
                                      use_admin_context=True, version='2.22')
        req.method = 'POST'
        req.headers['content-type'] = 'application/json'
        req.api_version_request.experimental = True
        body = {'migration_cancel': None}
        self.mock_object(share_api.API, 'get',
                         mock.Mock(side_effect=exception.NotFound()))
        self.mock_object(share_api.API, 'migration_cancel')
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.migration_cancel, req,
                          share['id'], body)

    def test_migration_get_progress(self):
        # The controller returns the progress dict from the share API
        # verbatim for a share whose migration already succeeded.
        share = db_utils.create_share(
            task_state=constants.TASK_STATE_MIGRATION_SUCCESS)
        req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
                                      use_admin_context=True, version='2.22')
        req.method = 'POST'
        req.headers['content-type'] = 'application/json'
        req.api_version_request.experimental = True
        body = {'migration_get_progress': None}
        expected = {
            'total_progress': 'fake',
            'task_state':
constants.TASK_STATE_MIGRATION_SUCCESS, } self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self.mock_object(share_api.API, 'migration_get_progress', mock.Mock(return_value=expected)) response = self.controller.migration_get_progress(req, share['id'], body) self.assertEqual(expected, response) share_api.API.migration_get_progress.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share) def test_migration_get_progress_not_found(self): share = db_utils.create_share() req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'], use_admin_context=True, version='2.22') req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True body = {'migration_get_progress': None} self.mock_object(share_api.API, 'get', mock.Mock(side_effect=exception.NotFound())) self.mock_object(share_api.API, 'migration_get_progress') self.assertRaises(webob.exc.HTTPNotFound, self.controller.migration_get_progress, req, share['id'], body) def test_share_create_from_snapshot_without_share_net_no_parent(self): shr = { "size": 100, "name": "Share Test Name", "description": "Share Test Desc", "share_proto": "fakeproto", "availability_zone": "zone1:host1", "snapshot_id": 333, "share_network_id": None, } create_mock = mock.Mock(return_value=stubs.stub_share('1', display_name=shr['name'], display_description=shr['description'], size=shr['size'], share_proto=shr['share_proto'].upper(), snapshot_id=shr['snapshot_id'], instance=dict( availability_zone=shr['availability_zone'], share_network_id=shr['share_network_id']))) self.mock_object(share_api.API, 'create', create_mock) body = {"share": copy.deepcopy(shr)} req = fakes.HTTPRequest.blank('/shares', version='2.7') res_dict = self.controller.create(req, body) expected = self._get_expected_share_detailed_response(shr) self.assertEqual(expected, res_dict) def test_share_create_from_snapshot_without_share_net_parent_exists(self): shr = { "size": 100, "name": "Share 
Test Name", "description": "Share Test Desc", "share_proto": "fakeproto", "availability_zone": "zone1:host1", "snapshot_id": 333, "share_network_id": None, } parent_share_net = 444 create_mock = mock.Mock(return_value=stubs.stub_share('1', display_name=shr['name'], display_description=shr['description'], size=shr['size'], share_proto=shr['share_proto'].upper(), snapshot_id=shr['snapshot_id'], instance=dict( availability_zone=shr['availability_zone'], share_network_id=shr['share_network_id']))) self.mock_object(share_api.API, 'create', create_mock) self.mock_object(share_api.API, 'get_snapshot', stubs.stub_snapshot_get) self.mock_object(share_api.API, 'get', mock.Mock( return_value=mock.Mock( instance={'share_network_id': parent_share_net}))) self.mock_object(share_api.API, 'get_share_network', mock.Mock( return_value={'id': parent_share_net})) body = {"share": copy.deepcopy(shr)} req = fakes.HTTPRequest.blank('/shares', version='2.7') res_dict = self.controller.create(req, body) expected = self._get_expected_share_detailed_response(shr) self.assertEqual(expected, res_dict) self.assertEqual(parent_share_net, create_mock.call_args[1]['share_network_id']) def test_share_create_from_snapshot_with_share_net_equals_parent(self): parent_share_net = 444 shr = { "size": 100, "name": "Share Test Name", "description": "Share Test Desc", "share_proto": "fakeproto", "availability_zone": "zone1:host1", "snapshot_id": 333, "share_network_id": parent_share_net } create_mock = mock.Mock(return_value=stubs.stub_share('1', display_name=shr['name'], display_description=shr['description'], size=shr['size'], share_proto=shr['share_proto'].upper(), snapshot_id=shr['snapshot_id'], instance=dict( availability_zone=shr['availability_zone'], share_network_id=shr['share_network_id']))) self.mock_object(share_api.API, 'create', create_mock) self.mock_object(share_api.API, 'get_snapshot', stubs.stub_snapshot_get) self.mock_object(share_api.API, 'get', mock.Mock( return_value=mock.Mock( 
instance={'share_network_id': parent_share_net}))) self.mock_object(share_api.API, 'get_share_network', mock.Mock( return_value={'id': parent_share_net})) body = {"share": copy.deepcopy(shr)} req = fakes.HTTPRequest.blank('/shares', version='2.7') res_dict = self.controller.create(req, body) expected = self._get_expected_share_detailed_response(shr) self.assertEqual(expected, res_dict) self.assertEqual(parent_share_net, create_mock.call_args[1]['share_network_id']) def test_share_create_from_snapshot_invalid_share_net(self): self.mock_object(share_api.API, 'create') shr = { "size": 100, "name": "Share Test Name", "description": "Share Test Desc", "share_proto": "fakeproto", "availability_zone": "zone1:host1", "snapshot_id": 333, "share_network_id": 1234 } body = {"share": shr} req = fakes.HTTPRequest.blank('/shares', version='2.7') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body) def test_share_creation_fails_with_bad_size(self): shr = {"size": '', "name": "Share Test Name", "description": "Share Test Desc", "share_proto": "fakeproto", "availability_zone": "zone1:host1"} body = {"share": shr} req = fakes.HTTPRequest.blank('/shares', version='2.7') self.assertRaises(exception.InvalidInput, self.controller.create, req, body) def test_share_create_no_body(self): req = fakes.HTTPRequest.blank('/shares', version='2.7') self.assertRaises(webob.exc.HTTPUnprocessableEntity, self.controller.create, req, {}) def test_share_create_invalid_availability_zone(self): self.mock_object( db, 'availability_zone_get', mock.Mock(side_effect=exception.AvailabilityZoneNotFound(id='id')) ) body = {"share": copy.deepcopy(self.share)} req = fakes.HTTPRequest.blank('/shares', version='2.7') self.assertRaises(webob.exc.HTTPNotFound, self.controller.create, req, body) def test_share_show(self): req = fakes.HTTPRequest.blank('/shares/1') expected = self._get_expected_share_detailed_response() expected['share'].pop('snapshot_support') 
expected['share'].pop('share_type_name') expected['share'].pop('task_state') expected['share'].pop('consistency_group_id') expected['share'].pop('source_cgsnapshot_member_id') res_dict = self.controller.show(req, '1') self.assertEqual(expected, res_dict) def test_share_show_with_consistency_group(self): req = fakes.HTTPRequest.blank('/shares/1', version='2.4') expected = self._get_expected_share_detailed_response() expected['share'].pop('share_type_name') expected['share'].pop('task_state') res_dict = self.controller.show(req, '1') self.assertEqual(expected, res_dict) def test_share_show_with_share_type_name(self): req = fakes.HTTPRequest.blank('/shares/1', version='2.6') res_dict = self.controller.show(req, '1') expected = self._get_expected_share_detailed_response() expected['share']['consistency_group_id'] = None expected['share']['source_cgsnapshot_member_id'] = None expected['share']['share_type_name'] = None expected['share']['task_state'] = None self.assertEqual(expected, res_dict) @ddt.data("2.15", "2.16") def test_share_show_with_user_id(self, microversion): req = fakes.HTTPRequest.blank('/shares/1', version=microversion) res_dict = self.controller.show(req, '1') expected = self._get_expected_share_detailed_response() if api_version.APIVersionRequest(microversion) >= ( api_version.APIVersionRequest("2.16")): expected['share']['user_id'] = 'fakeuser' else: self.assertNotIn('user_id', expected['share']) expected['share']['consistency_group_id'] = None expected['share']['source_cgsnapshot_member_id'] = None expected['share']['share_type_name'] = None expected['share']['task_state'] = None expected['share']['access_rules_status'] = 'active' expected['share'].pop('export_location') expected['share'].pop('export_locations') expected['share']['replication_type'] = None expected['share']['has_replicas'] = False self.assertEqual(expected, res_dict) def test_share_show_admin(self): req = fakes.HTTPRequest.blank('/shares/1', use_admin_context=True) expected = 
self._get_expected_share_detailed_response(admin=True) expected['share'].pop('snapshot_support') expected['share'].pop('share_type_name') expected['share'].pop('task_state') expected['share'].pop('consistency_group_id') expected['share'].pop('source_cgsnapshot_member_id') res_dict = self.controller.show(req, '1') self.assertEqual(expected, res_dict) def test_share_show_no_share(self): self.mock_object(share_api.API, 'get', stubs.stub_share_get_notfound) req = fakes.HTTPRequest.blank('/shares/1') self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, '1') def test_share_show_with_replication_type(self): req = fakes.HTTPRequest.blank( '/shares/1', version=share_replicas.MIN_SUPPORTED_API_VERSION) res_dict = self.controller.show(req, '1') expected = self._get_expected_share_detailed_response() expected['share']['task_state'] = None expected['share']['consistency_group_id'] = None expected['share']['source_cgsnapshot_member_id'] = None expected['share']['access_rules_status'] = 'active' expected['share']['share_type_name'] = None expected['share']['replication_type'] = None expected['share']['has_replicas'] = False expected['share'].pop('export_location') expected['share'].pop('export_locations') self.assertEqual(expected, res_dict) def test_share_delete(self): req = fakes.HTTPRequest.blank('/shares/1') resp = self.controller.delete(req, 1) self.assertEqual(202, resp.status_int) def test_share_delete_has_replicas(self): req = fakes.HTTPRequest.blank('/shares/1') self.mock_object(share_api.API, 'get', mock.Mock(return_value=self.share)) self.mock_object(share_api.API, 'delete', mock.Mock(side_effect=exception.Conflict(err='err'))) self.assertRaises( webob.exc.HTTPConflict, self.controller.delete, req, 1) def test_share_delete_in_consistency_group_param_not_provided(self): fake_share = stubs.stub_share('fake_share', consistency_group_id='fake_cg_id') self.mock_object(share_api.API, 'get', mock.Mock(return_value=fake_share)) req = 
fakes.HTTPRequest.blank('/shares/1') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete, req, 1) def test_share_delete_in_consistency_group(self): fake_share = stubs.stub_share('fake_share', consistency_group_id='fake_cg_id') self.mock_object(share_api.API, 'get', mock.Mock(return_value=fake_share)) req = fakes.HTTPRequest.blank( '/shares/1?consistency_group_id=fake_cg_id') resp = self.controller.delete(req, 1) self.assertEqual(202, resp.status_int) def test_share_delete_in_consistency_group_wrong_id(self): fake_share = stubs.stub_share('fake_share', consistency_group_id='fake_cg_id') self.mock_object(share_api.API, 'get', mock.Mock(return_value=fake_share)) req = fakes.HTTPRequest.blank( '/shares/1?consistency_group_id=not_fake_cg_id') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete, req, 1) def test_share_update(self): shr = self.share body = {"share": shr} req = fakes.HTTPRequest.blank('/share/1') res_dict = self.controller.update(req, 1, body) self.assertEqual(shr["display_name"], res_dict['share']["name"]) self.assertEqual(shr["display_description"], res_dict['share']["description"]) self.assertEqual(shr['is_public'], res_dict['share']['is_public']) def test_share_update_with_consistency_group(self): shr = self.share body = {"share": shr} req = fakes.HTTPRequest.blank('/share/1', version="2.4") res_dict = self.controller.update(req, 1, body) self.assertIsNone(res_dict['share']["consistency_group_id"]) self.assertIsNone(res_dict['share']["source_cgsnapshot_member_id"]) def test_share_not_updates_size(self): req = fakes.HTTPRequest.blank('/share/1') res_dict = self.controller.update(req, 1, {"share": self.share}) self.assertNotEqual(res_dict['share']["size"], self.share["size"]) def test_share_delete_no_share(self): self.mock_object(share_api.API, 'get', stubs.stub_share_get_notfound) req = fakes.HTTPRequest.blank('/shares/1') self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, req, 1) def 
_share_list_summary_with_search_opts(self, use_admin_context): search_opts = { 'name': 'fake_name', 'status': constants.STATUS_AVAILABLE, 'share_server_id': 'fake_share_server_id', 'share_type_id': 'fake_share_type_id', 'snapshot_id': 'fake_snapshot_id', 'host': 'fake_host', 'share_network_id': 'fake_share_network_id', 'metadata': '%7B%27k1%27%3A+%27v1%27%7D', # serialized k1=v1 'extra_specs': '%7B%27k2%27%3A+%27v2%27%7D', # serialized k2=v2 'sort_key': 'fake_sort_key', 'sort_dir': 'fake_sort_dir', 'limit': '1', 'offset': '1', 'is_public': 'False', } # fake_key should be filtered for non-admin url = '/shares?fake_key=fake_value' for k, v in search_opts.items(): url = url + '&' + k + '=' + v req = fakes.HTTPRequest.blank(url, use_admin_context=use_admin_context) shares = [ {'id': 'id1', 'display_name': 'n1'}, {'id': 'id2', 'display_name': 'n2'}, {'id': 'id3', 'display_name': 'n3'}, ] self.mock_object(share_api.API, 'get_all', mock.Mock(return_value=shares)) result = self.controller.index(req) search_opts_expected = { 'display_name': search_opts['name'], 'status': search_opts['status'], 'share_server_id': search_opts['share_server_id'], 'share_type_id': search_opts['share_type_id'], 'snapshot_id': search_opts['snapshot_id'], 'host': search_opts['host'], 'share_network_id': search_opts['share_network_id'], 'metadata': {'k1': 'v1'}, 'extra_specs': {'k2': 'v2'}, 'is_public': 'False', } if use_admin_context: search_opts_expected.update({'fake_key': 'fake_value'}) share_api.API.get_all.assert_called_once_with( req.environ['manila.context'], sort_key=search_opts['sort_key'], sort_dir=search_opts['sort_dir'], search_opts=search_opts_expected, ) self.assertEqual(1, len(result['shares'])) self.assertEqual(shares[1]['id'], result['shares'][0]['id']) self.assertEqual( shares[1]['display_name'], result['shares'][0]['name']) def test_share_list_summary_with_search_opts_by_non_admin(self): self._share_list_summary_with_search_opts(use_admin_context=False) def 
test_share_list_summary_with_search_opts_by_admin(self): self._share_list_summary_with_search_opts(use_admin_context=True) def test_share_list_summary(self): self.mock_object(share_api.API, 'get_all', stubs.stub_share_get_all_by_project) req = fakes.HTTPRequest.blank('/shares') res_dict = self.controller.index(req) expected = { 'shares': [ { 'name': 'displayname', 'id': '1', 'links': [ { 'href': 'http://localhost/v1/fake/shares/1', 'rel': 'self' }, { 'href': 'http://localhost/fake/shares/1', 'rel': 'bookmark' } ], } ] } self.assertEqual(expected, res_dict) def _share_list_detail_with_search_opts(self, use_admin_context): search_opts = { 'name': 'fake_name', 'status': constants.STATUS_AVAILABLE, 'share_server_id': 'fake_share_server_id', 'share_type_id': 'fake_share_type_id', 'snapshot_id': 'fake_snapshot_id', 'host': 'fake_host', 'share_network_id': 'fake_share_network_id', 'metadata': '%7B%27k1%27%3A+%27v1%27%7D', # serialized k1=v1 'extra_specs': '%7B%27k2%27%3A+%27v2%27%7D', # serialized k2=v2 'sort_key': 'fake_sort_key', 'sort_dir': 'fake_sort_dir', 'limit': '1', 'offset': '1', 'is_public': 'False', } # fake_key should be filtered for non-admin url = '/shares/detail?fake_key=fake_value' for k, v in search_opts.items(): url = url + '&' + k + '=' + v req = fakes.HTTPRequest.blank(url, use_admin_context=use_admin_context) shares = [ {'id': 'id1', 'display_name': 'n1'}, { 'id': 'id2', 'display_name': 'n2', 'status': constants.STATUS_AVAILABLE, 'snapshot_id': 'fake_snapshot_id', 'share_type_id': 'fake_share_type_id', 'instance': { 'host': 'fake_host', 'share_network_id': 'fake_share_network_id', }, }, {'id': 'id3', 'display_name': 'n3'}, ] self.mock_object(share_api.API, 'get_all', mock.Mock(return_value=shares)) result = self.controller.detail(req) search_opts_expected = { 'display_name': search_opts['name'], 'status': search_opts['status'], 'share_server_id': search_opts['share_server_id'], 'share_type_id': search_opts['share_type_id'], 'snapshot_id': 
search_opts['snapshot_id'], 'host': search_opts['host'], 'share_network_id': search_opts['share_network_id'], 'metadata': {'k1': 'v1'}, 'extra_specs': {'k2': 'v2'}, 'is_public': 'False', } if use_admin_context: search_opts_expected.update({'fake_key': 'fake_value'}) share_api.API.get_all.assert_called_once_with( req.environ['manila.context'], sort_key=search_opts['sort_key'], sort_dir=search_opts['sort_dir'], search_opts=search_opts_expected, ) self.assertEqual(1, len(result['shares'])) self.assertEqual(shares[1]['id'], result['shares'][0]['id']) self.assertEqual( shares[1]['display_name'], result['shares'][0]['name']) self.assertEqual( shares[1]['snapshot_id'], result['shares'][0]['snapshot_id']) self.assertEqual( shares[1]['status'], result['shares'][0]['status']) self.assertEqual( shares[1]['share_type_id'], result['shares'][0]['share_type']) self.assertEqual( shares[1]['snapshot_id'], result['shares'][0]['snapshot_id']) self.assertEqual( shares[1]['instance']['host'], result['shares'][0]['host']) self.assertEqual( shares[1]['instance']['share_network_id'], result['shares'][0]['share_network_id']) def test_share_list_detail_with_search_opts_by_non_admin(self): self._share_list_detail_with_search_opts(use_admin_context=False) def test_share_list_detail_with_search_opts_by_admin(self): self._share_list_detail_with_search_opts(use_admin_context=True) def _list_detail_common_expected(self): return { 'shares': [ { 'status': 'fakestatus', 'description': 'displaydesc', 'export_location': 'fake_location', 'export_locations': ['fake_location', 'fake_location2'], 'availability_zone': 'fakeaz', 'name': 'displayname', 'share_proto': 'FAKEPROTO', 'metadata': {}, 'project_id': 'fakeproject', 'host': 'fakehost', 'id': '1', 'snapshot_id': '2', 'snapshot_support': True, 'share_network_id': None, 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'size': 1, 'share_type': '1', 'volume_type': '1', 'is_public': False, 'links': [ { 'href': 'http://localhost/v1/fake/shares/1', 'rel': 
'self' }, { 'href': 'http://localhost/fake/shares/1', 'rel': 'bookmark' } ], } ] } def _list_detail_test_common(self, req, expected): self.mock_object(share_api.API, 'get_all', stubs.stub_share_get_all_by_project) res_dict = self.controller.detail(req) self.assertEqual(expected, res_dict) self.assertEqual(res_dict['shares'][0]['volume_type'], res_dict['shares'][0]['share_type']) def test_share_list_detail(self): env = {'QUERY_STRING': 'name=Share+Test+Name'} req = fakes.HTTPRequest.blank('/shares/detail', environ=env) expected = self._list_detail_common_expected() expected['shares'][0].pop('snapshot_support') self._list_detail_test_common(req, expected) def test_share_list_detail_with_consistency_group(self): env = {'QUERY_STRING': 'name=Share+Test+Name'} req = fakes.HTTPRequest.blank('/shares/detail', environ=env, version="2.4") expected = self._list_detail_common_expected() expected['shares'][0]['consistency_group_id'] = None expected['shares'][0]['source_cgsnapshot_member_id'] = None self._list_detail_test_common(req, expected) def test_share_list_detail_with_task_state(self): env = {'QUERY_STRING': 'name=Share+Test+Name'} req = fakes.HTTPRequest.blank('/shares/detail', environ=env, version="2.5") expected = self._list_detail_common_expected() expected['shares'][0]['consistency_group_id'] = None expected['shares'][0]['source_cgsnapshot_member_id'] = None expected['shares'][0]['task_state'] = None self._list_detail_test_common(req, expected) def test_share_list_detail_without_export_locations(self): env = {'QUERY_STRING': 'name=Share+Test+Name'} req = fakes.HTTPRequest.blank('/shares/detail', environ=env, version="2.9") expected = self._list_detail_common_expected() expected['shares'][0]['consistency_group_id'] = None expected['shares'][0]['source_cgsnapshot_member_id'] = None expected['shares'][0]['task_state'] = None expected['shares'][0]['share_type_name'] = None expected['shares'][0].pop('export_location') expected['shares'][0].pop('export_locations') 
self._list_detail_test_common(req, expected) def test_share_list_detail_with_replication_type(self): self.mock_object(share_api.API, 'get_all', stubs.stub_share_get_all_by_project) env = {'QUERY_STRING': 'name=Share+Test+Name'} req = fakes.HTTPRequest.blank( '/shares/detail', environ=env, version=share_replicas.MIN_SUPPORTED_API_VERSION) res_dict = self.controller.detail(req) expected = { 'shares': [ { 'status': 'fakestatus', 'description': 'displaydesc', 'availability_zone': 'fakeaz', 'name': 'displayname', 'share_proto': 'FAKEPROTO', 'metadata': {}, 'project_id': 'fakeproject', 'access_rules_status': 'active', 'host': 'fakehost', 'id': '1', 'snapshot_id': '2', 'share_network_id': None, 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'size': 1, 'share_type_name': None, 'share_type': '1', 'volume_type': '1', 'is_public': False, 'consistency_group_id': None, 'source_cgsnapshot_member_id': None, 'snapshot_support': True, 'has_replicas': False, 'replication_type': None, 'task_state': None, 'links': [ { 'href': 'http://localhost/v1/fake/shares/1', 'rel': 'self' }, { 'href': 'http://localhost/fake/shares/1', 'rel': 'bookmark' } ], } ] } self.assertEqual(expected, res_dict) self.assertEqual(res_dict['shares'][0]['volume_type'], res_dict['shares'][0]['share_type']) def test_remove_invalid_options(self): ctx = context.RequestContext('fakeuser', 'fakeproject', is_admin=False) search_opts = {'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'} expected_opts = {'a': 'a', 'c': 'c'} allowed_opts = ['a', 'c'] common.remove_invalid_options(ctx, search_opts, allowed_opts) self.assertEqual(expected_opts, search_opts) def test_remove_invalid_options_admin(self): ctx = context.RequestContext('fakeuser', 'fakeproject', is_admin=True) search_opts = {'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'} expected_opts = {'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'} allowed_opts = ['a', 'c'] common.remove_invalid_options(ctx, search_opts, allowed_opts) self.assertEqual(expected_opts, search_opts) def 
_fake_access_get(self, ctxt, access_id): class Access(object): def __init__(self, **kwargs): self.STATE_NEW = 'fake_new' self.STATE_ACTIVE = 'fake_active' self.STATE_ERROR = 'fake_error' self.params = kwargs self.params['state'] = self.STATE_NEW self.share_id = kwargs.get('share_id') self.id = access_id def __getitem__(self, item): return self.params[item] access = Access(access_id=access_id, share_id='fake_share_id') return access @ddt.ddt class ShareActionsTest(test.TestCase): def setUp(self): super(self.__class__, self).setUp() self.controller = shares.ShareController() self.mock_object(share_api.API, 'get', stubs.stub_share_get) @ddt.data( {'access_type': 'ip', 'access_to': '127.0.0.1'}, {'access_type': 'user', 'access_to': '1' * 4}, {'access_type': 'user', 'access_to': '1' * 32}, {'access_type': 'user', 'access_to': 'fake\\]{.-_\'`;}['}, {'access_type': 'user', 'access_to': 'MYDOMAIN\\Administrator'}, {'access_type': 'cert', 'access_to': 'x'}, {'access_type': 'cert', 'access_to': 'tenant.example.com'}, {'access_type': 'cert', 'access_to': 'x' * 64}, ) def test_allow_access(self, access): self.mock_object(share_api.API, 'allow_access', mock.Mock(return_value={'fake': 'fake'})) self.mock_object(self.controller._access_view_builder, 'view', mock.Mock(return_value={'access': {'fake': 'fake'}})) id = 'fake_share_id' body = {'allow_access': access} expected = {'access': {'fake': 'fake'}} req = fakes.HTTPRequest.blank( '/v2/tenant1/shares/%s/action' % id, version="2.7") res = self.controller.allow_access(req, id, body) self.assertEqual(expected, res) @ddt.data( {'access_type': 'error_type', 'access_to': '127.0.0.1'}, {'access_type': 'ip', 'access_to': 'localhost'}, {'access_type': 'ip', 'access_to': '127.0.0.*'}, {'access_type': 'ip', 'access_to': '127.0.0.0/33'}, {'access_type': 'ip', 'access_to': '127.0.0.256'}, {'access_type': 'user', 'access_to': '1'}, {'access_type': 'user', 'access_to': '1' * 3}, {'access_type': 'user', 'access_to': '1' * 33}, {'access_type': 
'user', 'access_to': 'root^'}, {'access_type': 'cert', 'access_to': ''}, {'access_type': 'cert', 'access_to': ' '}, {'access_type': 'cert', 'access_to': 'x' * 65}, ) def test_allow_access_error(self, access): id = 'fake_share_id' body = {'allow_access': access} req = fakes.HTTPRequest.blank('/v2/tenant1/shares/%s/action' % id, version="2.7") self.assertRaises(webob.exc.HTTPBadRequest, self.controller.allow_access, req, id, body) @ddt.unpack @ddt.data( {'exc': None, 'access_to': 'alice', 'version': '2.13'}, {'exc': webob.exc.HTTPBadRequest, 'access_to': 'alice', 'version': '2.11'} ) def test_allow_access_ceph(self, exc, access_to, version): share_id = "fake_id" self.mock_object(share_api.API, 'allow_access', mock.Mock(return_value={'fake': 'fake'})) self.mock_object(self.controller._access_view_builder, 'view', mock.Mock(return_value={'access': {'fake': 'fake'}})) req = fakes.HTTPRequest.blank( '/v2/shares/%s/action' % share_id, version=version) body = {'allow_access': { 'access_type': 'cephx', 'access_to': access_to, 'access_level': 'rw' }} if exc: self.assertRaises(exc, self.controller.allow_access, req, share_id, body) else: expected = {'access': {'fake': 'fake'}} res = self.controller.allow_access(req, id, body) self.assertEqual(expected, res) def test_deny_access(self): def _stub_deny_access(*args, **kwargs): pass self.mock_object(share_api.API, "deny_access", _stub_deny_access) self.mock_object(share_api.API, "access_get", _fake_access_get) id = 'fake_share_id' body = {"os-deny_access": {"access_id": 'fake_acces_id'}} req = fakes.HTTPRequest.blank('/v1/tenant1/shares/%s/action' % id) res = self.controller._deny_access(req, id, body) self.assertEqual(202, res.status_int) def test_deny_access_not_found(self): def _stub_deny_access(*args, **kwargs): pass self.mock_object(share_api.API, "deny_access", _stub_deny_access) self.mock_object(share_api.API, "access_get", _fake_access_get) id = 'super_fake_share_id' body = {"os-deny_access": {"access_id": 
'fake_acces_id'}} req = fakes.HTTPRequest.blank('/v1/tenant1/shares/%s/action' % id) self.assertRaises(webob.exc.HTTPNotFound, self.controller._deny_access, req, id, body) def test_access_list(self): fake_access_list = [ { "state": "fakestatus", "id": "fake_access_id", "access_type": "fakeip", "access_to": "127.0.0.1", } ] self.mock_object(self.controller._access_view_builder, 'list_view', mock.Mock(return_value={'access_list': fake_access_list})) id = 'fake_share_id' body = {"os-access_list": None} req = fakes.HTTPRequest.blank('/v2/tenant1/shares/%s/action' % id) res_dict = self.controller._access_list(req, id, body) self.assertEqual({'access_list': fake_access_list}, res_dict) @ddt.unpack @ddt.data( {'body': {'os-extend': {'new_size': 2}}, 'version': '2.6'}, {'body': {'extend': {'new_size': 2}}, 'version': '2.7'}, ) def test_extend(self, body, version): id = 'fake_share_id' share = stubs.stub_share_get(None, None, id) self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self.mock_object(share_api.API, "extend") size = '2' req = fakes.HTTPRequest.blank( '/v2/shares/%s/action' % id, version=version) actual_response = self.controller._extend(req, id, body) share_api.API.get.assert_called_once_with(mock.ANY, id) share_api.API.extend.assert_called_once_with( mock.ANY, share, int(size)) self.assertEqual(202, actual_response.status_int) @ddt.data({"os-extend": ""}, {"os-extend": {"new_size": "foo"}}, {"os-extend": {"new_size": {'foo': 'bar'}}}) def test_extend_invalid_body(self, body): id = 'fake_share_id' req = fakes.HTTPRequest.blank('/v1/shares/%s/action' % id) self.assertRaises(webob.exc.HTTPBadRequest, self.controller._extend, req, id, body) @ddt.data({'source': exception.InvalidInput, 'target': webob.exc.HTTPBadRequest}, {'source': exception.InvalidShare, 'target': webob.exc.HTTPBadRequest}, {'source': exception.ShareSizeExceedsAvailableQuota, 'target': webob.exc.HTTPForbidden}) @ddt.unpack def test_extend_exception(self, source, target): id = 
'fake_share_id' req = fakes.HTTPRequest.blank('/v1/shares/%s/action' % id) body = {"os-extend": {'new_size': '123'}} self.mock_object(share_api.API, "extend", mock.Mock(side_effect=source('fake'))) self.assertRaises(target, self.controller._extend, req, id, body) @ddt.unpack @ddt.data( {'body': {'os-shrink': {'new_size': 1}}, 'version': '2.6'}, {'body': {'shrink': {'new_size': 1}}, 'version': '2.7'}, ) def test_shrink(self, body, version): id = 'fake_share_id' share = stubs.stub_share_get(None, None, id) self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self.mock_object(share_api.API, "shrink") size = '1' req = fakes.HTTPRequest.blank( '/v2/shares/%s/action' % id, version=version) actual_response = self.controller._shrink(req, id, body) share_api.API.get.assert_called_once_with(mock.ANY, id) share_api.API.shrink.assert_called_once_with( mock.ANY, share, int(size)) self.assertEqual(202, actual_response.status_int) @ddt.data({"os-shrink": ""}, {"os-shrink": {"new_size": "foo"}}, {"os-shrink": {"new_size": {'foo': 'bar'}}}) def test_shrink_invalid_body(self, body): id = 'fake_share_id' req = fakes.HTTPRequest.blank('/v1/shares/%s/action' % id) self.assertRaises(webob.exc.HTTPBadRequest, self.controller._shrink, req, id, body) @ddt.data({'source': exception.InvalidInput, 'target': webob.exc.HTTPBadRequest}, {'source': exception.InvalidShare, 'target': webob.exc.HTTPBadRequest}) @ddt.unpack def test_shrink_exception(self, source, target): id = 'fake_share_id' req = fakes.HTTPRequest.blank('/v1/shares/%s/action' % id) body = {"os-shrink": {'new_size': '123'}} self.mock_object(share_api.API, "shrink", mock.Mock(side_effect=source('fake'))) self.assertRaises(target, self.controller._shrink, req, id, body) @ddt.ddt class ShareAdminActionsAPITest(test.TestCase): def setUp(self): super(self.__class__, self).setUp() CONF.set_default("default_share_type", None) self.flags(rpc_backend='manila.openstack.common.rpc.impl_fake') self.share_api = share_api.API() 
self.admin_context = context.RequestContext('admin', 'fake', True) self.member_context = context.RequestContext('fake', 'fake') def _get_context(self, role): return getattr(self, '%s_context' % role) def _setup_share_data(self, share=None, version='2.7'): if share is None: share = db_utils.create_share(status=constants.STATUS_AVAILABLE, size='1', override_defaults=True) req = fakes.HTTPRequest.blank( '/v2/fake/shares/%s/action' % share['id'], version=version) return share, req def _reset_status(self, ctxt, model, req, db_access_method, valid_code, valid_status=None, body=None, version='2.7'): if float(version) > 2.6: action_name = 'reset_status' else: action_name = 'os-reset_status' if body is None: body = {action_name: {'status': constants.STATUS_ERROR}} req.method = 'POST' req.headers['content-type'] = 'application/json' req.headers['X-Openstack-Manila-Api-Version'] = version req.body = six.b(jsonutils.dumps(body)) req.environ['manila.context'] = ctxt resp = req.get_response(fakes.app()) # validate response code and model status self.assertEqual(valid_code, resp.status_int) if valid_code == 404: self.assertRaises(exception.NotFound, db_access_method, ctxt, model['id']) else: actual_model = db_access_method(ctxt, model['id']) self.assertEqual(valid_status, actual_model['status']) @ddt.data(*fakes.fixture_reset_status_with_different_roles) @ddt.unpack def test_share_reset_status_with_different_roles(self, role, valid_code, valid_status, version): share, req = self._setup_share_data(version=version) ctxt = self._get_context(role) self._reset_status(ctxt, share, req, db.share_get, valid_code, valid_status, version=version) @ddt.data(*fakes.fixture_invalid_reset_status_body) def test_share_invalid_reset_status_body(self, body): share, req = self._setup_share_data(version='2.6') ctxt = self.admin_context self._reset_status(ctxt, share, req, db.share_get, 400, constants.STATUS_AVAILABLE, body, version='2.6') @ddt.data('2.6', '2.7') def 
test_share_reset_status_for_missing(self, version): fake_share = {'id': 'missing-share-id'} req = fakes.HTTPRequest.blank( '/v2/fake/shares/%s/action' % fake_share['id'], version=version) self._reset_status(self.admin_context, fake_share, req, db.share_snapshot_get, 404, version=version) def _force_delete(self, ctxt, model, req, db_access_method, valid_code, check_model_in_db=False, version='2.7'): if float(version) > 2.6: action_name = 'force_delete' else: action_name = 'os-force_delete' req.method = 'POST' req.headers['content-type'] = 'application/json' req.headers['X-Openstack-Manila-Api-Version'] = version req.body = six.b(jsonutils.dumps({action_name: {}})) req.environ['manila.context'] = ctxt resp = req.get_response(fakes.app()) # validate response self.assertEqual(valid_code, resp.status_int) if valid_code == 202 and check_model_in_db: self.assertRaises(exception.NotFound, db_access_method, ctxt, model['id']) @ddt.data(*fakes.fixture_force_delete_with_different_roles) @ddt.unpack def test_share_force_delete_with_different_roles(self, role, resp_code, version): share, req = self._setup_share_data(version=version) ctxt = self._get_context(role) self._force_delete(ctxt, share, req, db.share_get, resp_code, check_model_in_db=True, version=version) @ddt.data('2.6', '2.7') def test_share_force_delete_missing(self, version): share, req = self._setup_share_data( share={'id': 'fake'}, version=version) ctxt = self._get_context('admin') self._force_delete( ctxt, share, req, db.share_get, 404, version=version) @ddt.ddt class ShareUnmanageTest(test.TestCase): def setUp(self): super(self.__class__, self).setUp() self.controller = shares.ShareController() self.mock_object(share_api.API, 'get_all', stubs.stub_get_all_shares) self.mock_object(share_api.API, 'get', stubs.stub_share_get) self.mock_object(share_api.API, 'update', stubs.stub_share_update) self.mock_object(share_api.API, 'delete', stubs.stub_share_delete) self.mock_object(share_api.API, 'get_snapshot', 
stubs.stub_snapshot_get) self.share_id = 'fake' self.request = fakes.HTTPRequest.blank( '/share/%s/unmanage' % self.share_id, use_admin_context=True, version='2.7', ) def test_unmanage_share(self): share = dict(status=constants.STATUS_AVAILABLE, id='foo_id', instance={}) self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self.mock_object(share_api.API, 'unmanage', mock.Mock()) self.mock_object( self.controller.share_api.db, 'share_snapshot_get_all_for_share', mock.Mock(return_value=[])) actual_result = self.controller.unmanage(self.request, share['id']) self.assertEqual(202, actual_result.status_int) self.controller.share_api.db.share_snapshot_get_all_for_share.\ assert_called_once_with( self.request.environ['manila.context'], share['id']) self.controller.share_api.get.assert_called_once_with( self.request.environ['manila.context'], share['id']) share_api.API.unmanage.assert_called_once_with( self.request.environ['manila.context'], share) def test_unmanage_share_that_has_snapshots(self): share = dict(status=constants.STATUS_AVAILABLE, id='foo_id', instance={}) snapshots = ['foo', 'bar'] self.mock_object(self.controller.share_api, 'unmanage') self.mock_object( self.controller.share_api.db, 'share_snapshot_get_all_for_share', mock.Mock(return_value=snapshots)) self.mock_object( self.controller.share_api, 'get', mock.Mock(return_value=share)) self.assertRaises( webob.exc.HTTPForbidden, self.controller.unmanage, self.request, share['id']) self.assertFalse(self.controller.share_api.unmanage.called) self.controller.share_api.db.share_snapshot_get_all_for_share.\ assert_called_once_with( self.request.environ['manila.context'], share['id']) self.controller.share_api.get.assert_called_once_with( self.request.environ['manila.context'], share['id']) def test_unmanage_share_based_on_share_server(self): share = dict(instance=dict(share_server_id='foo_id'), id='bar_id') self.mock_object( self.controller.share_api, 'get', mock.Mock(return_value=share)) 
def get_fake_manage_body(export_path='/fake', service_host='fake@host#POOL',
                         protocol='fake', share_type='fake', **kwargs):
    """Build a fake request body for the 'manage share' API in tests.

    Any extra keyword arguments are merged into the share dict and
    override the defaults on key collision.
    """
    share_data = dict(
        export_path=export_path,
        service_host=service_host,
        protocol=protocol,
        share_type=share_type,
    )
    share_data.update(kwargs)
    return {'share': share_data}
self.assertRaises(webob.exc.HTTPNotFound, self.controller.manage, self.request, body) @ddt.data({'service_is_up': False, 'service_host': 'fake@host#POOL'}, {'service_is_up': True, 'service_host': 'fake@host'}) def test_share_manage_bad_request(self, settings): body = get_fake_manage_body(service_host=settings.pop('service_host')) self._setup_manage_mocks(**settings) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.manage, self.request, body) def test_share_manage_duplicate_share(self): body = get_fake_manage_body() exc = exception.InvalidShare(reason="fake") self._setup_manage_mocks() self.mock_object(share_api.API, 'manage', mock.Mock(side_effect=exc)) self.assertRaises(webob.exc.HTTPConflict, self.controller.manage, self.request, body) def test_share_manage_forbidden_manage(self): body = get_fake_manage_body() self._setup_manage_mocks() error = mock.Mock(side_effect=exception.PolicyNotAuthorized(action='')) self.mock_object(share_api.API, 'manage', error) self.assertRaises(webob.exc.HTTPForbidden, self.controller.manage, self.request, body) def test_share_manage_forbidden_validate_service_host(self): body = get_fake_manage_body() self._setup_manage_mocks() error = mock.Mock(side_effect=exception.PolicyNotAuthorized(action='')) self.mock_object( utils, 'validate_service_host', mock.Mock(side_effect=error)) self.assertRaises(webob.exc.HTTPForbidden, self.controller.manage, self.request, body) @ddt.data( get_fake_manage_body(name='foo', description='bar'), get_fake_manage_body(display_name='foo', description='bar'), get_fake_manage_body(name='foo', display_description='bar'), get_fake_manage_body(display_name='foo', display_description='bar'), get_fake_manage_body(display_name='foo', display_description='bar', driver_options=dict(volume_id='quuz')), ) def test_share_manage(self, data): self._test_share_manage(data, "2.7") @ddt.data( get_fake_manage_body(name='foo', description='bar', is_public=True), get_fake_manage_body(name='foo', description='bar', 
    def _test_share_manage(self, data, version):
        """Shared driver for the manage-share tests.

        Calls ``controller.manage`` with *data* at microversion *version*
        and checks the API call, the response body, and the policy check.
        The expected response is adjusted per microversion below.
        """
        # Baseline response expected for every microversion.
        expected = {
            'share': {
                'status': 'fakestatus',
                'description': 'displaydesc',
                'availability_zone': 'fakeaz',
                'name': 'displayname',
                'share_proto': 'FAKEPROTO',
                'metadata': {},
                'project_id': 'fakeproject',
                'host': 'fakehost',
                'id': 'fake',
                'snapshot_id': '2',
                'share_network_id': None,
                'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
                'size': 1,
                'share_type_name': None,
                'share_server_id': 'fake_share_server_id',
                'share_type': '1',
                'volume_type': '1',
                'is_public': False,
                'consistency_group_id': None,
                'source_cgsnapshot_member_id': None,
                'snapshot_support': True,
                'task_state': None,
                'links': [
                    {
                        'href': 'http://localhost/v1/fake/shares/fake',
                        'rel': 'self'
                    },
                    {
                        'href': 'http://localhost/fake/shares/fake',
                        'rel': 'bookmark'
                    }
                ],
            }
        }
        self._setup_manage_mocks()
        return_share = mock.Mock(
            return_value=stubs.stub_share('fake', instance={}))
        self.mock_object(
            share_api.API, 'manage', return_share)
        # The argument dict share_api.API.manage is expected to receive.
        share = {
            'host': data['share']['service_host'],
            'export_location': data['share']['export_path'],
            'share_proto': data['share']['protocol'].upper(),
            'share_type_id': 'fake',
            'display_name': 'foo',
            'display_description': 'bar',
        }
        driver_options = data['share'].get('driver_options', {})
        # <= 2.8: export locations are still embedded in the share view.
        if (api_version.APIVersionRequest(version) <=
                api_version.APIVersionRequest('2.8')):
            expected['share']['export_location'] = 'fake_location'
            expected['share']['export_locations'] = (
                ['fake_location', 'fake_location2'])
        # >= 2.10: access_rules_status field added.
        if (api_version.APIVersionRequest(version) >=
                api_version.APIVersionRequest('2.10')):
            expected['share']['access_rules_status'] = (
                constants.STATUS_ACTIVE)
        # >= 2.11: replication fields added.
        if (api_version.APIVersionRequest(version) >=
                api_version.APIVersionRequest('2.11')):
            expected['share']['has_replicas'] = False
            expected['share']['replication_type'] = None
        # >= 2.16: user_id exposed in the share view.
        if (api_version.APIVersionRequest(version) >=
                api_version.APIVersionRequest('2.16')):
            expected['share']['user_id'] = 'fakeuser'
        # >= 2.8: is_public is passed through to the manage call.
        if (api_version.APIVersionRequest(version) >=
                api_version.APIVersionRequest('2.8')):
            share['is_public'] = data['share']['is_public']
        req = fakes.HTTPRequest.blank('/v2/shares/manage', version=version,
                                      use_admin_context=True)

        actual_result = self.controller.manage(req, data)

        share_api.API.manage.assert_called_once_with(
            mock.ANY, share, driver_options)
        self.assertIsNotNone(actual_result)
        self.assertEqual(expected, actual_result)
        self.mock_policy_check.assert_called_once_with(
            req.environ['manila.context'], self.resource_name, 'manage')
codeparrot/github-code-clean
#!/usr/bin/env python # Copyright 2016 Daniel Nunes # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from os import makedirs, listdir from os.path import expanduser, normpath, basename, join, relpath, isdir, isfile, abspath from io import BytesIO from threading import Thread from queue import Queue from webbrowser import open_new_tab from datetime import datetime from collections import deque from json import JSONDecodeError from jsonpickle import encode, decode, set_encoder_options from lxml.etree import parse, tostring, Comment from PyQt5.QtWidgets import (QFileDialog, QColorDialog, QMessageBox, QLabel, QHBoxLayout, QCommandLinkButton, QDialog, QFormLayout, QLineEdit, QSpinBox, QComboBox, QWidget, QPushButton, QSizePolicy, QStatusBar, QCompleter, QApplication, QMainWindow, QUndoCommand, QUndoStack, QMenu, QHeaderView, QAction, QVBoxLayout, QGroupBox, QCheckBox, QRadioButton) from PyQt5.QtGui import QIcon, QPixmap, QColor, QFont, QStandardItemModel, QStandardItem from PyQt5.QtCore import Qt, pyqtSignal, QStringListModel, QMimeData, QEvent from PyQt5.uic import loadUi from requests import get, head, codes, ConnectionError, Timeout from validator import validate_tree, check_warnings, ValidatorError, ValidationError, WarningError, MissingFolderError from . 
import cur_folder, __version__ from .nodes import _NodeElement, NodeComment from .io import import_, new, export, node_factory, copy_node from .previews import PreviewDispatcherThread from .props import PropertyFile, PropertyColour, PropertyFolder, PropertyCombo, PropertyInt, PropertyText, \ PropertyFlagLabel, PropertyFlagValue, PropertyHTML from .exceptions import DesignerError from .ui_templates import window_intro, window_mainframe, window_about, window_settings, window_texteditor, \ window_plaintexteditor, preview_mo class IntroWindow(QMainWindow, window_intro.Ui_MainWindow): """ The class for the intro window. Subclassed from QDialog and created in Qt Designer. """ def __init__(self): super().__init__() self.setupUi(self) self.setWindowIcon(QIcon(join(cur_folder, "resources/window_icon.svg"))) self.setWindowTitle("FOMOD Designer") self.version.setText("Version " + __version__) self.settings_dict = read_settings() recent_files = self.settings_dict["Recent Files"] for path in recent_files: if not isdir(path): recent_files.remove(path) continue button = QCommandLinkButton(basename(path), path, self) button.setIcon(QIcon(join(cur_folder, "resources/logos/logo_enter.png"))) button.clicked.connect(lambda _, path_=path: self.open_path(path_)) self.scroll_layout.addWidget(button) if not self.settings_dict["General"]["show_intro"]: main_window = MainFrame() main_window.move(self.pos()) main_window.show() self.close() else: self.show() self.new_button.clicked.connect(lambda: self.open_path("")) self.button_help.clicked.connect(MainFrame.help) self.button_about.clicked.connect(lambda _, self_=self: MainFrame.about(self_)) def open_path(self, path): """ Method used to open a path in the main window - closes the intro window and show the main. :param path: The path to open. 
""" main_window = MainFrame() self_center = self.mapToGlobal(self.rect().center()) main_center = main_window.mapToGlobal(main_window.rect().center()) main_window.move(self_center - main_center) main_window.open(path) main_window.show() self.close() if self.settings_dict["General"]["tutorial_advanced"]: main_window.setEnabled(False) tutorial = loadUi(join(cur_folder, "resources/templates/tutorial_advanced.ui")) tutorial.frame_node.resize(main_window.node_tree_view.size()) tutorial.frame_node.move( main_window.node_tree_view.mapTo(main_window, main_window.node_tree_view.pos()) ) tutorial.frame_preview.resize(main_window.tabWidget.size()) tutorial.frame_preview.move( main_window.tabWidget.mapTo(main_window, main_window.tabWidget.pos()) ) tutorial.frame_prop.resize(main_window.dockWidgetContents.size()) tutorial.frame_prop.move( main_window.dockWidgetContents.mapTo(main_window, main_window.dockWidgetContents.pos()) ) tutorial.frame_child.resize(main_window.dockWidgetContents_3.size()) tutorial.frame_child.move( main_window.dockWidgetContents_3.mapTo(main_window, main_window.dockWidgetContents_3.pos()) ) tutorial.button_exit.clicked.connect(lambda: main_window.setEnabled(True)) tutorial.button_exit.clicked.connect(tutorial.close) tutorial.setParent(main_window) tutorial.setWindowFlags(Qt.FramelessWindowHint | Qt.Dialog) tutorial.setAttribute(Qt.WA_TranslucentBackground) main_center = main_window.mapToGlobal(main_window.rect().center()) tutorial_center = tutorial.mapToGlobal(tutorial.rect().center()) tutorial.move(main_center - tutorial_center) tutorial.setEnabled(True) tutorial.exec_() self.settings_dict["General"]["tutorial_advanced"] = False self.settings_dict["General"]["show_intro"] = not self.check_intro.isChecked() self.settings_dict["General"]["show_advanced"] = self.check_advanced.isChecked() makedirs(join(expanduser("~"), ".fomod"), exist_ok=True) with open(join(expanduser("~"), ".fomod", ".designer"), "w") as configfile: set_encoder_options("json", indent=4) 
configfile.write(encode(self.settings_dict)) class MainFrame(QMainWindow, window_mainframe.Ui_MainWindow): """ The class for the main window. Subclassed from QMainWindow and created in Qt Designer. """ #: Signals the xml code has changed. xml_code_changed = pyqtSignal([object]) #: Signals the code preview is updated. update_code_preview = pyqtSignal([str]) #: Signals there is an update available. update_check_update_available = pyqtSignal() #: Signals the app is up-to-date. update_check_up_to_date = pyqtSignal() #: Signals a connection timed out. update_check_timeout = pyqtSignal() #: Signals there was an error with the internet connection. update_check_connection_error = pyqtSignal() #: Signals a new node has been selected in the node tree. select_node = pyqtSignal([object]) #: Signals the previews need to be updated. update_previews = pyqtSignal([object]) class NodeMimeData(QMimeData): def __init__(self): super().__init__() self._node = None self._item = None self._original_item = None def has_node(self): if self._node is None: return False else: return True def node(self): return self._node def set_node(self, node): self._node = node def has_item(self): if self._item is None: return False else: return True def item(self): return self._item def set_item(self, item): self._item = item def original_item(self): return self._original_item def set_original_item(self, item): self._original_item = item class NodeStandardModel(QStandardItemModel): def mimeData(self, index_list): if not index_list: return 0 mime_data = MainFrame.NodeMimeData() new_node = copy_node(self.itemFromIndex(index_list[0]).xml_node) mime_data.set_item(new_node.model_item) mime_data.set_node(new_node) mime_data.set_original_item(self.itemFromIndex(index_list[0])) return mime_data def canDropMimeData(self, mime_data, drop_action, row, col, parent_index): if self.itemFromIndex(parent_index) and mime_data.has_node() and mime_data.has_item() and drop_action == 2: if 
    class LineEditChangeCommand(QUndoCommand):
        """Undo-stack command recording a text change in a property line edit.

        redo()/undo() re-select the owning tree node (so the property
        widgets are rebuilt for that node) and then write the new/old text
        back into the widget at *widget_index*.
        """

        def __init__(self, original_text, new_text, current_prop_widgets,
                     widget_index, tree_model, item, select_node):
            super().__init__("Line edit changed.")
            self.original_text = original_text
            self.new_text = new_text
            self.current_prop_widgets = current_prop_widgets
            self.widget_index = widget_index
            self.tree_model = tree_model
            self.item = item
            # Signal emitted with the item's model index to select the node.
            self.select_node = select_node

        def redo(self):
            # Re-select the node first so the property widgets are current.
            self.select_node.emit(self.tree_model.indexFromItem(self.item))
            self.current_prop_widgets[self.widget_index].setText(self.new_text)

        def undo(self):
            self.select_node.emit(self.tree_model.indexFromItem(self.item))
            self.current_prop_widgets[self.widget_index].setText(self.original_text)
self.select_node.emit(self.tree_model.indexFromItem(self.item)) line_edit = None for index in range(self.current_prop_widgets[self.widget_index].layout().count()): widget = self.current_prop_widgets[self.widget_index].layout().itemAt(index).widget() if isinstance(widget, QLineEdit): line_edit = widget line_edit.setText(self.new_text) def undo(self): self.select_node.emit(self.tree_model.indexFromItem(self.item)) line_edit = None for index in range(self.current_prop_widgets[self.widget_index].layout().count()): widget = self.current_prop_widgets[self.widget_index].layout().itemAt(index).widget() if isinstance(widget, QLineEdit): line_edit = widget line_edit.setText(self.original_text) class ComboBoxChangeCommand(QUndoCommand): def __init__(self, original_text, new_text, current_prop_widgets, widget_index, tree_model, item, select_node): super().__init__("Combo box changed.") self.original_text = original_text self.new_text = new_text self.current_prop_widgets = current_prop_widgets self.widget_index = widget_index self.tree_model = tree_model self.item = item self.select_node = select_node def redo(self): self.select_node.emit(self.tree_model.indexFromItem(self.item)) self.current_prop_widgets[self.widget_index].setCurrentText(self.new_text) def undo(self): self.select_node.emit(self.tree_model.indexFromItem(self.item)) self.current_prop_widgets[self.widget_index].setCurrentText(self.original_text) class SpinBoxChangeCommand(QUndoCommand): def __init__(self, original_int, new_int, current_prop_widgets, widget_index, tree_model, item, select_node): super().__init__("Spin box changed.") self.original_int = original_int self.new_int = new_int self.current_prop_widgets = current_prop_widgets self.widget_index = widget_index self.tree_model = tree_model self.item = item self.select_node = select_node def redo(self): self.select_node.emit(self.tree_model.indexFromItem(self.item)) self.current_prop_widgets[self.widget_index].setValue(self.new_int) def undo(self): 
            self.select_node.emit(self.tree_model.indexFromItem(self.item))
            self.current_prop_widgets[self.widget_index].setValue(self.original_int)

    class RunWizardCommand(QUndoCommand):
        """Undo command that swaps a node for the version produced by a wizard."""

        def __init__(self, parent_node, original_node, modified_node, tree_model, select_node_signal):
            super().__init__("Wizard was run on this node.")
            self.parent_node = parent_node
            self.original_node = original_node
            self.modified_node = modified_node
            self.tree_model = tree_model
            self.select_node_signal = select_node_signal

        def redo(self):
            # Replace the original node with the wizard's result and select it.
            self.parent_node.remove_child(self.original_node)
            self.parent_node.add_child(self.modified_node)
            self.parent_node.model_item.sortChildren(0)
            self.select_node_signal.emit(self.tree_model.indexFromItem(self.modified_node.model_item))

        def undo(self):
            # Put the pre-wizard node back and select it again.
            self.parent_node.remove_child(self.modified_node)
            self.parent_node.add_child(self.original_node)
            self.parent_node.model_item.sortChildren(0)
            self.select_node_signal.emit(self.tree_model.indexFromItem(self.original_node.model_item))

    class DeleteCommand(QUndoCommand):
        """Undo command that removes a node from its parent."""

        def __init__(self, node_to_delete, tree_model, select_node_signal):
            super().__init__("Node deleted.")
            self.node_to_delete = node_to_delete
            # Remember the parent now -- after removal getparent() would be gone.
            self.parent_node = node_to_delete.getparent()
            self.tree_model = tree_model
            self.select_node_signal = select_node_signal

        def redo(self):
            object_to_delete = self.node_to_delete
            # Resolve the parent's index before the removal invalidates rows.
            new_index = self.tree_model.indexFromItem(self.parent_node.model_item)
            self.parent_node.remove_child(object_to_delete)
            self.select_node_signal.emit(new_index)

        def undo(self):
            self.parent_node.add_child(self.node_to_delete)
            self.select_node_signal.emit(self.tree_model.indexFromItem(self.node_to_delete.model_item))
            self.tree_model.sort(0)

    class AddChildCommand(QUndoCommand):
        """Undo command that creates and attaches a new child node by tag."""

        def __init__(self, child_tag, parent_node, tree_model, settings_dict, select_node_signal):
            super().__init__("Child added.")
            self.child_tag = child_tag
            self.parent_node = parent_node
            self.tree_model = tree_model
            self.settings_dict = settings_dict
            self.select_node_signal = select_node_signal
            # Created lazily on the first redo() so undo/redo reuse one node.
            self.new_child_node = None

        def redo(self):
            if self.new_child_node is None:
                self.new_child_node = node_factory(self.child_tag, self.parent_node)
                # Apply the user-configured default property value, if enabled
                # for this tag in the settings.
                defaults_dict = self.settings_dict["Defaults"]
                if self.child_tag in defaults_dict and defaults_dict[self.child_tag].enabled():
                    self.new_child_node.properties[defaults_dict[self.child_tag].key()].set_value(
                        defaults_dict[self.child_tag].value()
                    )
            self.parent_node.add_child(self.new_child_node)
            self.tree_model.sort(0)
            # select the new item
            self.select_node_signal.emit(self.tree_model.indexFromItem(self.new_child_node.model_item))

        def undo(self):
            self.parent_node.remove_child(self.new_child_node)
            # select the parent after removing
            self.select_node_signal.emit(self.tree_model.indexFromItem(self.parent_node.model_item))

    class PasteCommand(QUndoCommand):
        """Undo command that pastes the clipboard's node under a parent item."""

        def __init__(self, parent_item, status_bar, tree_model, select_node_signal):
            super().__init__("Node pasted.")
            self.parent_item = parent_item
            self.status_bar = status_bar
            self.tree_model = tree_model
            self.select_node_signal = select_node_signal
            self.pasted_node = None

        def redo(self):
            # A fresh copy is taken from the clipboard on every redo.
            self.pasted_node = copy_node(QApplication.clipboard().mimeData().node())
            self.parent_item.xml_node.append(self.pasted_node)
            self.parent_item.appendRow(self.pasted_node.model_item)
            self.parent_item.sortChildren(0)

        def undo(self):
            self.parent_item.xml_node.remove_child(self.pasted_node)
            # select the parent after removing
            self.select_node_signal.emit(self.tree_model.indexFromItem(self.parent_item.xml_node.model_item))

    def __init__(self):
        super().__init__()
        self.setupUi(self)

        # setup the icons properly
        self.setWindowIcon(QIcon(join(cur_folder, "resources/window_icon.svg")))
        self.action_Open.setIcon(QIcon(join(cur_folder, "resources/logos/logo_open_file.png")))
        self.action_Save.setIcon(QIcon(join(cur_folder, "resources/logos/logo_floppy_disk.png")))
        self.actionO_ptions.setIcon(QIcon(join(cur_folder, "resources/logos/logo_gear.png")))
        self.action_Refresh.setIcon(QIcon(join(cur_folder, "resources/logos/logo_refresh.png")))
        self.action_Delete.setIcon(QIcon(join(cur_folder, "resources/logos/logo_cross.png")))
        self.action_About.setIcon(QIcon(join(cur_folder, "resources/logos/logo_notepad.png")))
        self.actionHe_lp.setIcon(QIcon(join(cur_folder, "resources/logos/logo_info.png")))
        self.actionCopy.setIcon(QIcon(join(cur_folder, "resources/logos/logo_copy.png")))
        self.actionPaste.setIcon(QIcon(join(cur_folder, "resources/logos/logo_paste.png")))
        self.actionRedo.setIcon(QIcon(join(cur_folder, "resources/logos/logo_redo.png")))
        self.actionUndo.setIcon(QIcon(join(cur_folder, "resources/logos/logo_undo.png")))
        self.actionClear.setIcon(QIcon(join(cur_folder, "resources/logos/logo_clear.png")))
        self.menu_Recent_Files.setIcon(QIcon(join(cur_folder, "resources/logos/logo_recent.png")))
        self.actionExpand_All.setIcon(QIcon(join(cur_folder, "resources/logos/logo_expand.png")))
        self.actionCollapse_All.setIcon(QIcon(join(cur_folder, "resources/logos/logo_collapse.png")))
        self.actionHide_Node.setIcon(QIcon(join(cur_folder, "resources/logos/logo_hide.png")))
        self.actionShow_Node.setIcon(QIcon(join(cur_folder, "resources/logos/logo_show.png")))

        # manage undo and redo
        self.undo_stack = QUndoStack(self)
        self.undo_stack.setUndoLimit(25)
        self.undo_stack.canRedoChanged.connect(self.actionRedo.setEnabled)
        self.undo_stack.canUndoChanged.connect(self.actionUndo.setEnabled)
        self.actionRedo.triggered.connect(self.undo_stack.redo)
        self.actionUndo.triggered.connect(self.undo_stack.undo)

        # manage the node tree view
        self.node_tree_view.clicked.connect(self.select_node.emit)
        self.node_tree_view.activated.connect(self.select_node.emit)
        self.node_tree_view.setContextMenuPolicy(Qt.CustomContextMenu)
        self.node_tree_view.customContextMenuRequested.connect(self.on_custom_context_menu)

        # manage node tree model
        self.node_tree_model = self.NodeStandardModel()
        self.node_tree_view.setModel(self.node_tree_model)
        self.node_tree_model.itemChanged.connect(lambda item: item.xml_node.save_metadata())
        self.node_tree_model.itemChanged.connect(lambda item: self.xml_code_changed.emit(item.xml_node))

        # connect actions to the respective methods
        self.action_Open.triggered.connect(self.open)
        self.action_Save.triggered.connect(self.save)
        self.actionO_ptions.triggered.connect(self.settings)
        self.action_Refresh.triggered.connect(self.refresh)
        self.action_Delete.triggered.connect(self.delete)
        self.actionHide_Node.triggered.connect(self.hide_node)
        self.actionShow_Node.triggered.connect(self.show_node)
        self.actionHe_lp.triggered.connect(self.help)
        self.action_About.triggered.connect(lambda _, self_=self: self.about(self_))
        self.actionClear.triggered.connect(self.clear_recent_files)
        # Copy/paste only act when something is selected in the tree.
        self.actionCopy.triggered.connect(
            lambda: self.copy_item_to_clipboard() if self.node_tree_view.selectedIndexes() else None
        )
        self.actionPaste.triggered.connect(
            lambda: self.paste_item_from_clipboard() if self.node_tree_view.selectedIndexes() else None
        )
        self.actionExpand_All.triggered.connect(self.node_tree_view.expandAll)
        self.actionCollapse_All.triggered.connect(self.node_tree_view.collapseAll)
        # Keep the dock visibility toggles and the menu check states in sync.
        self.action_Object_Tree.toggled.connect(self.node_tree.setVisible)
        self.actionObject_Box.toggled.connect(self.children_box.setVisible)
        self.action_Property_Editor.toggled.connect(self.property_editor.setVisible)
        self.node_tree.visibilityChanged.connect(self.action_Object_Tree.setChecked)
        self.children_box.visibilityChanged.connect(self.actionObject_Box.setChecked)
        self.property_editor.visibilityChanged.connect(self.action_Property_Editor.setChecked)

        # setup any necessary variables
        self.original_title = self.windowTitle()
        self._package_path = ""
        self.package_name = ""
        self.settings_dict = read_settings()
        self._info_root = None
        self._config_root = None
        self._current_prop_list = []
        # Maps property-widget index -> last committed value, used by the
        # editingFinished handlers to decide whether to push an undo command.
        self.original_prop_value_list = {}

        # start the preview threads
        self.preview_queue = Queue()
        self.preview_gui_worker = PreviewMoGui(self.layout_mo)
        self.update_previews.connect(self.preview_queue.put)
        self.update_code_preview.connect(self.xml_code_browser.setHtml)
        # NOTE(review): package_path/info_root/config_root below are bound
        # methods, not values -- presumably the dispatcher calls them to get
        # current state; confirm against PreviewDispatcherThread.
        self.preview_thread = PreviewDispatcherThread(
            self.preview_queue,
            self.update_code_preview,
            **{
                "package_path": self.package_path,
                "info_root": self.info_root,
                "config_root": self.config_root,
                "gui_worker": self.preview_gui_worker
            }
        )
        self.preview_thread.start()

        # manage the wizard button
        self.button_wizard.clicked.connect(self.run_wizard)

        # manage auto-completion
        self.flag_label_model = QStringListModel()
        self.flag_label_completer = QCompleter()
        self.flag_label_completer.setCaseSensitivity(Qt.CaseInsensitive)
        self.flag_label_completer.setModel(self.flag_label_model)
        self.flag_value_model = QStringListModel()
        self.flag_value_completer = QCompleter()
        self.flag_value_completer.setCaseSensitivity(Qt.CaseInsensitive)
        self.flag_value_completer.setModel(self.flag_value_model)

        # connect node selected signal
        self.current_node = None  # type: _NodeElement
        self.select_node.connect(
            lambda index: self.set_current_node(self.node_tree_model.itemFromIndex(index).xml_node)
        )
        self.select_node.connect(lambda index: self.node_tree_view.setCurrentIndex(index))
        self.select_node.connect(
            lambda: self.update_previews.emit(self.current_node)
            if self.settings_dict["General"]["code_refresh"] >= 2 else None
        )
        self.select_node.connect(self.update_children_box)
        self.select_node.connect(self.update_props_list)
        self.select_node.connect(lambda: self.action_Delete.setEnabled(True))
        self.select_node.connect(
            lambda: self.button_wizard.setEnabled(False)
            if self.current_node.wizard is None
            else self.button_wizard.setEnabled(True)
        )
        # Hide/Show are only applicable to non-root nodes whose hidden state
        # can actually flip (and that have no instance restriction).
        self.select_node.connect(
            lambda index: self.actionHide_Node.setEnabled(True)
            if self.current_node is not self._config_root
            and self.current_node is not self._info_root
            and self.current_node not in self.current_node.getparent().hidden_children
            and not self.current_node.allowed_instances
            else self.actionHide_Node.setEnabled(False)
        )
        self.select_node.connect(
            lambda index: self.actionShow_Node.setEnabled(True)
            if self.current_node is not self._config_root
            and self.current_node is not self._info_root
            and self.current_node in self.current_node.getparent().hidden_children
            and not self.current_node.allowed_instances
            else self.actionShow_Node.setEnabled(False)
        )

        # manage code changed signal
        self.xml_code_changed.connect(self.update_previews.emit)

        # manage clean/dirty states
        # A dirty undo stack is reflected with a "*" prefix in the title bar.
        self.undo_stack.cleanChanged.connect(
            lambda clean: self.setWindowTitle(self.package_name + " - " + self.original_title)
            if clean else self.setWindowTitle("*" + self.package_name + " - " + self.original_title)
        )
        self.undo_stack.cleanChanged.connect(
            lambda clean: self.action_Save.setEnabled(not clean)
        )

        self.update_recent_files()
        self.check_updates()

        # disable the wizards until they're up-to-date
        self.button_wizard.hide()

    def on_custom_context_menu(self, position):
        """
        Build and show the node tree's right-click menu at *position*.

        Selection-dependent entries are only added when the click landed on a
        valid item.
        """
        index = self.node_tree_view.indexAt(position)
        node_tree_context_menu = QMenu(self.node_tree_view)
        node_tree_context_menu.addActions([self.actionExpand_All, self.actionCollapse_All])
        if index.isValid():
            self.select_node.emit(index)
            node_tree_context_menu.addSeparator()
            node_tree_context_menu.addAction(self.action_Delete)
            if self.current_node is not self._config_root and self.current_node is not self._info_root:
                # Offer the opposite of the node's current hidden state.
                if self.current_node in self.current_node.getparent().hidden_children:
                    node_tree_context_menu.addAction(self.actionShow_Node)
                else:
                    node_tree_context_menu.addAction(self.actionHide_Node)
            node_tree_context_menu.addSeparator()
            node_tree_context_menu.addActions([self.actionCopy, self.actionPaste])
            node_tree_context_menu.addSeparator()
            node_tree_context_menu.addActions([self.actionUndo, self.actionRedo])
        node_tree_context_menu.move(self.node_tree_view.mapToGlobal(position))
        node_tree_context_menu.exec_()

    def set_current_node(self, selected_node):
        """Record *selected_node* as the node all editors operate on."""
        self.current_node = selected_node

    @property
    def current_prop_list(self):
        """The widgets currently shown in the Property Editor."""
        return self._current_prop_list

    def info_root(self):
        """Return the root node of the info xml tree (None before open())."""
        return self._info_root

    def config_root(self):
        """Return the root node of the config xml tree (None before open())."""
        return self._config_root

    def package_path(self):
        """Return the path of the currently open package ("" before open())."""
        return self._package_path

    def copy_item_to_clipboard(self):
        """Copy the selected tree item's node to the application clipboard."""
        item = self.node_tree_model.itemFromIndex(self.node_tree_view.selectedIndexes()[0])
        QApplication.clipboard().setMimeData(self.node_tree_model.mimeData([self.node_tree_model.indexFromItem(item)]))
        self.actionPaste.setEnabled(True)

    def paste_item_from_clipboard(self):
        """
        Paste the clipboard's node under the selected item via an undoable
        command, rejecting invalid parents with a status bar message.
        """
        parent_item = self.node_tree_model.itemFromIndex(self.node_tree_view.selectedIndexes()[0])
        new_node = copy_node(QApplication.clipboard().mimeData().node())
        if not parent_item.xml_node.can_add_child(new_node):
            self.statusBar().showMessage("This parent is not valid!")
        else:
            self.undo_stack.push(
                self.PasteCommand(
                    parent_item,
                    self.statusBar(),
                    self.node_tree_model,
                    self.select_node
                )
            )

    @staticmethod
    def update_flag_label_completer(label_model, elem_root):
        """Fill *label_model* with the unique flag names found under *elem_root*."""
        label_list = []
        for elem in elem_root.iter():
            if elem.tag == "flag":
                value = elem.properties["name"].value
                if value not in label_list:
                    label_list.append(value)
        label_model.setStringList(label_list)

    @staticmethod
    def update_flag_value_completer(value_model, elem_root, label):
        """Fill *value_model* with the unique values used by flags named *label*."""
        value_list = []
        for elem in elem_root.iter():
            if elem.tag == "flag" and elem.text not in value_list and elem.properties["name"].value == label:
                value_list.append(elem.text)
        value_model.setStringList(value_list)

    def check_updates(self):
        """
        Checks the version number on the remote repository (Github Releases)
        and compares it against the current version.

        If the remote version is higher, then the user is warned in the status bar
        and advised to get the new one. Otherwise, ignore.
""" def update_available_button(): update_button = QPushButton("New Version Available!") update_button.setFlat(True) update_button.clicked.connect(lambda: open_new_tab("https://github.com/GandaG/fomod-designer/releases/latest")) self.statusBar().addPermanentWidget(update_button) def check_remote(): try: response = get("https://api.github.com/repos/GandaG/fomod-designer/releases", timeout=10) if response.status_code == codes.ok and response.json()[0]["tag_name"][1:] > __version__: self.update_check_update_available.emit() else: self.update_check_up_to_date.emit() except Timeout: self.update_check_timeout.emit() except ConnectionError: self.update_check_connection_error.emit() self.update_check_up_to_date.connect(lambda: self.setStatusBar(QStatusBar())) self.update_check_up_to_date.connect( lambda: self.statusBar().addPermanentWidget(QLabel("Everything is up-to-date.")) ) self.update_check_update_available.connect(lambda: self.setStatusBar(QStatusBar())) self.update_check_update_available.connect(update_available_button) self.update_check_timeout.connect(lambda: self.setStatusBar(QStatusBar())) self.update_check_timeout.connect(lambda: self.statusBar().addPermanentWidget(QLabel("Connection timed out."))) self.update_check_connection_error.connect(lambda: self.setStatusBar(QStatusBar())) self.update_check_connection_error.connect( lambda: self.statusBar().addPermanentWidget(QLabel( "Could not connect to remote server, check your internet connection." )) ) self.statusBar().addPermanentWidget(QLabel("Checking for updates...")) Thread(target=check_remote).start() def hide_node(self): if self.current_node is not None: self.current_node.set_hidden(True) def show_node(self): if self.current_node is not None: self.current_node.set_hidden(False) def open(self, path=""): """ Open a new installer if one exists at path (if no path is given a dialog pops up asking the user to choose one) or create a new one. 
If enabled in the Settings the installer is also validated and checked for common errors. :param path: Optional. The path to open/create an installer at. """ try: answer = self.check_fomod_state() if answer == QMessageBox.Save: self.save() elif answer == QMessageBox.Cancel: return else: pass if not path: open_dialog = QFileDialog() package_path = open_dialog.getExistingDirectory(self, "Select package root directory:", expanduser("~")) else: package_path = path if package_path: info_root, config_root = import_(normpath(package_path)) if info_root is not None and config_root is not None: if self.settings_dict["Load"]["validate"]: try: validate_tree( parse(BytesIO(tostring(config_root, pretty_print=True))), join(cur_folder, "resources", "mod_schema.xsd"), ) except ValidationError as p: generic_errorbox(p.title, str(p), p.detailed).exec_() if not self.settings_dict["Load"]["validate_ignore"]: return if self.settings_dict["Load"]["warnings"]: try: check_warnings( package_path, config_root, ) except WarningError as p: generic_errorbox(p.title, str(p), p.detailed).exec_() if not self.settings_dict["Save"]["warn_ignore"]: return else: info_root, config_root = new() self._package_path = package_path self._info_root, self._config_root = info_root, config_root self.node_tree_model.clear() self.node_tree_model.appendRow(self._info_root.model_item) self.node_tree_model.appendRow(self._config_root.model_item) self.package_name = basename(normpath(self._package_path)) self.current_node = None self.xml_code_changed.emit(self.current_node) self.undo_stack.setClean() self.undo_stack.cleanChanged.emit(True) self.undo_stack.clear() QApplication.clipboard().clear() self.actionPaste.setEnabled(False) self.action_Delete.setEnabled(False) self.update_recent_files(self._package_path) self.clear_prop_list() self.button_wizard.setEnabled(False) except (DesignerError, ValidatorError) as p: generic_errorbox(p.title, str(p), p.detailed).exec_() return def save(self): """ Saves the current 
installer at the current path. If enabled in the Settings the installer is also validated and checked for common errors. """ try: if self._info_root is None and self._config_root is None: return elif not self.undo_stack.isClean(): self._info_root.sort() self._config_root.sort() if self.settings_dict["Save"]["validate"]: try: validate_tree( parse(BytesIO(tostring(self._config_root, pretty_print=True))), join(cur_folder, "resources", "mod_schema.xsd"), ) except ValidationError as e: generic_errorbox(e.title, str(e), e.detailed).exec_() if not self.settings_dict["Save"]["validate_ignore"]: return if self.settings_dict["Save"]["warnings"]: try: check_warnings( self._package_path, self._config_root, ) except MissingFolderError: pass except WarningError as e: generic_errorbox(e.title, str(e), e.detailed).exec_() if not self.settings_dict["Save"]["warn_ignore"]: return export(self._info_root, self._config_root, self._package_path) self.undo_stack.setClean() except (DesignerError, ValidatorError) as e: generic_errorbox(e.title, str(e), e.detailed).exec_() return def settings(self): """ Opens the Settings dialog. """ config = SettingsDialog(self) config.exec_() self.settings_dict = read_settings() def refresh(self): """ Refreshes all the previews if the refresh rate in Settings is high enough. """ if self.settings_dict["General"]["code_refresh"] >= 1: self.update_previews.emit(self.current_node) def delete(self): """ Deletes the current node in the tree. No effect when using the Basic View. 
""" if self.current_node is None: self.statusBar().showMessage("Can't delete nothing.") elif self.current_node.getparent() is None: self.statusBar().showMessage("Can't delete root nodes.") else: if self.current_node.is_hidden: self.current_node.set_hidden(False) self.undo_stack.push(self.DeleteCommand( self.current_node, self.node_tree_model, self.select_node )) @staticmethod def help(): docs_url = "http://fomod-designer.readthedocs.io/en/stable/index.html" local_docs = "file://" + abspath(join(cur_folder, "resources", "docs", "index.html")) try: if head(docs_url, timeout=0.5).status_code == codes.ok: open_new_tab(docs_url) else: raise ConnectionError() except (Timeout, ConnectionError): open_new_tab(local_docs) @staticmethod def about(parent): """ Opens the About dialog. This method is static to be able to be called from the Intro window. :param parent: The parent of the dialog. """ about_dialog = About(parent) about_dialog.exec_() def clear_recent_files(self): """ Clears the Recent Files gui menu and settings. """ self.settings_dict["Recent Files"].clear() makedirs(join(expanduser("~"), ".fomod"), exist_ok=True) with open(join(expanduser("~"), ".fomod", ".designer"), "w") as configfile: set_encoder_options("json", indent=4) configfile.write(encode(self.settings_dict)) for child in self.menu_Recent_Files.actions(): if child is not self.actionClear: self.menu_Recent_Files.removeAction(child) del child def update_recent_files(self, add_new=None): """ Updates the Recent Files gui menu and settings. If called when opening an installer, pass that installer as add_new so it can be added to list or placed at the top. :param add_new: If a new installer is being opened, add it to the list or move it to the top. 
""" file_list = deque(self.settings_dict["Recent Files"], maxlen=5) self.clear_recent_files() # check for invalid paths and remove them for path in file_list: if not isdir(path): file_list.remove(path) # check if the path is new or if it already exists - delete the last one or reorder respectively if add_new: if add_new in file_list: file_list.remove(add_new) file_list.appendleft(add_new) # write the new list to the settings file self.settings_dict["Recent Files"] = file_list makedirs(join(expanduser("~"), ".fomod"), exist_ok=True) with open(join(expanduser("~"), ".fomod", ".designer"), "w") as configfile: set_encoder_options("json", indent=4) configfile.write(encode(self.settings_dict)) # populate the gui menu with the new files list self.menu_Recent_Files.removeAction(self.actionClear) for path in self.settings_dict["Recent Files"]: action = self.menu_Recent_Files.addAction(path) action.triggered.connect(lambda _, path_=path: self.open(path_)) self.menu_Recent_Files.addSeparator() self.menu_Recent_Files.addAction(self.actionClear) def update_children_box(self): """ Updates the possible children to add in Object Box. 
""" spacer = self.layout_box.takeAt(self.layout_box.count() - 1) for index in reversed(range(self.layout_box.count())): widget = self.layout_box.takeAt(index).widget() if widget is not None: widget.deleteLater() children_list = list(self.current_node.allowed_children) if self.current_node.tag is not Comment: children_list.insert(0, NodeComment) for child in children_list: new_object = child() child_button = QPushButton(new_object.name) font_button = QFont() font_button.setPointSize(8) child_button.setFont(font_button) child_button.setMaximumSize(5000, 30) child_button.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding) child_button.setStatusTip("A possible child node.") child_button.clicked.connect( lambda _, tag_=new_object.tag, parent_node=self.current_node, tree_model=self.node_tree_model, settings_dict=self.settings_dict, : self.undo_stack.push(self.AddChildCommand( tag_, parent_node, tree_model, settings_dict, self.select_node )) ) if not self.current_node.can_add_child(new_object): child_button.setEnabled(False) if child in self.current_node.required_children: child_button.setStyleSheet( "background-color: " + QColor(self.settings_dict["Appearance"]["required_colour"]).name() ) child_button.setStatusTip( "A button of this colour indicates that at least one of this node is required." ) if child in self.current_node.either_children_group: child_button.setStyleSheet( "background-color: " + QColor(self.settings_dict["Appearance"]["either_colour"]).name() ) child_button.setStatusTip( "A button of this colour indicates that only one of these buttons must be used." ) if child in self.current_node.at_least_one_children_group: child_button.setStyleSheet( "background-color: " + QColor(self.settings_dict["Appearance"]["atleastone_colour"]).name() ) child_button.setStatusTip( "A button of this colour indicates that from all of these buttons, at least one is required." 
) self.layout_box.addWidget(child_button) self.layout_box.addSpacerItem(spacer) def clear_prop_list(self): """ Deletes all the properties from the Property Editor """ self._current_prop_list.clear() for index in reversed(range(self.layout_prop_editor.count())): widget = self.layout_prop_editor.takeAt(index).widget() if widget is not None: widget.deleteLater() def update_props_list(self): """ Updates the Property Editor's prop list. Deletes everything and then creates the list from the node's properties. """ self.clear_prop_list() prop_index = 0 og_values = self.original_prop_value_list prop_list = self._current_prop_list props = self.current_node.properties for key in props: if not props[key].editable: continue label = QLabel(self.dockWidgetContents) label.setObjectName("label_" + str(prop_index)) label.setText(props[key].name) self.layout_prop_editor.setWidget(prop_index, QFormLayout.LabelRole, label) if type(props[key]) is PropertyText: def open_plain_editor(line_edit_, node): dialog_ui = window_plaintexteditor.Ui_Dialog() dialog = QDialog(self) dialog_ui.setupUi(dialog) dialog_ui.edit_text.setPlainText(line_edit_.text()) if node.tag is Comment: for sequence in node.forbidden_sequences: dialog_ui.edit_text.textChanged.connect( lambda: dialog_ui.edit_text.setText( dialog_ui.edit_text.toPlainText().replace(sequence, "") ) if sequence in dialog_ui.edit_text.toPlainText() else None ) dialog_ui.buttonBox.accepted.connect(dialog.close) dialog_ui.buttonBox.accepted.connect(lambda: line_edit_.setText(dialog_ui.edit_text.toPlainText())) dialog_ui.buttonBox.accepted.connect(line_edit_.editingFinished.emit) dialog.exec_() og_values[prop_index] = props[key].value prop_list.append(QWidget(self.dockWidgetContents)) layout = QHBoxLayout(prop_list[prop_index]) text_edit = QLineEdit(prop_list[prop_index]) text_button = QPushButton(prop_list[prop_index]) text_button.setText("...") text_button.setMaximumWidth(30) layout.addWidget(text_edit) layout.addWidget(text_button) 
layout.setContentsMargins(0, 0, 0, 0) text_edit.setText(props[key].value) if self.current_node.tag is Comment: for sequence in self.current_node.forbidden_sequences: text_edit.textChanged.connect( lambda: text_edit.setText( text_edit.text().replace(sequence, "") ) if sequence in text_edit.text() else None ) text_edit.textChanged.connect(props[key].set_value) text_edit.textChanged[str].connect(self.current_node.write_attribs) text_edit.textChanged[str].connect(self.current_node.update_item_name) text_edit.textChanged[str].connect( lambda: self.xml_code_changed.emit(self.current_node) if self.settings_dict["General"]["code_refresh"] >= 3 else None ) text_edit.editingFinished.connect( lambda index=prop_index: self.undo_stack.push( self.WidgetLineEditChangeCommand( og_values[index], text_edit.text(), self.current_prop_list, index, self.node_tree_model, self.current_node.model_item, self.select_node ) ) if og_values[index] != text_edit.text() else None ) text_edit.editingFinished.connect( lambda index=prop_index: og_values.update({index: text_edit.text()}) ) text_button.clicked.connect( lambda _, line_edit_=text_edit, node=self.current_node: open_plain_editor(line_edit_, node) ) if type(props[key]) is PropertyHTML: def open_plain_editor(line_edit_): dialog_ui = window_texteditor.Ui_Dialog() dialog = QDialog(self) dialog_ui.setupUi(dialog) dialog_ui.radio_html.toggled.connect(dialog_ui.widget_warning.setVisible) dialog_ui.button_colour.clicked.connect( lambda: dialog_ui.edit_text.setTextColor(QColorDialog.getColor()) ) dialog_ui.button_bold.clicked.connect( lambda: dialog_ui.edit_text.setFontWeight(QFont.Bold) if dialog_ui.edit_text.fontWeight() == QFont.Normal else dialog_ui.edit_text.setFontWeight(QFont.Normal) ) dialog_ui.button_italic.clicked.connect( lambda: dialog_ui.edit_text.setFontItalic(not dialog_ui.edit_text.fontItalic()) ) dialog_ui.button_underline.clicked.connect( lambda: dialog_ui.edit_text.setFontUnderline(not dialog_ui.edit_text.fontUnderline()) ) 
dialog_ui.button_align_left.clicked.connect( lambda: dialog_ui.edit_text.setAlignment(Qt.AlignLeft) ) dialog_ui.button_align_center.clicked.connect( lambda: dialog_ui.edit_text.setAlignment(Qt.AlignCenter) ) dialog_ui.button_align_right.clicked.connect( lambda: dialog_ui.edit_text.setAlignment(Qt.AlignRight) ) dialog_ui.button_align_justify.clicked.connect( lambda: dialog_ui.edit_text.setAlignment(Qt.AlignJustify) ) dialog_ui.buttonBox.accepted.connect(dialog.close) dialog_ui.buttonBox.accepted.connect( lambda: line_edit_.setText(dialog_ui.edit_text.toPlainText()) if dialog_ui.radio_plain.isChecked() else line_edit_.setText(dialog_ui.edit_text.toHtml()) ) dialog_ui.buttonBox.accepted.connect(line_edit_.editingFinished.emit) dialog_ui.widget_warning.hide() dialog_ui.label_warning.setPixmap(QPixmap(join(cur_folder, "resources/logos/logo_danger.png"))) dialog_ui.button_colour.setIcon(QIcon(join(cur_folder, "resources/logos/logo_font_colour.png"))) dialog_ui.button_bold.setIcon(QIcon(join(cur_folder, "resources/logos/logo_font_bold.png"))) dialog_ui.button_italic.setIcon(QIcon(join(cur_folder, "resources/logos/logo_font_italic.png"))) dialog_ui.button_underline.setIcon(QIcon( join(cur_folder, "resources/logos/logo_font_underline.png") )) dialog_ui.button_align_left.setIcon(QIcon( join(cur_folder, "resources/logos/logo_font_align_left.png") )) dialog_ui.button_align_center.setIcon(QIcon( join(cur_folder, "resources/logos/logo_font_align_center.png") )) dialog_ui.button_align_right.setIcon(QIcon( join(cur_folder, "resources/logos/logo_font_align_right.png") )) dialog_ui.button_align_justify.setIcon(QIcon( join(cur_folder, "resources/logos/logo_font_align_justify.png") )) dialog_ui.edit_text.setText(line_edit_.text()) dialog.exec_() og_values[prop_index] = props[key].value prop_list.append(QWidget(self.dockWidgetContents)) layout = QHBoxLayout(prop_list[prop_index]) text_edit = QLineEdit(prop_list[prop_index]) text_button = QPushButton(prop_list[prop_index]) 
text_button.setText("...") text_button.setMaximumWidth(30) layout.addWidget(text_edit) layout.addWidget(text_button) layout.setContentsMargins(0, 0, 0, 0) text_edit.setText(props[key].value) text_edit.textChanged.connect(props[key].set_value) text_edit.textChanged[str].connect(self.current_node.write_attribs) text_edit.textChanged[str].connect(self.current_node.update_item_name) text_edit.textChanged[str].connect( lambda: self.xml_code_changed.emit(self.current_node) if self.settings_dict["General"]["code_refresh"] >= 3 else None ) text_edit.editingFinished.connect( lambda index=prop_index: self.undo_stack.push( self.WidgetLineEditChangeCommand( og_values[index], text_edit.text(), self.current_prop_list, index, self.node_tree_model, self.current_node.model_item, self.select_node ) ) if og_values[index] != text_edit.text() else None ) text_edit.editingFinished.connect( lambda index=prop_index: og_values.update({index: text_edit.text()}) ) text_button.clicked.connect(lambda _, line_edit_=text_edit: open_plain_editor(line_edit_)) if type(props[key]) is PropertyFlagLabel: og_values[prop_index] = props[key].value prop_list.append(QLineEdit(self.dockWidgetContents)) self.update_flag_label_completer(self.flag_label_model, self._config_root) self.flag_label_completer.activated[str].connect(prop_list[prop_index].setText) prop_list[prop_index].setCompleter(self.flag_label_completer) prop_list[prop_index].textChanged[str].connect( lambda text: self.update_flag_value_completer(self.flag_value_model, self._config_root, text) ) prop_list[prop_index].setText(props[key].value) prop_list[prop_index].textChanged[str].connect(props[key].set_value) prop_list[prop_index].textChanged[str].connect(self.current_node.write_attribs) prop_list[prop_index].textChanged[str].connect(self.current_node.update_item_name) prop_list[prop_index].textChanged[str].connect( lambda: self.xml_code_changed.emit(self.current_node) if self.settings_dict["General"]["code_refresh"] >= 3 else None ) 
prop_list[prop_index].editingFinished.connect( lambda index=prop_index: self.undo_stack.push( self.LineEditChangeCommand( og_values[index], prop_list[index].text(), self.current_prop_list, index, self.node_tree_model, self.current_node.model_item, self.select_node ) ) if og_values[index] != prop_list[index].text() else None ) prop_list[prop_index].editingFinished.connect( lambda index=prop_index: og_values.update({index: prop_list[index].text()}) ) if type(props[key]) is PropertyFlagValue: og_values[prop_index] = props[key].value prop_list.append(QLineEdit(self.dockWidgetContents)) prop_list[prop_index].setCompleter(self.flag_value_completer) self.flag_value_completer.activated[str].connect(prop_list[prop_index].setText) prop_list[prop_index].setText(props[key].value) prop_list[prop_index].textChanged[str].connect(props[key].set_value) prop_list[prop_index].textChanged[str].connect(self.current_node.write_attribs) prop_list[prop_index].textChanged[str].connect(self.current_node.update_item_name) prop_list[prop_index].textChanged[str].connect( lambda: self.xml_code_changed.emit(self.current_node) if self.settings_dict["General"]["code_refresh"] >= 3 else None ) prop_list[prop_index].editingFinished.connect( lambda index=prop_index: self.undo_stack.push( self.LineEditChangeCommand( og_values[index], prop_list[index].text(), self.current_prop_list, index, self.node_tree_model, self.current_node.model_item, self.select_node ) ) if og_values[index] != prop_list[index].text() else None ) prop_list[prop_index].editingFinished.connect( lambda index=prop_index: og_values.update({index: prop_list[index].text()}) ) elif type(props[key]) is PropertyInt: og_values[prop_index] = props[key].value prop_list.append(QSpinBox(self.dockWidgetContents)) prop_list[prop_index].setValue(int(props[key].value)) prop_list[prop_index].setMinimum(props[key].min) prop_list[prop_index].setMaximum(props[key].max) prop_list[prop_index].valueChanged.connect(props[key].set_value) 
prop_list[prop_index].valueChanged.connect(self.current_node.write_attribs) prop_list[prop_index].valueChanged.connect( lambda: self.xml_code_changed.emit(self.current_node) if self.settings_dict["General"]["code_refresh"] >= 3 else None ) prop_list[prop_index].valueChanged.connect( lambda new_value, index=prop_index: self.undo_stack.push( self.SpinBoxChangeCommand( og_values[index], new_value, self.current_prop_list, index, self.node_tree_model, self.current_node.model_item, self.select_node ) ) if og_values[index] != new_value else None ) prop_list[prop_index].valueChanged.connect( lambda new_value, index=prop_index: og_values.update({index: new_value}) ) elif type(props[key]) is PropertyCombo: og_values[prop_index] = props[key].value prop_list.append(QComboBox(self.dockWidgetContents)) prop_list[prop_index].insertItems(0, props[key].values) prop_list[prop_index].setCurrentIndex(props[key].values.index(props[key].value)) prop_list[prop_index].currentTextChanged.connect(props[key].set_value) prop_list[prop_index].currentTextChanged.connect(self.current_node.write_attribs) prop_list[prop_index].currentTextChanged.connect(self.current_node.update_item_name) prop_list[prop_index].currentTextChanged.connect( lambda: self.xml_code_changed.emit(self.current_node) if self.settings_dict["General"]["code_refresh"] >= 3 else None ) prop_list[prop_index].activated[str].connect( lambda new_value, index=prop_index: self.undo_stack.push( self.ComboBoxChangeCommand( og_values[index], new_value, self.current_prop_list, index, self.node_tree_model, self.current_node.model_item, self.select_node ) ) ) prop_list[prop_index].activated[str].connect( lambda new_value, index=prop_index: og_values.update({index: new_value}) ) elif type(props[key]) is PropertyFile: def button_clicked(line_edit_): open_dialog = QFileDialog() file_path = open_dialog.getOpenFileName(self, "Select File:", self._package_path) if file_path[0]: line_edit.setText(relpath(file_path[0], self._package_path)) 
line_edit_.editingFinished.emit() og_values[prop_index] = props[key].value prop_list.append(QWidget(self.dockWidgetContents)) layout = QHBoxLayout(prop_list[prop_index]) line_edit = QLineEdit(prop_list[prop_index]) push_button = QPushButton(prop_list[prop_index]) push_button.setText("...") push_button.setMaximumWidth(30) layout.addWidget(line_edit) layout.addWidget(push_button) layout.setContentsMargins(0, 0, 0, 0) line_edit.setText(props[key].value) line_edit.textChanged.connect(props[key].set_value) line_edit.textChanged[str].connect(self.current_node.write_attribs) line_edit.textChanged[str].connect(self.current_node.update_item_name) line_edit.textChanged[str].connect( lambda: self.xml_code_changed.emit(self.current_node) if self.settings_dict["General"]["code_refresh"] >= 3 else None ) line_edit.editingFinished.connect( lambda index=prop_index: self.undo_stack.push( self.WidgetLineEditChangeCommand( og_values[index], line_edit.text(), self.current_prop_list, index, self.node_tree_model, self.current_node.model_item, self.select_node ) ) if og_values[index] != line_edit.text() else None ) line_edit.editingFinished.connect( lambda index=prop_index: og_values.update({index: line_edit.text()}) ) push_button.clicked.connect(lambda _, line_edit_=line_edit: button_clicked(line_edit_)) elif type(props[key]) is PropertyFolder: def button_clicked(line_edit_): open_dialog = QFileDialog() folder_path = open_dialog.getExistingDirectory(self, "Select folder:", self._package_path) if folder_path: line_edit.setText(relpath(folder_path, self._package_path)) line_edit_.editingFinished.emit() og_values[prop_index] = props[key].value prop_list.append(QWidget(self.dockWidgetContents)) layout = QHBoxLayout(prop_list[prop_index]) line_edit = QLineEdit(prop_list[prop_index]) push_button = QPushButton(prop_list[prop_index]) push_button.setText("...") push_button.setMaximumWidth(30) layout.addWidget(line_edit) layout.addWidget(push_button) layout.setContentsMargins(0, 0, 0, 0) 
line_edit.setText(props[key].value) line_edit.textChanged.connect(props[key].set_value) line_edit.textChanged.connect(self.current_node.write_attribs) line_edit.textChanged.connect(self.current_node.update_item_name) line_edit.textChanged.connect( lambda: self.xml_code_changed.emit(self.current_node) if self.settings_dict["General"]["code_refresh"] >= 3 else None ) line_edit.editingFinished.connect( lambda index=prop_index: self.undo_stack.push( self.WidgetLineEditChangeCommand( og_values[index], line_edit.text(), self.current_prop_list, index, self.node_tree_model, self.current_node.model_item, self.select_node ) ) if og_values[index] != line_edit.text() else None ) line_edit.editingFinished.connect( lambda index=prop_index: og_values.update({index: line_edit.text()}) ) push_button.clicked.connect(lambda _, line_edit_=line_edit: button_clicked(line_edit_)) elif type(props[key]) is PropertyColour: def button_clicked(line_edit_): init_colour = QColor("#" + props[key].value) colour_dialog = QColorDialog() colour = colour_dialog.getColor(init_colour, self, "Choose Colour:") if colour.isValid(): line_edit.setText(colour.name()[1:]) line_edit_.editingFinished.emit() def update_button_colour(text): colour = QColor("#" + text) if colour.isValid() and len(text) == 6: push_button.setStyleSheet("background-color: " + colour.name()) push_button.setIcon(QIcon()) else: push_button.setStyleSheet("background-color: #ffffff") icon = QIcon() icon.addPixmap(QPixmap(join(cur_folder, "resources/logos/logo_danger.png")), QIcon.Normal, QIcon.Off) push_button.setIcon(icon) og_values[prop_index] = props[key].value prop_list.append(QWidget(self.dockWidgetContents)) layout = QHBoxLayout(prop_list[prop_index]) line_edit = QLineEdit(prop_list[prop_index]) line_edit.setMaxLength(6) push_button = QPushButton(prop_list[prop_index]) push_button.setMinimumHeight(21) push_button.setMinimumWidth(30) push_button.setMaximumHeight(21) push_button.setMaximumWidth(30) layout.addWidget(line_edit) 
layout.addWidget(push_button) layout.setContentsMargins(0, 0, 0, 0) line_edit.setText(props[key].value) update_button_colour(line_edit.text()) line_edit.textChanged.connect(props[key].set_value) line_edit.textChanged.connect(update_button_colour) line_edit.textChanged.connect(self.current_node.write_attribs) line_edit.textChanged.connect( lambda: self.xml_code_changed.emit(self.current_node) if self.settings_dict["General"]["code_refresh"] >= 3 else None ) line_edit.editingFinished.connect( lambda index=prop_index: self.undo_stack.push( self.WidgetLineEditChangeCommand( og_values[index], line_edit.text(), self.current_prop_list, index, self.node_tree_model, self.current_node.model_item, self.select_node ) ) if og_values[index] != line_edit.text() else None ) line_edit.editingFinished.connect( lambda index=prop_index: og_values.update({index: line_edit.text()}) ) push_button.clicked.connect(lambda _, line_edit_=line_edit: button_clicked(line_edit_)) self.layout_prop_editor.setWidget(prop_index, QFormLayout.FieldRole, prop_list[prop_index]) prop_list[prop_index].setObjectName(str(prop_index)) prop_index += 1 def run_wizard(self): """ Called when the wizard button is clicked. Sets up the main window and runs the wizard. 
""" def close(): wizard.deleteLater() self.action_Object_Tree.toggled.emit(enabled_tree) self.actionObject_Box.toggled.emit(enabled_box) self.action_Property_Editor.toggled.emit(enabled_list) self.menu_File.setEnabled(True) self.menu_Tools.setEnabled(True) self.menu_View.setEnabled(True) current_index = self.node_tree_model.indexFromItem(self.current_node.model_item) enabled_tree = self.action_Object_Tree.isChecked() enabled_box = self.actionObject_Box.isChecked() enabled_list = self.action_Property_Editor.isChecked() self.action_Object_Tree.toggled.emit(False) self.actionObject_Box.toggled.emit(False) self.action_Property_Editor.toggled.emit(False) self.menu_File.setEnabled(False) self.menu_Tools.setEnabled(False) self.menu_View.setEnabled(False) parent_node = self.current_node.getparent() original_node = self.current_node kwargs = {"package_path": self._package_path} wizard = self.current_node.wizard(self, self.current_node, self.xml_code_changed, **kwargs) self.splitter.insertWidget(0, wizard) wizard.cancelled.connect(close) wizard.cancelled.connect(lambda: self.select_node.emit(current_index)) wizard.finished.connect(close) wizard.finished.connect( lambda result: self.undo_stack.push( self.RunWizardCommand( parent_node, original_node, result, self.node_tree_model, self.select_node ) ) ) wizard.finished.connect(lambda: self.select_node.emit(current_index)) def check_fomod_state(self): """ Checks whether the installer has unsaved changes. """ if not self.undo_stack.isClean(): msg_box = QMessageBox() msg_box.setWindowTitle("The installer has been modified.") msg_box.setText("Do you want to save your changes?") msg_box.setStandardButtons(QMessageBox.Save | QMessageBox.Discard | QMessageBox.Cancel) msg_box.setDefaultButton(QMessageBox.Save) return msg_box.exec_() else: return def closeEvent(self, event): """ Override the Qt close event to account for unsaved changes. 
:param event: """ answer = self.check_fomod_state() if answer == QMessageBox.Save: self.save() elif answer == QMessageBox.Discard: pass elif answer == QMessageBox.Cancel: event.ignore() class SettingsDialog(QDialog, window_settings.Ui_Dialog): """ The class for the settings window. Subclassed from QDialog and created in Qt Designer. """ def __init__(self, parent): super().__init__(parent=parent) self.setupUi(self) self.setWindowFlags(Qt.WindowSystemMenuHint | Qt.WindowTitleHint | Qt.Dialog) self.label_warning_palette.setPixmap(QPixmap(join(cur_folder, "resources/logos/logo_danger.png"))) self.label_warning_style.setPixmap(QPixmap(join(cur_folder, "resources/logos/logo_danger.png"))) self.widget_warning_palette.hide() self.widget_warning_style.hide() self.settings_dict = read_settings() self.buttonBox.accepted.connect(self.accepted) self.buttonBox.rejected.connect(self.close) self.check_valid_load.stateChanged.connect(self.check_valid_load_ignore.setEnabled) self.check_warn_load.stateChanged.connect(self.check_warn_load_ignore.setEnabled) self.check_valid_save.stateChanged.connect(self.check_valid_save_ignore.setEnabled) self.check_warn_save.stateChanged.connect(self.check_warn_save_ignore.setEnabled) self.check_installSteps.stateChanged.connect(self.combo_installSteps.setEnabled) self.check_optionalFileGroups.stateChanged.connect(self.combo_optionalFileGroups.setEnabled) self.check_type.stateChanged.connect(self.combo_type.setEnabled) self.check_defaultType.stateChanged.connect(self.combo_defaultType.setEnabled) self.button_colour_required.clicked.connect( lambda: self.button_colour_required.setStyleSheet( "background-color: " + QColorDialog().getColor( QColor(self.button_colour_required.styleSheet().split()[1]), self, "Choose Colour:" ).name() ) ) self.button_colour_atleastone.clicked.connect( lambda: self.button_colour_atleastone.setStyleSheet( "background-color: " + QColorDialog().getColor( QColor(self.button_colour_atleastone.styleSheet().split()[1]), self, 
"Choose Colour:" ).name() ) ) self.button_colour_either.clicked.connect( lambda: self.button_colour_either.setStyleSheet( "background-color: " + QColorDialog().getColor( QColor(self.button_colour_either.styleSheet().split()[1]), self, "Choose Colour:" ).name() ) ) self.button_colour_reset_required.clicked.connect( lambda: self.button_colour_required.setStyleSheet("background-color: #d90027") ) self.button_colour_reset_atleastone.clicked.connect( lambda: self.button_colour_atleastone.setStyleSheet("background-color: #d0d02e") ) self.button_colour_reset_either.clicked.connect( lambda: self.button_colour_either.setStyleSheet("background-color: #ffaa7f") ) self.combo_style.currentTextChanged.connect( lambda text: self.widget_warning_style.show() if text != self.settings_dict["Appearance"]["style"] else self.widget_warning_style.hide() ) self.combo_palette.currentTextChanged.connect( lambda text: self.widget_warning_palette.show() if text != self.settings_dict["Appearance"]["palette"] else self.widget_warning_palette.hide() ) self.combo_code_refresh.setCurrentIndex(self.settings_dict["General"]["code_refresh"]) self.check_intro.setChecked(self.settings_dict["General"]["show_intro"]) self.check_advanced.setChecked(self.settings_dict["General"]["show_advanced"]) self.check_tutorial.setChecked(self.settings_dict["General"]["tutorial_advanced"]) self.check_valid_load.setChecked(self.settings_dict["Load"]["validate"]) self.check_valid_load_ignore.setChecked(self.settings_dict["Load"]["validate_ignore"]) self.check_warn_load.setChecked(self.settings_dict["Load"]["warnings"]) self.check_warn_load_ignore.setChecked(self.settings_dict["Load"]["warn_ignore"]) self.check_valid_save.setChecked(self.settings_dict["Save"]["validate"]) self.check_valid_save_ignore.setChecked(self.settings_dict["Save"]["validate_ignore"]) self.check_warn_save.setChecked(self.settings_dict["Save"]["warnings"]) self.check_warn_save_ignore.setChecked(self.settings_dict["Save"]["warn_ignore"]) 
        # Populate the "Defaults" tab: each setting exposes enabled()/value(),
        # mirrored into a checkbox (enabled) plus a combo box (value).
        self.check_installSteps.setChecked(self.settings_dict["Defaults"]["installSteps"].enabled())
        self.combo_installSteps.setEnabled(self.settings_dict["Defaults"]["installSteps"].enabled())
        self.combo_installSteps.setCurrentText(self.settings_dict["Defaults"]["installSteps"].value())
        self.check_optionalFileGroups.setChecked(self.settings_dict["Defaults"]["optionalFileGroups"].enabled())
        self.combo_optionalFileGroups.setEnabled(self.settings_dict["Defaults"]["optionalFileGroups"].enabled())
        self.combo_optionalFileGroups.setCurrentText(self.settings_dict["Defaults"]["optionalFileGroups"].value())
        self.check_type.setChecked(self.settings_dict["Defaults"]["type"].enabled())
        self.combo_type.setEnabled(self.settings_dict["Defaults"]["type"].enabled())
        self.combo_type.setCurrentText(self.settings_dict["Defaults"]["type"].value())
        self.check_defaultType.setChecked(self.settings_dict["Defaults"]["defaultType"].enabled())
        self.combo_defaultType.setEnabled(self.settings_dict["Defaults"]["defaultType"].enabled())
        self.combo_defaultType.setCurrentText(self.settings_dict["Defaults"]["defaultType"].value())
        # The configured colours are stored in the buttons' stylesheets; accepted()
        # later reads them back out of styleSheet().split()[1].
        self.button_colour_required.setStyleSheet(
            "background-color: " + self.settings_dict["Appearance"]["required_colour"]
        )
        self.button_colour_atleastone.setStyleSheet(
            "background-color: " + self.settings_dict["Appearance"]["atleastone_colour"]
        )
        self.button_colour_either.setStyleSheet(
            "background-color: " + self.settings_dict["Appearance"]["either_colour"]
        )
        # An empty style/palette setting means "use the default".
        if self.settings_dict["Appearance"]["style"]:
            self.combo_style.setCurrentText(self.settings_dict["Appearance"]["style"])
        else:
            self.combo_style.setCurrentText("Default")
        if self.settings_dict["Appearance"]["palette"]:
            self.combo_palette.setCurrentText(self.settings_dict["Appearance"]["palette"])
        else:
            self.combo_palette.setCurrentText("Default")

    def accepted(self):
        """
        Read every widget back into ``settings_dict`` and persist it to disk.

        Connected to ``buttonBox.accepted`` in ``__init__``.
        NOTE(review): the name shadows QDialog's built-in ``accepted`` signal —
        presumably intentional here, but worth confirming.
        """
        self.settings_dict["General"]["code_refresh"] = self.combo_code_refresh.currentIndex()
        self.settings_dict["General"]["show_intro"] = self.check_intro.isChecked()
        self.settings_dict["General"]["show_advanced"] = self.check_advanced.isChecked()
        self.settings_dict["General"]["tutorial_advanced"] = self.check_tutorial.isChecked()
        self.settings_dict["Load"]["validate"] = self.check_valid_load.isChecked()
        self.settings_dict["Load"]["validate_ignore"] = self.check_valid_load_ignore.isChecked()
        self.settings_dict["Load"]["warnings"] = self.check_warn_load.isChecked()
        self.settings_dict["Load"]["warn_ignore"] = self.check_warn_load_ignore.isChecked()
        self.settings_dict["Save"]["validate"] = self.check_valid_save.isChecked()
        self.settings_dict["Save"]["validate_ignore"] = self.check_valid_save_ignore.isChecked()
        self.settings_dict["Save"]["warnings"] = self.check_warn_save.isChecked()
        self.settings_dict["Save"]["warn_ignore"] = self.check_warn_save_ignore.isChecked()
        self.settings_dict["Defaults"]["installSteps"].set_enabled(self.check_installSteps.isChecked())
        self.settings_dict["Defaults"]["installSteps"].set_value(self.combo_installSteps.currentText())
        self.settings_dict["Defaults"]["optionalFileGroups"].set_enabled(self.check_optionalFileGroups.isChecked())
        self.settings_dict["Defaults"]["optionalFileGroups"].set_value(self.combo_optionalFileGroups.currentText())
        self.settings_dict["Defaults"]["type"].set_enabled(self.check_type.isChecked())
        self.settings_dict["Defaults"]["type"].set_value(self.combo_type.currentText())
        self.settings_dict["Defaults"]["defaultType"].set_enabled(self.check_defaultType.isChecked())
        self.settings_dict["Defaults"]["defaultType"].set_value(self.combo_defaultType.currentText())
        # Colours travel via the buttons' "background-color: <value>" stylesheets.
        self.settings_dict["Appearance"]["required_colour"] = self.button_colour_required.styleSheet().split()[1]
        self.settings_dict["Appearance"]["atleastone_colour"] = self.button_colour_atleastone.styleSheet().split()[1]
        self.settings_dict["Appearance"]["either_colour"] = self.button_colour_either.styleSheet().split()[1]
        # "Default" is stored as the empty string.
        if self.combo_style.currentText() != "Default":
            self.settings_dict["Appearance"]["style"] = self.combo_style.currentText()
        else:
            self.settings_dict["Appearance"]["style"] = ""
        if self.combo_palette.currentText() != "Default":
            self.settings_dict["Appearance"]["palette"] = self.combo_palette.currentText()
        else:
            self.settings_dict["Appearance"]["palette"] = ""
        # Persist the settings as JSON to ~/.fomod/.designer.
        makedirs(join(expanduser("~"), ".fomod"), exist_ok=True)
        with open(join(expanduser("~"), ".fomod", ".designer"), "w") as configfile:
            set_encoder_options("json", indent=4)
            configfile.write(encode(self.settings_dict))
        self.close()


class About(QDialog, window_about.Ui_Dialog):
    """
    The class for the about window. Subclassed from QDialog and created in Qt Designer.
    """
    def __init__(self, parent):
        super().__init__(parent=parent)
        self.setupUi(self)
        if parent:
            # Centre this dialog over the parent window.
            self.move(
                parent.window().frameGeometry().topLeft() +
                parent.window().rect().center() -
                self.rect().center()
            )
        self.setWindowFlags(Qt.WindowTitleHint | Qt.Dialog)
        self.version.setText("Version: " + __version__)
        # Extend the copyright year range ("2016" -> "2016-<current year>") when
        # the current year is past 2016.
        copyright_text = self.copyright.text()
        new_year = "2016-" + str(datetime.now().year) if datetime.now().year != 2016 else "2016"
        copyright_text = copyright_text.replace("2016", new_year)
        self.copyright.setText(copyright_text)
        self.button.clicked.connect(self.close)


class PreviewMoGui(QWidget, preview_mo.Ui_Form):
    # Signals used by the preview machinery to drive this widget from elsewhere.
    clear_tab_signal = pyqtSignal()
    clear_ui_signal = pyqtSignal()
    invalid_node_signal = pyqtSignal()
    missing_node_signal = pyqtSignal()
    set_labels_signal = pyqtSignal([str, str, str, str])
    create_page_signal = pyqtSignal([object])

    class ScaledLabel(QLabel):
        """A QLabel that rescales its pixmap (keeping aspect ratio) on resize."""
        def __init__(self, parent=None):
            super().__init__(parent)
            # original_pixmap keeps the unscaled source so repeated resizes
            # don't progressively degrade the image.
            self.original_pixmap = None
            self.setMinimumSize(320, 200)

        def set_scalable_pixmap(self, pixmap):
            self.original_pixmap = pixmap
            self.setPixmap(self.original_pixmap.scaled(self.size(), Qt.KeepAspectRatio))

        def resizeEvent(self, event):
            if self.pixmap() and self.original_pixmap:
                self.setPixmap(self.original_pixmap.scaled(event.size(), Qt.KeepAspectRatio))

    class PreviewItem(QStandardItem):
        """A QStandardItem carrying an install-priority used to resolve file conflicts."""
        def set_priority(self, value):
            self.priority = value

    def __init__(self, mo_preview_layout):
        super().__init__()
        self.mo_preview_layout = mo_preview_layout
        self.setupUi(self)
        self.mo_preview_layout.addWidget(self)
        self.label_image = self.ScaledLabel(self)
        self.splitter_label.addWidget(self.label_image)
        self.hide()
        # "More"/"Less" toggle pair for the preview pane: each button hides
        # itself, shows its twin and shows/hides the pane.
        self.button_preview_more.setIcon(QIcon(join(cur_folder, "resources/logos/logo_more.png")))
        self.button_preview_less.setIcon(QIcon(join(cur_folder, "resources/logos/logo_less.png")))
        self.button_preview_more.clicked.connect(self.button_preview_more.hide)
        self.button_preview_more.clicked.connect(self.button_preview_less.show)
        self.button_preview_more.clicked.connect(self.widget_preview.show)
        self.button_preview_less.clicked.connect(self.button_preview_less.hide)
        self.button_preview_less.clicked.connect(self.button_preview_more.show)
        self.button_preview_less.clicked.connect(self.widget_preview.hide)
        # Start with the preview pane expanded.
        self.button_preview_more.clicked.emit()
        # Same toggle pair for the results pane, started collapsed.
        self.button_results_more.setIcon(QIcon(join(cur_folder, "resources/logos/logo_more.png")))
        self.button_results_less.setIcon(QIcon(join(cur_folder, "resources/logos/logo_less.png")))
        self.button_results_more.clicked.connect(self.button_results_more.hide)
        self.button_results_more.clicked.connect(self.button_results_less.show)
        self.button_results_more.clicked.connect(self.widget_results.show)
        self.button_results_less.clicked.connect(self.button_results_less.hide)
        self.button_results_less.clicked.connect(self.button_results_more.show)
        self.button_results_less.clicked.connect(self.widget_results.hide)
        self.button_results_less.clicked.emit()
        # Model behind the installed-files tree; columns stretch on expand/collapse.
        self.model_files = QStandardItemModel()
        self.tree_results.expanded.connect(
            lambda: self.tree_results.header().resizeSections(QHeaderView.Stretch)
        )
        self.tree_results.collapsed.connect(
            lambda: self.tree_results.header().resizeSections(QHeaderView.Stretch)
        )
        self.tree_results.setContextMenuPolicy(Qt.CustomContextMenu)
        self.tree_results.customContextMenuRequested.connect(self.on_custom_context_menu)
        # Model behind the set-flags list.
        self.model_flags = QStandardItemModel()
        self.list_flags.expanded.connect(
            lambda: self.list_flags.header().resizeSections(QHeaderView.Stretch)
        )
        self.list_flags.collapsed.connect(
            lambda: self.list_flags.header().resizeSections(QHeaderView.Stretch)
        )
        self.reset_models()
        # Placeholder labels shown instead of a page when the selected node
        # cannot be previewed (invalid) or no installation step exists (missing).
        self.label_invalid = QLabel(
            "Select an Installation Step node or one of its children to preview its installer page."
        )
        self.label_invalid.setAlignment(Qt.AlignCenter)
        self.mo_preview_layout.addWidget(self.label_invalid)
        self.label_invalid.hide()
        self.label_missing = QLabel(
            "In order to preview an installer page, create an Installation Step node."
        )
        self.label_missing.setAlignment(Qt.AlignCenter)
        self.mo_preview_layout.addWidget(self.label_missing)
        self.label_missing.hide()
        # Wire the class-level signals to their slot implementations below.
        self.clear_tab_signal.connect(self.clear_tab)
        self.clear_ui_signal.connect(self.clear_ui)
        self.invalid_node_signal.connect(self.invalid_node)
        self.missing_node_signal.connect(self.missing_node)
        self.set_labels_signal.connect(self.set_labels)
        self.create_page_signal.connect(self.create_page)

    def on_custom_context_menu(self, position):
        """Show an Expand All / Collapse All context menu on the results tree."""
        node_tree_context_menu = QMenu(self.tree_results)
        action_expand = QAction(QIcon(join(cur_folder, "resources/logos/logo_expand.png")), "Expand All", self)
        action_collapse = QAction(QIcon(join(cur_folder, "resources/logos/logo_collapse.png")), "Collapse All", self)
        action_expand.triggered.connect(self.tree_results.expandAll)
        action_collapse.triggered.connect(self.tree_results.collapseAll)
        node_tree_context_menu.addActions([action_expand, action_collapse])
        node_tree_context_menu.move(self.tree_results.mapToGlobal(position))
        node_tree_context_menu.exec_()

    def eventFilter(self, object_, event):
        """
        On hover over a plugin button, show its description and image
        (read from the dynamic properties set in create_page).
        """
        if event.type() == QEvent.HoverEnter:
            self.label_description.setText(object_.property("description"))
            self.label_image.set_scalable_pixmap(QPixmap(object_.property("image_path")))
        # NOTE(review): this instantiates a throwaway QWidget on every event just
        # to call its eventFilter; super().eventFilter(object_, event) looks like
        # the intended call — confirm before changing.
        return QWidget().eventFilter(object_, event)

    def clear_ui(self):
        """Clear all metadata labels, the image, the page widgets and the models."""
        self.label_name.clear()
        self.label_author.clear()
        self.label_version.clear()
        self.label_website.clear()
        self.label_description.clear()
        self.label_image.clear()
        # Delete every widget currently in the page layout (list comprehension
        # used purely for its deleteLater side effects).
        [widget.deleteLater() for widget in [
            self.layout_widget.itemAt(index).widget() for index in range(self.layout_widget.count())
            if self.layout_widget.itemAt(index).widget()
        ]]
        self.reset_models()

    def reset_models(self):
        """Reset the files tree (with a fresh <root> item) and the flags list."""
        self.model_files.clear()
        self.model_files.setHorizontalHeaderLabels(["Files Preview", "Source", "Plugin"])
        self.model_files_root = QStandardItem(QIcon(join(cur_folder, "resources/logos/logo_folder.png")), "<root>")
        self.model_files.appendRow(self.model_files_root)
        self.tree_results.setModel(self.model_files)
        self.model_flags.clear()
        self.model_flags.setHorizontalHeaderLabels(["Flag Label", "Flag Value", "Plugin"])
        self.list_flags.setModel(self.model_flags)

    def clear_tab(self):
        """Hide every widget in the preview layout."""
        for index in reversed(range(self.mo_preview_layout.count())):
            widget = self.mo_preview_layout.itemAt(index).widget()
            if widget is not None:
                widget.hide()

    def invalid_node(self):
        """Replace the tab contents with the 'invalid node' placeholder."""
        self.clear_tab()
        self.label_invalid.show()

    def missing_node(self):
        """Replace the tab contents with the 'missing installation step' placeholder."""
        self.clear_tab()
        self.label_missing.show()

    def set_labels(self, name, author, version, website):
        """Fill the installer metadata labels; website is rendered as a link."""
        self.label_name.setText(name)
        self.label_author.setText(author)
        self.label_version.setText(version)
        self.label_website.setText("<a href = {}>link</a>".format(website))

    # this is pretty horrendous, need to come up with a better way of doing this.
    def create_page(self, page_data):
        """Build the interactive preview for one installer page.

        Creates a group box per plugin group with check boxes
        (SelectAny/SelectAll/SelectAtLeastOne) or radio buttons
        (SelectExactlyOne/SelectAtMostOne), applies each plugin's type rules,
        and wires every button so toggling refreshes the file/flag previews.

        :param page_data: Object exposing ``name`` and ``group_list``; each
            group exposes ``name``, ``type`` and ``plugin_list``.
        """
        group_step = QGroupBox(page_data.name)
        layout_step = QVBoxLayout()
        group_step.setLayout(layout_step)
        # Only the very first radio button created on this page gets
        # auto-clicked, mirroring the installer's default selection.
        check_first_radio = True
        for group in page_data.group_list:
            group_group = QGroupBox(group.name)
            layout_group = QVBoxLayout()
            group_group.setLayout(layout_group)
            for plugin in group.plugin_list:
                if group.type in ["SelectAny", "SelectAll", "SelectAtLeastOne"]:
                    button_plugin = QCheckBox(plugin.name, self)
                    if group.type == "SelectAll":
                        button_plugin.setChecked(True)
                        button_plugin.setEnabled(False)
                    elif group.type == "SelectAtLeastOne":
                        # Re-check the box if unchecking it would leave no
                        # checked plugin anywhere in this group.
                        # NOTE(review): ``layout_group`` is captured late by
                        # the lambda -- confirm each connection sees its own
                        # group's layout at emit time.
                        button_plugin.toggled.connect(
                            lambda checked, button=button_plugin: button.setChecked(True)
                            if not checked and not [
                                button for button in [
                                    layout_group.itemAt(index).widget()
                                    for index in range(layout_group.count())
                                    if layout_group.itemAt(index).widget()
                                ] if button.isChecked()
                            ] else None
                        )
                elif group.type in ["SelectExactlyOne", "SelectAtMostOne"]:
                    button_plugin = QRadioButton(plugin.name, self)
                    if check_first_radio and not button_plugin.isChecked():
                        button_plugin.animateClick(0)
                        check_first_radio = False
                # Stash the plugin payload as dynamic properties so
                # eventFilter/update_installed_files/update_set_flags can read
                # them back off the button later.
                button_plugin.setProperty("description", plugin.description)
                button_plugin.setProperty("image_path", plugin.image_path)
                button_plugin.setProperty("file_list", plugin.file_list)
                button_plugin.setProperty("folder_list", plugin.folder_list)
                button_plugin.setProperty("flag_list", plugin.flag_list)
                button_plugin.setProperty("type", plugin.type)
                # Enable hover events so eventFilter receives HoverEnter.
                button_plugin.setAttribute(Qt.WA_Hover)
                if plugin.type == "Required":
                    button_plugin.setEnabled(False)
                elif plugin.type == "Recommended":
                    button_plugin.animateClick(0) if not button_plugin.isChecked() else None
                elif plugin.type == "NotUsable":
                    button_plugin.setChecked(False)
                    button_plugin.setEnabled(False)
                # Any toggle rebuilds the models, then the file and flag previews.
                button_plugin.toggled.connect(self.reset_models)
                button_plugin.toggled.connect(self.update_installed_files)
                button_plugin.toggled.connect(self.update_set_flags)
                button_plugin.installEventFilter(self)
                # Object name used by findChildren() in the update_* methods.
                button_plugin.setObjectName("preview_button")
                layout_group.addWidget(button_plugin)
            # SelectAtMostOne needs an explicit "None" choice so the user can
            # deselect the whole radio group.
            if group.type == "SelectAtMostOne":
                button_none = QRadioButton("None")
                layout_group.addWidget(button_none)
            layout_step.addWidget(group_group)
        self.layout_widget.addWidget(group_step)
        # Populate the previews for the initial button states.
        self.reset_models()
        self.update_installed_files()
        self.update_set_flags()
        self.show()

    def update_installed_files(self):
        """Rebuild the files-preview tree from the currently selected plugins.

        Walks every preview button's folder_list/file_list, mirrors the
        destination paths under the "<root>" item, and resolves name clashes
        by priority (lower priority wins an existing row; see PreviewItem).
        """
        def recurse_add_items(folder, parent):
            # Recursively mirror an on-disk source folder under ``parent``,
            # merging with rows that already exist and applying the enclosing
            # folder_'s priority to new/conflicting entries.
            for boop in listdir(folder):  # I was very tired
                if isdir(join(folder, boop)):
                    folder_item = None
                    # Reuse an existing row only if it sits under this parent.
                    existing_folder_ = self.model_files.findItems(boop, Qt.MatchRecursive)
                    if existing_folder_:
                        for boopity in existing_folder_:
                            if boopity.parent() is parent:
                                folder_item = boopity
                                break
                    if not folder_item:
                        folder_item = self.PreviewItem(
                            QIcon(join(cur_folder, "resources/logos/logo_folder.png")), boop
                        )
                        folder_item.set_priority(folder_.priority)
                        parent.appendRow([folder_item, QStandardItem(rel_source), QStandardItem(button.text())])
                    recurse_add_items(join(folder, boop), folder_item)
                elif isfile(join(folder, boop)):
                    file_item_ = None
                    existing_file_ = self.model_files.findItems(boop, Qt.MatchRecursive)
                    if existing_file_:
                        for boopity in existing_file_:
                            if boopity.parent() is parent:
                                if folder_.priority < boopity.priority:
                                    # Existing row has higher priority value;
                                    # keep it and skip creating a new one.
                                    file_item_ = boopity
                                    break
                                else:
                                    # This source overrides the existing row.
                                    parent.removeRow(boopity.row())
                                    break
                    if not file_item_:
                        file_item_ = self.PreviewItem(
                            QIcon(join(cur_folder, "resources/logos/logo_file.png")), boop
                        )
                        file_item_.set_priority(folder_.priority)
                        parent.appendRow([file_item_, QStandardItem(rel_source), QStandardItem(button.text())])

        for button in self.findChildren((QCheckBox, QRadioButton), "preview_button"):
            for folder_ in button.property("folder_list"):
                # Install the folder if the plugin is selected (and usable),
                # always_install/install_usable demands it, or it is Required.
                if (button.isChecked() and button.property("type") != "NotUsable" or
                        folder_.always_install or
                        folder_.install_usable and button.property("type") != "NotUsable" or
                        button.property("type") == "Required"):
                    destination = folder_.destination
                    abs_source = folder_.abs_source
                    rel_source = folder_.rel_source
                    parent_item = self.model_files_root
                    # Walk the destination path segment by segment, reusing
                    # folder rows already in the tree and creating the rest.
                    destination_split = destination.split("/")
                    if destination_split[0] == ".":
                        destination_split = destination_split[1:]
                    for dest_folder in destination_split:
                        existing_folder_list = self.model_files.findItems(dest_folder, Qt.MatchRecursive)
                        if existing_folder_list:
                            for existing_folder in existing_folder_list:
                                if existing_folder.parent() is parent_item:
                                    parent_item = existing_folder
                                    break
                            # NOTE(review): if the name matches elsewhere in
                            # the tree but not under parent_item, no row is
                            # created for this segment -- confirm intended.
                            continue
                        item_ = self.PreviewItem(
                            QIcon(join(cur_folder, "resources/logos/logo_folder.png")), dest_folder
                        )
                        item_.set_priority(folder_.priority)
                        parent_item.appendRow([item_, QStandardItem(), QStandardItem(button.text())])
                        parent_item = item_
                    if isdir(abs_source):
                        recurse_add_items(abs_source, parent_item)

            # Same treatment for the individual files declared by the plugin.
            for file_ in button.property("file_list"):
                if (button.isChecked() and button.property("type") != "NotUsable" or
                        file_.always_install or
                        file_.install_usable and button.property("type") != "NotUsable" or
                        button.property("type") == "Required"):
                    destination = file_.destination
                    abs_source = file_.abs_source
                    rel_source = file_.rel_source
                    parent_item = self.model_files_root
                    destination_split = destination.split("/")
                    if destination_split[0] == ".":
                        destination_split = destination_split[1:]
                    for dest_folder in destination_split:
                        existing_folder_list = self.model_files.findItems(dest_folder, Qt.MatchRecursive)
                        if existing_folder_list:
                            for existing_folder in existing_folder_list:
                                if existing_folder.parent() is parent_item:
                                    parent_item = existing_folder
                                    break
                            continue
                        item_ = self.PreviewItem(
                            QIcon(join(cur_folder, "resources/logos/logo_folder.png")), dest_folder
                        )
                        item_.set_priority(file_.priority)
                        parent_item.appendRow([item_, QStandardItem(), QStandardItem(button.text())])
                        parent_item = item_
                    # Last path component of the absolute source is the file name.
                    source_file = abs_source.split("/")[len(abs_source.split("/")) - 1]
                    file_item = None
                    existing_file_list = self.model_files.findItems(source_file, Qt.MatchRecursive)
                    if existing_file_list:
                        for existing_file in existing_file_list:
                            if existing_file.parent() is parent_item:
                                if file_.priority < existing_file.priority:
                                    # Keep the existing higher-priority row.
                                    file_item = existing_file
                                    break
                                else:
                                    # This file overrides the existing row.
                                    parent_item.removeRow(existing_file.row())
                                    break
                    if not file_item:
                        file_item = self.PreviewItem(
                            QIcon(join(cur_folder, "resources/logos/logo_file.png")), source_file
                        )
                        file_item.set_priority(file_.priority)
                        parent_item.appendRow([file_item, QStandardItem(rel_source), QStandardItem(button.text())])
        self.tree_results.header().resizeSections(QHeaderView.Stretch)

    def update_set_flags(self):
        """Rebuild the flags model from every checked preview button.

        A later plugin that sets an already-listed flag label replaces that
        row in place (same position, new value/plugin).
        """
        for button in self.findChildren((QCheckBox, QRadioButton), "preview_button"):
            if button.isChecked():
                for flag in button.property("flag_list"):
                    flag_label = QStandardItem(flag.label)
                    flag_value = QStandardItem(flag.value)
                    flag_plugin = QStandardItem(button.text())
                    existing_flag = self.model_flags.findItems(flag.label)
                    if existing_flag:
                        previous_flag_row = existing_flag[0].row()
                        self.model_flags.removeRow(previous_flag_row)
                        self.model_flags.insertRow(previous_flag_row, [flag_label, flag_value, flag_plugin])
                    else:
                        self.model_flags.appendRow([flag_label, flag_value, flag_plugin])
        self.list_flags.header().resizeSections(QHeaderView.Stretch)


class DefaultsSettings(object):
    """One default-able setting: a property key, an enabled switch, and a value."""

    def __init__(self, key, default_enabled, default_value):
        # Name-mangled attributes; accessed only through the methods below.
        self.__enabled = default_enabled
        self.__property_key = key
        self.__property_value = default_value

    def __eq__(self, other):
        # Equal when enabled flag, value and key all match.
        if self.enabled() == other.enabled() and self.value() == other.value() and self.key() == other.key():
            return True
        else:
            return False

    def set_enabled(self, enabled):
        self.__enabled = enabled

    def set_value(self, value):
        self.__property_value = value

    def enabled(self):
        return self.__enabled

    def value(self):
        return self.__property_value

    def key(self):
        return self.__property_key


# Baseline configuration; read_settings() deep-merges the user's file on top,
# accepting only keys (and value types) that already exist here.
default_settings = {
    "General": {
        "code_refresh": 3,
        "show_intro": True,
        "show_advanced": False,
        "tutorial_advanced": True,
    },
    "Appearance": {
        "required_colour": "#ba4d0e",
        "atleastone_colour": "#d0d02e",
        "either_colour": "#ffaa7f",
        "style": "",
        "palette": "",
    },
    "Defaults": {
        "installSteps":
            DefaultsSettings("order", True, "Explicit"),
        "optionalFileGroups": DefaultsSettings("order", True, "Explicit"),
        "type": DefaultsSettings("name", True, "Optional"),
        "defaultType": DefaultsSettings("name", True, "Optional"),
    },
    "Load": {
        "validate": True,
        "validate_ignore": False,
        "warnings": True,
        "warn_ignore": True,
    },
    "Save": {
        "validate": True,
        "validate_ignore": False,
        "warnings": True,
        "warn_ignore": True,
    },
    # Most-recently-used file paths; bounded so the oldest entry falls off.
    "Recent Files": deque(maxlen=5),
}


def generic_errorbox(title, text, detail=""):
    """
    A function that creates a generic errorbox with the logo_admin.png logo.

    :param title: A string containing the title of the errorbox.
    :param text: A string containing the text of the errorbox.
    :param detail: Optional. A string containing the detail text of the errorbox.
    :return: The configured (but not yet shown) QMessageBox.
    """
    errorbox = QMessageBox()
    errorbox.setText(text)
    errorbox.setWindowTitle(title)
    errorbox.setDetailedText(detail)
    errorbox.setIconPixmap(QPixmap(join(cur_folder, "resources/logos/logo_admin.png")))
    return errorbox


def read_settings():
    """
    Reads the settings from the ~/.fomod/.designer file. If such a file does
    not exist it uses the default settings. The settings are processed to be
    ready to be used in Python code.

    :return: The processed settings.
    """
    def deep_merge(a, b, path=None):
        """merges b into a"""
        if path is None:
            path = []
        for key in b:
            if key in a:  # only accept the keys in default settings
                if isinstance(a[key], dict) and isinstance(b[key], dict):
                    deep_merge(a[key], b[key], path + [str(key)])
                elif isinstance(b[key], type(a[key])):
                    # Same type as the default -> accept the user's value.
                    a[key] = b[key]
                else:
                    pass  # user has messed with conf files
        return a

    try:
        with open(join(expanduser("~"), ".fomod", ".designer"), "r") as configfile:
            # ``decode`` is imported elsewhere in this file; the except clause
            # implies it raises JSONDecodeError, i.e. a JSON deserializer.
            settings_dict = decode(configfile.read())
            deep_merge(default_settings, settings_dict)
            return default_settings
    except (FileNotFoundError, JSONDecodeError):
        # Missing or corrupt config file -> fall back to the defaults.
        return default_settings
codeparrot/github-code-clean
import re import collections from enum import Enum from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION from ydk.errors import YPYError, YPYModelError from ydk.providers._importer import _yang_ns _meta_table = { 'GroupEnum' : _MetaInfoEnum('GroupEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', { 'address-family-group':'ADDRESS_FAMILY_GROUP', 'session-group':'SESSION_GROUP', 'neighbor-group':'NEIGHBOR_GROUP', 'neighbor':'NEIGHBOR', 'error-group':'ERROR_GROUP', }, 'Cisco-IOS-XR-policy-repository-oper', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper']), 'AttachPointDirectionEnum' : _MetaInfoEnum('AttachPointDirectionEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', { 'in':'IN', 'out':'OUT', }, 'Cisco-IOS-XR-policy-repository-oper', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper']), 'SubAddressFamilyEnum' : _MetaInfoEnum('SubAddressFamilyEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', { 'unicast':'UNICAST', 'multicast':'MULTICAST', 'label':'LABEL', 'tunnel':'TUNNEL', 'vpn':'VPN', 'mdt':'MDT', 'vpls':'VPLS', 'rt-constraint':'RT_CONSTRAINT', 'mvpn':'MVPN', 'flow':'FLOW', 'vpn-mcast':'VPN_MCAST', 'saf-none':'SAF_NONE', 'saf-unknown':'SAF_UNKNOWN', }, 'Cisco-IOS-XR-policy-repository-oper', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper']), 'AddressFamilyEnum' : _MetaInfoEnum('AddressFamilyEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', { 'ipv4':'IPV4', 'ipv6':'IPV6', 'l2vpn':'L2VPN', 'af-none':'AF_NONE', 'af-unknown':'AF_UNKNOWN', 'ls':'LS', }, 'Cisco-IOS-XR-policy-repository-oper', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper']), 'ObjectStatusEnum' : 
_MetaInfoEnum('ObjectStatusEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', { 'active':'ACTIVE', 'inactive':'INACTIVE', 'unused':'UNUSED', }, 'Cisco-IOS-XR-policy-repository-oper', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper']), 'RoutingPolicy.Limits' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Limits', False, [ _MetaInfoClassMember('compiled-policies-length', ATTRIBUTE, 'int' , None, None, [(0, 4294967295)], [], ''' The total compiled length of all policies ''', 'compiled_policies_length', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('current-lines-of-policy-limit', ATTRIBUTE, 'int' , None, None, [(0, 4294967295)], [], ''' Number of lines of configuration for policies/sets currently allowed ''', 'current_lines_of_policy_limit', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('current-lines-of-policy-used', ATTRIBUTE, 'int' , None, None, [(0, 4294967295)], [], ''' Current number of lines configured for all policies and sets ''', 'current_lines_of_policy_used', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('current-number-of-policies-limit', ATTRIBUTE, 'int' , None, None, [(0, 4294967295)], [], ''' Number of policies currently allowed ''', 'current_number_of_policies_limit', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('current-number-of-policies-used', ATTRIBUTE, 'int' , None, None, [(0, 4294967295)], [], ''' Current number of policies configured ''', 'current_number_of_policies_used', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('maximum-lines-of-policy', ATTRIBUTE, 'int' , None, None, [(0, 4294967295)], [], ''' Maximum lines of configuration allowable for all policies and sets ''', 'maximum_lines_of_policy', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('maximum-number-of-policies', ATTRIBUTE, 'int' , None, None, [(0, 4294967295)], [], ''' Maximum number of policies allowable ''', 
'maximum_number_of_policies', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'limits', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.DirectlyUsedPolicies' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.DirectlyUsedPolicies', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'directly-used-policies', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.AllUsedSets.Sets' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.AllUsedSets.Sets', False, [ _MetaInfoClassMember('set-domain', ATTRIBUTE, 'str' , None, None, [], [], ''' Domain of sets ''', 'set_domain', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('set-name', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Names of sets in this domain ''', 'set_name', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'sets', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.AllUsedSets' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.AllUsedSets', False, [ _MetaInfoClassMember('sets', REFERENCE_LIST, 'Sets' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.AllUsedSets.Sets', [], [], ''' List of sets in several domains ''', 'sets', 
'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'all-used-sets', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.DirectlyUsedSets.Sets' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.DirectlyUsedSets.Sets', False, [ _MetaInfoClassMember('set-domain', ATTRIBUTE, 'str' , None, None, [], [], ''' Domain of sets ''', 'set_domain', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('set-name', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Names of sets in this domain ''', 'set_name', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'sets', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.DirectlyUsedSets' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.DirectlyUsedSets', False, [ _MetaInfoClassMember('sets', REFERENCE_LIST, 'Sets' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.DirectlyUsedSets.Sets', [], [], ''' List of sets in several domains ''', 'sets', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'directly-used-sets', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.AllUsedPolicies' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.AllUsedPolicies', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 
'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'all-used-policies', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses', False, [ _MetaInfoClassMember('all-used-policies', REFERENCE_CLASS, 'AllUsedPolicies' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.AllUsedPolicies', [], [], ''' Policies used by this policy, or by policies that it uses ''', 'all_used_policies', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('all-used-sets', REFERENCE_CLASS, 'AllUsedSets' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.AllUsedSets', [], [], ''' Sets used by this policy, or by policies that it uses ''', 'all_used_sets', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('directly-used-policies', REFERENCE_CLASS, 'DirectlyUsedPolicies' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.DirectlyUsedPolicies', [], [], ''' Policies that this policy uses directly ''', 'directly_used_policies', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('directly-used-sets', REFERENCE_CLASS, 'DirectlyUsedSets' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses.DirectlyUsedSets', [], [], ''' Sets that this policy uses directly ''', 'directly_used_sets', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'policy-uses', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.UsedBy.Reference' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.RoutePolicies.RoutePolicy.UsedBy.Reference', False, [ _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of policy ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'ObjectStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'ObjectStatusEnum', [], [], ''' Active, Inactive, or Unused ''', 'status', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('used-directly', ATTRIBUTE, 'bool' , None, None, [], [], ''' Whether the policy uses this object directly or indirectly ''', 'used_directly', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'reference', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.UsedBy' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.RoutePolicies.RoutePolicy.UsedBy', False, [ _MetaInfoClassMember('reference', REFERENCE_LIST, 'Reference' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.UsedBy.Reference', [], [], ''' Information about policies referring to this object ''', 'reference', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'used-by', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.Attached.Binding' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.RoutePolicies.RoutePolicy.Attached.Binding', False, [ _MetaInfoClassMember('af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum', [], [], ''' Address Family Identifier ''', 'af_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('aggregate-network-address', ATTRIBUTE, 'str' , None, None, [], [], ''' Aggregate IP address or Network IP Address in IPv4 or IPv6 Format ''', 'aggregate_network_address', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('area-id', ATTRIBUTE, 'str' , None, None, [], [], ''' OSPF Area ID in Decimal Integer Format ''', 'area_id', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('attach-point', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of attach point where policy is attached ''', 'attach_point', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('attached-policy', ATTRIBUTE, 'str' , None, None, [], [], ''' The attached policy that (maybe indirectly) uses the object in question ''', 'attached_policy', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AttachPointDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AttachPointDirectionEnum', [], [], ''' Direction In or Out ''', 'direction', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'GroupEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'GroupEnum', [], [], ''' Neighbor Group ''', 'group', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('group-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Neighbor Group Name ''', 'group_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('instance', ATTRIBUTE, 'str' , None, None, [], [], ''' Instance ''', 'instance', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Interface Name ''', 'interface_name', 'Cisco-IOS-XR-policy-repository-oper', 
False), _MetaInfoClassMember('neighbor-address', ATTRIBUTE, 'str' , None, None, [], [], ''' Neighbor IP Address ''', 'neighbor_address', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('neighbor-af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum', [], [], ''' Neighbor IP Address Family ''', 'neighbor_af_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('propogate-from', ATTRIBUTE, 'int' , None, None, [(-2147483648, 2147483647)], [], ''' ISIS Propogate From Level ''', 'propogate_from', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('propogate-to', ATTRIBUTE, 'int' , None, None, [(-2147483648, 2147483647)], [], ''' ISIS Propogate To Level ''', 'propogate_to', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('proto-instance', ATTRIBUTE, 'str' , None, None, [], [], ''' Protocol instance ''', 'proto_instance', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('protocol', ATTRIBUTE, 'str' , None, None, [], [], ''' Protocol to which policy attached ''', 'protocol', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Policy that uses object in question ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('saf-name', REFERENCE_ENUM_CLASS, 'SubAddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'SubAddressFamilyEnum', [], [], ''' Subsequent Address Family Identifier ''', 'saf_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('source-protocol', ATTRIBUTE, 'str' , None, None, [], [], ''' Source Protocol to redistribute, Source Protocol can be one of the following values {all, connected, local, static, bgp, rip, isis, ospf, ospfv3, eigrp, unknown } ''', 'source_protocol', 'Cisco-IOS-XR-policy-repository-oper', False), 
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None, [], [], ''' VRF name ''', 'vrf_name', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'binding', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.Attached' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.RoutePolicies.RoutePolicy.Attached', False, [ _MetaInfoClassMember('binding', REFERENCE_LIST, 'Binding' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.Attached.Binding', [], [], ''' bindings list ''', 'binding', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'attached', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.RoutePolicies.RoutePolicy', False, [ _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'], ''' Route policy name ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', True), _MetaInfoClassMember('attached', REFERENCE_CLASS, 'Attached' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.Attached', [], [], ''' Information about where this policy or set is attached ''', 'attached', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('policy-uses', REFERENCE_CLASS, 'PolicyUses' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.PolicyUses', [], [], ''' Information about which policies and sets this policy uses ''', 'policy_uses', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('used-by', 
REFERENCE_CLASS, 'UsedBy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy.UsedBy', [], [], ''' Policies that use this object, directly or indirectly ''', 'used_by', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'route-policy', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Policies.RoutePolicies' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.RoutePolicies', False, [ _MetaInfoClassMember('route-policy', REFERENCE_LIST, 'RoutePolicy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.RoutePolicies.RoutePolicy', [], [], ''' Information about an individual policy ''', 'route_policy', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'route-policies', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Policies.Unused' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.Unused', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'unused', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Policies.Inactive' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.Inactive', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'inactive', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 
'RoutingPolicy.Policies.Active' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Policies.Active', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'active', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Policies' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Policies', False, [ _MetaInfoClassMember('active', REFERENCE_CLASS, 'Active' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.Active', [], [], ''' All objects of a given type that are attached to a protocol ''', 'active', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('inactive', REFERENCE_CLASS, 'Inactive' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.Inactive', [], [], ''' All objects of a given type that are not attached to a protocol ''', 'inactive', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('route-policies', REFERENCE_CLASS, 'RoutePolicies' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.RoutePolicies', [], [], ''' Information about individual policies ''', 'route_policies', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('unused', REFERENCE_CLASS, 'Unused' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Policies.Unused', [], [], ''' All objects of a given type that are not referenced at all ''', 'unused', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'policies', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.OspfArea.Sets.Set.UsedBy.Reference' : { 'meta_info' : 
_MetaInfoClass('RoutingPolicy.Sets.OspfArea.Sets.Set.UsedBy.Reference', False, [ _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of policy ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'ObjectStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'ObjectStatusEnum', [], [], ''' Active, Inactive, or Unused ''', 'status', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('used-directly', ATTRIBUTE, 'bool' , None, None, [], [], ''' Whether the policy uses this object directly or indirectly ''', 'used_directly', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'reference', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.OspfArea.Sets.Set.UsedBy' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.OspfArea.Sets.Set.UsedBy', False, [ _MetaInfoClassMember('reference', REFERENCE_LIST, 'Reference' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.OspfArea.Sets.Set.UsedBy.Reference', [], [], ''' Information about policies referring to this object ''', 'reference', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'used-by', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.OspfArea.Sets.Set.Attached.Binding' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.OspfArea.Sets.Set.Attached.Binding', False, [ _MetaInfoClassMember('af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum', [], [], ''' Address Family Identifier ''', 'af_name', 'Cisco-IOS-XR-policy-repository-oper', False), 
_MetaInfoClassMember('aggregate-network-address', ATTRIBUTE, 'str' , None, None, [], [], ''' Aggregate IP address or Network IP Address in IPv4 or IPv6 Format ''', 'aggregate_network_address', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('area-id', ATTRIBUTE, 'str' , None, None, [], [], ''' OSPF Area ID in Decimal Integer Format ''', 'area_id', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('attach-point', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of attach point where policy is attached ''', 'attach_point', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('attached-policy', ATTRIBUTE, 'str' , None, None, [], [], ''' The attached policy that (maybe indirectly) uses the object in question ''', 'attached_policy', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AttachPointDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AttachPointDirectionEnum', [], [], ''' Direction In or Out ''', 'direction', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'GroupEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'GroupEnum', [], [], ''' Neighbor Group ''', 'group', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('group-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Neighbor Group Name ''', 'group_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('instance', ATTRIBUTE, 'str' , None, None, [], [], ''' Instance ''', 'instance', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Interface Name ''', 'interface_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('neighbor-address', ATTRIBUTE, 'str' , None, None, [], [], ''' Neighbor IP Address ''', 'neighbor_address', 'Cisco-IOS-XR-policy-repository-oper', False), 
_MetaInfoClassMember('neighbor-af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum', [], [], ''' Neighbor IP Address Family ''', 'neighbor_af_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('propogate-from', ATTRIBUTE, 'int' , None, None, [(-2147483648, 2147483647)], [], ''' ISIS Propogate From Level ''', 'propogate_from', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('propogate-to', ATTRIBUTE, 'int' , None, None, [(-2147483648, 2147483647)], [], ''' ISIS Propogate To Level ''', 'propogate_to', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('proto-instance', ATTRIBUTE, 'str' , None, None, [], [], ''' Protocol instance ''', 'proto_instance', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('protocol', ATTRIBUTE, 'str' , None, None, [], [], ''' Protocol to which policy attached ''', 'protocol', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Policy that uses object in question ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('saf-name', REFERENCE_ENUM_CLASS, 'SubAddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'SubAddressFamilyEnum', [], [], ''' Subsequent Address Family Identifier ''', 'saf_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('source-protocol', ATTRIBUTE, 'str' , None, None, [], [], ''' Source Protocol to redistribute, Source Protocol can be one of the following values {all, connected, local, static, bgp, rip, isis, ospf, ospfv3, eigrp, unknown } ''', 'source_protocol', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None, [], [], ''' VRF name ''', 'vrf_name', 'Cisco-IOS-XR-policy-repository-oper', False), ], 
'Cisco-IOS-XR-policy-repository-oper', 'binding', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.OspfArea.Sets.Set.Attached' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.OspfArea.Sets.Set.Attached', False, [ _MetaInfoClassMember('binding', REFERENCE_LIST, 'Binding' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.OspfArea.Sets.Set.Attached.Binding', [], [], ''' bindings list ''', 'binding', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'attached', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.OspfArea.Sets.Set' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.OspfArea.Sets.Set', False, [ _MetaInfoClassMember('set-name', ATTRIBUTE, 'str' , None, None, [], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'], ''' Set name ''', 'set_name', 'Cisco-IOS-XR-policy-repository-oper', True), _MetaInfoClassMember('attached', REFERENCE_CLASS, 'Attached' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.OspfArea.Sets.Set.Attached', [], [], ''' Information about where this policy or set is attached ''', 'attached', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('used-by', REFERENCE_CLASS, 'UsedBy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.OspfArea.Sets.Set.UsedBy', [], [], ''' Policies that use this object, directly or indirectly ''', 'used_by', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'set', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.OspfArea.Sets' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.OspfArea.Sets', False, [ _MetaInfoClassMember('set', 
REFERENCE_LIST, 'Set' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.OspfArea.Sets.Set', [], [], ''' Information about an individual set ''', 'set', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'sets', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.OspfArea.Unused' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.OspfArea.Unused', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'unused', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.OspfArea.Inactive' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.OspfArea.Inactive', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'inactive', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.OspfArea.Active' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.OspfArea.Active', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'active', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.OspfArea' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.OspfArea', False, [ _MetaInfoClassMember('active', REFERENCE_CLASS, 'Active' , 
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.OspfArea.Active', [], [], ''' All objects of a given type that are attached to a protocol ''', 'active', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('inactive', REFERENCE_CLASS, 'Inactive' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.OspfArea.Inactive', [], [], ''' All objects of a given type that are not attached to a protocol ''', 'inactive', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('sets', REFERENCE_CLASS, 'Sets' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.OspfArea.Sets', [], [], ''' Information about individual sets ''', 'sets', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('unused', REFERENCE_CLASS, 'Unused' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.OspfArea.Unused', [], [], ''' All objects of a given type that are not referenced at all ''', 'unused', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'ospf-area', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets.Set.UsedBy.Reference' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets.Set.UsedBy.Reference', False, [ _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of policy ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'ObjectStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'ObjectStatusEnum', [], [], ''' Active, Inactive, or Unused ''', 'status', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('used-directly', ATTRIBUTE, 'bool' , None, None, [], [], ''' Whether the policy 
uses this object directly or indirectly ''', 'used_directly', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'reference', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets.Set.UsedBy' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets.Set.UsedBy', False, [ _MetaInfoClassMember('reference', REFERENCE_LIST, 'Reference' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets.Set.UsedBy.Reference', [], [], ''' Information about policies referring to this object ''', 'reference', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'used-by', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets.Set.Attached.Binding' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets.Set.Attached.Binding', False, [ _MetaInfoClassMember('af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum', [], [], ''' Address Family Identifier ''', 'af_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('aggregate-network-address', ATTRIBUTE, 'str' , None, None, [], [], ''' Aggregate IP address or Network IP Address in IPv4 or IPv6 Format ''', 'aggregate_network_address', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('area-id', ATTRIBUTE, 'str' , None, None, [], [], ''' OSPF Area ID in Decimal Integer Format ''', 'area_id', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('attach-point', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of attach point where policy is attached ''', 'attach_point', 
'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('attached-policy', ATTRIBUTE, 'str' , None, None, [], [], ''' The attached policy that (maybe indirectly) uses the object in question ''', 'attached_policy', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AttachPointDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AttachPointDirectionEnum', [], [], ''' Direction In or Out ''', 'direction', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'GroupEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'GroupEnum', [], [], ''' Neighbor Group ''', 'group', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('group-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Neighbor Group Name ''', 'group_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('instance', ATTRIBUTE, 'str' , None, None, [], [], ''' Instance ''', 'instance', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Interface Name ''', 'interface_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('neighbor-address', ATTRIBUTE, 'str' , None, None, [], [], ''' Neighbor IP Address ''', 'neighbor_address', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('neighbor-af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum', [], [], ''' Neighbor IP Address Family ''', 'neighbor_af_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('propogate-from', ATTRIBUTE, 'int' , None, None, [(-2147483648, 2147483647)], [], ''' ISIS Propogate From Level ''', 'propogate_from', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('propogate-to', ATTRIBUTE, 'int' , None, None, [(-2147483648, 
2147483647)], [], ''' ISIS Propogate To Level ''', 'propogate_to', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('proto-instance', ATTRIBUTE, 'str' , None, None, [], [], ''' Protocol instance ''', 'proto_instance', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('protocol', ATTRIBUTE, 'str' , None, None, [], [], ''' Protocol to which policy attached ''', 'protocol', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Policy that uses object in question ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('saf-name', REFERENCE_ENUM_CLASS, 'SubAddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'SubAddressFamilyEnum', [], [], ''' Subsequent Address Family Identifier ''', 'saf_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('source-protocol', ATTRIBUTE, 'str' , None, None, [], [], ''' Source Protocol to redistribute, Source Protocol can be one of the following values {all, connected, local, static, bgp, rip, isis, ospf, ospfv3, eigrp, unknown } ''', 'source_protocol', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None, [], [], ''' VRF name ''', 'vrf_name', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'binding', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets.Set.Attached' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets.Set.Attached', False, [ _MetaInfoClassMember('binding', REFERENCE_LIST, 'Binding' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets.Set.Attached.Binding', [], [], ''' bindings list ''', 'binding', 
'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'attached', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets.Set' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets.Set', False, [ _MetaInfoClassMember('set-name', ATTRIBUTE, 'str' , None, None, [], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'], ''' Set name ''', 'set_name', 'Cisco-IOS-XR-policy-repository-oper', True), _MetaInfoClassMember('attached', REFERENCE_CLASS, 'Attached' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets.Set.Attached', [], [], ''' Information about where this policy or set is attached ''', 'attached', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('used-by', REFERENCE_CLASS, 'UsedBy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets.Set.UsedBy', [], [], ''' Policies that use this object, directly or indirectly ''', 'used_by', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'set', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets', False, [ _MetaInfoClassMember('set', REFERENCE_LIST, 'Set' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets.Set', [], [], ''' Information about an individual set ''', 'set', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'sets', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 
'RoutingPolicy.Sets.ExtendedCommunityOpaque.Unused' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityOpaque.Unused', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'unused', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Inactive' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityOpaque.Inactive', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'inactive', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Active' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityOpaque.Active', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'active', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunityOpaque' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunityOpaque', False, [ _MetaInfoClassMember('active', REFERENCE_CLASS, 'Active' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Active', [], [], ''' All objects of a given type that are attached to a protocol ''', 'active', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('inactive', REFERENCE_CLASS, 'Inactive' , 
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Inactive', [], [], ''' All objects of a given type that are not attached to a protocol ''', 'inactive', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('sets', REFERENCE_CLASS, 'Sets' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Sets', [], [], ''' Information about individual sets ''', 'sets', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('unused', REFERENCE_CLASS, 'Unused' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunityOpaque.Unused', [], [], ''' All objects of a given type that are not referenced at all ''', 'unused', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'extended-community-opaque', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets.Set.UsedBy.Reference' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets.Set.UsedBy.Reference', False, [ _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of policy ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'ObjectStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'ObjectStatusEnum', [], [], ''' Active, Inactive, or Unused ''', 'status', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('used-directly', ATTRIBUTE, 'bool' , None, None, [], [], ''' Whether the policy uses this object directly or indirectly ''', 'used_directly', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'reference', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets.Set.UsedBy' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets.Set.UsedBy', False, [ _MetaInfoClassMember('reference', REFERENCE_LIST, 'Reference' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets.Set.UsedBy.Reference', [], [], ''' Information about policies referring to this object ''', 'reference', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'used-by', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets.Set.Attached.Binding' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets.Set.Attached.Binding', False, [ _MetaInfoClassMember('af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum', [], [], ''' Address Family Identifier ''', 'af_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('aggregate-network-address', ATTRIBUTE, 'str' , None, None, [], [], ''' Aggregate IP address or Network IP Address in IPv4 or IPv6 Format ''', 'aggregate_network_address', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('area-id', ATTRIBUTE, 'str' , None, None, [], [], ''' OSPF Area ID in Decimal Integer Format ''', 'area_id', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('attach-point', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of attach point where policy is attached ''', 'attach_point', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('attached-policy', ATTRIBUTE, 'str' , None, None, [], [], ''' The attached policy that (maybe indirectly) uses the object in question ''', 'attached_policy', 
'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AttachPointDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AttachPointDirectionEnum', [], [], ''' Direction In or Out ''', 'direction', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'GroupEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'GroupEnum', [], [], ''' Neighbor Group ''', 'group', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('group-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Neighbor Group Name ''', 'group_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('instance', ATTRIBUTE, 'str' , None, None, [], [], ''' Instance ''', 'instance', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Interface Name ''', 'interface_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('neighbor-address', ATTRIBUTE, 'str' , None, None, [], [], ''' Neighbor IP Address ''', 'neighbor_address', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('neighbor-af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum', [], [], ''' Neighbor IP Address Family ''', 'neighbor_af_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('propogate-from', ATTRIBUTE, 'int' , None, None, [(-2147483648, 2147483647)], [], ''' ISIS Propogate From Level ''', 'propogate_from', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('propogate-to', ATTRIBUTE, 'int' , None, None, [(-2147483648, 2147483647)], [], ''' ISIS Propogate To Level ''', 'propogate_to', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('proto-instance', ATTRIBUTE, 'str' , None, None, [], [], ''' Protocol instance ''', 
'proto_instance', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('protocol', ATTRIBUTE, 'str' , None, None, [], [], ''' Protocol to which policy attached ''', 'protocol', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Policy that uses object in question ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('saf-name', REFERENCE_ENUM_CLASS, 'SubAddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'SubAddressFamilyEnum', [], [], ''' Subsequent Address Family Identifier ''', 'saf_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('source-protocol', ATTRIBUTE, 'str' , None, None, [], [], ''' Source Protocol to redistribute, Source Protocol can be one of the following values {all, connected, local, static, bgp, rip, isis, ospf, ospfv3, eigrp, unknown } ''', 'source_protocol', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None, [], [], ''' VRF name ''', 'vrf_name', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'binding', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets.Set.Attached' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets.Set.Attached', False, [ _MetaInfoClassMember('binding', REFERENCE_LIST, 'Binding' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets.Set.Attached.Binding', [], [], ''' bindings list ''', 'binding', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'attached', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 
'RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets.Set' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets.Set', False, [ _MetaInfoClassMember('set-name', ATTRIBUTE, 'str' , None, None, [], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'], ''' Set name ''', 'set_name', 'Cisco-IOS-XR-policy-repository-oper', True), _MetaInfoClassMember('attached', REFERENCE_CLASS, 'Attached' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets.Set.Attached', [], [], ''' Information about where this policy or set is attached ''', 'attached', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('used-by', REFERENCE_CLASS, 'UsedBy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets.Set.UsedBy', [], [], ''' Policies that use this object, directly or indirectly ''', 'used_by', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'set', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets', False, [ _MetaInfoClassMember('set', REFERENCE_LIST, 'Set' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets.Set', [], [], ''' Information about an individual set ''', 'set', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'sets', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Unused' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySegNh.Unused', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 
'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'unused', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Inactive' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySegNh.Inactive', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'inactive', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Active' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySegNh.Active', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'active', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunitySegNh' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySegNh', False, [ _MetaInfoClassMember('active', REFERENCE_CLASS, 'Active' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Active', [], [], ''' All objects of a given type that are attached to a protocol ''', 'active', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('inactive', REFERENCE_CLASS, 'Inactive' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Inactive', [], [], ''' All objects of a given type that are not attached to a protocol ''', 'inactive', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('sets', 
REFERENCE_CLASS, 'Sets' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Sets', [], [], ''' Information about individual sets ''', 'sets', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('unused', REFERENCE_CLASS, 'Unused' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySegNh.Unused', [], [], ''' All objects of a given type that are not referenced at all ''', 'unused', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'extended-community-seg-nh', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunitySoo.Sets.Set.UsedBy.Reference' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySoo.Sets.Set.UsedBy.Reference', False, [ _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of policy ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'ObjectStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'ObjectStatusEnum', [], [], ''' Active, Inactive, or Unused ''', 'status', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('used-directly', ATTRIBUTE, 'bool' , None, None, [], [], ''' Whether the policy uses this object directly or indirectly ''', 'used_directly', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'reference', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunitySoo.Sets.Set.UsedBy' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySoo.Sets.Set.UsedBy', False, [ _MetaInfoClassMember('reference', REFERENCE_LIST, 'Reference' , 
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySoo.Sets.Set.UsedBy.Reference', [], [], ''' Information about policies referring to this object ''', 'reference', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'used-by', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunitySoo.Sets.Set.Attached.Binding' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySoo.Sets.Set.Attached.Binding', False, [ _MetaInfoClassMember('af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum', [], [], ''' Address Family Identifier ''', 'af_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('aggregate-network-address', ATTRIBUTE, 'str' , None, None, [], [], ''' Aggregate IP address or Network IP Address in IPv4 or IPv6 Format ''', 'aggregate_network_address', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('area-id', ATTRIBUTE, 'str' , None, None, [], [], ''' OSPF Area ID in Decimal Integer Format ''', 'area_id', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('attach-point', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of attach point where policy is attached ''', 'attach_point', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('attached-policy', ATTRIBUTE, 'str' , None, None, [], [], ''' The attached policy that (maybe indirectly) uses the object in question ''', 'attached_policy', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AttachPointDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AttachPointDirectionEnum', [], [], ''' Direction In or Out ''', 'direction', 'Cisco-IOS-XR-policy-repository-oper', False), 
_MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'GroupEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'GroupEnum', [], [], ''' Neighbor Group ''', 'group', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('group-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Neighbor Group Name ''', 'group_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('instance', ATTRIBUTE, 'str' , None, None, [], [], ''' Instance ''', 'instance', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Interface Name ''', 'interface_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('neighbor-address', ATTRIBUTE, 'str' , None, None, [], [], ''' Neighbor IP Address ''', 'neighbor_address', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('neighbor-af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum', [], [], ''' Neighbor IP Address Family ''', 'neighbor_af_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('propogate-from', ATTRIBUTE, 'int' , None, None, [(-2147483648, 2147483647)], [], ''' ISIS Propogate From Level ''', 'propogate_from', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('propogate-to', ATTRIBUTE, 'int' , None, None, [(-2147483648, 2147483647)], [], ''' ISIS Propogate To Level ''', 'propogate_to', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('proto-instance', ATTRIBUTE, 'str' , None, None, [], [], ''' Protocol instance ''', 'proto_instance', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('protocol', ATTRIBUTE, 'str' , None, None, [], [], ''' Protocol to which policy attached ''', 'protocol', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], [], ''' 
Policy that uses object in question ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('saf-name', REFERENCE_ENUM_CLASS, 'SubAddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'SubAddressFamilyEnum', [], [], ''' Subsequent Address Family Identifier ''', 'saf_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('source-protocol', ATTRIBUTE, 'str' , None, None, [], [], ''' Source Protocol to redistribute, Source Protocol can be one of the following values {all, connected, local, static, bgp, rip, isis, ospf, ospfv3, eigrp, unknown } ''', 'source_protocol', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None, [], [], ''' VRF name ''', 'vrf_name', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'binding', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunitySoo.Sets.Set.Attached' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySoo.Sets.Set.Attached', False, [ _MetaInfoClassMember('binding', REFERENCE_LIST, 'Binding' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySoo.Sets.Set.Attached.Binding', [], [], ''' bindings list ''', 'binding', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'attached', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunitySoo.Sets.Set' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySoo.Sets.Set', False, [ _MetaInfoClassMember('set-name', ATTRIBUTE, 'str' , None, None, [], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'], ''' Set name ''', 'set_name', 'Cisco-IOS-XR-policy-repository-oper', True), 
_MetaInfoClassMember('attached', REFERENCE_CLASS, 'Attached' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySoo.Sets.Set.Attached', [], [], ''' Information about where this policy or set is attached ''', 'attached', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('used-by', REFERENCE_CLASS, 'UsedBy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySoo.Sets.Set.UsedBy', [], [], ''' Policies that use this object, directly or indirectly ''', 'used_by', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'set', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunitySoo.Sets' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySoo.Sets', False, [ _MetaInfoClassMember('set', REFERENCE_LIST, 'Set' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySoo.Sets.Set', [], [], ''' Information about an individual set ''', 'set', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'sets', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunitySoo.Unused' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySoo.Unused', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'unused', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunitySoo.Inactive' : { 'meta_info' : 
_MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySoo.Inactive', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'inactive', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunitySoo.Active' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySoo.Active', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'active', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.ExtendedCommunitySoo' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.ExtendedCommunitySoo', False, [ _MetaInfoClassMember('active', REFERENCE_CLASS, 'Active' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySoo.Active', [], [], ''' All objects of a given type that are attached to a protocol ''', 'active', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('inactive', REFERENCE_CLASS, 'Inactive' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySoo.Inactive', [], [], ''' All objects of a given type that are not attached to a protocol ''', 'inactive', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('sets', REFERENCE_CLASS, 'Sets' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySoo.Sets', [], [], ''' Information about individual sets ''', 'sets', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('unused', REFERENCE_CLASS, 'Unused' , 
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.ExtendedCommunitySoo.Unused', [], [], ''' All objects of a given type that are not referenced at all ''', 'unused', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'extended-community-soo', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Tag.Sets.Set.UsedBy.Reference' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Tag.Sets.Set.UsedBy.Reference', False, [ _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of policy ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'ObjectStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'ObjectStatusEnum', [], [], ''' Active, Inactive, or Unused ''', 'status', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('used-directly', ATTRIBUTE, 'bool' , None, None, [], [], ''' Whether the policy uses this object directly or indirectly ''', 'used_directly', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'reference', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Tag.Sets.Set.UsedBy' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Tag.Sets.Set.UsedBy', False, [ _MetaInfoClassMember('reference', REFERENCE_LIST, 'Reference' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Tag.Sets.Set.UsedBy.Reference', [], [], ''' Information about policies referring to this object ''', 'reference', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'used-by', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Tag.Sets.Set.Attached.Binding' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Tag.Sets.Set.Attached.Binding', False, [ _MetaInfoClassMember('af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum', [], [], ''' Address Family Identifier ''', 'af_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('aggregate-network-address', ATTRIBUTE, 'str' , None, None, [], [], ''' Aggregate IP address or Network IP Address in IPv4 or IPv6 Format ''', 'aggregate_network_address', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('area-id', ATTRIBUTE, 'str' , None, None, [], [], ''' OSPF Area ID in Decimal Integer Format ''', 'area_id', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('attach-point', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of attach point where policy is attached ''', 'attach_point', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('attached-policy', ATTRIBUTE, 'str' , None, None, [], [], ''' The attached policy that (maybe indirectly) uses the object in question ''', 'attached_policy', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AttachPointDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AttachPointDirectionEnum', [], [], ''' Direction In or Out ''', 'direction', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'GroupEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'GroupEnum', [], [], ''' Neighbor Group ''', 'group', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('group-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Neighbor Group Name ''', 'group_name', 'Cisco-IOS-XR-policy-repository-oper', False), 
_MetaInfoClassMember('instance', ATTRIBUTE, 'str' , None, None, [], [], ''' Instance ''', 'instance', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Interface Name ''', 'interface_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('neighbor-address', ATTRIBUTE, 'str' , None, None, [], [], ''' Neighbor IP Address ''', 'neighbor_address', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('neighbor-af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum', [], [], ''' Neighbor IP Address Family ''', 'neighbor_af_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('propogate-from', ATTRIBUTE, 'int' , None, None, [(-2147483648, 2147483647)], [], ''' ISIS Propogate From Level ''', 'propogate_from', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('propogate-to', ATTRIBUTE, 'int' , None, None, [(-2147483648, 2147483647)], [], ''' ISIS Propogate To Level ''', 'propogate_to', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('proto-instance', ATTRIBUTE, 'str' , None, None, [], [], ''' Protocol instance ''', 'proto_instance', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('protocol', ATTRIBUTE, 'str' , None, None, [], [], ''' Protocol to which policy attached ''', 'protocol', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Policy that uses object in question ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('saf-name', REFERENCE_ENUM_CLASS, 'SubAddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'SubAddressFamilyEnum', [], [], ''' Subsequent Address Family Identifier ''', 'saf_name', 'Cisco-IOS-XR-policy-repository-oper', False), 
_MetaInfoClassMember('source-protocol', ATTRIBUTE, 'str' , None, None, [], [], ''' Source Protocol to redistribute, Source Protocol can be one of the following values {all, connected, local, static, bgp, rip, isis, ospf, ospfv3, eigrp, unknown } ''', 'source_protocol', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None, [], [], ''' VRF name ''', 'vrf_name', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'binding', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Tag.Sets.Set.Attached' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Tag.Sets.Set.Attached', False, [ _MetaInfoClassMember('binding', REFERENCE_LIST, 'Binding' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Tag.Sets.Set.Attached.Binding', [], [], ''' bindings list ''', 'binding', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'attached', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Tag.Sets.Set' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Tag.Sets.Set', False, [ _MetaInfoClassMember('set-name', ATTRIBUTE, 'str' , None, None, [], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'], ''' Set name ''', 'set_name', 'Cisco-IOS-XR-policy-repository-oper', True), _MetaInfoClassMember('attached', REFERENCE_CLASS, 'Attached' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Tag.Sets.Set.Attached', [], [], ''' Information about where this policy or set is attached ''', 'attached', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('used-by', REFERENCE_CLASS, 'UsedBy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Tag.Sets.Set.UsedBy', [], [], ''' Policies 
that use this object, directly or indirectly ''', 'used_by', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'set', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Tag.Sets' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Tag.Sets', False, [ _MetaInfoClassMember('set', REFERENCE_LIST, 'Set' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Tag.Sets.Set', [], [], ''' Information about an individual set ''', 'set', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'sets', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Tag.Unused' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Tag.Unused', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'unused', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Tag.Inactive' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Tag.Inactive', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'inactive', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Tag.Active' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Tag.Active', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 
'Cisco-IOS-XR-policy-repository-oper', 'active', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Tag' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Tag', False, [ _MetaInfoClassMember('active', REFERENCE_CLASS, 'Active' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Tag.Active', [], [], ''' All objects of a given type that are attached to a protocol ''', 'active', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('inactive', REFERENCE_CLASS, 'Inactive' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Tag.Inactive', [], [], ''' All objects of a given type that are not attached to a protocol ''', 'inactive', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('sets', REFERENCE_CLASS, 'Sets' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Tag.Sets', [], [], ''' Information about individual sets ''', 'sets', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('unused', REFERENCE_CLASS, 'Unused' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Tag.Unused', [], [], ''' All objects of a given type that are not referenced at all ''', 'unused', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'tag', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Prefix.Sets.Set.UsedBy.Reference' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Prefix.Sets.Set.UsedBy.Reference', False, [ _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of policy ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'ObjectStatusEnum' , 
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'ObjectStatusEnum', [], [], ''' Active, Inactive, or Unused ''', 'status', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('used-directly', ATTRIBUTE, 'bool' , None, None, [], [], ''' Whether the policy uses this object directly or indirectly ''', 'used_directly', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'reference', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Prefix.Sets.Set.UsedBy' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Prefix.Sets.Set.UsedBy', False, [ _MetaInfoClassMember('reference', REFERENCE_LIST, 'Reference' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Prefix.Sets.Set.UsedBy.Reference', [], [], ''' Information about policies referring to this object ''', 'reference', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'used-by', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Prefix.Sets.Set.Attached.Binding' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Prefix.Sets.Set.Attached.Binding', False, [ _MetaInfoClassMember('af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum', [], [], ''' Address Family Identifier ''', 'af_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('aggregate-network-address', ATTRIBUTE, 'str' , None, None, [], [], ''' Aggregate IP address or Network IP Address in IPv4 or IPv6 Format ''', 'aggregate_network_address', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('area-id', ATTRIBUTE, 'str' , None, None, [], [], ''' OSPF Area ID in Decimal Integer Format ''', 'area_id', 
'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('attach-point', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of attach point where policy is attached ''', 'attach_point', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('attached-policy', ATTRIBUTE, 'str' , None, None, [], [], ''' The attached policy that (maybe indirectly) uses the object in question ''', 'attached_policy', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AttachPointDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AttachPointDirectionEnum', [], [], ''' Direction In or Out ''', 'direction', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'GroupEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'GroupEnum', [], [], ''' Neighbor Group ''', 'group', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('group-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Neighbor Group Name ''', 'group_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('instance', ATTRIBUTE, 'str' , None, None, [], [], ''' Instance ''', 'instance', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Interface Name ''', 'interface_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('neighbor-address', ATTRIBUTE, 'str' , None, None, [], [], ''' Neighbor IP Address ''', 'neighbor_address', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('neighbor-af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum', [], [], ''' Neighbor IP Address Family ''', 'neighbor_af_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('propogate-from', ATTRIBUTE, 'int' , None, None, [(-2147483648, 
2147483647)], [], ''' ISIS Propogate From Level ''', 'propogate_from', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('propogate-to', ATTRIBUTE, 'int' , None, None, [(-2147483648, 2147483647)], [], ''' ISIS Propogate To Level ''', 'propogate_to', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('proto-instance', ATTRIBUTE, 'str' , None, None, [], [], ''' Protocol instance ''', 'proto_instance', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('protocol', ATTRIBUTE, 'str' , None, None, [], [], ''' Protocol to which policy attached ''', 'protocol', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Policy that uses object in question ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('saf-name', REFERENCE_ENUM_CLASS, 'SubAddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'SubAddressFamilyEnum', [], [], ''' Subsequent Address Family Identifier ''', 'saf_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('source-protocol', ATTRIBUTE, 'str' , None, None, [], [], ''' Source Protocol to redistribute, Source Protocol can be one of the following values {all, connected, local, static, bgp, rip, isis, ospf, ospfv3, eigrp, unknown } ''', 'source_protocol', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None, [], [], ''' VRF name ''', 'vrf_name', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'binding', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Prefix.Sets.Set.Attached' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Prefix.Sets.Set.Attached', False, [ _MetaInfoClassMember('binding', REFERENCE_LIST, 'Binding' , 
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Prefix.Sets.Set.Attached.Binding', [], [], ''' bindings list ''', 'binding', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'attached', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Prefix.Sets.Set' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Prefix.Sets.Set', False, [ _MetaInfoClassMember('set-name', ATTRIBUTE, 'str' , None, None, [], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'], ''' Set name ''', 'set_name', 'Cisco-IOS-XR-policy-repository-oper', True), _MetaInfoClassMember('attached', REFERENCE_CLASS, 'Attached' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Prefix.Sets.Set.Attached', [], [], ''' Information about where this policy or set is attached ''', 'attached', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('used-by', REFERENCE_CLASS, 'UsedBy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Prefix.Sets.Set.UsedBy', [], [], ''' Policies that use this object, directly or indirectly ''', 'used_by', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'set', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Prefix.Sets' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Prefix.Sets', False, [ _MetaInfoClassMember('set', REFERENCE_LIST, 'Set' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Prefix.Sets.Set', [], [], ''' Information about an individual set ''', 'set', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'sets', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Prefix.Unused' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Prefix.Unused', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'unused', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Prefix.Inactive' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Prefix.Inactive', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'inactive', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Prefix.Active' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Prefix.Active', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'active', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Prefix' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Prefix', False, [ _MetaInfoClassMember('active', REFERENCE_CLASS, 'Active' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Prefix.Active', [], [], ''' All objects of a given type that are attached to a protocol ''', 'active', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('inactive', REFERENCE_CLASS, 'Inactive' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Prefix.Inactive', [], [], 
''' All objects of a given type that are not attached to a protocol ''', 'inactive', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('sets', REFERENCE_CLASS, 'Sets' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Prefix.Sets', [], [], ''' Information about individual sets ''', 'sets', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('unused', REFERENCE_CLASS, 'Unused' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Prefix.Unused', [], [], ''' All objects of a given type that are not referenced at all ''', 'unused', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'prefix', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Community.Sets.Set.UsedBy.Reference' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Community.Sets.Set.UsedBy.Reference', False, [ _MetaInfoClassMember('route-policy-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of policy ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('status', REFERENCE_ENUM_CLASS, 'ObjectStatusEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'ObjectStatusEnum', [], [], ''' Active, Inactive, or Unused ''', 'status', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('used-directly', ATTRIBUTE, 'bool' , None, None, [], [], ''' Whether the policy uses this object directly or indirectly ''', 'used_directly', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'reference', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Community.Sets.Set.UsedBy' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Community.Sets.Set.UsedBy', False, [ 
_MetaInfoClassMember('reference', REFERENCE_LIST, 'Reference' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Community.Sets.Set.UsedBy.Reference', [], [], ''' Information about policies referring to this object ''', 'reference', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'used-by', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Community.Sets.Set.Attached.Binding' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Community.Sets.Set.Attached.Binding', False, [ _MetaInfoClassMember('af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum', [], [], ''' Address Family Identifier ''', 'af_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('aggregate-network-address', ATTRIBUTE, 'str' , None, None, [], [], ''' Aggregate IP address or Network IP Address in IPv4 or IPv6 Format ''', 'aggregate_network_address', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('area-id', ATTRIBUTE, 'str' , None, None, [], [], ''' OSPF Area ID in Decimal Integer Format ''', 'area_id', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('attach-point', ATTRIBUTE, 'str' , None, None, [], [], ''' Name of attach point where policy is attached ''', 'attach_point', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('attached-policy', ATTRIBUTE, 'str' , None, None, [], [], ''' The attached policy that (maybe indirectly) uses the object in question ''', 'attached_policy', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('direction', REFERENCE_ENUM_CLASS, 'AttachPointDirectionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AttachPointDirectionEnum', [], [], ''' Direction In or Out ''', 'direction', 
'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('group', REFERENCE_ENUM_CLASS, 'GroupEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'GroupEnum', [], [], ''' Neighbor Group ''', 'group', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('group-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Neighbor Group Name ''', 'group_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('instance', ATTRIBUTE, 'str' , None, None, [], [], ''' Instance ''', 'instance', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None, [], [], ''' Interface Name ''', 'interface_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('neighbor-address', ATTRIBUTE, 'str' , None, None, [], [], ''' Neighbor IP Address ''', 'neighbor_address', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('neighbor-af-name', REFERENCE_ENUM_CLASS, 'AddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'AddressFamilyEnum', [], [], ''' Neighbor IP Address Family ''', 'neighbor_af_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('propogate-from', ATTRIBUTE, 'int' , None, None, [(-2147483648, 2147483647)], [], ''' ISIS Propogate From Level ''', 'propogate_from', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('propogate-to', ATTRIBUTE, 'int' , None, None, [(-2147483648, 2147483647)], [], ''' ISIS Propogate To Level ''', 'propogate_to', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('proto-instance', ATTRIBUTE, 'str' , None, None, [], [], ''' Protocol instance ''', 'proto_instance', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('protocol', ATTRIBUTE, 'str' , None, None, [], [], ''' Protocol to which policy attached ''', 'protocol', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('route-policy-name', 
ATTRIBUTE, 'str' , None, None, [], [], ''' Policy that uses object in question ''', 'route_policy_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('saf-name', REFERENCE_ENUM_CLASS, 'SubAddressFamilyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'SubAddressFamilyEnum', [], [], ''' Subsequent Address Family Identifier ''', 'saf_name', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('source-protocol', ATTRIBUTE, 'str' , None, None, [], [], ''' Source Protocol to redistribute, Source Protocol can be one of the following values {all, connected, local, static, bgp, rip, isis, ospf, ospfv3, eigrp, unknown } ''', 'source_protocol', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None, [], [], ''' VRF name ''', 'vrf_name', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'binding', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Community.Sets.Set.Attached' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Community.Sets.Set.Attached', False, [ _MetaInfoClassMember('binding', REFERENCE_LIST, 'Binding' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Community.Sets.Set.Attached.Binding', [], [], ''' bindings list ''', 'binding', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'attached', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Community.Sets.Set' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Community.Sets.Set', False, [ _MetaInfoClassMember('set-name', ATTRIBUTE, 'str' , None, None, [], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'], ''' Set name ''', 'set_name', 'Cisco-IOS-XR-policy-repository-oper', True), 
_MetaInfoClassMember('attached', REFERENCE_CLASS, 'Attached' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Community.Sets.Set.Attached', [], [], ''' Information about where this policy or set is attached ''', 'attached', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('used-by', REFERENCE_CLASS, 'UsedBy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Community.Sets.Set.UsedBy', [], [], ''' Policies that use this object, directly or indirectly ''', 'used_by', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'set', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Community.Sets' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Community.Sets', False, [ _MetaInfoClassMember('set', REFERENCE_LIST, 'Set' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Community.Sets.Set', [], [], ''' Information about an individual set ''', 'set', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'sets', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Community.Unused' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Community.Unused', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'unused', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Community.Inactive' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Community.Inactive', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], 
[], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'inactive', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Community.Active' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Community.Active', False, [ _MetaInfoClassMember('object', REFERENCE_LEAFLIST, 'str' , None, None, [], [], ''' Policy objects ''', 'object', 'Cisco-IOS-XR-policy-repository-oper', False), ], 'Cisco-IOS-XR-policy-repository-oper', 'active', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.Community' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.Sets.Community', False, [ _MetaInfoClassMember('active', REFERENCE_CLASS, 'Active' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Community.Active', [], [], ''' All objects of a given type that are attached to a protocol ''', 'active', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('inactive', REFERENCE_CLASS, 'Inactive' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Community.Inactive', [], [], ''' All objects of a given type that are not attached to a protocol ''', 'inactive', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('sets', REFERENCE_CLASS, 'Sets' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Community.Sets', [], [], ''' Information about individual sets ''', 'sets', 'Cisco-IOS-XR-policy-repository-oper', False), _MetaInfoClassMember('unused', REFERENCE_CLASS, 'Unused' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper', 'RoutingPolicy.Sets.Community.Unused', [], [], ''' All objects of a given type that are not referenced at all ''', 'unused', 'Cisco-IOS-XR-policy-repository-oper', False), ], 
'Cisco-IOS-XR-policy-repository-oper', 'community', _yang_ns._namespaces['Cisco-IOS-XR-policy-repository-oper'], 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_policy_repository_oper' ), }, 'RoutingPolicy.Sets.AsPath.Sets.Set.UsedBy.Reference' : { 'meta_info' : _MetaInfoClass('RoutingPolicy.S
codeparrot/github-code-clean
import bz2 from collections import Counter from contextlib import contextmanager from datetime import datetime from functools import wraps import gzip import http.client import os import re from shutil import rmtree import string import tempfile import traceback from typing import Union, cast import warnings import zipfile import numpy as np from numpy.random import rand, randn from pandas._config.localization import ( # noqa:F401 can_set_locale, get_locales, set_locale, ) import pandas._libs.testing as _testing from pandas.compat import _get_lzma_file, _import_lzma, raise_with_traceback from pandas.core.dtypes.common import ( is_bool, is_categorical_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_extension_array_dtype, is_interval_dtype, is_list_like, is_number, is_period_dtype, is_sequence, is_timedelta64_dtype, needs_i8_conversion, ) from pandas.core.dtypes.missing import array_equivalent import pandas as pd from pandas import ( Categorical, CategoricalIndex, DataFrame, DatetimeIndex, Index, IntervalIndex, MultiIndex, RangeIndex, Series, bdate_range, ) from pandas.core.algorithms import take_1d from pandas.core.arrays import ( DatetimeArray, ExtensionArray, IntervalArray, PeriodArray, TimedeltaArray, period_array, ) from pandas.io.common import urlopen from pandas.io.formats.printing import pprint_thing lzma = _import_lzma() N = 30 K = 4 _RAISE_NETWORK_ERROR_DEFAULT = False # set testing_mode _testing_mode_warnings = (DeprecationWarning, ResourceWarning) def set_testing_mode(): # set the testing mode filters testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None") if "deprecate" in testing_mode: warnings.simplefilter("always", _testing_mode_warnings) def reset_testing_mode(): # reset the testing mode filters testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None") if "deprecate" in testing_mode: warnings.simplefilter("ignore", _testing_mode_warnings) set_testing_mode() def reset_display_options(): """ Reset the display options for printing and 
representing objects. """ pd.reset_option("^display.", silent=True) def round_trip_pickle(obj, path=None): """ Pickle an object and then read it again. Parameters ---------- obj : pandas object The object to pickle and then re-read. path : str, default None The path where the pickled object is written and then read. Returns ------- round_trip_pickled_object : pandas object The original object that was pickled and then re-read. """ if path is None: path = "__{random_bytes}__.pickle".format(random_bytes=rands(10)) with ensure_clean(path) as path: pd.to_pickle(obj, path) return pd.read_pickle(path) def round_trip_pathlib(writer, reader, path=None): """ Write an object to file specified by a pathlib.Path and read it back Parameters ---------- writer : callable bound to pandas object IO writing function (e.g. DataFrame.to_csv ) reader : callable IO reading function (e.g. pd.read_csv ) path : str, default None The path where the object is written and then read. Returns ------- round_trip_object : pandas object The original object that was serialized and then re-read. """ import pytest Path = pytest.importorskip("pathlib").Path if path is None: path = "___pathlib___" with ensure_clean(path) as path: writer(Path(path)) obj = reader(Path(path)) return obj def round_trip_localpath(writer, reader, path=None): """ Write an object to file specified by a py.path LocalPath and read it back Parameters ---------- writer : callable bound to pandas object IO writing function (e.g. DataFrame.to_csv ) reader : callable IO reading function (e.g. pd.read_csv ) path : str, default None The path where the object is written and then read. Returns ------- round_trip_object : pandas object The original object that was serialized and then re-read. 
""" import pytest LocalPath = pytest.importorskip("py.path").local if path is None: path = "___localpath___" with ensure_clean(path) as path: writer(LocalPath(path)) obj = reader(LocalPath(path)) return obj @contextmanager def decompress_file(path, compression): """ Open a compressed file and return a file object Parameters ---------- path : str The path where the file is read from compression : {'gzip', 'bz2', 'zip', 'xz', None} Name of the decompression to use Returns ------- f : file object """ if compression is None: f = open(path, "rb") elif compression == "gzip": f = gzip.open(path, "rb") elif compression == "bz2": f = bz2.BZ2File(path, "rb") elif compression == "xz": f = _get_lzma_file(lzma)(path, "rb") elif compression == "zip": zip_file = zipfile.ZipFile(path) zip_names = zip_file.namelist() if len(zip_names) == 1: f = zip_file.open(zip_names.pop()) else: raise ValueError("ZIP file {} error. Only one file per ZIP.".format(path)) else: msg = "Unrecognized compression type: {}".format(compression) raise ValueError(msg) try: yield f finally: f.close() if compression == "zip": zip_file.close() def write_to_compressed(compression, path, data, dest="test"): """ Write data to a compressed file. Parameters ---------- compression : {'gzip', 'bz2', 'zip', 'xz'} The compression type to use. path : str The file path to write the data. data : str The data to write. dest : str, default "test" The destination file (for ZIP only) Raises ------ ValueError : An invalid compression value was passed in. 
""" if compression == "zip": import zipfile compress_method = zipfile.ZipFile elif compression == "gzip": import gzip compress_method = gzip.GzipFile elif compression == "bz2": import bz2 compress_method = bz2.BZ2File elif compression == "xz": compress_method = _get_lzma_file(lzma) else: msg = "Unrecognized compression type: {}".format(compression) raise ValueError(msg) if compression == "zip": mode = "w" args = (dest, data) method = "writestr" else: mode = "wb" args = (data,) method = "write" with compress_method(path, mode=mode) as f: getattr(f, method)(*args) def assert_almost_equal( left, right, check_dtype="equiv", check_less_precise=False, **kwargs ): """ Check that the left and right objects are approximately equal. By approximately equal, we refer to objects that are numbers or that contain numbers which may be equivalent to specific levels of precision. Parameters ---------- left : object right : object check_dtype : bool / string {'equiv'}, default 'equiv' Check dtype if both a and b are the same type. If 'equiv' is passed in, then `RangeIndex` and `Int64Index` are also considered equivalent when doing type checking. check_less_precise : bool or int, default False Specify comparison precision. 5 digits (False) or 3 digits (True) after decimal points are compared. If int, then specify the number of digits to compare. When comparing two numbers, if the first number has magnitude less than 1e-5, we compare the two numbers directly and check whether they are equivalent within the specified precision. Otherwise, we compare the **ratio** of the second number to the first number and check whether it is equivalent to 1 within the specified precision. 
""" if isinstance(left, pd.Index): assert_index_equal( left, right, check_exact=False, exact=check_dtype, check_less_precise=check_less_precise, **kwargs ) elif isinstance(left, pd.Series): assert_series_equal( left, right, check_exact=False, check_dtype=check_dtype, check_less_precise=check_less_precise, **kwargs ) elif isinstance(left, pd.DataFrame): assert_frame_equal( left, right, check_exact=False, check_dtype=check_dtype, check_less_precise=check_less_precise, **kwargs ) else: # Other sequences. if check_dtype: if is_number(left) and is_number(right): # Do not compare numeric classes, like np.float64 and float. pass elif is_bool(left) and is_bool(right): # Do not compare bool classes, like np.bool_ and bool. pass else: if isinstance(left, np.ndarray) or isinstance(right, np.ndarray): obj = "numpy array" else: obj = "Input" assert_class_equal(left, right, obj=obj) _testing.assert_almost_equal( left, right, check_dtype=check_dtype, check_less_precise=check_less_precise, **kwargs ) def _check_isinstance(left, right, cls): """ Helper method for our assert_* methods that ensures that the two objects being compared have the right type before proceeding with the comparison. Parameters ---------- left : The first object being compared. right : The second object being compared. cls : The class type to check against. Raises ------ AssertionError : Either `left` or `right` is not an instance of `cls`. 
""" err_msg = "{name} Expected type {exp_type}, found {act_type} instead" cls_name = cls.__name__ if not isinstance(left, cls): raise AssertionError( err_msg.format(name=cls_name, exp_type=cls, act_type=type(left)) ) if not isinstance(right, cls): raise AssertionError( err_msg.format(name=cls_name, exp_type=cls, act_type=type(right)) ) def assert_dict_equal(left, right, compare_keys=True): _check_isinstance(left, right, dict) _testing.assert_dict_equal(left, right, compare_keys=compare_keys) def randbool(size=(), p=0.5): return rand(*size) <= p RANDS_CHARS = np.array(list(string.ascii_letters + string.digits), dtype=(np.str_, 1)) RANDU_CHARS = np.array( list("".join(map(chr, range(1488, 1488 + 26))) + string.digits), dtype=(np.unicode_, 1), ) def rands_array(nchars, size, dtype="O"): """Generate an array of byte strings.""" retval = ( np.random.choice(RANDS_CHARS, size=nchars * np.prod(size)) .view((np.str_, nchars)) .reshape(size) ) if dtype is None: return retval else: return retval.astype(dtype) def randu_array(nchars, size, dtype="O"): """Generate an array of unicode strings.""" retval = ( np.random.choice(RANDU_CHARS, size=nchars * np.prod(size)) .view((np.unicode_, nchars)) .reshape(size) ) if dtype is None: return retval else: return retval.astype(dtype) def rands(nchars): """ Generate one random byte string. See `rands_array` if you want to create an array of random strings. """ return "".join(np.random.choice(RANDS_CHARS, nchars)) def randu(nchars): """ Generate one random unicode string. See `randu_array` if you want to create an array of random unicode strings. 
""" return "".join(np.random.choice(RANDU_CHARS, nchars)) def close(fignum=None): from matplotlib.pyplot import get_fignums, close as _close if fignum is None: for fignum in get_fignums(): _close(fignum) else: _close(fignum) # ----------------------------------------------------------------------------- # contextmanager to ensure the file cleanup @contextmanager def ensure_clean(filename=None, return_filelike=False): """Gets a temporary path and agrees to remove on close. Parameters ---------- filename : str (optional) if None, creates a temporary file which is then removed when out of scope. if passed, creates temporary file with filename as ending. return_filelike : bool (default False) if True, returns a file-like which is *always* cleaned. Necessary for savefig and other functions which want to append extensions. """ filename = filename or "" fd = None if return_filelike: f = tempfile.TemporaryFile(suffix=filename) try: yield f finally: f.close() else: # don't generate tempfile if using a path with directory specified if len(os.path.dirname(filename)): raise ValueError("Can't pass a qualified name to ensure_clean()") try: fd, filename = tempfile.mkstemp(suffix=filename) except UnicodeEncodeError: import pytest pytest.skip("no unicode file names on this system") try: yield filename finally: try: os.close(fd) except Exception: print( "Couldn't close file descriptor: {fdesc} (file: {fname})".format( fdesc=fd, fname=filename ) ) try: if os.path.exists(filename): os.remove(filename) except Exception as e: print("Exception on removing file: {error}".format(error=e)) @contextmanager def ensure_clean_dir(): """ Get a temporary directory path and agrees to remove on close. 
Yields ------ Temporary directory path """ directory_name = tempfile.mkdtemp(suffix="") try: yield directory_name finally: try: rmtree(directory_name) except Exception: pass @contextmanager def ensure_safe_environment_variables(): """ Get a context manager to safely set environment variables All changes will be undone on close, hence environment variables set within this contextmanager will neither persist nor change global state. """ saved_environ = dict(os.environ) try: yield finally: os.environ.clear() os.environ.update(saved_environ) # ----------------------------------------------------------------------------- # Comparators def equalContents(arr1, arr2): """Checks if the set of unique elements of arr1 and arr2 are equivalent. """ return frozenset(arr1) == frozenset(arr2) def assert_index_equal( left: Index, right: Index, exact: Union[bool, str] = "equiv", check_names: bool = True, check_less_precise: Union[bool, int] = False, check_exact: bool = True, check_categorical: bool = True, obj: str = "Index", ) -> None: """Check that left and right Index are equal. Parameters ---------- left : Index right : Index exact : bool / string {'equiv'}, default 'equiv' Whether to check the Index class, dtype and inferred_type are identical. If 'equiv', then RangeIndex can be substituted for Int64Index as well. check_names : bool, default True Whether to check the names attribute. check_less_precise : bool or int, default False Specify comparison precision. Only used when check_exact is False. 5 digits (False) or 3 digits (True) after decimal points are compared. If int, then specify the digits to compare check_exact : bool, default True Whether to compare number exactly. check_categorical : bool, default True Whether to compare internal Categorical exactly. 
obj : str, default 'Index' Specify object name being compared, internally used to show appropriate assertion message """ __tracebackhide__ = True def _check_types(l, r, obj="Index"): if exact: assert_class_equal(l, r, exact=exact, obj=obj) # Skip exact dtype checking when `check_categorical` is False if check_categorical: assert_attr_equal("dtype", l, r, obj=obj) # allow string-like to have different inferred_types if l.inferred_type in ("string", "unicode"): assert r.inferred_type in ("string", "unicode") else: assert_attr_equal("inferred_type", l, r, obj=obj) def _get_ilevel_values(index, level): # accept level number only unique = index.levels[level] labels = index.codes[level] filled = take_1d(unique.values, labels, fill_value=unique._na_value) values = unique._shallow_copy(filled, name=index.names[level]) return values # instance validation _check_isinstance(left, right, Index) # class / dtype comparison _check_types(left, right, obj=obj) # level comparison if left.nlevels != right.nlevels: msg1 = "{obj} levels are different".format(obj=obj) msg2 = "{nlevels}, {left}".format(nlevels=left.nlevels, left=left) msg3 = "{nlevels}, {right}".format(nlevels=right.nlevels, right=right) raise_assert_detail(obj, msg1, msg2, msg3) # length comparison if len(left) != len(right): msg1 = "{obj} length are different".format(obj=obj) msg2 = "{length}, {left}".format(length=len(left), left=left) msg3 = "{length}, {right}".format(length=len(right), right=right) raise_assert_detail(obj, msg1, msg2, msg3) # MultiIndex special comparison for little-friendly error messages if left.nlevels > 1: left = cast(MultiIndex, left) right = cast(MultiIndex, right) for level in range(left.nlevels): # cannot use get_level_values here because it can change dtype llevel = _get_ilevel_values(left, level) rlevel = _get_ilevel_values(right, level) lobj = "MultiIndex level [{level}]".format(level=level) assert_index_equal( llevel, rlevel, exact=exact, check_names=check_names, 
check_less_precise=check_less_precise, check_exact=check_exact, obj=lobj, ) # get_level_values may change dtype _check_types(left.levels[level], right.levels[level], obj=obj) # skip exact index checking when `check_categorical` is False if check_exact and check_categorical: if not left.equals(right): diff = np.sum((left.values != right.values).astype(int)) * 100.0 / len(left) msg = "{obj} values are different ({pct} %)".format( obj=obj, pct=np.round(diff, 5) ) raise_assert_detail(obj, msg, left, right) else: _testing.assert_almost_equal( left.values, right.values, check_less_precise=check_less_precise, check_dtype=exact, obj=obj, lobj=left, robj=right, ) # metadata comparison if check_names: assert_attr_equal("names", left, right, obj=obj) if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex): assert_attr_equal("freq", left, right, obj=obj) if isinstance(left, pd.IntervalIndex) or isinstance(right, pd.IntervalIndex): assert_interval_array_equal(left.values, right.values) if check_categorical: if is_categorical_dtype(left) or is_categorical_dtype(right): assert_categorical_equal( left.values, right.values, obj="{obj} category".format(obj=obj) ) def assert_class_equal(left, right, exact=True, obj="Input"): """checks classes are equal.""" __tracebackhide__ = True def repr_class(x): if isinstance(x, Index): # return Index as it is to include values in the error message return x try: return x.__class__.__name__ except AttributeError: return repr(type(x)) if exact == "equiv": if type(left) != type(right): # allow equivalence of Int64Index/RangeIndex types = {type(left).__name__, type(right).__name__} if len(types - {"Int64Index", "RangeIndex"}): msg = "{obj} classes are not equivalent".format(obj=obj) raise_assert_detail(obj, msg, repr_class(left), repr_class(right)) elif exact: if type(left) != type(right): msg = "{obj} classes are different".format(obj=obj) raise_assert_detail(obj, msg, repr_class(left), repr_class(right)) def assert_attr_equal(attr, 
left, right, obj="Attributes"): """checks attributes are equal. Both objects must have attribute. Parameters ---------- attr : str Attribute name being compared. left : object right : object obj : str, default 'Attributes' Specify object name being compared, internally used to show appropriate assertion message """ __tracebackhide__ = True left_attr = getattr(left, attr) right_attr = getattr(right, attr) if left_attr is right_attr: return True elif ( is_number(left_attr) and np.isnan(left_attr) and is_number(right_attr) and np.isnan(right_attr) ): # np.nan return True try: result = left_attr == right_attr except TypeError: # datetimetz on rhs may raise TypeError result = False if not isinstance(result, bool): result = result.all() if result: return True else: msg = 'Attribute "{attr}" are different'.format(attr=attr) raise_assert_detail(obj, msg, left_attr, right_attr) def assert_is_valid_plot_return_object(objs): import matplotlib.pyplot as plt if isinstance(objs, (pd.Series, np.ndarray)): for el in objs.ravel(): msg = ( "one of 'objs' is not a matplotlib Axes instance, type " "encountered {name!r}" ).format(name=el.__class__.__name__) assert isinstance(el, (plt.Axes, dict)), msg else: assert isinstance(objs, (plt.Artist, tuple, dict)), ( "objs is neither an ndarray of Artist instances nor a " 'single Artist instance, tuple, or dict, "objs" is a {name!r}'.format( name=objs.__class__.__name__ ) ) def isiterable(obj): return hasattr(obj, "__iter__") def assert_is_sorted(seq): """Assert that the sequence is sorted.""" if isinstance(seq, (Index, Series)): seq = seq.values # sorting does not change precisions assert_numpy_array_equal(seq, np.sort(np.array(seq))) def assert_categorical_equal( left, right, check_dtype=True, check_category_order=True, obj="Categorical" ): """Test that Categoricals are equivalent. 
Parameters ---------- left : Categorical right : Categorical check_dtype : bool, default True Check that integer dtype of the codes are the same check_category_order : bool, default True Whether the order of the categories should be compared, which implies identical integer codes. If False, only the resulting values are compared. The ordered attribute is checked regardless. obj : str, default 'Categorical' Specify object name being compared, internally used to show appropriate assertion message """ _check_isinstance(left, right, Categorical) if check_category_order: assert_index_equal( left.categories, right.categories, obj="{obj}.categories".format(obj=obj) ) assert_numpy_array_equal( left.codes, right.codes, check_dtype=check_dtype, obj="{obj}.codes".format(obj=obj), ) else: assert_index_equal( left.categories.sort_values(), right.categories.sort_values(), obj="{obj}.categories".format(obj=obj), ) assert_index_equal( left.categories.take(left.codes), right.categories.take(right.codes), obj="{obj}.values".format(obj=obj), ) assert_attr_equal("ordered", left, right, obj=obj) def assert_interval_array_equal(left, right, exact="equiv", obj="IntervalArray"): """Test that two IntervalArrays are equivalent. Parameters ---------- left, right : IntervalArray The IntervalArrays to compare. exact : bool / string {'equiv'}, default 'equiv' Whether to check the Index class, dtype and inferred_type are identical. If 'equiv', then RangeIndex can be substituted for Int64Index as well. 
obj : str, default 'IntervalArray' Specify object name being compared, internally used to show appropriate assertion message """ _check_isinstance(left, right, IntervalArray) assert_index_equal( left.left, right.left, exact=exact, obj="{obj}.left".format(obj=obj) ) assert_index_equal( left.right, right.right, exact=exact, obj="{obj}.left".format(obj=obj) ) assert_attr_equal("closed", left, right, obj=obj) def assert_period_array_equal(left, right, obj="PeriodArray"): _check_isinstance(left, right, PeriodArray) assert_numpy_array_equal( left._data, right._data, obj="{obj}.values".format(obj=obj) ) assert_attr_equal("freq", left, right, obj=obj) def assert_datetime_array_equal(left, right, obj="DatetimeArray"): __tracebackhide__ = True _check_isinstance(left, right, DatetimeArray) assert_numpy_array_equal(left._data, right._data, obj="{obj}._data".format(obj=obj)) assert_attr_equal("freq", left, right, obj=obj) assert_attr_equal("tz", left, right, obj=obj) def assert_timedelta_array_equal(left, right, obj="TimedeltaArray"): __tracebackhide__ = True _check_isinstance(left, right, TimedeltaArray) assert_numpy_array_equal(left._data, right._data, obj="{obj}._data".format(obj=obj)) assert_attr_equal("freq", left, right, obj=obj) def raise_assert_detail(obj, message, left, right, diff=None): __tracebackhide__ = True if isinstance(left, np.ndarray): left = pprint_thing(left) elif is_categorical_dtype(left): left = repr(left) if isinstance(right, np.ndarray): right = pprint_thing(right) elif is_categorical_dtype(right): right = repr(right) msg = """{obj} are different {message} [left]: {left} [right]: {right}""".format( obj=obj, message=message, left=left, right=right ) if diff is not None: msg += "\n[diff]: {diff}".format(diff=diff) raise AssertionError(msg) def assert_numpy_array_equal( left, right, strict_nan=False, check_dtype=True, err_msg=None, check_same=None, obj="numpy array", ): """ Checks that 'np.ndarray' is equivalent Parameters ---------- left : np.ndarray or 
iterable right : np.ndarray or iterable strict_nan : bool, default False If True, consider NaN and None to be different. check_dtype: bool, default True check dtype if both a and b are np.ndarray err_msg : str, default None If provided, used as assertion message check_same : None|'copy'|'same', default None Ensure left and right refer/do not refer to the same memory area obj : str, default 'numpy array' Specify object name being compared, internally used to show appropriate assertion message """ __tracebackhide__ = True # instance validation # Show a detailed error message when classes are different assert_class_equal(left, right, obj=obj) # both classes must be an np.ndarray _check_isinstance(left, right, np.ndarray) def _get_base(obj): return obj.base if getattr(obj, "base", None) is not None else obj left_base = _get_base(left) right_base = _get_base(right) if check_same == "same": if left_base is not right_base: msg = "{left!r} is not {right!r}".format(left=left_base, right=right_base) raise AssertionError(msg) elif check_same == "copy": if left_base is right_base: msg = "{left!r} is {right!r}".format(left=left_base, right=right_base) raise AssertionError(msg) def _raise(left, right, err_msg): if err_msg is None: if left.shape != right.shape: raise_assert_detail( obj, "{obj} shapes are different".format(obj=obj), left.shape, right.shape, ) diff = 0 for l, r in zip(left, right): # count up differences if not array_equivalent(l, r, strict_nan=strict_nan): diff += 1 diff = diff * 100.0 / left.size msg = "{obj} values are different ({pct} %)".format( obj=obj, pct=np.round(diff, 5) ) raise_assert_detail(obj, msg, left, right) raise AssertionError(err_msg) # compare shape and values if not array_equivalent(left, right, strict_nan=strict_nan): _raise(left, right, err_msg) if check_dtype: if isinstance(left, np.ndarray) and isinstance(right, np.ndarray): assert_attr_equal("dtype", left, right, obj=obj) def assert_extension_array_equal( left, right, check_dtype=True, 
check_less_precise=False, check_exact=False ): """Check that left and right ExtensionArrays are equal. Parameters ---------- left, right : ExtensionArray The two arrays to compare check_dtype : bool, default True Whether to check if the ExtensionArray dtypes are identical. check_less_precise : bool or int, default False Specify comparison precision. Only used when check_exact is False. 5 digits (False) or 3 digits (True) after decimal points are compared. If int, then specify the digits to compare. check_exact : bool, default False Whether to compare number exactly. Notes ----- Missing values are checked separately from valid values. A mask of missing values is computed for each and checked to match. The remaining all-valid values are cast to object dtype and checked. """ assert isinstance(left, ExtensionArray), "left is not an ExtensionArray" assert isinstance(right, ExtensionArray), "right is not an ExtensionArray" if check_dtype: assert_attr_equal("dtype", left, right, obj="ExtensionArray") if hasattr(left, "asi8") and type(right) == type(left): # Avoid slow object-dtype comparisons assert_numpy_array_equal(left.asi8, right.asi8) return left_na = np.asarray(left.isna()) right_na = np.asarray(right.isna()) assert_numpy_array_equal(left_na, right_na, obj="ExtensionArray NA mask") left_valid = np.asarray(left[~left_na].astype(object)) right_valid = np.asarray(right[~right_na].astype(object)) if check_exact: assert_numpy_array_equal(left_valid, right_valid, obj="ExtensionArray") else: _testing.assert_almost_equal( left_valid, right_valid, check_dtype=check_dtype, check_less_precise=check_less_precise, obj="ExtensionArray", ) # This could be refactored to use the NDFrame.equals method def assert_series_equal( left, right, check_dtype=True, check_index_type="equiv", check_series_type=True, check_less_precise=False, check_names=True, check_exact=False, check_datetimelike_compat=False, check_categorical=True, obj="Series", ): """Check that left and right Series are 
    equal.

    Parameters
    ----------
    left : Series
    right : Series
    check_dtype : bool, default True
        Whether to check the Series dtype is identical.
    check_index_type : bool / string {'equiv'}, default 'equiv'
        Whether to check the Index class, dtype and inferred_type
        are identical.
    check_series_type : bool, default True
        Whether to check the Series class is identical.
    check_less_precise : bool or int, default False
        Specify comparison precision. Only used when check_exact is False.
        5 digits (False) or 3 digits (True) after decimal points are compared.
        If int, then specify the digits to compare.

        When comparing two numbers, if the first number has magnitude less
        than 1e-5, we compare the two numbers directly and check whether
        they are equivalent within the specified precision. Otherwise, we
        compare the **ratio** of the second number to the first number and
        check whether it is equivalent to 1 within the specified precision.
    check_names : bool, default True
        Whether to check the Series and Index names attribute.
    check_exact : bool, default False
        Whether to compare number exactly.
    check_datetimelike_compat : bool, default False
        Compare datetime-like which is comparable ignoring dtype.
    check_categorical : bool, default True
        Whether to compare internal Categorical exactly.
    obj : str, default 'Series'
        Specify object name being compared, internally used to show appropriate
        assertion message.
    """
    __tracebackhide__ = True

    # instance validation
    _check_isinstance(left, right, Series)

    if check_series_type:
        # ToDo: There are some tests using rhs is sparse
        # lhs is dense. Should use assert_class_equal in future
        assert isinstance(left, type(right))
        # assert_class_equal(left, right, obj=obj)

    # length comparison: checked before the index so the message shows both
    # lengths and both indexes side by side.
    if len(left) != len(right):
        msg1 = "{len}, {left}".format(len=len(left), left=left.index)
        msg2 = "{len}, {right}".format(len=len(right), right=right.index)
        raise_assert_detail(obj, "Series length are different", msg1, msg2)

    # index comparison
    assert_index_equal(
        left.index,
        right.index,
        exact=check_index_type,
        check_names=check_names,
        check_less_precise=check_less_precise,
        check_exact=check_exact,
        check_categorical=check_categorical,
        obj="{obj}.index".format(obj=obj),
    )

    if check_dtype:
        # We want to skip exact dtype checking when `check_categorical`
        # is False. We'll still raise if only one is a `Categorical`,
        # regardless of `check_categorical`
        if (
            is_categorical_dtype(left)
            and is_categorical_dtype(right)
            and not check_categorical
        ):
            pass
        else:
            assert_attr_equal("dtype", left, right)

    # Value comparison dispatch: the branch order below matters — exactness,
    # then datetimelike compat, then dtype-specific array comparisons, and
    # finally the approximate numeric fallback.
    if check_exact:
        assert_numpy_array_equal(
            left._internal_get_values(),
            right._internal_get_values(),
            check_dtype=check_dtype,
            obj="{obj}".format(obj=obj),
        )
    elif check_datetimelike_compat:
        # we want to check only if we have compat dtypes
        # e.g. integer and M|m are NOT compat, but we can simply check
        # the values in that case
        if needs_i8_conversion(left) or needs_i8_conversion(right):

            # datetimelike may have different objects (e.g. datetime.datetime
            # vs Timestamp) but will compare equal
            if not Index(left.values).equals(Index(right.values)):
                msg = (
                    "[datetimelike_compat=True] {left} is not equal to "
                    "{right}."
                ).format(left=left.values, right=right.values)
                raise AssertionError(msg)
        else:
            assert_numpy_array_equal(
                left._internal_get_values(),
                right._internal_get_values(),
                check_dtype=check_dtype,
            )
    elif is_interval_dtype(left) or is_interval_dtype(right):
        assert_interval_array_equal(left.array, right.array)
    elif is_extension_array_dtype(left.dtype) and is_datetime64tz_dtype(left.dtype):
        # .values is an ndarray, but ._values is the ExtensionArray.
        # TODO: Use .array
        assert is_extension_array_dtype(right.dtype)
        assert_extension_array_equal(left._values, right._values)
    elif (
        is_extension_array_dtype(left)
        and not is_categorical_dtype(left)
        and is_extension_array_dtype(right)
        and not is_categorical_dtype(right)
    ):
        assert_extension_array_equal(left.array, right.array)
    else:
        _testing.assert_almost_equal(
            left._internal_get_values(),
            right._internal_get_values(),
            check_less_precise=check_less_precise,
            check_dtype=check_dtype,
            obj="{obj}".format(obj=obj),
        )

    # metadata comparison
    if check_names:
        assert_attr_equal("name", left, right, obj=obj)

    if check_categorical:
        if is_categorical_dtype(left) or is_categorical_dtype(right):
            assert_categorical_equal(
                left.values, right.values, obj="{obj} category".format(obj=obj)
            )


# This could be refactored to use the NDFrame.equals method
def assert_frame_equal(
    left,
    right,
    check_dtype=True,
    check_index_type="equiv",
    check_column_type="equiv",
    check_frame_type=True,
    check_less_precise=False,
    check_names=True,
    by_blocks=False,
    check_exact=False,
    check_datetimelike_compat=False,
    check_categorical=True,
    check_like=False,
    obj="DataFrame",
):
    """
    Check that left and right DataFrame are equal.

    This function is intended to compare two DataFrames and output any
    differences. It is mostly intended for use in unit tests.
    Additional parameters allow varying the strictness of the
    equality checks performed.

    Parameters
    ----------
    left : DataFrame
        First DataFrame to compare.
    right : DataFrame
        Second DataFrame to compare.
    check_dtype : bool, default True
        Whether to check the DataFrame dtype is identical.
    check_index_type : bool / string {'equiv'}, default 'equiv'
        Whether to check the Index class, dtype and inferred_type
        are identical.
    check_column_type : bool / string {'equiv'}, default 'equiv'
        Whether to check the columns class, dtype and inferred_type
        are identical. Is passed as the ``exact`` argument of
        :func:`assert_index_equal`.
    check_frame_type : bool, default True
        Whether to check the DataFrame class is identical.
    check_less_precise : bool or int, default False
        Specify comparison precision. Only used when check_exact is False.
        5 digits (False) or 3 digits (True) after decimal points are compared.
        If int, then specify the digits to compare.

        When comparing two numbers, if the first number has magnitude less
        than 1e-5, we compare the two numbers directly and check whether
        they are equivalent within the specified precision. Otherwise, we
        compare the **ratio** of the second number to the first number and
        check whether it is equivalent to 1 within the specified precision.
    check_names : bool, default True
        Whether to check that the `names` attribute for both the `index`
        and `column` attributes of the DataFrame is identical, i.e.

        * left.index.names == right.index.names
        * left.columns.names == right.columns.names
    by_blocks : bool, default False
        Specify how to compare internal data. If False, compare by columns.
        If True, compare by blocks.
    check_exact : bool, default False
        Whether to compare number exactly.
    check_datetimelike_compat : bool, default False
        Compare datetime-like which is comparable ignoring dtype.
    check_categorical : bool, default True
        Whether to compare internal Categorical exactly.
    check_like : bool, default False
        If True, ignore the order of index & columns.
        Note: index labels must match their respective rows
        (same as in columns) - same labels must be with the same data.
    obj : str, default 'DataFrame'
        Specify object name being compared, internally used to show appropriate
        assertion message.

    See Also
    --------
    assert_series_equal : Equivalent method for asserting Series equality.
    DataFrame.equals : Check DataFrame equality.

    Examples
    --------
    This example shows comparing two DataFrames that are equal
    but with columns of differing dtypes.

    >>> from pandas.util.testing import assert_frame_equal
    >>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
    >>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})

    df1 equals itself.

    >>> assert_frame_equal(df1, df1)

    df1 differs from df2 as column 'b' is of a different type.

    >>> assert_frame_equal(df1, df2)
    Traceback (most recent call last):
    AssertionError: Attributes are different
    ...
    Attribute "dtype" are different
    [left]:  int64
    [right]: float64

    Ignore differing dtypes in columns with check_dtype.

    >>> assert_frame_equal(df1, df2, check_dtype=False)
    """
    __tracebackhide__ = True

    # instance validation
    _check_isinstance(left, right, DataFrame)

    if check_frame_type:
        # ToDo: There are some tests using rhs is SparseDataFrame
        # lhs is DataFrame. Should use assert_class_equal in future
        assert isinstance(left, type(right))
        # assert_class_equal(left, right, obj=obj)

    # shape comparison
    if left.shape != right.shape:
        raise_assert_detail(
            obj,
            "{obj} shape mismatch".format(obj=obj),
            "{shape!r}".format(shape=left.shape),
            "{shape!r}".format(shape=right.shape),
        )

    if check_like:
        # align left to right's ordering so order differences are ignored
        left, right = left.reindex_like(right), right

    # index comparison
    assert_index_equal(
        left.index,
        right.index,
        exact=check_index_type,
        check_names=check_names,
        check_less_precise=check_less_precise,
        check_exact=check_exact,
        check_categorical=check_categorical,
        obj="{obj}.index".format(obj=obj),
    )

    # column comparison
    assert_index_equal(
        left.columns,
        right.columns,
        exact=check_column_type,
        check_names=check_names,
        check_less_precise=check_less_precise,
        check_exact=check_exact,
        check_categorical=check_categorical,
        obj="{obj}.columns".format(obj=obj),
    )

    # compare by blocks
    if by_blocks:
        rblocks = right._to_dict_of_blocks()
        lblocks = left._to_dict_of_blocks()
        for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
            assert dtype in lblocks
            assert dtype in rblocks
            assert_frame_equal(
                lblocks[dtype], rblocks[dtype], check_dtype=check_dtype, obj=obj
            )

    # compare by columns
    else:
        for i, col in enumerate(left.columns):
            assert col in right
            lcol = left.iloc[:, i]
            rcol = right.iloc[:, i]
            assert_series_equal(
                lcol,
                rcol,
                check_dtype=check_dtype,
                check_index_type=check_index_type,
                check_less_precise=check_less_precise,
                check_exact=check_exact,
                check_names=check_names,
                check_datetimelike_compat=check_datetimelike_compat,
                check_categorical=check_categorical,
                obj="{obj}.iloc[:, {idx}]".format(obj=obj, idx=i),
            )


def assert_equal(left, right, **kwargs):
    """
    Wrapper for tm.assert_*_equal to dispatch to the appropriate test function.

    Parameters
    ----------
    left : Index, Series, DataFrame, ExtensionArray, or np.ndarray
    right : Index, Series, DataFrame, ExtensionArray, or np.ndarray
    **kwargs
    """
    __tracebackhide__ = True

    # NOTE: ordering matters — DatetimeArray/TimedeltaArray/etc. are checked
    # before the generic ExtensionArray branch.
    if isinstance(left, pd.Index):
        assert_index_equal(left, right, **kwargs)
    elif isinstance(left, pd.Series):
        assert_series_equal(left, right, **kwargs)
    elif isinstance(left, pd.DataFrame):
        assert_frame_equal(left, right, **kwargs)
    elif isinstance(left, IntervalArray):
        assert_interval_array_equal(left, right, **kwargs)
    elif isinstance(left, PeriodArray):
        assert_period_array_equal(left, right, **kwargs)
    elif isinstance(left, DatetimeArray):
        assert_datetime_array_equal(left, right, **kwargs)
    elif isinstance(left, TimedeltaArray):
        assert_timedelta_array_equal(left, right, **kwargs)
    elif isinstance(left, ExtensionArray):
        assert_extension_array_equal(left, right, **kwargs)
    elif isinstance(left, np.ndarray):
        assert_numpy_array_equal(left, right, **kwargs)
    else:
        raise NotImplementedError(type(left))


def box_expected(expected, box_cls, transpose=True):
    """
    Helper function to wrap the expected output of a test in a given box_class.
    Parameters
    ----------
    expected : np.ndarray, Index, Series
    box_cls : {Index, Series, DataFrame}

    Returns
    -------
    subclass of box_cls
    """
    if box_cls is pd.Index:
        expected = pd.Index(expected)
    elif box_cls is pd.Series:
        expected = pd.Series(expected)
    elif box_cls is pd.DataFrame:
        expected = pd.Series(expected).to_frame()
        if transpose:
            # for vector operations, we need a DataFrame to be a single-row,
            # not a single-column, in order to operate against non-DataFrame
            # vectors of the same length.
            expected = expected.T
    elif box_cls is PeriodArray:
        # the PeriodArray constructor is not as flexible as period_array
        expected = period_array(expected)
    elif box_cls is DatetimeArray:
        expected = DatetimeArray(expected)
    elif box_cls is TimedeltaArray:
        expected = TimedeltaArray(expected)
    elif box_cls is np.ndarray:
        expected = np.array(expected)
    elif box_cls is to_array:
        expected = to_array(expected)
    else:
        raise NotImplementedError(box_cls)
    return expected


def to_array(obj):
    """Box `obj` into the dtype-appropriate pandas array (or plain ndarray)."""
    # temporary implementation until we get pd.array in place
    if is_period_dtype(obj):
        return period_array(obj)
    elif is_datetime64_dtype(obj) or is_datetime64tz_dtype(obj):
        return DatetimeArray._from_sequence(obj)
    elif is_timedelta64_dtype(obj):
        return TimedeltaArray._from_sequence(obj)
    else:
        return np.array(obj)


# -----------------------------------------------------------------------------
# Sparse


def assert_sp_array_equal(
    left,
    right,
    check_dtype=True,
    check_kind=True,
    check_fill_value=True,
    consolidate_block_indices=False,
):
    """Check that the left and right SparseArray are equal.

    Parameters
    ----------
    left : SparseArray
    right : SparseArray
    check_dtype : bool, default True
        Whether to check the data dtype is identical.
    check_kind : bool, default True
        Whether to just the kind of the sparse index for each column.
    check_fill_value : bool, default True
        Whether to check that left.fill_value matches right.fill_value
    consolidate_block_indices : bool, default False
        Whether to consolidate contiguous blocks for sparse arrays with
        a BlockIndex. Some operations, e.g. concat, will end up with
        block indices that could be consolidated. Setting this to true will
        create a new BlockIndex for that array, with consolidated
        block indices.
    """
    _check_isinstance(left, right, pd.SparseArray)

    assert_numpy_array_equal(left.sp_values, right.sp_values, check_dtype=check_dtype)

    # SparseIndex comparison
    assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex)
    assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex)

    if not check_kind:
        # normalize both sides to block indices so int/block kinds compare
        left_index = left.sp_index.to_block_index()
        right_index = right.sp_index.to_block_index()
    else:
        left_index = left.sp_index
        right_index = right.sp_index

    if consolidate_block_indices and left.kind == "block":
        # we'll probably remove this hack...
        left_index = left_index.to_int_index().to_block_index()
        right_index = right_index.to_int_index().to_block_index()

    if not left_index.equals(right_index):
        raise_assert_detail(
            "SparseArray.index", "index are not equal", left_index, right_index
        )
    else:
        # Just ensure a
        pass

    if check_fill_value:
        assert_attr_equal("fill_value", left, right)
    if check_dtype:
        assert_attr_equal("dtype", left, right)
    assert_numpy_array_equal(left.to_dense(), right.to_dense(), check_dtype=check_dtype)


def assert_sp_series_equal(
    left,
    right,
    check_dtype=True,
    exact_indices=True,
    check_series_type=True,
    check_names=True,
    check_kind=True,
    check_fill_value=True,
    consolidate_block_indices=False,
    obj="SparseSeries",
):
    """Check that the left and right SparseSeries are equal.

    Parameters
    ----------
    left : SparseSeries
    right : SparseSeries
    check_dtype : bool, default True
        Whether to check the Series dtype is identical.
    exact_indices : bool, default True
    check_series_type : bool, default True
        Whether to check the SparseSeries class is identical.
    check_names : bool, default True
        Whether to check the SparseSeries name attribute.
    check_kind : bool, default True
        Whether to just the kind of the sparse index for each column.
    check_fill_value : bool, default True
        Whether to check that left.fill_value matches right.fill_value
    consolidate_block_indices : bool, default False
        Whether to consolidate contiguous blocks for sparse arrays with
        a BlockIndex. Some operations, e.g. concat, will end up with
        block indices that could be consolidated. Setting this to true will
        create a new BlockIndex for that array, with consolidated
        block indices.
    obj : str, default 'SparseSeries'
        Specify the object name being compared, internally used to show
        the appropriate assertion message.
    """
    _check_isinstance(left, right, pd.SparseSeries)

    if check_series_type:
        assert_class_equal(left, right, obj=obj)

    assert_index_equal(left.index, right.index, obj="{obj}.index".format(obj=obj))

    assert_sp_array_equal(
        left.values,
        right.values,
        check_kind=check_kind,
        check_fill_value=check_fill_value,
        consolidate_block_indices=consolidate_block_indices,
    )

    if check_names:
        assert_attr_equal("name", left, right)
    if check_dtype:
        assert_attr_equal("dtype", left, right)

    # also compare the densified values
    assert_numpy_array_equal(np.asarray(left.values), np.asarray(right.values))


def assert_sp_frame_equal(
    left,
    right,
    check_dtype=True,
    exact_indices=True,
    check_frame_type=True,
    check_kind=True,
    check_fill_value=True,
    consolidate_block_indices=False,
    obj="SparseDataFrame",
):
    """Check that the left and right SparseDataFrame are equal.

    Parameters
    ----------
    left : SparseDataFrame
    right : SparseDataFrame
    check_dtype : bool, default True
        Whether to check the Series dtype is identical.
    exact_indices : bool, default True
        SparseSeries SparseIndex objects must be exactly the same,
        otherwise just compare dense representations.
    check_frame_type : bool, default True
        Whether to check the SparseDataFrame class is identical.
    check_kind : bool, default True
        Whether to just the kind of the sparse index for each column.
    check_fill_value : bool, default True
        Whether to check that left.fill_value matches right.fill_value
    consolidate_block_indices : bool, default False
        Whether to consolidate contiguous blocks for sparse arrays with
        a BlockIndex. Some operations, e.g. concat, will end up with
        block indices that could be consolidated. Setting this to true will
        create a new BlockIndex for that array, with consolidated
        block indices.
    obj : str, default 'SparseDataFrame'
        Specify the object name being compared, internally used to show
        the appropriate assertion message.
    """
    _check_isinstance(left, right, pd.SparseDataFrame)

    if check_frame_type:
        assert_class_equal(left, right, obj=obj)

    assert_index_equal(left.index, right.index, obj="{obj}.index".format(obj=obj))
    assert_index_equal(left.columns, right.columns, obj="{obj}.columns".format(obj=obj))

    if check_fill_value:
        assert_attr_equal("default_fill_value", left, right, obj=obj)

    for col, series in left.items():
        assert col in right
        # trade-off?

        if exact_indices:
            assert_sp_series_equal(
                series,
                right[col],
                check_dtype=check_dtype,
                check_kind=check_kind,
                check_fill_value=check_fill_value,
                consolidate_block_indices=consolidate_block_indices,
            )
        else:
            assert_series_equal(
                series.to_dense(), right[col].to_dense(), check_dtype=check_dtype
            )

    # do I care?
    # assert(left.default_kind == right.default_kind)

    # check that right has no columns missing from left
    for col in right:
        assert col in left


# -----------------------------------------------------------------------------
# Others


def assert_contains_all(iterable, dic):
    """Assert that every element of `iterable` is a key/member of `dic`."""
    for k in iterable:
        assert k in dic, "Did not contain item: '{key!r}'".format(key=k)


def assert_copy(iter1, iter2, **eql_kwargs):
    """
    iter1, iter2: iterables that produce elements
    comparable with assert_almost_equal

    Checks that the elements are equal, but not
    the same object.
    (Does not check that items
    in sequences are also not the same object)
    """
    for elem1, elem2 in zip(iter1, iter2):
        assert_almost_equal(elem1, elem2, **eql_kwargs)
        msg = (
            "Expected object {obj1!r} and object {obj2!r} to be "
            "different objects, but they were the same object."
        ).format(obj1=type(elem1), obj2=type(elem2))
        assert elem1 is not elem2, msg


def getCols(k):
    """Return the first `k` uppercase ASCII letters (default column labels)."""
    return string.ascii_uppercase[:k]


# make index
def makeStringIndex(k=10, name=None):
    # rands_array is a helper defined elsewhere in this module
    return Index(rands_array(nchars=10, size=k), name=name)


def makeUnicodeIndex(k=10, name=None):
    return Index(randu_array(nchars=10, size=k), name=name)


def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
    """ make a length k index or n categories """
    x = rands_array(nchars=4, size=n)
    return CategoricalIndex(np.random.choice(x, k), name=name, **kwargs)


def makeIntervalIndex(k=10, name=None, **kwargs):
    """ make a length k IntervalIndex """
    x = np.linspace(0, 100, num=(k + 1))
    return IntervalIndex.from_breaks(x, name=name, **kwargs)


def makeBoolIndex(k=10, name=None):
    # special-case small k so the index is always [False, True, False, ...]
    if k == 1:
        return Index([True], name=name)
    elif k == 2:
        return Index([False, True], name=name)
    return Index([False, True] + [False] * (k - 2), name=name)


def makeIntIndex(k=10, name=None):
    return Index(list(range(k)), name=name)


def makeUIntIndex(k=10, name=None):
    # values above the int64 range force an unsigned dtype
    return Index([2 ** 63 + i for i in range(k)], name=name)


def makeRangeIndex(k=10, name=None, **kwargs):
    return RangeIndex(0, k, 1, name=name, **kwargs)


def makeFloatIndex(k=10, name=None):
    values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
    return Index(values * (10 ** np.random.randint(0, 9)), name=name)


def makeDateIndex(k=10, freq="B", name=None, **kwargs):
    dt = datetime(2000, 1, 1)
    dr = bdate_range(dt, periods=k, freq=freq, name=name)
    return DatetimeIndex(dr, name=name, **kwargs)


def makeTimedeltaIndex(k=10, freq="D", name=None, **kwargs):
    return pd.timedelta_range(start="1 day", periods=k, freq=freq, name=name, **kwargs)


def makePeriodIndex(k=10, name=None, **kwargs):
    dt = datetime(2000, 1, 1)
    dr = pd.period_range(start=dt, periods=k, freq="B", name=name, **kwargs)
    return dr


def makeMultiIndex(k=10, names=None, **kwargs):
    return MultiIndex.from_product((("foo", "bar"), (1, 2)), names=names, **kwargs)


def all_index_generator(k=10):
    """Generator which can be iterated over to get instances of all the various
    index classes.

    Parameters
    ----------
    k: length of each of the index instances
    """
    all_make_index_funcs = [
        makeIntIndex,
        makeFloatIndex,
        makeStringIndex,
        makeUnicodeIndex,
        makeDateIndex,
        makePeriodIndex,
        makeTimedeltaIndex,
        makeBoolIndex,
        makeRangeIndex,
        makeIntervalIndex,
        makeCategoricalIndex,
    ]
    for make_index_func in all_make_index_funcs:
        yield make_index_func(k=k)


def index_subclass_makers_generator():
    """Yield the maker functions (not instances) for Index subclasses."""
    make_index_funcs = [
        makeDateIndex,
        makePeriodIndex,
        makeTimedeltaIndex,
        makeRangeIndex,
        makeIntervalIndex,
        makeCategoricalIndex,
        makeMultiIndex,
    ]
    for make_index_func in make_index_funcs:
        yield make_index_func


def all_timeseries_index_generator(k=10):
    """Generator which can be iterated over to get instances of all the classes
    which represent time-series.

    Parameters
    ----------
    k: length of each of the index instances
    """
    make_index_funcs = [makeDateIndex, makePeriodIndex, makeTimedeltaIndex]
    for make_index_func in make_index_funcs:
        yield make_index_func(k=k)


# make series
# N and K below are module-level default sizes defined earlier in this file.
def makeFloatSeries(name=None):
    index = makeStringIndex(N)
    return Series(randn(N), index=index, name=name)


def makeStringSeries(name=None):
    index = makeStringIndex(N)
    return Series(randn(N), index=index, name=name)


def makeObjectSeries(name=None):
    dateIndex = makeDateIndex(N)
    dateIndex = Index(dateIndex, dtype=object)
    index = makeStringIndex(N)
    return Series(dateIndex, index=index, name=name)


def getSeriesData():
    index = makeStringIndex(N)
    return {c: Series(randn(N), index=index) for c in getCols(K)}


def makeTimeSeries(nper=None, freq="B", name=None):
    if nper is None:
        nper = N
    return Series(randn(nper), index=makeDateIndex(nper, freq=freq), name=name)


def makePeriodSeries(nper=None, name=None):
    if nper is None:
        nper = N
    return Series(randn(nper), index=makePeriodIndex(nper), name=name)


def getTimeSeriesData(nper=None, freq="B"):
    return {c: makeTimeSeries(nper, freq) for c in getCols(K)}


def getPeriodData(nper=None):
    return {c: makePeriodSeries(nper) for c in getCols(K)}


# make frame
def makeTimeDataFrame(nper=None, freq="B"):
    data = getTimeSeriesData(nper, freq)
    return DataFrame(data)


def makeDataFrame():
    data = getSeriesData()
    return DataFrame(data)


def getMixedTypeDict():
    """Return (index, data) with float, string and datetime columns."""
    index = Index(["a", "b", "c", "d", "e"])

    data = {
        "A": [0.0, 1.0, 2.0, 3.0, 4.0],
        "B": [0.0, 1.0, 0.0, 1.0, 0.0],
        "C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
        "D": bdate_range("1/1/2009", periods=5),
    }

    return index, data


def makeMixedDataFrame():
    return DataFrame(getMixedTypeDict()[1])


def makePeriodFrame(nper=None):
    data = getPeriodData(nper)
    return DataFrame(data)


def makeCustomIndex(
    nentries, nlevels, prefix="#", names=False, ndupe_l=None, idx_type=None
):
    """Create an index/multindex with given dimensions, levels, names, etc'

    nentries - number of entries in index
    nlevels
- number of levels (> 1 produces multindex) prefix - a string prefix for labels names - (Optional), bool or list of strings. if True will use default names, if false will use no names, if a list is given, the name of each level in the index will be taken from the list. ndupe_l - (Optional), list of ints, the number of rows for which the label will repeated at the corresponding level, you can specify just the first few, the rest will use the default ndupe_l of 1. len(ndupe_l) <= nlevels. idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td". If idx_type is not None, `idx_nlevels` must be 1. "i"/"f" creates an integer/float index, "s"/"u" creates a string/unicode index "dt" create a datetime index. "td" create a datetime index. if unspecified, string labels will be generated. """ if ndupe_l is None: ndupe_l = [1] * nlevels assert is_sequence(ndupe_l) and len(ndupe_l) <= nlevels assert names is None or names is False or names is True or len(names) is nlevels assert idx_type is None or ( idx_type in ("i", "f", "s", "u", "dt", "p", "td") and nlevels == 1 ) if names is True: # build default names names = [prefix + str(i) for i in range(nlevels)] if names is False: # pass None to index constructor for no name names = None # make singleton case uniform if isinstance(names, str) and nlevels == 1: names = [names] # specific 1D index type requested? 
idx_func = dict( i=makeIntIndex, f=makeFloatIndex, s=makeStringIndex, u=makeUnicodeIndex, dt=makeDateIndex, td=makeTimedeltaIndex, p=makePeriodIndex, ).get(idx_type) if idx_func: idx = idx_func(nentries) # but we need to fill in the name if names: idx.name = names[0] return idx elif idx_type is not None: raise ValueError( '"{idx_type}" is not a legal value for `idx_type`, ' 'use "i"/"f"/"s"/"u"/"dt/"p"/"td".'.format(idx_type=idx_type) ) if len(ndupe_l) < nlevels: ndupe_l.extend([1] * (nlevels - len(ndupe_l))) assert len(ndupe_l) == nlevels assert all(x > 0 for x in ndupe_l) tuples = [] for i in range(nlevels): def keyfunc(x): import re numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_") return [int(num) for num in numeric_tuple] # build a list of lists to create the index from div_factor = nentries // ndupe_l[i] + 1 cnt = Counter() for j in range(div_factor): label = "{prefix}_l{i}_g{j}".format(prefix=prefix, i=i, j=j) cnt[label] = ndupe_l[i] # cute Counter trick result = list(sorted(cnt.elements(), key=keyfunc))[:nentries] tuples.append(result) tuples = list(zip(*tuples)) # convert tuples to index if nentries == 1: # we have a single level of tuples, i.e. a regular Index index = Index(tuples[0], name=names[0]) elif nlevels == 1: name = None if names is None else names[0] index = Index((x[0] for x in tuples), name=name) else: index = MultiIndex.from_tuples(tuples, names=names) return index def makeCustomDataframe( nrows, ncols, c_idx_names=True, r_idx_names=True, c_idx_nlevels=1, r_idx_nlevels=1, data_gen_f=None, c_ndupe_l=None, r_ndupe_l=None, dtype=None, c_idx_type=None, r_idx_type=None, ): """ nrows, ncols - number of data rows/cols c_idx_names, idx_names - False/True/list of strings, yields No names , default names or uses the provided names for the levels of the corresponding index. You can provide a single string when c_idx_nlevels ==1. c_idx_nlevels - number of levels in columns index. 
            > 1 will yield MultiIndex
    r_idx_nlevels - number of levels in rows index.  > 1 will yield MultiIndex
    data_gen_f - a function f(row,col) which return the data value
            at that position, the default generator used yields values of the form
            "RxCy" based on position.
    c_ndupe_l, r_ndupe_l - list of integers, determines the number
            of duplicates for each label at a given level of the corresponding
            index. The default `None` value produces a multiplicity of 1 across
            all levels, i.e. a unique index. Will accept a partial list of length
            N < idx_nlevels, for just the first N levels. If ndupe doesn't divide
            nrows/ncol, the last label might have lower multiplicity.
    dtype - passed to the DataFrame constructor as is, in case you wish to
            have more control in conjunction with a custom `data_gen_f`
    r_idx_type, c_idx_type -  "i"/"f"/"s"/"u"/"dt"/"td".
            If idx_type is not None, `idx_nlevels` must be 1.
            "i"/"f" creates an integer/float index,
            "s"/"u" creates a string/unicode index
            "dt" create a datetime index.
            "td" create a timedelta index.

            if unspecified, string labels will be generated.

    Examples:

    # 5 row, 3 columns, default names on both, single index on both axis
    >> makeCustomDataframe(5,3)

    # make the data a random int between 1 and 100
    >> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))

    # 2-level multiindex on rows with each label duplicated
    # twice on first level, default names on both axis, single
    # index on both axis
    >> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])

    # DatetimeIndex on row, index with unicode labels on columns
    # no names on either axis
    >> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
                             r_idx_type="dt",c_idx_type="u")

    # 4-level multindex on rows with names provided, 2-level multindex
    # on columns with default labels and default names.
    >> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
                             r_idx_names=["FEE","FI","FO","FAM"],
                             c_idx_nlevels=2)

    >> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
    """

    assert c_idx_nlevels > 0
    assert r_idx_nlevels > 0
    assert r_idx_type is None or (
        r_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and r_idx_nlevels == 1
    )
    assert c_idx_type is None or (
        c_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and c_idx_nlevels == 1
    )

    columns = makeCustomIndex(
        ncols,
        nlevels=c_idx_nlevels,
        prefix="C",
        names=c_idx_names,
        ndupe_l=c_ndupe_l,
        idx_type=c_idx_type,
    )
    index = makeCustomIndex(
        nrows,
        nlevels=r_idx_nlevels,
        prefix="R",
        names=r_idx_names,
        ndupe_l=r_ndupe_l,
        idx_type=r_idx_type,
    )

    # by default, generate data based on location
    if data_gen_f is None:
        data_gen_f = lambda r, c: "R{rows}C{cols}".format(rows=r, cols=c)

    data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]

    return DataFrame(data, index, columns, dtype=dtype)


def _create_missing_idx(nrows, ncols, density, random_state=None):
    """Pick random (row, col) positions to blank out so that roughly
    ``(1 - density)`` of an nrows x ncols frame is missing.

    Returns (row_indices, col_indices) as two lists.
    """
    if random_state is None:
        random_state = np.random
    else:
        random_state = np.random.RandomState(random_state)

    # below is cribbed from scipy.sparse
    size = int(np.round((1 - density) * nrows * ncols))
    # generate a few more to ensure unique values
    min_rows = 5
    fac = 1.02
    # NOTE(review): min() here looks odd for "a few more" (max() would seem
    # intended), but the while-loop below regrows extra_size until enough
    # unique positions exist, so the result is still correct.
    extra_size = min(size + min_rows, fac * size)

    def _gen_unique_rand(rng, _extra_size):
        # draw floats, scale to flat positions, keep the unique ones
        ind = rng.rand(int(_extra_size))
        return np.unique(np.floor(ind * nrows * ncols))[:size]

    ind = _gen_unique_rand(random_state, extra_size)
    while ind.size < size:
        extra_size *= 1.05
        ind = _gen_unique_rand(random_state, extra_size)

    # decompose flat positions into (row, col) pairs
    j = np.floor(ind * 1.0 / nrows).astype(int)
    i = (ind - j * nrows).astype(int)
    return i.tolist(), j.tolist()


def makeMissingCustomDataframe(
    nrows,
    ncols,
    density=0.9,
    random_state=None,
    c_idx_names=True,
    r_idx_names=True,
    c_idx_nlevels=1,
    r_idx_nlevels=1,
    data_gen_f=None,
    c_ndupe_l=None,
    r_ndupe_l=None,
    dtype=None,
    c_idx_type=None,
    r_idx_type=None,
):
    """
    Parameters
    ----------
    Density : float, optional
        Float in (0,
        1) that gives the percentage of non-missing numbers in the DataFrame.
    random_state : {np.random.RandomState, int}, optional
        Random number generator or random seed.

    See makeCustomDataframe for descriptions of the rest of the parameters.
    """
    df = makeCustomDataframe(
        nrows,
        ncols,
        c_idx_names=c_idx_names,
        r_idx_names=r_idx_names,
        c_idx_nlevels=c_idx_nlevels,
        r_idx_nlevels=r_idx_nlevels,
        data_gen_f=data_gen_f,
        c_ndupe_l=c_ndupe_l,
        r_ndupe_l=r_ndupe_l,
        dtype=dtype,
        c_idx_type=c_idx_type,
        r_idx_type=r_idx_type,
    )

    # Punch holes into the generated frame: _create_missing_idx picks the
    # (row, col) coordinates to blank out, and assigning through .values
    # writes NaN into the underlying ndarray in place.
    i, j = _create_missing_idx(nrows, ncols, density, random_state)
    df.values[i, j] = np.nan
    return df


def makeMissingDataframe(density=0.9, random_state=None):
    # Same idea as makeMissingCustomDataframe, but applied to the default
    # random frame produced by makeDataFrame().
    df = makeDataFrame()
    i, j = _create_missing_idx(*df.shape, density=density, random_state=random_state)
    df.values[i, j] = np.nan
    return df


class TestSubDict(dict):
    # Minimal dict subclass; used by tests that must verify pandas accepts
    # dict subclasses wherever a plain mapping is expected.
    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)


def optional_args(decorator):
    """allows a decorator to take optional positional and keyword arguments.
    Assumes that taking a single, callable, positional argument means that
    it is decorating a function, i.e.
something like this:: @my_decorator def function(): pass Calls decorator with decorator(f, *args, **kwargs)""" @wraps(decorator) def wrapper(*args, **kwargs): def dec(f): return decorator(f, *args, **kwargs) is_decorating = not kwargs and len(args) == 1 and callable(args[0]) if is_decorating: f = args[0] args = [] return dec(f) else: return dec return wrapper # skip tests on exceptions with this message _network_error_messages = ( # 'urlopen error timed out', # 'timeout: timed out', # 'socket.timeout: timed out', "timed out", "Server Hangup", "HTTP Error 503: Service Unavailable", "502: Proxy Error", "HTTP Error 502: internal error", "HTTP Error 502", "HTTP Error 503", "HTTP Error 403", "HTTP Error 400", "Temporary failure in name resolution", "Name or service not known", "Connection refused", "certificate verify", ) # or this e.errno/e.reason.errno _network_errno_vals = ( 101, # Network is unreachable 111, # Connection refused 110, # Connection timed out 104, # Connection reset Error 54, # Connection reset by peer 60, # urllib.error.URLError: [Errno 60] Connection timed out ) # Both of the above shouldn't mask real issues such as 404's # or refused connections (changed DNS). # But some tests (test_data yahoo) contact incredibly flakey # servers. # and conditionally raise on these exception types _network_error_classes = (IOError, http.client.HTTPException, TimeoutError) def can_connect(url, error_classes=_network_error_classes): """Try to connect to the given url. 
True if succeeds, False if IOError raised Parameters ---------- url : basestring The URL to try to connect to Returns ------- connectable : bool Return True if no IOError (unable to connect) or URLError (bad url) was raised """ try: with urlopen(url): pass except error_classes: return False else: return True @optional_args def network( t, url="http://www.google.com", raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT, check_before_test=False, error_classes=_network_error_classes, skip_errnos=_network_errno_vals, _skip_on_messages=_network_error_messages, ): """ Label a test as requiring network connection and, if an error is encountered, only raise if it does not find a network connection. In comparison to ``network``, this assumes an added contract to your test: you must assert that, under normal conditions, your test will ONLY fail if it does not have network connectivity. You can call this in 3 ways: as a standard decorator, with keyword arguments, or with a positional argument that is the url to check. Parameters ---------- t : callable The test requiring network connectivity. url : path The url to test via ``pandas.io.common.urlopen`` to check for connectivity. Defaults to 'http://www.google.com'. raise_on_error : bool If True, never catches errors. check_before_test : bool If True, checks connectivity before running the test case. error_classes : tuple or Exception error classes to ignore. If not in ``error_classes``, raises the error. defaults to IOError. Be careful about changing the error classes here. skip_errnos : iterable of int Any exception that has .errno or .reason.erno set to one of these values will be skipped with an appropriate message. _skip_on_messages: iterable of string any exception e for which one of the strings is a substring of str(e) will be skipped with an appropriate message. Intended to suppress errors where an errno isn't available. 
Notes ----- * ``raise_on_error`` supercedes ``check_before_test`` Returns ------- t : callable The decorated test ``t``, with checks for connectivity errors. Example ------- Tests decorated with @network will fail if it's possible to make a network connection to another URL (defaults to google.com):: >>> from pandas.util.testing import network >>> from pandas.io.common import urlopen >>> @network ... def test_network(): ... with urlopen("rabbit://bonanza.com"): ... pass Traceback ... URLError: <urlopen error unknown url type: rabit> You can specify alternative URLs:: >>> @network("http://www.yahoo.com") ... def test_something_with_yahoo(): ... raise IOError("Failure Message") >>> test_something_with_yahoo() Traceback (most recent call last): ... IOError: Failure Message If you set check_before_test, it will check the url first and not run the test on failure:: >>> @network("failing://url.blaher", check_before_test=True) ... def test_something(): ... print("I ran!") ... raise ValueError("Failure") >>> test_something() Traceback (most recent call last): ... Errors not related to networking will always be raised. 
""" from pytest import skip t.network = True @wraps(t) def wrapper(*args, **kwargs): if check_before_test and not raise_on_error: if not can_connect(url, error_classes): skip() try: return t(*args, **kwargs) except Exception as e: errno = getattr(e, "errno", None) if not errno and hasattr(errno, "reason"): errno = getattr(e.reason, "errno", None) if errno in skip_errnos: skip( "Skipping test due to known errno" " and error {error}".format(error=e) ) try: e_str = traceback.format_exc(e) except Exception: e_str = str(e) if any(m.lower() in e_str.lower() for m in _skip_on_messages): skip( "Skipping test because exception " "message is known and error {error}".format(error=e) ) if not isinstance(e, error_classes): raise if raise_on_error or can_connect(url, error_classes): raise else: skip( "Skipping test due to lack of connectivity" " and error {error}".format(error=e) ) return wrapper with_connectivity_check = network def assert_raises_regex(_exception, _regexp, _callable=None, *args, **kwargs): r""" Check that the specified Exception is raised and that the error message matches a given regular expression pattern. This may be a regular expression object or a string containing a regular expression suitable for use by `re.search()`. This is a port of the `assertRaisesRegexp` function from unittest in Python 2.7. .. deprecated:: 0.24.0 Use `pytest.raises` instead. Examples -------- >>> assert_raises_regex(ValueError, 'invalid literal for.*XYZ', int, 'XYZ') >>> import re >>> assert_raises_regex(ValueError, re.compile('literal'), int, 'XYZ') If an exception of a different type is raised, it bubbles up. >>> assert_raises_regex(TypeError, 'literal', int, 'XYZ') Traceback (most recent call last): ... ValueError: invalid literal for int() with base 10: 'XYZ' >>> dct = dict() >>> assert_raises_regex(KeyError, 'pear', dct.__getitem__, 'apple') Traceback (most recent call last): ... AssertionError: "pear" does not match "'apple'" You can also use this in a with statement. 
>>> with assert_raises_regex(TypeError, r'unsupported operand type\(s\)'): ... 1 + {} >>> with assert_raises_regex(TypeError, 'banana'): ... 'apple'[0] = 'b' Traceback (most recent call last): ... AssertionError: "banana" does not match "'str' object does not support \ item assignment" """ warnings.warn( ( "assert_raises_regex has been deprecated and will " "be removed in the next release. Please use " "`pytest.raises` instead." ), FutureWarning, stacklevel=2, ) manager = _AssertRaisesContextmanager(exception=_exception, regexp=_regexp) if _callable is not None: with manager: _callable(*args, **kwargs) else: return manager class _AssertRaisesContextmanager: """ Context manager behind `assert_raises_regex`. """ def __init__(self, exception, regexp=None): """ Initialize an _AssertRaisesContextManager instance. Parameters ---------- exception : class The expected Exception class. regexp : str, default None The regex to compare against the Exception message. """ self.exception = exception if regexp is not None and not hasattr(regexp, "search"): regexp = re.compile(regexp, re.DOTALL) self.regexp = regexp def __enter__(self): return self def __exit__(self, exc_type, exc_value, trace_back): expected = self.exception if not exc_type: exp_name = getattr(expected, "__name__", str(expected)) raise AssertionError("{name} not raised.".format(name=exp_name)) return self.exception_matches(exc_type, exc_value, trace_back) def exception_matches(self, exc_type, exc_value, trace_back): """ Check that the Exception raised matches the expected Exception and expected error message regular expression. Parameters ---------- exc_type : class The type of Exception raised. exc_value : Exception The instance of `exc_type` raised. trace_back : stack trace object The traceback object associated with `exc_value`. Returns ------- is_matched : bool Whether or not the Exception raised matches the expected Exception class and expected error message regular expression. 
Raises ------ AssertionError : The error message provided does not match the expected error message regular expression. """ if issubclass(exc_type, self.exception): if self.regexp is not None: val = str(exc_value) if not self.regexp.search(val): msg = '"{pat}" does not match "{val}"'.format( pat=self.regexp.pattern, val=val ) e = AssertionError(msg) raise_with_traceback(e, trace_back) return True else: # Failed, so allow Exception to bubble up. return False @contextmanager def assert_produces_warning( expected_warning=Warning, filter_level="always", clear=None, check_stacklevel=True, raise_on_extra_warnings=True, ): """ Context manager for running code expected to either raise a specific warning, or not raise any warnings. Verifies that the code raises the expected warning, and that it does not raise any other unexpected warnings. It is basically a wrapper around ``warnings.catch_warnings``. Parameters ---------- expected_warning : {Warning, False, None}, default Warning The type of Exception raised. ``exception.Warning`` is the base class for all warnings. To check that no warning is returned, specify ``False`` or ``None``. filter_level : str or None, default "always" Specifies whether warnings are ignored, displayed, or turned into errors. Valid values are: * "error" - turns matching warnings into exceptions * "ignore" - discard the warning * "always" - always emit a warning * "default" - print the warning the first time it is generated from each location * "module" - print the warning the first time it is generated from each module * "once" - print the warning the first time it is generated clear : str, default None If not ``None`` then remove any previously raised warnings from the ``__warningsregistry__`` to ensure that no warning messages are suppressed by this context manager. If ``None`` is specified, the ``__warningsregistry__`` keeps track of which warnings have been shown, and does not show them again. 
check_stacklevel : bool, default True If True, displays the line that called the function containing the warning to show were the function is called. Otherwise, the line that implements the function is displayed. raise_on_extra_warnings : bool, default True Whether extra warnings not of the type `expected_warning` should cause the test to fail. Examples -------- >>> import warnings >>> with assert_produces_warning(): ... warnings.warn(UserWarning()) ... >>> with assert_produces_warning(False): ... warnings.warn(RuntimeWarning()) ... Traceback (most recent call last): ... AssertionError: Caused unexpected warning(s): ['RuntimeWarning']. >>> with assert_produces_warning(UserWarning): ... warnings.warn(RuntimeWarning()) Traceback (most recent call last): ... AssertionError: Did not see expected warning of class 'UserWarning'. ..warn:: This is *not* thread-safe. """ __tracebackhide__ = True with warnings.catch_warnings(record=True) as w: if clear is not None: # make sure that we are clearing these warnings # if they have happened before # to guarantee that we will catch them if not is_list_like(clear): clear = [clear] for m in clear: try: m.__warningregistry__.clear() except Exception: pass saw_warning = False warnings.simplefilter(filter_level) yield w extra_warnings = [] for actual_warning in w: if expected_warning and issubclass( actual_warning.category, expected_warning ): saw_warning = True if check_stacklevel and issubclass( actual_warning.category, (FutureWarning, DeprecationWarning) ): from inspect import getframeinfo, stack caller = getframeinfo(stack()[2][0]) msg = ( "Warning not set with correct stacklevel. " "File where warning is raised: {actual} != " "{caller}. 
Warning message: {message}" ).format( actual=actual_warning.filename, caller=caller.filename, message=actual_warning.message, ) assert actual_warning.filename == caller.filename, msg else: extra_warnings.append( ( actual_warning.category.__name__, actual_warning.message, actual_warning.filename, actual_warning.lineno, ) ) if expected_warning: msg = "Did not see expected warning of class {name!r}.".format( name=expected_warning.__name__ ) assert saw_warning, msg if raise_on_extra_warnings and extra_warnings: raise AssertionError( "Caused unexpected warning(s): {!r}.".format(extra_warnings) ) class RNGContext: """ Context manager to set the numpy random number generator speed. Returns to the original value upon exiting the context manager. Parameters ---------- seed : int Seed for numpy.random.seed Examples -------- with RNGContext(42): np.random.randn() """ def __init__(self, seed): self.seed = seed def __enter__(self): self.start_state = np.random.get_state() np.random.seed(self.seed) def __exit__(self, exc_type, exc_value, traceback): np.random.set_state(self.start_state) @contextmanager def with_csv_dialect(name, **kwargs): """ Context manager to temporarily register a CSV dialect for parsing CSV. Parameters ---------- name : str The name of the dialect. kwargs : mapping The parameters for the dialect. Raises ------ ValueError : the name of the dialect conflicts with a builtin one. See Also -------- csv : Python's CSV library. 
""" import csv _BUILTIN_DIALECTS = {"excel", "excel-tab", "unix"} if name in _BUILTIN_DIALECTS: raise ValueError("Cannot override builtin dialect.") csv.register_dialect(name, **kwargs) yield csv.unregister_dialect(name) @contextmanager def use_numexpr(use, min_elements=None): from pandas.core.computation import expressions as expr if min_elements is None: min_elements = expr._MIN_ELEMENTS olduse = expr._USE_NUMEXPR oldmin = expr._MIN_ELEMENTS expr.set_use_numexpr(use) expr._MIN_ELEMENTS = min_elements yield expr._MIN_ELEMENTS = oldmin expr.set_use_numexpr(olduse) def test_parallel(num_threads=2, kwargs_list=None): """Decorator to run the same function multiple times in parallel. Parameters ---------- num_threads : int, optional The number of times the function is run in parallel. kwargs_list : list of dicts, optional The list of kwargs to update original function kwargs on different threads. Notes ----- This decorator does not pass the return value of the decorated function. Original from scikit-image: https://github.com/scikit-image/scikit-image/pull/1519 """ assert num_threads > 0 has_kwargs_list = kwargs_list is not None if has_kwargs_list: assert len(kwargs_list) == num_threads import threading def wrapper(func): @wraps(func) def inner(*args, **kwargs): if has_kwargs_list: update_kwargs = lambda i: dict(kwargs, **kwargs_list[i]) else: update_kwargs = lambda i: kwargs threads = [] for i in range(num_threads): updated_kwargs = update_kwargs(i) thread = threading.Thread(target=func, args=args, kwargs=updated_kwargs) threads.append(thread) for thread in threads: thread.start() for thread in threads: thread.join() return inner return wrapper class SubclassedSeries(Series): _metadata = ["testattr", "name"] @property def _constructor(self): return SubclassedSeries @property def _constructor_expanddim(self): return SubclassedDataFrame class SubclassedDataFrame(DataFrame): _metadata = ["testattr"] @property def _constructor(self): return SubclassedDataFrame @property 
def _constructor_sliced(self): return SubclassedSeries class SubclassedSparseSeries(pd.SparseSeries): _metadata = ["testattr"] @property def _constructor(self): return SubclassedSparseSeries @property def _constructor_expanddim(self): return SubclassedSparseDataFrame class SubclassedSparseDataFrame(pd.SparseDataFrame): _metadata = ["testattr"] @property def _constructor(self): return SubclassedSparseDataFrame @property def _constructor_sliced(self): return SubclassedSparseSeries class SubclassedCategorical(Categorical): @property def _constructor(self): return SubclassedCategorical @contextmanager def set_timezone(tz): """Context manager for temporarily setting a timezone. Parameters ---------- tz : str A string representing a valid timezone. Examples -------- >>> from datetime import datetime >>> from dateutil.tz import tzlocal >>> tzlocal().tzname(datetime.now()) 'IST' >>> with set_timezone('US/Eastern'): ... tzlocal().tzname(datetime.now()) ... 'EDT' """ import os import time def setTZ(tz): if tz is None: try: del os.environ["TZ"] except KeyError: pass else: os.environ["TZ"] = tz time.tzset() orig_tz = os.environ.get("TZ") setTZ(tz) try: yield finally: setTZ(orig_tz) def _make_skipna_wrapper(alternative, skipna_alternative=None): """Create a function for calling on an array. Parameters ---------- alternative : function The function to be called on the array with no NaNs. Only used when 'skipna_alternative' is None. skipna_alternative : function The function to be called on the original array Returns ------- skipna_wrapper : function """ if skipna_alternative: def skipna_wrapper(x): return skipna_alternative(x.values) else: def skipna_wrapper(x): nona = x.dropna() if len(nona) == 0: return np.nan return alternative(nona) return skipna_wrapper def convert_rows_list_to_csv_str(rows_list): """ Convert list of CSV rows to single CSV-formatted string for current OS. This method is used for creating expected value of to_csv() method. 
    Parameters
    ----------
    rows_list : list
        The list of string. Each element represents the row of csv.

    Returns
    -------
    expected : string
        Expected output of to_csv() in current OS
    """
    # to_csv writes with the platform line terminator and also terminates
    # the final row, hence the trailing separator appended after the join.
    sep = os.linesep
    expected = sep.join(rows_list) + sep
    return expected
# codeparrot/github-code-clean  -- dataset-chunk separator artifact, not part of either source file
# encoding: latin2 """Algorithm utilities G{packagetree core} """ __author__ = "Juan C. Duque" __credits__ = "Copyright (c) 2009-11 Juan C. Duque" __license__ = "GPL" __version__ = "1.0.0" __maintainer__ = "RiSE Group" __email__ = "contacto@rise-group.org" import copy import numpy import dist2Regions import objFunctions import distanceFunctions import selectionTypeFunctions class AreaManager: """ This class contains operations at areal level, including the generation of instances of areas, a wide range of area2area and area2region distance functions. """ def __init__(self, w, y, distanceType="EuclideanSquared", variance="false"): """ @type w: dictionary @param w: With B{key} = area Id, and B{value} = list with Ids of neighbours of each area. @type y: dictionary @param y: With B{key} = area Id, and B{value} = list with attribute values. @type distanceType: string @keyword distanceType: Function to calculate the distance between areas. Default value I{distanceType = 'EuclideanSquared'}. @type variance: boolean @keyword variance: Boolean indicating if the data have variance matrix. Default value I{variance = 'false'}. """ self.y = y self.areas = {} self.noNeighs = set([]) self.variance = variance self.distanceType = distanceType self.createAreas(w, y) self.distanceStatDispatcher = dist2Regions.distanceStatDispatcher def createAreas(self, w, y): """ Creates instances of areas based on a sparse weights matrix (w) and a data array (y). 
""" n = len(self.y) self.distances = {} noNeighs = [] for key in range(n): data = y[key] try: neighbours = w[key] except: neighbours = {} w[key] = {} if len(w[key]) == 0: self.noNeighs = self.noNeighs | set([key]) a = AreaCl(key, neighbours, data, self.variance) self.areas[key] = a if len(self.noNeighs) > 0: print "Disconnected areas neighs: ", list(self.noNeighs) def returnDistance2Area(self, area, otherArea): """ Returns the distance between two areas """ try: if area.id < otherArea.id: dist = self.distances[(area.id, otherArea.id)] else: dist = self.distances[(otherArea.id, area.id)] except: dist = 0.0 return dist def getDataAverage(self, areaList, dataIndex): """ Returns the attribute centroid of a set of areas """ dataAvg = len(dataIndex) * [0.0] for aID in areaList: i = 0 for index in dataIndex: dataAvg[i] += self.areas[aID].data[index] /len(areaList) i += 1 return dataAvg def getDistance2Region(self, area, areaList, distanceStat="Centroid", weights=[], indexData=[]): """ Returns the distance from an area to a region (defined as a list of area IDs) """ if isinstance(distanceStat,str): if len(indexData) == 0: indexData = range(len(area.data)) return self.distanceStatDispatcher[distanceStat](self,area, areaList, indexData) else: distance = 0.0 i = 0 for dS in distanceStat: if len(indexData) == 0: indexDataDS = range(len(area.data)) else: indexDataDS = indexData[i] if len(weights) > 0: distance += weights[i] self.distanceStatDispatcher[dS](self,area, areaList, indexDataDS) else: distance += self.distanceStatDispatcher[dS](self,area, areaList, indexDataDS) i += 1 return distance def getDistance2AreaMin(self, area, areaList): """ Return the ID of the area whitin a region that is closest to an area outside the region """ areaMin = -1; distanceMin = 1e300 for aID in areaList: if self.distances[area.id, aID] < distanceMin: areaMin = aID distanceMin = self.distances[area.id, aID] return areaMin def checkFeasibility(self, solution): """ Checks feasibility of a 
        candidate solution
        """
        n = len(solution)
        # Group area indices by the region label assigned to them in
        # ``solution`` (solution[i] is area i's region id).
        regions = {}
        for i in range(n):
            try:
                regions[solution[i]] = regions[solution[i]] + [i]
            except:
                regions[solution[i]] = [i]
        feasible = 1
        r = len(regions)
        for i in range(r):
            newRegion = set([])
            areas2Eval = regions[i]
            if len(areas2Eval) > 1:
                # Collect every area of the region that has at least one
                # neighbour inside the same region.
                for area in areas2Eval:
                    newRegion = newRegion | (set(self.areas[area].neighs) & set(areas2Eval))
                # NOTE(review): this only verifies that each area has an
                # in-region neighbour (one-hop coverage). A region made of
                # two separate connected clumps would still pass -- confirm
                # whether full graph connectivity (e.g. a BFS from one
                # seed) is the intended contract.
                if set(areas2Eval) - newRegion != set([]):
                    feasible = 0
                    break
        return feasible


class BasicMemory:
    """
    Keeps the minimum amount of information about a given solution. It
    keeps the Objective function value (self.objInfo) and the region each
    area has been assigned to (self.regions)
    """

    def __init__(self, objInfo=99999999E10, regions={}):
        """
        @type objInfo: float
        @keyword objInfo: Objective function value.

        @type regions: list
        @keyword regions: list of Region's IDs values.
        """
        # NOTE(review): ``regions={}`` is a mutable default shared by every
        # instance constructed without an explicit argument; callers that
        # mutate self.regions in place would affect each other.
        self.objInfo = objInfo
        self.regions = regions

    def updateBasicMemory(self, rm):
        """
        Updates BasicMemory when a solution is modified.
        """
        # rm is a RegionMaker-like object; copy its current score and the
        # area->region assignment it reports.
        self.objInfo = rm.objInfo
        self.regions = rm.returnRegions()


class ExtendedMemory(BasicMemory):
    """
    This memory is designed to allow the algorithm to go back to a given
    solution (different from the current solution). It gives to
    RegionManager all the information that must be available in order to
    continue an iteration process.
    """

    def __init__(self, objInfo=99999999E10, area2Region={}, region2Area={},
                 intraBorderingAreas={}):
        """
        @type objInfo: float
        @keyword objInfo: Objective function value

        @type area2region: dictionary
        @keyword area2region: Region to which the area is in.

        @type region2area: dictionary
        @keyword region2area: areas within the region.

        @type intraBorderingAreas: dictionary
        @keyword intraBorderingAreas: areas in the border of the region.
        """
        # NOTE(review): same mutable-default caveat as BasicMemory applies
        # to the three dict parameters above.
        BasicMemory.__init__(self, objInfo, {})
        self.area2Region = area2Region
        self.region2Area = region2Area
        self.intraBorderingAreas = intraBorderingAreas

    def updateExtendedMemory(self, rm):
        """
        Updates ExtendedMemory when a solution is modified.
""" BasicMemory.updateBasicMemory(self, rm) self.area2Region = rm.area2Region self.region2Area = rm.region2Area self.intraBorderingAreas = rm.intraBorderingAreas class RegionMaker: """ This class deals with a large amount of methods required during both the construction and local search phases. This class takes the area instances and coordinate them during the solution process. It also send information to Memory when needed. """ def __init__(self, am, pRegions=2, initialSolution=[], seedSelection = "kmeans", distanceType = "EuclideanSquared", distanceStat = "Centroid", selectionType = "Minimum", alpha = 0.2, numRegionsType = "Exogenous", objectiveFunctionType = "SS", threshold = 0.0, weightsDistanceStat = [], weightsObjectiveFunctionType = [], indexDataStat = [], indexDataOF = []): """ @type am: AreaManager @param am: Area manager object. @type pRegions: integer @keyword pRegions: Number of regions in scheme @type seeds: list @keyword seeds: List of area IDs for initial seeds. @type distanceType: string @keyword distanceType: Type of distance to be used, by default "EuclideanSquared" @type distanceStat: string @keyword distanceStat: Type of conversion used for summarizing distance, by defaults "Average" @type selectionType: string @keyword selectionType: Type of selection criterion for construction phase, by defaults "Minimum" @type alpha: float. @keyword alpha: float equal or between the interval [0,1]; for GRASP selection only. 
@type numRegionsType: strigng @keyword numRegionsType: Type of constructive method (Exogenous, EndogenousThreshold, EndogenousRange), by default "Exogenous" @type objectiveFunctionType: string @keyword objectiveFunctionType: Methosd to calculate the objective function, by default "Total" @type threshold: float @keyword threshold: Minimum population threshold to be satisfied for each region # FIXME: estos atributos no se que son y lee porfa las funciones de esta clase que no estan muy completas las descripciones pues no sabia bien que hacian algunas. @type weightsDistanceStat: list @keyword weightsDistanceStat: @type weightsObjectiveFunctionStat: list @keyword weightsObjectiveFunctionStat: @type indexDataStat = list @keyword indexDataStat: @type indexDataOf = list @keyword indexDataOf: """ self.am = am self.areas = copy.deepcopy(am.areas) self.distanceType = distanceType self.distanceStat = distanceStat self.weightsDistanceStat = weightsDistanceStat self.indexDataStat = indexDataStat self.weightsObjectiveFunctionType = weightsObjectiveFunctionType self.indexDataOF = indexDataOF self.selectionType = selectionType self.objectiveFunctionType = objectiveFunctionType self.n = len(self.areas) self.unassignedAreas = self.areas.keys() self.assignedAreas = [] self.area2Region = {} self.region2Area = {} self.potentialRegions4Area = {} self.intraBorderingAreas = {} self.candidateInfo = {} self.externalNeighs = set([]) self.alpha = alpha self.numRegionsType = numRegionsType self.objectiveFunctionTypeDispatcher = objFunctions.objectiveFunctionTypeDispatcher self.selectionTypeDispatcher = selectionTypeFunctions.selectionTypeDispatcher self.neighSolutions = {(0,0): 9999} self.regionMoves = set([]) self.distances = {} self.NRegion = [] self.N = 0 self.data = {} self.objInfo = -1 self.assignAreasNoNeighs() # PREDEFINED NUMBER OF REGIONS if self.numRegionsType == "Exogenous": if initialSolution == []: self.pRegions = pRegions seeds = self.kmeansInit() self.setSeeds(seeds) c = 0 while 
len(self.unassignedAreas) != 0: self.constructRegions() c += 1 self.objInfo = self.getObj() elif initialSolution != []: self.pRegions = len(numpy.unique(initialSolution)) seeds = [] for s in numpy.unique(initialSolution): seeds.append(initialSolution.index(s)) self.setSeeds(seeds) regions2create = dict.fromkeys(range(len(seeds))) c = 0 for i in initialSolution: try: regions2create[i].append(c) except: regions2create[i] = [c] c += 1 c = 0 for i in regions2create.keys(): self.unassignedAreas = regions2create[i][1:] while len(self.unassignedAreas) != 0: self.constructRegions(filteredCandidates=self.unassignedAreas, filteredReg=i) c += 1 self.objInfo = self.getObj() # NUMBER OF REGIONS IS ENDOGENOUS WITH A THRESHOLD VALUE if self.numRegionsType == "EndogenousThreshold": self.constructionStage = "growing" try: self.areas[areas.keys()[0]].thresholdVar except: self.extractThresholdVar() self.regionalThreshold = threshold c = 0 self.feasibleRegions = {} self.regionValue = {} seeds = [] for aID in self.areas: if self.areas[aID].thresholdVar >= self.regionalThreshold: seed = aID seeds = seeds + [seed] self.regionValue[c] = self.areas[seed].thresholdVar self.feasibleRegions[c] = [seed] self.removeRegionAsCandidate() c += 1 self.setSeeds(seeds) while len(self.unassignedAreas) != 0: numpy.random.shuffle(self.unassignedAreas) vals = [] for index in self.unassignedAreas: vals += [self.areas[index].thresholdVar] seed = self.unassignedAreas[0] self.setSeeds([seed], c) self.regionValue[c] = self.areas[seed].thresholdVar if self.regionValue[c] >= self.regionalThreshold: self.feasibleRegions[c] = [seed] self.removeRegionAsCandidate() c += 1 else: feasibleThreshold = 1 while self.regionValue[c] < self.regionalThreshold: self.addedArea = -1 try: self.constructRegions() self.regionValue[c] += self.areas[self.addedArea].thresholdVar except: feasibleThreshold = 0 break if feasibleThreshold == 1: self.feasibleRegions[c] = self.region2Area[c] self.removeRegionAsCandidate() c += 1 # NUMBER OF 
REGIONS IS ENDOGENOUS WITH A RANGE VALUE if self.numRegionsType == "EndogenousRange": self.constructionStage = "growing" # there are two values for constructionStage: "growing" and "enclaves" try: self.areas[areas.keys()[0]].thresholdVar except: self.extractThresholdVar() self.regionalThreshold = threshold c = 0 self.feasibleRegions = {} while len(self.unassignedAreas) != 0: # select seed numpy.random.shuffle(self.unassignedAreas) seed = self.unassignedAreas[0] self.setSeeds([seed],c) # regionRange contains the current range per region # regionalThreshold is the predefined threshold value self.regionRange = {} maxValue = self.areas[seed].thresholdVar minValue = self.areas[seed].thresholdVar currentRange = maxValue - minValue self.regionRange[c] = currentRange # grow region if possible stop = 0 while stop == 0: upplim = maxValue + self.regionalThreshold - currentRange lowlim = minValue - self.regionalThreshold + currentRange feasibleNeigh = 0 toRemove = [] for ext in self.externalNeighs: if self.areas[ext].thresholdVar <= upplim and self.areas[ext].thresholdVar >= lowlim: feasibleNeigh = 1 if self.areas[ext].thresholdVar > upplim or self.areas[ext].thresholdVar < lowlim: toRemove.append(ext) self.toRemove = toRemove if feasibleNeigh == 0: stop = 1 if feasibleNeigh == 1: try: self.constructRegions() if self.areas[self.addedArea].thresholdVar > maxValue: maxValue = self.areas[self.addedArea].thresholdVar if self.areas[self.addedArea].thresholdVar < minValue: minValue = self.areas[self.addedArea].thresholdVar currentRange = maxValue - minValue self.regionRange[c] = currentRange except: stop = 1 self.feasibleRegions[c] = self.region2Area[c] self.removeRegionAsCandidate() c += 1 self.getIntraBorderingAreas() def kmeansInit(self): y = self.am.y n = len(y) distances = numpy.ones(n) total = sum(distances) probabilities = map(lambda x: x / float(total), distances) seeds = [] for k in range(self.pRegions): random = numpy.random.uniform(0, 1) find = False acum = 0 cont = 0 
while find == False: inf = acum sup = acum + probabilities[cont] if inf <= random <= sup: find = True seeds += [cont] for area in self.am.areas: distancei = min(map(lambda x: self.am.areas[area].returnDistance2Area(self.am.areas[x], distanceType=self.distanceType), seeds)) distances[area] = distancei total = sum(distances) probabilities = map(lambda x: x / float(total), distances) else: cont += 1 acum = sup return seeds def extractThresholdVar(self): """ Separate aggregation variables (data) from the variable selected to satisfy a threshold value (thresholdVar) """ self.totalThresholdVar = 0.0 for areaId in self.areas.keys(): self.areas[areaId].thresholdVar = self.areas[areaId].data[-1] self.areas[areaId].data = self.areas[areaId].data[0: -1] self.totalThresholdVar += self.areas[areaId].thresholdVar def removeRegionAsCandidate(self): """ Remove a region from candidates """ for i in self.candidateInfo.keys(): a, r = i if r in self.feasibleRegions: self.candidateInfo.pop(i) def returnRegions(self): """ Return regions created """ areasId = self.area2Region.keys() areasId = numpy.sort(areasId).tolist() return [self.area2Region[area] for area in areasId] def resetNow(self): """ Reset all variables """ self.unassignedAreas = self.areas.keys() self.assignedAreas = [] self.area2Region = {} self.region2Area = {} self.potentialRegions4Area = {} self.intraBorderingAreas = {} self.candidateInfo = {} self.externalNeighs = set([]) self.neighsMinusAssigned = set([]) def setSeeds(self, seeds, c=0): """ Sets the initial seeds for clustering """ if self.numRegionsType == "Exogenous" and len(seeds) <= self.pRegions: idx = range(self.n) didx = list((set(idx) - set(seeds)) - self.am.noNeighs) numpy.random.shuffle(didx) self.seeds = seeds + didx[0:(self.pRegions - len(seeds))] else: self.seeds = seeds for seed in self.seeds: self.NRegion += [0] self.assignSeeds(seed, c) c += 1 def assignAreaStep1(self, areaID, regionID): """ Assgin an area to a region """ a = self.areas[areaID] neighs = 
a.neighs try: self.region2Area[regionID].append(areaID) if self.objectiveFunctionType == "GWalt": try: self.NRegion[regionID] += a.data[0] for index in range(1,len(a.data)): self.data[regionID][index - 1] += a.data[index] * a.data[0] except: self.NRegion[regionID] = a.data[0] for index in range(1, len(a.data)): self.data[regionID][index - 1] = a.data[index] * a.data[0] self.N += a.data[0] except: self.region2Area[regionID] = [areaID] if self.objectiveFunctionType == "GWalt": self.NRegion[regionID] = a.data[0] for index in range(1, len(a.data)): if index == 1: self.data[regionID] = [a.data[index] * a.data[0]] else: self.data[regionID] += [a.data[index] * a.data[0]] self.N += a.data[0] self.area2Region[areaID] = regionID try: aid = self.unassignedAreas.remove(areaID) except: pass self.assignedAreas.append(areaID) setNeighs = set(neighs) setAssigned = set(self.assignedAreas) self.oldExternal = self.externalNeighs self.externalNeighs = (self.externalNeighs | setNeighs) - setAssigned self.newExternal = self.externalNeighs - self.oldExternal self.neighsMinusAssigned = setNeighs - setAssigned def assignSeeds(self, areaID, regionID): """ Assign an area to a region and updates potential regions for the neighs Parameters """ self.assignAreaStep1(areaID, regionID) for neigh in self.neighsMinusAssigned: try: self.potentialRegions4Area[neigh] = self.potentialRegions4Area[neigh]|set([regionID]) except: self.potentialRegions4Area[neigh] = set([regionID]) try: self.potentialRegions4Area.pop(areaID) except: pass self.changedRegion = 'null' self.newExternal = self.potentialRegions4Area.keys() def assignAreasNoNeighs(self): """ Assign to the region "-1" for the areas without neighbours """ noNeighs = list(self.am.noNeighs) nr = -1 for areaID in noNeighs: self.area2Region[areaID] = nr try: aid = self.unassignedAreas.remove(areaID) except: pass self.assignedAreas.append(areaID) setAssigned = set(self.assignedAreas) nr = nr - 1 def assignArea(self, areaID, regionID): """ Assign an area 
to a region and updates potential regions for neighs """ self.changedRegion = regionID self.addedArea = areaID self.assignAreaStep1(areaID, regionID) for neigh in self.neighsMinusAssigned: try: self.potentialRegions4Area[neigh] = self.potentialRegions4Area[neigh]|set([regionID]) except: self.potentialRegions4Area[neigh] = set([regionID]) try: self.potentialRegions4Area.pop(areaID) except: pass def returnBorderingAreas(self, regionID): """ Returns bordering areas of a region """ areas2Eval = self.returnRegion2Area(regionID) borderingAreas = set([]) for area in areas2Eval: try: if len(self.intraBorderingAreas[area]) > 0: borderingAreas = borderingAreas | set([area]) except: pass return borderingAreas def returnIntraBorderingAreas(self): """ Returns intrabordering areas """ return self.intraBorderingAreas def getIntraBorderingAreas(self): """ Gets the intrabordering areas """ self.intraBorderingAreas = {} if self.numRegionsType == "Exogenous": nr = range(self.pRegions) else: nr = self.feasibleRegions for regionID in nr: setNeighsNoRegion = set([]) try: areas2Eval = self.region2Area[regionID] except: areas2Eval = [] for area in areas2Eval: setNeighsNoRegion = setNeighsNoRegion | (set(self.areas[area].neighs) - set(areas2Eval)) for neigh in list(setNeighsNoRegion): try: self.intraBorderingAreas[neigh]=self.intraBorderingAreas[neigh]|set([regionID]) except: self.intraBorderingAreas[neigh]=set([regionID]) def returnRegion2Area(self, regionID): """ Return the areas of a region """ return self.region2Area[regionID] def constructRegions(self, filteredCandidates=-99, filteredReg=-99): """ Construct potential regions per area """ lastRegion = 0 for areaID in self.potentialRegions4Area.keys(): a = self.areas[areaID] regionIDs = list(self.potentialRegions4Area[areaID]) for region in regionIDs: if (self.numRegionsType != "Exogenous" and self.constructionStage == "growing" and region in self.feasibleRegions): # once a region reaches the threshold its grow is rejected until the # 
assignation of enclaves pass else: if filteredCandidates == -99: if areaID not in self.newExternal and region != self.changedRegion: lastRegion = region pass else: if self.selectionType != "FullRandom": areasIdsIn = self.region2Area[region] areasInNow = [ self.areas[aID] for aID in areasIdsIn ] regionDistance = self.am.getDistance2Region(self.areas[areaID], self.region2Area[region], distanceStat = self.distanceStat, weights = self.weightsDistanceStat, indexData = self.indexDataStat) else: regionDistance = 0.0 self.candidateInfo[(areaID, region)] = regionDistance elif filteredCandidates != -99 and areaID in filteredCandidates and region == filteredReg: areasIdsIn = self.region2Area[region] areasInNow = [ self.areas[aID] for aID in areasIdsIn ] regionDistance = self.am.getDistance2Region(self.areas[areaID], self.region2Area[region], distanceStat = self.distanceStat, weights = self.weightsDistanceStat, indexData = self.indexDataStat) self.candidateInfo[(areaID,region)] = regionDistance else: pass if len(self.candidateInfo) == 0: self.changedRegion = lastRegion if self.numRegionsType == "EndogenousRange": self.filterCandidate(self.toRemove) self.selectionTypeDispatcher[self.selectionType](self) def filterCandidate(self,removeCandidate=[]): """ Filter candidates """ if len(removeCandidate) > 0: toRemove = [] for id in removeCandidate: for cand,reg in self.candidateInfo.keys(): if cand == id: toRemove.append((cand,reg)) for remov in toRemove: self.candidateInfo.pop(remov) def graspList(self, xList, alpha=0.0): """ Return random index of values with specified range. 
""" maxX = max(xList) minX = min(xList) xRangeMax = minX + ((maxX - minX) * alpha) candidates = [i <= xRangeMax for i in xList] indices = indexMultiple(candidates, 1) nCandidates = len(indices) idx = range(nCandidates) numpy.random.shuffle(idx) random = idx[0] index4Grasp = indices[random] return index4Grasp def getObjective(self, region2AreaDict): """ Return the value of the objective function from regions to area dictionary """ if (type(self.objectiveFunctionType) == type('objectiveFunctionType')): if len(self.indexDataOF) == 0: indexData = range(len(self.areas[0].data)) else: indexData = self.indexDataOF return self.objectiveFunctionTypeDispatcher[self.objectiveFunctionType](self, region2AreaDict, indexData) else: distance = 0.0 i = 0 for oFT in self.objectiveFunctionType: if len(self.indexDataOF) == 0: indexData = range(len(self.areas[0].data)) else: indexData = self.indexDataOF[i] if len(self.weightsObjectiveFunctionType) > 0: distance += self.weightsObjectiveFunctionType[i] * self.objectiveFunctionTypeDispatcher[oFT](self, region2AreaDict, indexData) else: distance += self.objectiveFunctionTypeDispatcher[oFT](self, region2AreaDict, indexData) i += 1 return distance def getObjectiveFast(self, region2AreaDict, modifiedRegions=[]): """ Return the value of the objective function from regions to area dictionary """ if (type(self.objectiveFunctionType) == type('objectiveFunctionType')): if len(self.indexDataOF) == 0: indexData = range(len(self.areas[0].data)) else: indexData = self.indexDataOF return self.objectiveFunctionTypeDispatcher[self.objectiveFunctionType+'f'](self, region2AreaDict, modifiedRegions, indexData) else: distance = 0.0 i = 0 for oFT in self.objectiveFunctionType: if len(self.indexDataOF) == 0: indexData = range(len(self.areas[0].data)) else: indexData = self.indexDataOF[i] if len(self.weightsObjectiveFunctionType) > 0: distance += self.weightsObjectiveFunctionType[i] * self.objectiveFunctionTypeDispatcher[oFT](self, region2AreaDict, indexData) 
else: distance += self.objectiveFunctionTypeDispatcher[oFT](self, region2AreaDict, indexData) i += 1 return distance def getLambda(self): """ # FIXME: No se que hace """ L = numpy.matrix(numpy.identity(self.pRegions)) for r in range(self.pRegions): L[r, r] = 1.0 * self.NRegion[r] / self.N return L def getB(self): """ Return matrix of parameters of all regions """ B = numpy.matrix(numpy.zeros(len(self.data[0]) * self.pRegions)).T index = 0 for r in range(self.pRegions): for i in range(len(self.data[0])): B[index, 0] = self.data[r][i] / self.NRegion[r] index += 1 return B def getY(self): """ Return matrix of the average variance-covariance of all regions """ Y = numpy.matrix(numpy.identity(len(self.data[0]))) centroids = {} for r in range(self.pRegions): centroids[r] = calculateCentroid([self.areas[aID] for aID in self.region2Area[r]]) for r in range(self.pRegions): Y += centroids[r].var * numpy.power(self.NRegion[r] / self.N, 2) return Y def getH(self): """ Return composite matrix """ E = numpy.matrix(numpy.ones((1, self.pRegions, self.pRegions))) L = self.getLambda() H = L - L * E * L return H def getObj(self): """ Return the value of the objective function """ if self.objInfo < 0: self.calcObj() return self.objInfo def calcObj(self): """ Calculate the value of the objective function """ self.objInfo = self.getObjective(self.region2Area) def recalcObj(self, region2AreaDict, modifiedRegions=[]): """ Re-calculate the value of the objective function """ if "objDict" in dir(self): obj = self.getObjectiveFast(region2AreaDict, modifiedRegions) else: obj = self.getObjective(region2AreaDict) return obj def checkFeasibility(self, regionID, areaID, region2AreaDict): """ Check feasibility from a change region (remove an area from a region) """ areas2Eval = list(region2AreaDict[regionID]) areas2Eval.remove(areaID) seedArea = areas2Eval[0] newRegion = (set([seedArea]) | set(self.areas[seedArea].neighs)) & set(areas2Eval) areas2Eval.remove(seedArea) flag = 1 newAdded = newRegion 
- set([seedArea]) newNeighs = set([]) while flag: for area in newAdded: newNeighs = newNeighs | (((set(self.areas[area].neighs) & set(region2AreaDict[regionID])) - set([areaID])) - newRegion) areas2Eval.remove(area) newNeighs = newNeighs - newAdded newAdded = newNeighs newRegion = newRegion | newAdded if len(areas2Eval) == 0: feasible = 1 flag = 0 break elif newNeighs == set([]) and len(areas2Eval) > 0: feasible = 0 flag = 0 break return feasible def calculateRegionValueThreshold(self): """ # FIXME: No se que hace """ if self.numRegionsType == "Exogenous": nr = range(self.pRegions) else: nr = range(len(self.region2Area.keys())) for regionID in nr: self.regionValue[regionID] = 0 areas2Eval = self.region2Area[regionID] for area in areas2Eval: self.regionValue[regionID] += self.areas[area].thresholdVar def improvingCandidates(self): """ Select solutions that improve the current objective function. """ intraCopy = copy.deepcopy(self.intraBorderingAreas) region2AreaCopy = copy.deepcopy(self.region2Area) area2RegionCopy = copy.deepcopy(self.area2Region) self.neighSolutions = {} for area in intraCopy.keys(): regionIn = self.area2Region[area] regions4Move = list(self.intraBorderingAreas[area]) if (len(self.region2Area[regionIn]) > 1): for region in regions4Move: self.swapArea(area, region, region2AreaCopy, area2RegionCopy) obj = self.recalcObj(region2AreaCopy) self.swapArea(area, regionIn, region2AreaCopy, area2RegionCopy) if obj < self.objInfo: f = self.checkFeasibility(regionIn, area, self.region2Area) if f == 1: if self.numRegionsType == "Exogenous": self.neighSolutions[(area, region)] = obj elif self.numRegionsType == "EndogenousThreshold": if self.regionValue[region] >= self.regionalThreshold and self.regionValue[regionIn] >= self.regionalThreshold: self.neighSolutions[(area,region)] = obj def allCandidates(self): """ Select neighboring solutions. 
""" intraCopy = copy.deepcopy(self.intraBorderingAreas) region2AreaCopy = copy.deepcopy(self.region2Area) area2RegionCopy = copy.deepcopy(self.area2Region) self.neighSolutions = {} for area in intraCopy.keys(): regionIn = self.area2Region[area] regions4Move = list(self.intraBorderingAreas[area]) if (len(self.region2Area[regionIn]) > 1): for region in regions4Move: f = self.checkFeasibility(regionIn, area, self.region2Area) if f == 1: if self.numRegionsType == "Exogenous": self.swapArea(area, region, region2AreaCopy, area2RegionCopy) modifiedRegions = [region,regionIn] obj = self.recalcObj(region2AreaCopy, modifiedRegions) self.neighSolutions[(area,region)] = obj self.swapArea(area, regionIn, region2AreaCopy, area2RegionCopy) elif self.numRegionsType == "EndogenousThreshold": self.swapArea(area, region, region2AreaCopy, area2RegionCopy) if self.regionValue[region] >= self.regionalThreshold and self.regionValue[regionIn] >= self.regionalThreshold: obj = self.recalcObj(region2AreaCopy) self.neighSolutions[(area, region)] = obj self.swapArea(area, regionIn, region2AreaCopy, area2RegionCopy) def allMoves(self): """ Select all posible moves. 
""" moves = [] for area in self.intraBorderingAreas: regionIn = self.area2Region[area] regions4Move = list(self.intraBorderingAreas[area]) if len(self.region2Area[regionIn]) > 1: for region in regions4Move: moves = moves + [(area, region)] return moves def swapArea(self, area, newRegion, region2AreaDict, area2RegionDict): """ Removed an area from a region and appended it to another one """ oldRegion = area2RegionDict[area] region2AreaDict[oldRegion].remove(area) region2AreaDict[newRegion].append(area) area2RegionDict[area] = newRegion if self.objectiveFunctionType == "GWalt": a = self.areas[area] self.NRegion[newRegion] += a.data[0] self.NRegion[oldRegion] -= a.data[0] for index in range(1, len(a.data)): self.data[newRegion][index - 1] += a.data[index] * a.data[0] for index in range(1, len(a.data)): self.data[oldRegion][index-1] -= a.data[index] * a.data[0] if self.numRegionsType == "EndogenousThreshold": self.regionValue[newRegion] += self.areas[area].thresholdVar self.regionValue[oldRegion] -= self.areas[area].thresholdVar def greedyMove(self, typeGreedy="random"): """ Conduct a solution to the best posible with greedy moves """ flag = 1 self.round = 0 while flag: self.improvingCandidates() self.round = 1 if len(self.neighSolutions.keys()) == 0: flag = 0 else: if typeGreedy == "exact": sorted = sortedKeys(self.neighSolutions) move = sorted[numpy.random.randint(0, len(sorted))] area, region = move else: values = self.neighSolutions.values() sorted = sortedKeys(self.neighSolutions) minVal = min(self.neighSolutions.values()) indicesMin = indexMultiple(values, minVal) nInd = len(indicesMin) idx = range(nInd) numpy.random.shuffle(idx) minIndex = indicesMin[idx[0]] area,region = self.neighSolutions.keys()[minIndex] self.moveArea(area, region) # self.objInfo = minVal self.regions = self.returnRegions() def updateTabuList(self,newValue,aList,endInd): """ Add a new value to the tabu list. 
""" return [newValue] + aList[0:endInd-1] def tabuMove(self, tabuLength=5, convTabu=5, typeTabu="exact"): """ Conduct a solution to the best posible with tabu search """ aspireOBJ = self.objInfo currentOBJ = self.objInfo aspireRegions = self.returnRegions() region2AreaAspire = copy.deepcopy(self.region2Area) area2RegionAspire = copy.deepcopy(self.area2Region) currentRegions = aspireRegions bestAdmisable = 9999999.0 tabuList = numpy.zeros(tabuLength) tabuList = tabuList.tolist() cBreak = [] c = 1 self.round = 0 resList = [] epsilon = 1e-10 while c <= convTabu: # print "regions: ",self.returnRegions(), self.objInfo if typeTabu == "exact": self.objDict = objFunctions.makeObjDict(self) self.allCandidates() #print "soluciones vecinas",self.neighSolutions else: moves = self.allMoves() if (typeTabu == "exact" and len(self.neighSolutions) == 0) or (typeTabu == "random" and len(moves) == 0): c += convTabu else: if typeTabu == "exact": sorted = sortedKeys(self.neighSolutions) end = len(sorted) else: end = len(moves) run = 0 while run < end: if typeTabu == "exact": move = sorted[run] area,region = move obj4Move = self.neighSolutions[move] candidate = 1 # print "** selected move (area,region)",move else: candidate = 0 region2AreaCopy = copy.deepcopy(self.region2Area) area2RegionCopy = copy.deepcopy(self.area2Region) while (candidate == 0 and len(moves) > 0): move = moves[numpy.random.randint(0, len(moves))] moves.remove(move) area, region = move run += 1 regionIn = self.area2Region[area] f = self.checkFeasibility(regionIn, area, self.region2Area) if f == 1: if self.numRegionsType == "Exogenous": self.swapArea(area, region, region2AreaCopy, area2RegionCopy) obj4Move = self.recalcObj(region2AreaCopy) self.swapArea(area, regionIn, region2AreaCopy, area2RegionCopy) candidate = 1 elif self.numRegionsType == "EndogenousThreshold": self.swapArea(area, region, region2AreaCopy, area2RegionCopy) if self.regionValue[region] >= self.regionalThreshold and self.regionValue[regionIn] >= 
self.regionalThreshold: obj4Move = self.recalcObj(region2AreaCopy) candidate = 1 self.swapArea(area, regionIn, region2AreaCopy, area2RegionCopy) tabuCount = 0 if candidate == 1: # print "--- tabu List:", tabuList if move in tabuList: # print "move is in tabu list" if (aspireOBJ-obj4Move) > epsilon: # print "CASE1: improves aspirational: ",aspireOBJ,obj4Move oldRegion = self.area2Region[area] tabuList = self.updateTabuList((area,oldRegion), tabuList, tabuLength) self.moveArea(area, region) self.objInfo = obj4Move aspireOBJ = obj4Move currentOBJ = obj4Move aspireRegions = self.returnRegions() region2AreaAspire = copy.deepcopy(self.region2Area) area2RegionAspire = copy.deepcopy(self.area2Region) currentRegions = aspireRegions bestAdmisable = obj4Move cBreak.append(c) c = 1 run = end resList.append([obj4Move, aspireOBJ]) else: # print "CASE 2: does not improve aspirational: ",aspireOBJ,obj4Move run += 1 tabuCount += 1 tabuList = self.updateTabuList((-1, 0), tabuList, tabuLength) if tabuCount == end: c = convTabu else: # print "move is NOT in tabu list" if (aspireOBJ-obj4Move) > epsilon: # print "CASE 3: improves aspirational: ",aspireOBJ,obj4Move oldRegion = self.area2Region[area] tabuList = self.updateTabuList((area,oldRegion), tabuList, tabuLength) self.moveArea(area, region) self.objInfo = obj4Move aspireOBJ = obj4Move currentOBJ = obj4Move aspireRegions = self.returnRegions() region2AreaAspire = copy.deepcopy(self.region2Area) area2RegionAspire = copy.deepcopy(self.area2Region) currentRegions = aspireRegions bestAdmisable = obj4Move cBreak.append(c) c = 1 run = end resList.append( [obj4Move, aspireOBJ] ) else: # print "CASE 4: does not improve aspirational: ",aspireOBJ,obj4Move oldRegion = self.area2Region[area] tabuList = self.updateTabuList((area, oldRegion), tabuList, tabuLength) self.moveArea(area, region) self.objInfo = obj4Move currentOBJ = obj4Move currentRegions = self.returnRegions() bestAdmisable = obj4Move # cBreak.append(99) c += 1 run = end 
resList.append([obj4Move, aspireOBJ]) else: c += convTabu self.objInfo = aspireOBJ self.regions = aspireRegions self.region2Area = copy.deepcopy(region2AreaAspire) self.area2Region = copy.deepcopy(area2RegionAspire) # print "FINAL SOLUTION IN TABU",self.objInfo,self.regions self.resList = resList self.cBreak = cBreak def AZPImproving(self): """ """ improve = 1 while improve == 1: regions = range(0, self.pRegions) while len(regions) > 0: # step 3 if len(regions) > 1: randomRegion = numpy.random.randint(0, len(regions)) else: randomRegion = 0 region = regions[randomRegion] regions.remove(region) # step 4 borderingAreas = list(set(self.returnBorderingAreas(region)) & set(self.returnRegion2Area(region))) improve = 0 while len(borderingAreas) > 0: # step 5 randomArea = numpy.random.randint(0, len(borderingAreas)) area = borderingAreas[randomArea] borderingAreas.remove(area) posibleMove = list(self.returnIntraBorderingAreas()[area]) if len(self.region2Area[region]) >= 2: f = self.checkFeasibility(region, area, self.region2Area) else: f = 0 if f == 1: for move in posibleMove: self.swapArea(area, move, self.region2Area, self.area2Region) obj = self.recalcObj(self.region2Area) self.swapArea(area, region, self.region2Area, self.area2Region) if obj <= self.objInfo: self.moveArea(area, move) improve = 1 borderingAreas = list(set(self.returnBorderingAreas(region)) & set(self.returnRegion2Area(region))) break def AZPSA(self, alpha, temperature): """ Openshaw's Simulated Annealing for AZP algorithm """ totalMoves = 0 acceptedMoves = 0 bestOBJ = self.objInfo currentOBJ = self.objInfo bestRegions = self.returnRegions() currentRegions = self.returnRegions() region2AreaBest = copy.deepcopy(self.region2Area) area2RegionBest = copy.deepcopy(self.area2Region) improve = 1 while improve == 1: regions = range(0,self.pRegions) while len(regions) > 0: # step 3 if len(regions) > 1: randomRegion = numpy.random.randint(0, len(regions) - 1) else: randomRegion = 0 region = regions[randomRegion] 
regions.remove(region) # step 4 borderingAreas = list(set(self.returnBorderingAreas(region)) & set(self.returnRegion2Area(region))) improve = 0 while len(borderingAreas) > 0: # step 5 randomArea = numpy.random.randint(0,len(borderingAreas)) area = borderingAreas[randomArea] borderingAreas.remove(area) posibleMove = list(self.returnIntraBorderingAreas()[area]) if len(self.region2Area[region]) >= 2: f = self.checkFeasibility(region, area, self.region2Area) else: f = 0 if f == 1: for move in posibleMove: # if len(region2AreaCopy[area2RegionCopy[area]]) > 1: self.swapArea(area, move, self.region2Area, self.area2Region) obj = self.recalcObj(self.region2Area) self.swapArea(area, region, self.region2Area, self.area2Region) if obj <= bestOBJ: self.moveArea(area, move) improve = 1 self.objInfo = obj bestOBJ = obj currentOBJ = obj bestRegions = self.returnRegions() currentRegions = self.returnRegions() region2AreaBest = copy.deepcopy(self.region2Area) area2RegionBest = copy.deepcopy(self.area2Region) # print "--- Local improvement (area, region)", area, move # print "--- New Objective Function value: ", obj # step 4 borderingAreas = list(set(self.returnBorderingAreas(region)) & set(self.returnRegion2Area(region))) break else: random = numpy.random.rand(1)[0] totalMoves += 1 if (numpy.exp(-(obj - currentOBJ) / (currentOBJ * temperature))) > random: acceptedMoves += 1 self.moveArea(area, move) self.objInfo = obj currentOBJ = obj currentRegions = self.returnRegions() # print "--- NON-improving move (area, region)", area, move # print "--- New Objective Function value: ", obj # step 4 borderingAreas = list(set(self.returnBorderingAreas(region)) & set(self.returnRegion2Area(region))) break self.objInfo = bestOBJ self.region2Area = copy.deepcopy(region2AreaBest) self.area2Region = copy.deepcopy(area2RegionBest) def AZPTabuMove(self, tabuLength=5, convTabu=5): """ Tabu search algorithm for Openshaws AZP-tabu (1995) """ aspireOBJ = self.objInfo currentOBJ = self.objInfo 
aspireRegions = self.returnRegions() region2AreaAspire = copy.deepcopy(self.region2Area) area2RegionAspire = copy.deepcopy(self.area2Region) currentRegions = copy.deepcopy(aspireRegions) tabuList = numpy.zeros(tabuLength) tabuList = tabuList.tolist() cBreak = [] c = 1 self.round = 0 resList = [] epsilon = 1e-10 while c <= convTabu: self.objDict = objFunctions.makeObjDict(self) self.allCandidates() if len(self.neighSolutions) == 0: c += convTabu else: minFound = 0 neighSolutionsCopy = copy.deepcopy(self.neighSolutions) c += 1 neighNoTabuKeys = list(set(neighSolutionsCopy.keys()) - set(tabuList)) neighNoTabuDict = dict((key, neighSolutionsCopy[key]) for key in neighNoTabuKeys) if len(neighNoTabuDict) > 0: move = min(neighNoTabuDict, key = lambda x: neighNoTabuDict.get(x)) obj4Move = self.neighSolutions[move] moveNoTabu = move obj4MoveNoTabu = obj4Move if (currentOBJ - obj4Move) >= epsilon: minFound = 1 else: neighTabuKeys = list(set(neighSolutionsCopy.keys()) & set(tabuList)) neighTabuDict = dict((key, neighSolutionsCopy[key]) for key in neighTabuKeys) if len(neighTabuDict) > 0: move = min(neighTabuDict, key = lambda x: neighTabuDict.get(x)) obj4Move = self.neighSolutions[move] moveTabu = move obj4MoveTabu = obj4Move if (aspireOBJ - obj4Move) > epsilon: minFound = 1 if minFound == 1: area, region = move obj4Move = self.neighSolutions[move] oldRegion = self.area2Region[area] tabuList = self.updateTabuList((area, oldRegion), tabuList, tabuLength) self.moveArea(area, region) self.objInfo = obj4Move if (aspireOBJ - obj4Move) > epsilon: aspireOBJ = obj4Move aspireRegions = self.returnRegions() region2AreaAspire = copy.deepcopy(self.region2Area) area2RegionAspire = copy.deepcopy(self.area2Region) c = 1 currentOBJ = obj4Move currentRegions = self.returnRegions() else: move = moveNoTabu area, region = move obj4Move = self.neighSolutions[move] oldRegion = self.area2Region[area] tabuList = self.updateTabuList((area, oldRegion), tabuList, tabuLength) self.moveArea(area, region) 
                    # (continuation of the forced non-tabu move branch)
                    self.objInfo = obj4Move
                    currentOBJ = obj4Move
                    currentRegions = self.returnRegions()
        # search finished: restore the best (aspiration) solution found
        self.objInfo = aspireOBJ
        self.regions = aspireRegions
        self.region2Area = copy.deepcopy(region2AreaAspire)
        self.area2Region = copy.deepcopy(area2RegionAspire)
        self.resList = resList

    def reactiveTabuMove(self, convTabu=99):
        """
        AZP Openshaw's Reactive Tabu algorithm

        Runs a reactive tabu search (tenure adapts to how often zoning
        systems repeat) for up to ``convTabu`` consecutive non-improving
        passes, then restores the best solution found.  Step numbers in the
        comments follow the published algorithm description.
        """
        # step 2: tenure starts at 1; tabu list is a FIFO of reversed moves
        tabuLength = 1
        tabuList = numpy.zeros(tabuLength)
        tabuList = tabuList.tolist()
        rAvg = 1  # moving bound used to adapt the tabu tenure
        K1 = 3  # short-term repetition threshold
        K2 = 3  # long-term repetition threshold (triggers random escape)
        visitedSolutions = []  # recently seen zoning systems
        allVisitedSolutions = {}  # long-term visit counts
        self.round = 0
        epsilon = 1e-10  # tolerance for "strictly better" comparisons
        # aspiration (best-so-far) snapshot starts at the current solution
        aspireOBJ = self.objInfo
        aspireRegions = self.returnRegions()
        region2AreaAspire = copy.deepcopy(self.region2Area)
        area2RegionAspire = copy.deepcopy(self.area2Region)
        c = 1
        while c <= convTabu:
            improved = 0
            # step 3: enumerate all feasible border moves
            self.objDict = objFunctions.makeObjDict(self)
            self.allCandidates()
            if len(self.neighSolutions) == 0:
                # no moves available: force termination
                c += convTabu
            else:
                neighSolutionsCopy = copy.deepcopy(self.neighSolutions)
                neighNoTabuKeys = list(set(neighSolutionsCopy.keys()) - set(tabuList))
                neighNoTabuDict = dict((key, neighSolutionsCopy[key]) for key in neighNoTabuKeys)
                # step 4: best (lowest-objective) non-tabu move, if any
                if len(neighNoTabuDict) > 0:
                    move = min(neighNoTabuDict, key = lambda x: neighNoTabuDict.get(x))
                    obj4Move = self.neighSolutions[move]
                else:
                    c += convTabu
                    break;
                # step 5: apply the move; its reverse becomes tabu
                area, region = move
                obj4Move = self.neighSolutions[move]
                oldRegion = self.area2Region[area]
                tabuList = self.updateTabuList((area, oldRegion), tabuList, tabuLength)
                self.moveArea(area, region)
                self.objInfo = obj4Move
                # update aspirational (best-so-far) solution
                if (aspireOBJ - obj4Move) > epsilon:
                    aspireOBJ = obj4Move
                    aspireRegions = self.returnRegions()
                    region2AreaAspire = copy.deepcopy(self.region2Area)
                    area2RegionAspire = copy.deepcopy(self.area2Region)
                    improved = 1
                # step 6: how often has this zoning system been seen?
                currentSystem = self.returnRegions()
                nVisits = visitedSolutions.count(currentSystem)
                if nVisits == 0:
                    # zoning system not found (go to step 10)
                    # step 10
                    visitedSolutions.append(currentSystem)
                # step 7: frequently revisited system -> long-term memory
                elif nVisits > K1:
                    try:
                        nVisitsAll = allVisitedSolutions[currentSystem]
except: nVisitsAll = 0 nVisitsAll =+ 1 allVisitedSolutions[currentSystem] = nVisitsAll if nVisitsAll >= K2: # go to step 11 # step 11a visitedSolutions = [] self.objDict = objFunctions.makeObjDict(self) self.allCandidates() moveIndex = range(len(self.neighSolutions)) numpy.random.suffle(moveIndex) for move in moveIndex[0:int(1 + 0.5 * rAvg)]: area, region = move obj4Move = self.neighSolutions[move] oldRegion = self.area2Region[area] tabuList = self.updateTabuList((area,oldRegion), tabuList, tabuLength) self.moveArea(area, region) obj4Move = self.neighSolutions[move] # update aspirational if (aspireOBJ-obj4Move) > epsilon: aspireOBJ = obj4Move aspireRegions = self.returnRegions() region2AreaAspire = copy.deepcopy(self.region2Area) area2RegionAspire = copy.deepcopy(self.area2Region) improved = 1 # step 8 elif nVisits < K1: rAvg += 1 tabuLength = 1.1*tabuLength # step 9 if tabuLength > rAvg: tabuLength = max(0.9 * tabuLength, 1) tabuLength = int(round(tabuLength)) # step 10 visitedSolutions.append(currentSystem) if improved == 1: c = 1 else: c += 1 self.objInfo = aspireOBJ self.regions = aspireRegions self.region2Area = copy.deepcopy(region2AreaAspire) self.area2Region = copy.deepcopy(area2RegionAspire) def moveArea(self, areaID, regionID): """ Move an area to a region """ oldRegion = self.area2Region[areaID] self.region2Area[oldRegion].remove(areaID) self.region2Area[regionID].append(areaID) self.area2Region[areaID] = regionID a = self.areas[areaID] toUpdate = [areaID] + a.neighs if self.objectiveFunctionType == "GWalt": self.NRegion[regionID] += a.data[0] self.NRegion[oldRegion] -= a.data[0] if self.numRegionsType == "EndogenousThreshold": self.regionValue[regionID] += self.areas[areaID].thresholdVar self.regionValue[oldRegion] -= self.areas[areaID].thresholdVar try: for index in range(1, len(a.data)): self.data[regionID][index - 1] += a.data[index] * a.data[0] for index in range(1, len(a.data)): self.data[oldRegion][index - 1] -= a.data[index] *a.data[0] except: 
            # (continuation of moveArea's best-effort data update)
                pass
        # rebuild the intra-border bookkeeping for the moved area and its
        # neighbours: an area is "intra-bordering" when it touches at least
        # one area assigned to a different region
        for area in toUpdate:
            regionIn = self.area2Region[area]
            areasIdsIn = self.region2Area[regionIn]
            areasInNow = [self.areas[aID] for aID in areasIdsIn]  # NOTE(review): unused
            areasInRegion = set(areasIdsIn)
            aNeighs = set(self.areas[area].neighs)
            neighsInOther = aNeighs - areasInRegion
            if len(neighsInOther) == 0 and area in self.intraBorderingAreas:
                # fully interior now: drop it from the border map
                self.intraBorderingAreas.pop(area)
            else:
                # collect the set of foreign regions this area touches
                borderRegions = set([])
                for neigh in neighsInOther:
                    borderRegions = borderRegions | set([self.area2Region[neigh]])
                if area in self.intraBorderingAreas:
                    self.intraBorderingAreas.pop(area)
                self.intraBorderingAreas[area] = borderRegions
        self.calcObj()

    def recoverFromExtendedMemory(self, extendedMemory):
        """
        Recover a solution form the extended memory

        Copies the objective value and the region/area assignment maps back
        from ``extendedMemory`` (references are shared, not deep-copied).
        """
        self.objInfo = extendedMemory.objInfo
        self.area2Region = extendedMemory.area2Region
        self.region2Area = extendedMemory.region2Area
        self.intraBorderingAreas = extendedMemory.intraBorderingAreas

    def getSeeds(self):
        """
        Return the seeds of the solution
        """
        return self.seeds;  # NOTE(review): stray semicolon kept verbatim


def indexMultiple(x, value):
    """
    Return indexes in x with multiple values.

    I.e. all positions i such that x[i] == value.
    """
    return [ i[0] for i in enumerate(x) if i[1] == value ]


def calculateGetisG(keyList, dataMean, dataStd, dataDictionary, dataLength):
    """
    This function returns the local G statistic a given region.

    keyList is the set of member/neighbour ids, dataMean/dataStd are the
    global mean and standard deviation, dataDictionary maps id -> value and
    dataLength is the total number of observations.
    """
    sum = 0  # NOTE(review): shadows the builtin `sum`; kept verbatim
    for i in keyList:
        sum = sum + numpy.double((dataDictionary[i]))
    neighborNumber = len(keyList)
    numerator = sum - dataMean * neighborNumber
    denominator = dataStd * ((float(dataLength * neighborNumber - (neighborNumber ** 2)) / (dataLength - 1)) ** 0.5)
    # denominator = (dataStd*((dataLength*neighborNumber-(neighborNumber**2))/(dataLength-1))**0.5)
    G = numerator / denominator
    return G


def quickSortIntersection(dataList, keyList, discardList):
    """
    quickSortIntersection recursively sorts the list of values using a quick sort algorithm.
""" if len(keyList) <= 1: return keyList else: lessData = [] lessKey = [] moreData = [] moreKey = [] pivot = dataList[-1] kpivot = keyList[-1] for i in range(len(dataList) - 1): if keyList[i] not in discardList: if dataList[i] <= pivot: lessData.append(dataList[i]) lessKey.append(keyList[i]) else: moreData.append(dataList[i]) moreKey.append(keyList[i]) return quickSortIntersection(lessData, lessKey, discardList) + [kpivot] + quickSortIntersection(moreData, moreKey, discardList) def quickSort2(keys, y): """ quickSortIntersection recursively sorts the list of values using a quick sort algorithm. """ if len(keys) <= 1: return keys else: lessData = [] lessKey = [] moreData = [] moreKey = [] pivot = y[keys[-1]] kpivot = keys[-1] keys=keys[0: -1] for i in keys: if y[i] <= pivot: lessKey.append(i) else: moreKey.append(i) return quickSort2(lessKey, y) + [kpivot] + quickSort2(moreKey, y) def neighborSort(dictionary, discardList): """ Returns the list of keys of a dictionary sorted by the values that are assigned by them. """ dataList = dictionary.values() keyList = dictionary.keys() return quickSortIntersection(dataList, keyList, discardList) def vectorDistance(v1, v2): """ this function calculates de euclidean distance between two vectors. """ sum = 0 for i in range(len(v1)): sum += (v1[i] - v2[i]) ** 2 return sum ** 0.5 # INTERNOS def calculateCentroid(areaList): """ This function return the centroid of an area list """ pg = 0.0 pk = [] centroid = AreaCl(0, [], []) for area in areaList: pg += area.data[0] pk = pk + [area.data[0]] pkPg = numpy.matrix(pk).T / pg data = [0.0] * len(area.data) var = numpy.matrix(areaList[0].var) * 0.0 j = 0 for area in areaList: var += area.var * pow(pkPg[j, 0], 2) for i in range(len(area.data)): data[i] += area.data[i] * pkPg[j, 0] j += 1 centroid.data = data centroid.var = var return centroid def factorial(n): """ Returns the factorial of a number. 
""" fact = 1.0 if n > 1: fact = n * factorial(n - 1) return fact def comb(n, m): """ This function calculates the number of possible combinations of n items chosen by m. """ return factorial(n) / (factorial(m) * factorial(n - m)) def recode(X): """ Tranform a list with regions begining in x to a lis begining in 0. """ XP = X + [] assigned = [] r = 0 for i in range(len(X)): if (i not in assigned): XP[i] = r for j in range(len(X) - i - 1): k = i + j + 1 if (k not in assigned): if X[k] == X[i]: XP[k] = r assigned = assigned + [k] r = r + 1 return XP def sortedKeys(d): """ Return keys of the dictionary d sorted based on their values. """ values = d.values() sortedIndices = numpy.argsort(values) sortedKeys = [d.keys()[i] for i in sortedIndices] minVal = min(values) countMin = values.count(minVal) if countMin > 1: minIndices = sortedKeys[0: countMin] nInd = len(minIndices) idx = range(nInd) numpy.random.shuffle(idx) permMins = idx c = 0 for i in range(nInd): place = permMins[c] sortedKeys[c] = minIndices[place] c += 1 return sortedKeys def feasibleRegion(feasDict): """ Return if a list of areas are connected """ areas2Eval = [] areas = {} for key in feasDict.keys(): try: neighbours = feasDict[key] except: neighbours = {} a = AreaCl(key, neighbours, []) areas[key] = a areas2Eval = areas2Eval + [key] feasible = 1 newRegion = set([]) for area in areas2Eval: newRegion = newRegion | (set(areas[area].neighs) & set(areas2Eval)) if set(areas2Eval) - newRegion != set([]): feasible = 0 return feasible class AreaCl: """ Area Class for Regional Clustering. """ def __init__(self, id, neighs, data, variance="false"): """ @type id: integer @param id: Id of the polygon/area @type neighs: list @param neighs: Neighborhood ids @type data: list. @param data: Data releated to the area. 
        @type variance: boolean
        @keyword variance: Boolean indicating if the data have variance matrix
        """
        self.id = id
        self.neighs = neighs
        if variance == "false":
            self.data = data
        else:
            # data packs the attribute vector followed by the upper triangle
            # of its variance matrix; recover the matrix size n from the
            # triangle length: len(data) - 1 - n = n*(n+1)/2
            n = (numpy.sqrt(9 + 8 * (len(data) - 1)) - 3) / 2
            self.var = numpy.matrix(numpy.identity(n))
            index = n + 1
            for i in range(int(n)):
                for j in range(i + 1):
                    # fill symmetrically from the packed triangle
                    self.var[i, j] = data[int(index)]
                    self.var[j, i] = data[int(index)]
                    index += 1
            # keep only the attribute vector as the area data
            self.data = data[0: int(n + 1)]

    def returnDistance2Area(self, otherArea, distanceType="EuclideanSquared", indexData=[]):
        """
        Return the distance between the area and other area

        distanceType selects a function from distanceFunctions.distMethods;
        indexData optionally restricts which attribute positions are used.
        NOTE(review): mutable default `indexData=[]` kept verbatim (it is
        only read, never mutated, so the shared default is harmless here).
        """
        if len(indexData) == 0:
            indexData = range(len(self.data))
        y0 = []
        y1 = []
        for index in indexData:
            y0 += [self.data[index]]
            y1 += [otherArea.data[index]]
        data = numpy.concatenate(([y0], [y1]))
        areaDistance = distanceFunctions.distMethods[distanceType](data)
        # some distance methods return a scalar row, others a matrix
        try:
            dist = areaDistance[0][0]
        except:  # NOTE(review): bare except kept verbatim
            dist = areaDistance[0]
        return dist


class somManager():
    """SOM Manager object
    """
    def __init__(self, data, iters, outputLayer, alphaType, initialDistribution, BMUContiguity):
        """This class control all the SOM neural network structure.
        It's the repository of the output layer and the solution generator

        @type data: dictionary
        @param data: Input layer data

        @type iters: integer
        @param iters: Number of iterations

        @type outputLayer: Layer
        @param outputLayer: Output Layer object

        @type alphaType: string
        @param alphaType: Type of learning rate

        @type initialDistribution: string
        @param initialDistribution: Neural units initial distribution

        @type BMUContiguity: string
        @param BMUContiguity: Contiguity criterion
        """
        self.alphaType = alphaType
        self.data = data
        nv = len(data[0])  # number of variables per input vector
        self.iters = iters
        self.outputLayer = outputLayer
        # Initializing neural weights
        self.outputLayer.generateData(initialDistribution, 'rook', nv, 0, 1)
        # the freshly generated weight fields are the last nv field names
        dataNames = self.outputLayer.fieldNames[-1 * nv:]
        self.actualData = outputLayer.getVars(*dataNames)
        # initializing empty clusters
        self.emptyClusters = {}
        for i in range(len(self.outputLayer.areas)):
            self.emptyClusters[i] = []
        # initializing feasibles BMU
        self.feasibleBMU = {}
        for i in self.data.keys():
            # NOTE(review): this rebinds feasibleBMU to the key list on every
            # pass, clobbering the dict initialized above; findBMU later
            # indexes it as a flat sequence, which is consistent with the
            # final (list) value — looks like an upstream quirk, confirm.
            self.feasibleBMU = outputLayer.Y.keys()
        # initializing contiguities
        if BMUContiguity == 'rook':
            self.outputContiguity = self.outputLayer.Wrook
        elif BMUContiguity == 'queen':
            self.outputContiguity = self.outputLayer.Wqueen
        elif BMUContiguity == 'custom':
            self.outputContiguity = self.outputLayer.Wcustom
        elif BMUContiguity == 'all':
            # NOTE(review): self.BMUContiguity is never initialized before
            # this item assignment — this branch would raise AttributeError;
            # confirm against upstream.
            for i in self.data.Y.keys():
                self.BMUContiguity[i] = self.data.Y.keys()
        else:
            raise NameError('Invalid contiguity Type')
        # defining areas order
        self.order = self.data.keys()
        self.solutionsInput = {}  # area id -> list of cluster per iteration

    def __alpha(self, value):
        """
        Decreasing scalar-valued function used to update the neural
        network weights on a specific itereations.
""" if self.alphaType == 'linear': return (1 - float(value) / self.iters) elif self.alphaType == 'quadratic': return -1 * (float(value) / self.iters) ** 2 + 1 else: raise NameError('Invalid deacrising function type') def findBMU(self, areaId): """ Find the most similar neural weight, usally called on the literature such as Best Matching Unit (BMU) """ inputY = self.data[areaId] min = vectorDistance(inputY, self.actualData[self.feasibleBMU[0]]) bmu = 0 for i in self.feasibleBMU[1:]: dist = vectorDistance(inputY, self.actualData[i]) if dist < min: min = dist bmu = i return bmu def modifyUnits(self, bmu, areaId, iter): """ Updates the BMU neighborhod """ inputY = self.data[areaId] for i in self.outputContiguity[bmu] + [bmu]: dist = numpy.array(inputY) - numpy.array(self.actualData[i]) alph = self.__alpha(iter) self.actualData[i] = list(numpy.array(self.actualData[i]) \ + alph * dist) def addSolution(self, iter): """ Manage the solutions of each iteration """ solution = {} self.outputLayer.fieldNames += ['iter' + str(iter)] for i in self.clusters: self.outputLayer.Y[i] += [len(self.clusters[i])] for j in self.clusters[i]: if self.solutionsInput.has_key(j): self.solutionsInput[j] += [i] else: self.solutionsInput[j] = [i] solution[j] = i return solution.values() def compressSolution(self, solution): """ Standarize the not sorted solution. """ count = 0 order = list(set(solution)) order.sort() sol = [order.index(x) for x in solution] return sol class geoSomManager(somManager): """Geo-SOM Manager object """ def __init__(self,data, iters, outputLayer, alphaType, initialDistribution, BMUContiguity, iCentroids, oCentroids): """ This class control all the geoSOM neural network structure. Aditionally it's the repository of the output layer and the solution generator. 
        @type data: dictionary
        @param data: Input layer data

        @type iters: integer
        @param iters: Number of iterations

        @type outputLayer: Layer
        @param outputLayer: Output Layer object

        @type alphaType: string
        @param alphaType: Type of learning rate

        @type initialDistribution: string
        @param initialDistribution: Neural units initial distribution

        @type BMUContiguity: string
        @param BMUContiguity: Contiguity criterion

        @type iCentroids: dictionary
        @param iCentroids: Centroid coordinates for the input Layer areas.

        @type oCentroids: dictionary
        @param oCentroids: Centroid coordinates for the output Layer areas.
        """
        somManager.__init__(self, data, iters, outputLayer, alphaType, initialDistribution, BMUContiguity)
        self.iCentroids = iCentroids
        self.oCentroids = oCentroids
        # geoWinner: geographically closest output unit per input area;
        # feasibleBMU: that unit's contiguity set (the geo-restricted
        # search space for findBMU)
        self.geoWinner, self.feasibleBMU = self.defGeoWinnerAttributes()

    def defGeoWinnerAttributes(self):
        """
        This function define de geoWinners for all the input areas

        For each input centroid, finds the geographically nearest output
        centroid and records its contiguity set as the feasible BMU set.
        """
        geoWinner = {}
        feasibleBMU = {}
        for c in self.iCentroids:
            bestOIndex = 0
            minDistance = vectorDistance(self.iCentroids[c], self.oCentroids[0])
            # NOTE(review): initial candidate uses self.outputContiguity[0]
            # WITHOUT appending [0] itself, while updates below use
            # `... + [o]` — likely an upstream inconsistency; confirm.
            outputContiguity = self.outputContiguity[0]
            for o in self.oCentroids:
                dis = vectorDistance(self.iCentroids[c], self.oCentroids[o])
                if dis < minDistance:
                    minDistance = dis
                    bestOIndex = o
                    outputContiguity = self.outputContiguity[o] + [o]
            geoWinner[c] = bestOIndex
            feasibleBMU[c] = outputContiguity
        return geoWinner, feasibleBMU

    def findBMU(self, areaId):
        """
        Finds the most similar neural network weight, usally called on
        the literature such as Best Matching Unit (BMU)

        Overrides somManager.findBMU: the scan is restricted to the
        geographically feasible units computed per input area.
        """
        inputY = self.data[areaId]
        feasibleBMU = self.feasibleBMU[areaId]
        # NOTE(review): `min` shadows the builtin inside this method
        min = vectorDistance(inputY, self.actualData[feasibleBMU[0]])
        bmu = feasibleBMU[0]
        for i in feasibleBMU:
            dist = vectorDistance(inputY, self.actualData[i])
            if dist < min:
                min = dist
                bmu = i
        return bmu
# codeparrot/github-code-clean -- dataset artifact: boundary marker between two unrelated concatenated source files (clusterPy-style region-making code above; scikit-bio TreeNode below)
# ---------------------------------------------------------------------------- # Copyright (c) 2013--, scikit-bio development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. # ---------------------------------------------------------------------------- import warnings from operator import or_, itemgetter from copy import deepcopy from itertools import combinations from functools import reduce from collections import defaultdict import numpy as np from scipy.stats import pearsonr from skbio._base import SkbioObject from skbio.stats.distance import DistanceMatrix from ._exception import (NoLengthError, DuplicateNodeError, NoParentError, MissingNodeError, TreeError) from skbio.util import RepresentationWarning from skbio.util._decorator import experimental, classonlymethod def distance_from_r(m1, m2): r"""Estimates distance as (1-r)/2: neg correl = max distance Parameters ---------- m1 : DistanceMatrix a distance matrix to compare m2 : DistanceMatrix a distance matrix to compare Returns ------- float The distance between m1 and m2 """ return (1-pearsonr(m1.data.flat, m2.data.flat)[0])/2 class TreeNode(SkbioObject): r"""Representation of a node within a tree A `TreeNode` instance stores links to its parent and optional children nodes. In addition, the `TreeNode` can represent a `length` (e.g., a branch length) between itself and its parent. Within this object, the use of "children" and "descendants" is frequent in the documentation. A child is a direct descendant of a node, while descendants are all nodes that are below a given node (e.g., grand-children, etc). Parameters ---------- name : str or None A node can have a name. It is common for tips in particular to have names, for instance, in a phylogenetic tree where the tips correspond to species. length : float, int, or None Length of the branch connecting this node to its parent. 
Can represent ellapsed time, amount of mutations, or other measures of evolutionary distance. support : float, int, or None Support value of the branch connecting this node to its parent. Can be bootstrap value, posterior probability, or other metrics measuring the confidence or frequency of this branch. parent : TreeNode or None Connect this node to a parent children : list of TreeNode or None Connect this node to existing children """ default_write_format = 'newick' _exclude_from_copy = set(['parent', 'children', '_tip_cache', '_non_tip_cache']) @experimental(as_of="0.4.0") def __init__(self, name=None, length=None, support=None, parent=None, children=None): self.name = name self.length = length self.support = support self.parent = parent self._tip_cache = {} self._non_tip_cache = {} self._registered_caches = set() self.children = [] self.id = None if children is not None: self.extend(children) @experimental(as_of="0.4.0") def __repr__(self): r"""Returns summary of the tree Returns ------- str A summary of this node and all descendants Notes ----- This method returns the name of the node and a count of tips and the number of internal nodes in the tree Examples -------- >>> from skbio import TreeNode >>> tree = TreeNode.read(["((a,b)c, d)root;"]) >>> repr(tree) '<TreeNode, name: root, internal node count: 1, tips count: 3>' """ nodes = [n for n in self.traverse(include_self=False)] n_tips = sum([n.is_tip() for n in nodes]) n_nontips = len(nodes) - n_tips classname = self.__class__.__name__ name = self.name if self.name is not None else "unnamed" return "<%s, name: %s, internal node count: %d, tips count: %d>" % \ (classname, name, n_nontips, n_tips) @experimental(as_of="0.4.0") def __str__(self): r"""Returns string version of self, with names and distances Returns ------- str Returns a Newick representation of the tree See Also -------- read write Examples -------- >>> from skbio import TreeNode >>> tree = TreeNode.read(["((a,b)c);"]) >>> str(tree) '((a,b)c);\n' 
""" return str(''.join(self.write([]))) @experimental(as_of="0.4.0") def __iter__(self): r"""Node iter iterates over the `children`.""" return iter(self.children) @experimental(as_of="0.4.0") def __len__(self): return len(self.children) @experimental(as_of="0.4.0") def __getitem__(self, i): r"""Node delegates slicing to `children`.""" return self.children[i] @experimental(as_of="0.4.0") def _adopt(self, node): r"""Update `parent` references but does NOT update `children`.""" self.invalidate_caches() if node.parent is not None: node.parent.remove(node) node.parent = self return node @experimental(as_of="0.4.0") def append(self, node): r"""Appends a node to `children`, in-place, cleaning up refs `append` will invalidate any node lookup caches, remove an existing parent on `node` if one exists, set the parent of `node` to self and add the `node` to `self` `children`. Parameters ---------- node : TreeNode An existing TreeNode object See Also -------- extend Examples -------- >>> from skbio import TreeNode >>> root = TreeNode(name="root") >>> child1 = TreeNode(name="child1") >>> child2 = TreeNode(name="child2") >>> root.append(child1) >>> root.append(child2) >>> print(root) (child1,child2)root; <BLANKLINE> """ self.children.append(self._adopt(node)) @experimental(as_of="0.4.0") def extend(self, nodes): r"""Append a `list` of `TreeNode` to `self`. `extend` will invalidate any node lookup caches, remove existing parents of the `nodes` if they have any, set their parents to self and add the nodes to `self` `children`. Parameters ---------- nodes : list of TreeNode A list of TreeNode objects See Also -------- append Examples -------- >>> from skbio import TreeNode >>> root = TreeNode(name="root") >>> root.extend([TreeNode(name="child1"), TreeNode(name="child2")]) >>> print(root) (child1,child2)root; <BLANKLINE> """ self.children.extend([self._adopt(n) for n in nodes[:]]) @experimental(as_of="0.4.0") def pop(self, index=-1): r"""Remove a `TreeNode` from `self`. 
Remove a child node by its index position. All node lookup caches are invalidated, and the parent reference for the popped node will be set to `None`. Parameters ---------- index : int The index position in `children` to pop Returns ------- TreeNode The popped child See Also -------- remove remove_deleted Examples -------- >>> from skbio import TreeNode >>> tree = TreeNode.read(["(a,b)c;"]) >>> print(tree.pop(0)) a; <BLANKLINE> """ return self._remove_node(index) def _remove_node(self, idx): r"""The actual (and only) method that performs node removal""" self.invalidate_caches() node = self.children.pop(idx) node.parent = None return node @experimental(as_of="0.4.0") def remove(self, node): r"""Remove a node from self Remove a `node` from `self` by identity of the node. Parameters ---------- node : TreeNode The node to remove from self's children Returns ------- bool `True` if the node was removed, `False` otherwise See Also -------- pop remove_deleted Examples -------- >>> from skbio import TreeNode >>> tree = TreeNode.read(["(a,b)c;"]) >>> tree.remove(tree.children[0]) True """ for (i, curr_node) in enumerate(self.children): if curr_node is node: self._remove_node(i) return True return False @experimental(as_of="0.4.0") def remove_deleted(self, func): r"""Delete nodes in which `func(node)` evaluates `True`. Remove all descendants from `self` that evaluate `True` from `func`. This has the potential to drop clades. Parameters ---------- func : a function A function that evaluates `True` when a node should be deleted See Also -------- pop remove Examples -------- >>> from skbio import TreeNode >>> tree = TreeNode.read(["(a,b)c;"]) >>> tree.remove_deleted(lambda x: x.name == 'b') >>> print(tree) (a)c; <BLANKLINE> """ for node in self.traverse(include_self=False): if func(node): node.parent.remove(node) @experimental(as_of="0.4.0") def prune(self): r"""Reconstructs correct topology after nodes have been removed. 
Internal nodes with only one child will be removed and new connections will be made to reflect change. This method is useful to call following node removals as it will clean up nodes with singular children. Names and properties of singular children will override the names and properties of their parents following the prune. Node lookup caches are invalidated. See Also -------- shear remove pop remove_deleted Examples -------- >>> from skbio import TreeNode >>> tree = TreeNode.read(["((a,b)c,(d,e)f)root;"]) >>> to_delete = tree.find('b') >>> tree.remove_deleted(lambda x: x == to_delete) >>> print(tree) ((a)c,(d,e)f)root; <BLANKLINE> >>> tree.prune() >>> print(tree) ((d,e)f,a)root; <BLANKLINE> """ # build up the list of nodes to remove so the topology is not altered # while traversing nodes_to_remove = [] for node in self.traverse(include_self=False): if len(node.children) == 1: nodes_to_remove.append(node) # clean up the single children nodes for node in nodes_to_remove: child = node.children[0] if child.length is None or node.length is None: child.length = child.length or node.length else: child.length += node.length if node.parent is None: continue node.parent.append(child) node.parent.remove(node) # if a single descendent from the root, the root adopts the childs # properties. we can't "delete" the root as that would be deleting # self. if len(self.children) == 1: node_to_copy = self.children[0] efc = self._exclude_from_copy for key in node_to_copy.__dict__: if key not in efc: self.__dict__[key] = deepcopy(node_to_copy.__dict__[key]) self.remove(node_to_copy) self.extend(node_to_copy.children) @experimental(as_of="0.4.0") def shear(self, names): """Lop off tips until the tree just has the desired tip names. 
Parameters ---------- names : Iterable of str The tip names on the tree to keep Returns ------- TreeNode The resulting tree Raises ------ ValueError If the names do not exist in the tree See Also -------- prune remove pop remove_deleted Examples -------- >>> from skbio import TreeNode >>> t = TreeNode.read(['((H:1,G:1):2,(R:0.5,M:0.7):3);']) >>> sheared = t.shear(['G', 'M']) >>> print(sheared) (G:3.0,M:3.7); <BLANKLINE> """ tcopy = self.deepcopy() all_tips = {n.name for n in tcopy.tips()} ids = set(names) if not ids.issubset(all_tips): raise ValueError("ids are not a subset of the tree.") marked = set() for tip in tcopy.tips(): if tip.name in ids: marked.add(tip) for anc in tip.ancestors(): if anc in marked: break else: marked.add(anc) for node in list(tcopy.traverse()): if node not in marked: node.parent.remove(node) tcopy.prune() return tcopy @experimental(as_of="0.4.0") def copy(self): r"""Returns a copy of self using an iterative approach Perform an iterative deepcopy of self. It is not assured that the copy of node attributes will be performed iteratively as that depends on the copy method of the types being copied Returns ------- TreeNode A new copy of self See Also -------- unrooted_deepcopy unrooted_copy Examples -------- >>> from skbio import TreeNode >>> tree = TreeNode.read(["((a,b)c,(d,e)f)root;"]) >>> tree_copy = tree.copy() >>> tree_nodes = set([id(n) for n in tree.traverse()]) >>> tree_copy_nodes = set([id(n) for n in tree_copy.traverse()]) >>> print(len(tree_nodes.intersection(tree_copy_nodes))) 0 """ def __copy_node(node_to_copy): r"""Helper method to copy a node""" # this is _possibly_ dangerous, we're assuming the node to copy is # of the same class as self, and has the same exclusion criteria. # however, it is potentially dangerous to mix TreeNode subclasses # within a tree, so... 
result = self.__class__() efc = self._exclude_from_copy for key in node_to_copy.__dict__: if key not in efc: result.__dict__[key] = deepcopy(node_to_copy.__dict__[key]) return result root = __copy_node(self) nodes_stack = [[root, self, len(self.children)]] while nodes_stack: # check the top node, any children left unvisited? top = nodes_stack[-1] new_top_node, old_top_node, unvisited_children = top if unvisited_children: top[2] -= 1 old_child = old_top_node.children[-unvisited_children] new_child = __copy_node(old_child) new_top_node.append(new_child) nodes_stack.append([new_child, old_child, len(old_child.children)]) else: # no unvisited children nodes_stack.pop() return root __copy__ = copy __deepcopy__ = deepcopy = copy @experimental(as_of="0.4.0") def unrooted_deepcopy(self, parent=None): r"""Walks the tree unrooted-style and returns a new copy Perform a deepcopy of self and return a new copy of the tree as an unrooted copy. This is useful for defining new roots of the tree as the `TreeNode`. This method calls `TreeNode.unrooted_copy` which is recursive. Parameters ---------- parent : TreeNode or None Used to avoid infinite loops when performing the unrooted traverse Returns ------- TreeNode A new copy of the tree See Also -------- copy unrooted_copy root_at Examples -------- >>> from skbio import TreeNode >>> tree = TreeNode.read(["((a,(b,c)d)e,(f,g)h)i;"]) >>> new_tree = tree.find('d').unrooted_deepcopy() >>> print(new_tree) (b,c,(a,((f,g)h)e)d)root; <BLANKLINE> """ root = self.root() root.assign_ids() new_tree = root.copy() new_tree.assign_ids() new_tree_self = new_tree.find_by_id(self.id) return new_tree_self.unrooted_copy(parent) @experimental(as_of="0.4.0") def unrooted_copy(self, parent=None): r"""Walks the tree unrooted-style and returns a copy Perform a copy of self and return a new copy of the tree as an unrooted copy. This is useful for defining new roots of the tree as the `TreeNode`. This method is recursive. 
Warning, this is _NOT_ a deepcopy Parameters ---------- parent : TreeNode or None Used to avoid infinite loops when performing the unrooted traverse Returns ------- TreeNode A new copy of the tree See Also -------- copy unrooted_deepcopy root_at Examples -------- >>> from skbio import TreeNode >>> tree = TreeNode.read(["((a,(b,c)d)e,(f,g)h)i;"]) >>> new_tree = tree.find('d').unrooted_copy() >>> print(new_tree) (b,c,(a,((f,g)h)e)d)root; <BLANKLINE> """ neighbors = self.neighbors(ignore=parent) children = [c.unrooted_copy(parent=self) for c in neighbors] # we might be walking UP the tree, so: if parent is None: # base edge edgename = None length = None elif parent.parent is self: # self's parent is becoming self's child edgename = parent.name length = parent.length else: assert parent is self.parent edgename = self.name length = self.length result = self.__class__(name=edgename, children=children, length=length) if parent is None: result.name = "root" return result @experimental(as_of="0.4.0") def count(self, tips=False): """Get the count of nodes in the tree Parameters ---------- tips : bool If `True`, only return the count of the number of tips Returns ------- int The number of nodes or tips Examples -------- >>> from skbio import TreeNode >>> tree = TreeNode.read(["((a,(b,c)d)e,(f,g)h)i;"]) >>> print(tree.count()) 9 >>> print(tree.count(tips=True)) 5 """ if tips: return len(list(self.tips())) else: return len(list(self.traverse(include_self=True))) @experimental(as_of="0.4.1") def observed_node_counts(self, tip_counts): """Returns counts of node observations from counts of tip observations Parameters ---------- tip_counts : dict of ints Counts of observations of tips. Keys correspond to tip names in ``self``, and counts are unsigned ints. Returns ------- dict Counts of observations of nodes. Keys correspond to node names (internal nodes or tips), and counts are unsigned ints. Raises ------ ValueError If a count less than one is observed. 
MissingNodeError If a count is provided for a tip not in the tree, or for an internal node. """ result = defaultdict(int) for tip_name, count in tip_counts.items(): if count < 1: raise ValueError("All tip counts must be greater than zero.") else: t = self.find(tip_name) if not t.is_tip(): raise MissingNodeError( "Counts can only be for tips in the tree. %s is an " "internal node." % t.name) result[t] += count for internal_node in t.ancestors(): result[internal_node] += count return result @experimental(as_of="0.4.0") def subtree(self, tip_list=None): r"""Make a copy of the subtree""" raise NotImplementedError() @experimental(as_of="0.4.0") def subset(self): r"""Returns set of names that descend from specified node Get the set of `name` on tips that descend from this node. Returns ------- frozenset The set of names at the tips of the clade that descends from self See Also -------- subsets compare_subsets Examples -------- >>> from skbio import TreeNode >>> tree = TreeNode.read(["((a,(b,c)d)e,(f,g)h)i;"]) >>> sorted(tree.subset()) ['a', 'b', 'c', 'f', 'g'] """ return frozenset({i.name for i in self.tips()}) @experimental(as_of="0.4.0") def subsets(self): r"""Return all sets of names that come from self and its descendants Compute all subsets of tip names over `self`, or, represent a tree as a set of nested sets. Returns ------- frozenset A frozenset of frozensets of str See Also -------- subset compare_subsets Examples -------- >>> from skbio import TreeNode >>> tree = TreeNode.read(["(((a,b)c,(d,e)f)h)root;"]) >>> subsets = tree.subsets() >>> len(subsets) 3 """ sets = [] for i in self.postorder(include_self=False): if not i.children: i.__leaf_set = frozenset([i.name]) else: leaf_set = reduce(or_, [c.__leaf_set for c in i.children]) if len(leaf_set) > 1: sets.append(leaf_set) i.__leaf_set = leaf_set return frozenset(sets) @experimental(as_of="0.4.0") def root_at(self, node): r"""Return a new tree rooted at the provided node. 
@experimental(as_of="0.4.0")
def root_at_midpoint(self):
    r"""Return a new tree rooted at midpoint of the two tips farthest apart

    This method doesn't preserve the internal node naming or structure,
    but does keep tip to tip distances correct. Uses `unrooted_copy` but
    operates on a full copy of the tree.

    Returns
    -------
    TreeNode
        A tree rooted at its midpoint

    Raises
    ------
    TreeError
        If a tip ends up being the mid point
    LengthError
        Midpoint rooting requires `length` and will raise (indirectly)
        if evaluated nodes don't have length.

    See Also
    --------
    root_at
    unrooted_deepcopy

    Examples
    --------
    >>> from skbio import TreeNode
    >>> tree = TreeNode.read(["(((d:1,e:1,(g:1)f:1)c:1)b:1,h:1)a:1;"])
    >>> print(tree.root_at_midpoint())
    ((d:1.0,e:1.0,(g:1.0)f:1.0)c:0.5,((h:1.0)b:1.0):0.5)root;
    <BLANKLINE>

    """
    # Work on a copy: the branch below may splice a new node into the tree.
    tree = self.copy()
    max_dist, tips = tree.get_max_distance()
    half_max_dist = max_dist / 2.0
    if max_dist == 0.0:  # only pathological cases with no lengths
        return tree
    tip1 = tree.find(tips[0])
    tip2 = tree.find(tips[1])
    lca = tree.lowest_common_ancestor([tip1, tip2])
    # The midpoint lies on the side of the LCA holding more than half the
    # max distance; climb from that tip toward the LCA.
    if tip1.accumulate_to_ancestor(lca) > half_max_dist:
        climb_node = tip1
    else:
        climb_node = tip2

    dist_climbed = 0.0
    while dist_climbed + climb_node.length < half_max_dist:
        dist_climbed += climb_node.length
        climb_node = climb_node.parent

    # now midpt is either at on the branch to climb_node's parent
    # or midpt is at climb_node's parent
    if dist_climbed + climb_node.length == half_max_dist:
        # climb to midpoint spot
        climb_node = climb_node.parent
        if climb_node.is_tip():
            raise TreeError('error trying to root tree at tip')
        else:
            return climb_node.unrooted_copy()

    else:
        # make a new node on climb_node's branch to its parent
        old_br_len = climb_node.length
        new_root = tree.__class__()
        climb_node.parent.append(new_root)
        new_root.append(climb_node)
        # Split the original branch length so the new root sits exactly
        # half_max_dist from the starting tip.
        climb_node.length = half_max_dist - dist_climbed
        new_root.length = old_br_len - climb_node.length
        return new_root.unrooted_copy()
@experimental(as_of="0.4.0")
def has_children(self):
    r"""Return ``True`` if this node has at least one child.

    Returns
    -------
    bool
        `True` if the node has children.

    See Also
    --------
    is_tip
    is_root

    Examples
    --------
    >>> from skbio import TreeNode
    >>> tree = TreeNode.read(["((a,b)c);"])
    >>> print(tree.has_children())
    True
    >>> print(tree.find('a').has_children())
    False

    """
    # A node is either a tip or it has children; defer to is_tip so
    # subclasses overriding tip-ness stay consistent.
    return not self.is_tip()
@experimental(as_of="0.4.0")
def postorder(self, include_self=True):
    r"""Performs postorder iteration over tree.

    This is somewhat inelegant compared to saving the node and its index
    on the stack, but is 30% faster in the average case and 3x faster in
    the worst case (for a comb tree).

    Parameters
    ----------
    include_self : bool
        include the initial node if True

    Yields
    ------
    TreeNode
        Traversed node.

    See Also
    --------
    traverse
    preorder
    pre_and_postorder
    levelorder
    tips
    non_tips

    Examples
    --------
    >>> from skbio import TreeNode
    >>> tree = TreeNode.read(["((a,b)c);"])
    >>> for node in tree.postorder():
    ...     print(node.name)
    a
    b
    c
    None

    """
    # Iterative traversal: the stack holds, per ancestor level, the index
    # of the next child to visit at that level.
    child_index_stack = [0]
    curr = self
    curr_children = self.children
    curr_children_len = len(curr_children)
    while 1:
        curr_index = child_index_stack[-1]
        # if there are children left, process them
        if curr_index < curr_children_len:
            curr_child = curr_children[curr_index]
            # if the current child has children, go there
            if curr_child.children:
                child_index_stack.append(0)
                curr = curr_child
                curr_children = curr.children
                curr_children_len = len(curr_children)
                curr_index = 0
            # otherwise, yield that child
            else:
                yield curr_child
                child_index_stack[-1] += 1
        # if there are no children left, return self, and move to
        # self's parent
        else:
            if include_self or (curr is not self):
                yield curr
            if curr is self:
                break
            # Pop back up one level and advance the parent's child index.
            curr = curr.parent
            curr_children = curr.children
            curr_children_len = len(curr_children)
            child_index_stack.pop()
            child_index_stack[-1] += 1
@experimental(as_of="0.4.0")
def levelorder(self, include_self=True):
    r"""Performs levelorder iteration over tree

    Parameters
    ----------
    include_self : bool
        include the initial node if True

    Yields
    ------
    TreeNode
        Traversed node.

    See Also
    --------
    traverse
    postorder
    preorder
    pre_and_postorder
    tips
    non_tips

    Examples
    --------
    >>> from skbio import TreeNode
    >>> tree = TreeNode.read(["((a,b)c,(d,e)f);"])
    >>> for node in tree.levelorder():
    ...     print(node.name)
    None
    c
    f
    a
    b
    d
    e

    """
    # deque.popleft() is O(1); the previous list.pop(0) shifted the whole
    # queue on every dequeue, making the traversal O(n^2) on large trees.
    from collections import deque

    queue = deque([self])
    while queue:
        curr = queue.popleft()
        if include_self or (curr is not self):
            yield curr
        if curr.children:
            queue.extend(curr.children)
@experimental(as_of="0.4.0")
def non_tips(self, include_self=False):
    r"""Iterate over the internal (non-tip) nodes descended from self.

    Node order is stable across calls: nodes are produced by a postorder
    traversal of the tree. If `include_self` is ``True`` and self is an
    internal node, self is included as well.

    Parameters
    ----------
    include_self : bool
        include the initial node if True

    Yields
    ------
    TreeNode
        Traversed node.

    See Also
    --------
    traverse
    postorder
    preorder
    pre_and_postorder
    levelorder
    tips

    Examples
    --------
    >>> from skbio import TreeNode
    >>> tree = TreeNode.read(["((a,b)c,(d,e)f);"])
    >>> for node in tree.non_tips():
    ...     print(node.name)
    c
    f

    """
    for node in self.postorder(include_self):
        if node.is_tip():
            continue
        yield node
@experimental(as_of="0.4.0")
def find_all(self, name):
    r"""Find all nodes that match `name`

    The first call to `find_all` will cache all nodes in the tree on the
    assumption that additional calls to `find_all` will be made.

    Parameters
    ----------
    name : TreeNode or str
        The name or node to find. If `name` is `TreeNode` then all
        other nodes with the same name will be returned.

    Raises
    ------
    MissingNodeError
        Raises if the node to be searched for is not found

    Returns
    -------
    list of TreeNode
        The nodes found

    See Also
    --------
    find
    find_by_id
    find_by_func

    Examples
    --------
    >>> from skbio.tree import TreeNode
    >>> tree = TreeNode.read(["((a,b)c,(d,e)d,(f,g)c);"])
    >>> for node in tree.find_all('c'):
    ...     print(node.name, node.children[0].name, node.children[1].name)
    c a b
    c f g
    >>> for node in tree.find_all('d'):
    ...     print(node.name, str(node))
    d (d,e)d;
    <BLANKLINE>
    d d;
    <BLANKLINE>

    """
    root = self.root()

    # if what is being passed in looks like a node, just return it
    if isinstance(name, root.__class__):
        return [name]

    root.create_caches()

    tip = root._tip_cache.get(name, None)
    # Copy the cached list before modifying it. The previous code
    # appended the tip directly to the list stored in _non_tip_cache,
    # mutating the cache so that repeated calls (or caller-side list
    # mutation) accumulated duplicate entries.
    nodes = list(root._non_tip_cache.get(name, []))

    if tip is not None:
        nodes.append(tip)

    if not nodes:
        raise MissingNodeError("Node %s is not in self" % name)
    else:
        return nodes
@experimental(as_of="0.4.0")
def find_by_func(self, func):
    r"""Yield every node in this subtree for which `func` is truthy.

    This search method is based on the current subtree, not the root.

    Parameters
    ----------
    func : a function
        A function that accepts a TreeNode and returns `True` or
        `False`, where `True` indicates the node is to be yielded

    Yields
    ------
    TreeNode
        Node found by `func`.

    See Also
    --------
    find
    find_all
    find_by_id

    Examples
    --------
    >>> from skbio import TreeNode
    >>> tree = TreeNode.read(["((a,b)c,(d,e)f);"])
    >>> func = lambda x: x.parent == tree.find('c')
    >>> [n.name for n in tree.find_by_func(func)]
    ['a', 'b']

    """
    yield from filter(func, self.traverse(include_self=True))
@experimental(as_of="0.4.0")
def lowest_common_ancestor(self, tipnames):
    r"""Lowest common ancestor for a list of tips

    Parameters
    ----------
    tipnames : list of TreeNode or str
        The nodes of interest

    Returns
    -------
    TreeNode
        The lowest common ancestor of the passed in nodes

    Raises
    ------
    ValueError
        If no tips could be found in the tree, or if not all tips were
        found.

    Examples
    --------
    >>> from skbio import TreeNode
    >>> tree = TreeNode.read(["((a,b)c,(d,e)f)root;"])
    >>> nodes = [tree.find('a'), tree.find('b')]
    >>> lca = tree.lowest_common_ancestor(nodes)
    >>> print(lca.name)
    c
    >>> nodes = [tree.find('a'), tree.find('e')]
    >>> lca = tree.lca(nodes)  # lca is an alias for convenience
    >>> print(lca.name)
    root

    """
    if len(tipnames) == 1:
        return self.find(tipnames[0])

    tips = [self.find(name) for name in tipnames]

    if len(tips) == 0:
        raise ValueError("No tips found.")

    nodes_to_scrub = []

    for t in tips:
        if t.is_root():
            # has to be the LCA...
            return t

        prev = t
        curr = t.parent

        # Walk toward the root, marking each previously unvisited
        # ancestor with a temporary 'black' list recording the child
        # through which it was first reached.
        while curr and not hasattr(curr, 'black'):
            setattr(curr, 'black', [prev])
            nodes_to_scrub.append(curr)
            prev = curr
            curr = curr.parent

        # increase black count, multiple children lead to here
        if curr:
            curr.black.append(prev)

    # Descend from self: while only a single marked child leads down,
    # the LCA must lie deeper; stop at the first node reached through
    # more than one child.
    curr = self
    while len(curr.black) == 1:
        curr = curr.black[0]

    # clean up tree
    for n in nodes_to_scrub:
        delattr(n, 'black')

    return curr

lca = lowest_common_ancestor  # for convenience
def _balanced_distance_to_tip(self):
    """Return the distance to tip from this node.

    The distance to every tip from this node must be equal for this to
    return a correct result, so only the first child at each level is
    followed.

    Returns
    -------
    int
        The distance to tip of a length-balanced tree
    """
    total = 0
    current = self
    while current.has_children():
        current = current.children[0]
        total += current.length
    return total
@experimental(as_of="0.4.0")
def to_taxonomy(self, allow_empty=False, filter_f=None):
    """Returns a taxonomy representation of self

    Parameters
    ----------
    allow_empty : bool, optional
        Allow gaps in the taxonomy (e.g., internal nodes without
        names).
    filter_f : function, optional
        Specify a filtering function that returns True if the lineage
        is to be returned. This function must accept a ``TreeNode`` as
        its first parameter, and a ``list`` that represents the lineage
        as the second parameter.

    Yields
    ------
    tuple
        ``(tip, [lineage])`` where ``tip`` corresponds to a tip in the
        tree and ``[lineage]`` is the expanded names from root to tip.
        ``None`` and empty strings are omitted from the lineage.

    Notes
    -----
    If ``allow_empty`` is ``True`` and the root node does not have a
    name, then that name will not be included. This is because it is
    common to have multiple domains represented in the taxonomy, which
    would result in a root node that does not have a name and does not
    make sense to represent in the output.

    Examples
    --------
    >>> from skbio.tree import TreeNode
    >>> lineages = {'1': ['Bacteria', 'Firmicutes', 'Clostridia'],
    ...             '2': ['Bacteria', 'Firmicutes', 'Bacilli'],
    ...             '3': ['Bacteria', 'Bacteroidetes', 'Sphingobacteria'],
    ...             '4': ['Archaea', 'Euryarchaeota', 'Thermoplasmata'],
    ...             '5': ['Archaea', 'Euryarchaeota', 'Thermoplasmata'],
    ...             '6': ['Archaea', 'Euryarchaeota', 'Halobacteria'],
    ...             '7': ['Archaea', 'Euryarchaeota', 'Halobacteria'],
    ...             '8': ['Bacteria', 'Bacteroidetes', 'Sphingobacteria'],
    ...             '9': ['Bacteria', 'Bacteroidetes', 'Cytophagia']}
    >>> tree = TreeNode.from_taxonomy(lineages.items())
    >>> lineages = sorted([(n.name, l) for n, l in tree.to_taxonomy()])
    >>> for name, lineage in lineages:
    ...     print(name, '; '.join(lineage))
    1 Bacteria; Firmicutes; Clostridia
    2 Bacteria; Firmicutes; Bacilli
    3 Bacteria; Bacteroidetes; Sphingobacteria
    4 Archaea; Euryarchaeota; Thermoplasmata
    5 Archaea; Euryarchaeota; Thermoplasmata
    6 Archaea; Euryarchaeota; Halobacteria
    7 Archaea; Euryarchaeota; Halobacteria
    8 Bacteria; Bacteroidetes; Sphingobacteria
    9 Bacteria; Bacteroidetes; Cytophagia

    """
    if filter_f is None:
        # Default filter accepts every lineage.
        def filter_f(a, b):
            return True

    self.assign_ids()
    seen = set()
    lineage = []

    # visit internal nodes while traversing out to the tips, and on the
    # way back up
    for node in self.traverse(self_before=True, self_after=True):
        if node.is_tip():
            if filter_f(node, lineage):
                # Yield a copy so later mutation of `lineage` does not
                # affect previously yielded results.
                yield (node, lineage[:])
        else:
            if allow_empty:
                if node.is_root() and not node.name:
                    continue
            else:
                if not node.name:
                    continue

            # An internal node is visited twice (self_before and
            # self_after): push its name on the way down, pop it on the
            # way back up.
            if node.id in seen:
                lineage.pop(-1)
            else:
                lineage.append(node.name)
                seen.add(node.id)
types to return. The expected form is [(attribute_name, type)]. If `None`, then `name`, `length`, and `id` are returned. nan_length_value : float, optional If provided, replaces any `nan` in the branch length vector (i.e., ``result['length']``) with this value. `nan` branch lengths can arise from an edge not having a length (common for the root node parent edge), which can making summing problematic. Returns ------- dict of array {id_index: {id: TreeNode}, child_index: ((node_id, left_child_id, right_child_id)), attr_1: array(...), ... attr_N: array(...)} Notes ----- Attribute arrays are in index order such that TreeNode.id can be used as a lookup into the array. Examples -------- >>> from pprint import pprint >>> from skbio import TreeNode >>> t = TreeNode.read(['(((a:1,b:2,c:3)x:4,(d:5)y:6)z:7);']) >>> res = t.to_array() >>> sorted(res.keys()) ['child_index', 'id', 'id_index', 'length', 'name'] >>> res['child_index'] array([[4, 0, 2], [5, 3, 3], [6, 4, 5], [7, 6, 6]]) >>> for k, v in res['id_index'].items(): ... print(k, v) ... 0 a:1.0; <BLANKLINE> 1 b:2.0; <BLANKLINE> 2 c:3.0; <BLANKLINE> 3 d:5.0; <BLANKLINE> 4 (a:1.0,b:2.0,c:3.0)x:4.0; <BLANKLINE> 5 (d:5.0)y:6.0; <BLANKLINE> 6 ((a:1.0,b:2.0,c:3.0)x:4.0,(d:5.0)y:6.0)z:7.0; <BLANKLINE> 7 (((a:1.0,b:2.0,c:3.0)x:4.0,(d:5.0)y:6.0)z:7.0); <BLANKLINE> >>> res['id'] array([0, 1, 2, 3, 4, 5, 6, 7]) >>> res['name'] array(['a', 'b', 'c', 'd', 'x', 'y', 'z', None], dtype=object) """ if attrs is None: attrs = [('name', object), ('length', float), ('id', int)] else: for attr, dtype in attrs: if not hasattr(self, attr): raise AttributeError("Invalid attribute '%s'." 
def _ascii_art(self, char1='-', show_internal=True, compact=False):
    """Recursively render this subtree as ASCII art.

    Parameters
    ----------
    char1 : str
        The connector character to draw at this node's stem.
    show_internal : bool
        If True, overlay internal node labels on their stems.
    compact : bool
        If True, omit the blank spacer lines between sibling subtrees.

    Returns
    -------
    tuple of (list of str, int)
        The rendered lines and the index of this node's stem line.
    """
    LEN = 10
    PAD = ' ' * LEN
    PA = ' ' * (LEN - 1)
    namestr = self._node_label()
    if self.children:
        mids = []
        result = []
        for c in self.children:
            # First child hangs down with '/', last with '\', middle
            # children with '-'.
            if c is self.children[0]:
                char2 = '/'
            elif c is self.children[-1]:
                char2 = '\\'
            else:
                char2 = '-'
            (clines, mid) = c._ascii_art(char2, show_internal, compact)
            mids.append(mid + len(result))
            result.extend(clines)
            if not compact:
                result.append('')
        if not compact:
            result.pop()
        (lo, hi, end) = (mids[0], mids[-1], len(result))
        prefixes = [PAD] * (lo + 1) + [PA + '|'] * \
            (hi - lo - 1) + [PAD] * (end - hi)
        # `np.int` was removed in NumPy 1.20; for non-negative ints,
        # floor division is equivalent to int(np.trunc(... / 2)).
        mid = (lo + hi) // 2
        prefixes[mid] = char1 + '-' * (LEN - 2) + prefixes[mid][-1]
        result = [p + l for (p, l) in zip(prefixes, result)]
        if show_internal:
            stem = result[mid]
            result[mid] = stem[0] + namestr + stem[len(namestr) + 1:]
        return (result, mid)
    else:
        return ([char1 + '-' + namestr], 0)
@experimental(as_of="0.4.0")
def accumulate_to_ancestor(self, ancestor):
    r"""Return the sum of the distance between self and ancestor

    Parameters
    ----------
    ancestor : TreeNode
        The node of the ancestor to accumulate distance too

    Returns
    -------
    float
        The sum of lengths between self and ancestor

    Raises
    ------
    NoParentError
        A NoParentError is raised if the ancestor is not an ancestor of
        self
    NoLengthError
        A NoLengthError is raised if one of the nodes between self and
        ancestor (including self) lacks a `length` attribute

    See Also
    --------
    distance

    Examples
    --------
    >>> from skbio import TreeNode
    >>> tree = TreeNode.read(["((a:1,b:2)c:3,(d:4,e:5)f:6)root;"])
    >>> root = tree
    >>> tree.find('a').accumulate_to_ancestor(root)
    4.0

    """
    accum = 0.0
    curr = self
    while curr is not ancestor:
        if curr.is_root():
            raise NoParentError("Provided ancestor is not in the path")

        if curr.length is None:
            # Parenthesize the fallback: `%` binds tighter than `or`,
            # so the previous `"..." % curr.name or "unnamed"` always
            # produced a truthy formatted string and the "unnamed"
            # fallback was unreachable for nameless nodes.
            raise NoLengthError("No length on node %s found."
                                % (curr.name or "unnamed"))

        accum += curr.length
        curr = curr.parent

    return accum
Parameters ---------- other : TreeNode The node to compute a distance to Returns ------- float The distance between two nodes Raises ------ NoLengthError A NoLengthError will be raised if a node without `length` is encountered See Also -------- tip_tip_distances accumulate_to_ancestor compare_tip_distances get_max_distance Examples -------- >>> from skbio import TreeNode >>> tree = TreeNode.read(["((a:1,b:2)c:3,(d:4,e:5)f:6)root;"]) >>> tip_a = tree.find('a') >>> tip_d = tree.find('d') >>> tip_a.distance(tip_d) 14.0 """ if self is other: return 0.0 self_ancestors = [self] + list(self.ancestors()) other_ancestors = [other] + list(other.ancestors()) if self in other_ancestors: return other.accumulate_to_ancestor(self) elif other in self_ancestors: return self.accumulate_to_ancestor(other) else: root = self.root() lca = root.lowest_common_ancestor([self, other]) accum = self.accumulate_to_ancestor(lca) accum += other.accumulate_to_ancestor(lca) return accum def _set_max_distance(self): """Propagate tip distance information up the tree This method was originally implemented by Julia Goodrich with the intent of being able to determine max tip to tip distances between nodes on large trees efficiently. 
        The code has been modified to track the specific tips the distance
        is between
        """
        maxkey = itemgetter(0)
        for n in self.postorder():
            if n.is_tip():
                # A tip is distance 0.0 from itself on both "sides" of the
                # cached pair.
                n.MaxDistTips = ((0.0, n), (0.0, n))
            else:
                if len(n.children) == 1:
                    # NOTE(review): "descedent" typo is in the runtime
                    # string; left untouched here (doc-only change).
                    raise TreeError("No support for single descedent nodes")
                else:
                    # Best (distance, tip) pair within each child subtree,
                    # paired with the child itself so its branch length can
                    # be added below.
                    tip_info = [(max(c.MaxDistTips, key=maxkey), c)
                                for c in n.children]

                    dists = [i[0][0] for i in tip_info]
                    # Indices of the two deepest child subtrees.
                    best_idx = np.argsort(dists)[-2:]

                    (tip_a_d, tip_a), child_a = tip_info[best_idx[0]]
                    (tip_b_d, tip_b), child_b = tip_info[best_idx[1]]

                    # Missing branch lengths are treated as 0.0.
                    tip_a_d += child_a.length or 0.0
                    tip_b_d += child_b.length or 0.0

                    n.MaxDistTips = ((tip_a_d, tip_a), (tip_b_d, tip_b))

    def _get_max_distance_singledesc(self):
        """returns the max distance between any pair of tips

        Also returns the tip names that it is between as a tuple"""
        # Fall back to the full pairwise matrix and take its argmax.
        distmtx = self.tip_tip_distances()
        idx_max = divmod(distmtx.data.argmax(), distmtx.shape[1])
        max_pair = (distmtx.ids[idx_max[0]], distmtx.ids[idx_max[1]])
        return distmtx[idx_max], max_pair

    @experimental(as_of="0.4.0")
    def get_max_distance(self):
        """Returns the max tip tip distance between any pair of tips

        Returns
        -------
        float
            The distance between the two most distant tips in the tree
        tuple of TreeNode
            The two most distant tips in the tree

        Raises
        ------
        NoLengthError
            A NoLengthError will be thrown if a node without length is
            encountered

        See Also
        --------
        distance
        tip_tip_distances
        compare_tip_distances

        Examples
        --------
        >>> from skbio import TreeNode
        >>> tree = TreeNode.read(["((a:1,b:2)c:3,(d:4,e:5)f:6)root;"])
        >>> dist, tips = tree.get_max_distance()
        >>> dist
        16.0
        >>> [n.name for n in tips]
        ['b', 'e']

        """
        # Compute and cache MaxDistTips lazily; reuse on subsequent calls.
        if not hasattr(self, 'MaxDistTips'):
            # _set_max_distance will throw a TreeError if a node with a
            # single child is encountered
            try:
                self._set_max_distance()
            except TreeError:  #
                return self._get_max_distance_singledesc()

        longest = 0.0
        tips = [None, None]
        # The max tip-to-tip path passes through some internal node; scan
        # each one's cached deepest pair.
        for n in self.non_tips(include_self=True):
            tip_a, tip_b = n.MaxDistTips
            dist = (tip_a[0] + tip_b[0])

            if dist > longest:
                longest = dist
                tips = [tip_a[1], tip_b[1]]
        return longest, tips

    @experimental(as_of="0.4.0")
    def tip_tip_distances(self, endpoints=None):
        """Returns distance matrix between pairs of tips, and a tip order.

        By default, all pairwise distances are calculated in the tree. If
        `endpoints` are specified, then only the distances between those tips
        are computed.

        Parameters
        ----------
        endpoints : list of TreeNode or str, or None
            A list of TreeNode objects or names of TreeNode objects

        Returns
        -------
        DistanceMatrix
            The distance matrix

        Raises
        ------
        ValueError
            If any of the specified `endpoints` are not tips

        See Also
        --------
        distance
        compare_tip_distances

        Notes
        -----
        If a node does not have an associated length, 0.0 will be used and a
        ``RepresentationWarning`` will be raised.

        Examples
        --------
        >>> from skbio import TreeNode
        >>> tree = TreeNode.read(["((a:1,b:2)c:3,(d:4,e:5)f:6)root;"])
        >>> mat = tree.tip_tip_distances()
        >>> print(mat)
        4x4 distance matrix
        IDs:
        'a', 'b', 'd', 'e'
        Data:
        [[  0.   3.  14.  15.]
         [  3.   0.  15.  16.]
         [ 14.  15.   0.   9.]
         [ 15.  16.   9.   0.]]

        """
        all_tips = list(self.tips())
        if endpoints is None:
            tip_order = all_tips
        else:
            # Resolve names to nodes and reject non-tip endpoints.
            tip_order = [self.find(n) for n in endpoints]
            for n in tip_order:
                if not n.is_tip():
                    raise ValueError("Node with name '%s' is not a tip." %
                                     n.name)

        # linearize all tips in postorder
        # .__start, .__stop compose the slice in tip_order.
for i, node in enumerate(all_tips): node.__start, node.__stop = i, i + 1 # the result map provides index in the result matrix result_map = {n.__start: i for i, n in enumerate(tip_order)} num_all_tips = len(all_tips) # total number of tips num_tips = len(tip_order) # total number of tips in result result = np.zeros((num_tips, num_tips), float) # tip by tip matrix distances = np.zeros((num_all_tips), float) # dist from tip to tip def update_result(): # set tip_tip distance between tips of different child for child1, child2 in combinations(node.children, 2): for tip1 in range(child1.__start, child1.__stop): if tip1 not in result_map: continue t1idx = result_map[tip1] for tip2 in range(child2.__start, child2.__stop): if tip2 not in result_map: continue t2idx = result_map[tip2] result[t1idx, t2idx] = distances[ tip1] + distances[tip2] for node in self.postorder(): if not node.children: continue # subtree with solved child wedges # can possibly use np.zeros starts, stops = [], [] # to calc ._start and ._stop for curr node for child in node.children: length = child.length if length is None: warnings.warn( "`TreeNode.tip_tip_distances`: Node with name %r does " "not have an associated length, so a length of 0.0 " "will be used." % child.name, RepresentationWarning) length = 0.0 distances[child.__start:child.__stop] += length starts.append(child.__start) stops.append(child.__stop) node.__start, node.__stop = min(starts), max(stops) if len(node.children) > 1: update_result() return DistanceMatrix(result + result.T, [n.name for n in tip_order]) @experimental(as_of="0.4.0") def compare_rfd(self, other, proportion=False): """Calculates the Robinson and Foulds symmetric difference Parameters ---------- other : TreeNode A tree to compare against proportion : bool Return a proportional difference Returns ------- float The distance between the trees Notes ----- Implementation based off of code by Julia Goodrich. The original description of the algorithm can be found in [1]_. 
        Raises
        ------
        ValueError
            NOTE(review): this section looks inaccurate -- no ``ValueError``
            is raised by this method when the tip names are equal; equal tip
            name sets are the normal comparison path below. Verify whether
            ``shear`` can raise instead.

        See Also
        --------
        compare_subsets
        compare_tip_distances

        References
        ----------
        .. [1] Comparison of phylogenetic trees. Robinson and Foulds.
           Mathematical Biosciences. 1981. 53:131-141

        Examples
        --------
        >>> from skbio import TreeNode
        >>> tree1 = TreeNode.read(["((a,b),(c,d));"])
        >>> tree2 = TreeNode.read(["(((a,b),c),d);"])
        >>> tree1.compare_rfd(tree2)
        2.0

        """
        t1names = {n.name for n in self.tips()}
        t2names = {n.name for n in other.tips()}

        # If the tip sets differ, shear the larger tree down to the shared
        # names before comparing subsets.
        if t1names != t2names:
            if t1names < t2names:
                tree1 = self
                tree2 = other.shear(t1names)
            else:
                tree1 = self.shear(t2names)
                tree2 = other
        else:
            tree1 = self
            tree2 = other

        tree1_sets = tree1.subsets()
        tree2_sets = tree2.subsets()

        # RF distance = number of subsets (bipartitions) unique to one tree.
        not_in_both = tree1_sets.symmetric_difference(tree2_sets)

        dist = float(len(not_in_both))

        if proportion:
            total_subsets = len(tree1_sets) + len(tree2_sets)
            dist = dist / total_subsets

        return dist

    @experimental(as_of="0.4.0")
    def compare_subsets(self, other, exclude_absent_taxa=False):
        """Returns fraction of overlapping subsets where self and other differ.

        Names present in only one of the two trees will count as mismatches,
        if you don't want this behavior, strip out the non-matching tips
        first.

        Parameters
        ----------
        other : TreeNode
            The tree to compare
        exclude_absent_taxa : bool
            Strip out names that don't occur in both trees

        Returns
        -------
        float
            The fraction of overlapping subsets that differ between the trees

        See Also
        --------
        compare_rfd
        compare_tip_distances
        subsets

        Examples
        --------
        >>> from skbio import TreeNode
        >>> tree1 = TreeNode.read(["((a,b),(c,d));"])
        >>> tree2 = TreeNode.read(["(((a,b),c),d);"])
        >>> tree1.compare_subsets(tree2)
        0.5

        """
        self_sets, other_sets = self.subsets(), other.subsets()

        if exclude_absent_taxa:
            # Restrict every subset to the shared tip names; singleton and
            # empty subsets are uninformative and dropped.
            in_both = self.subset() & other.subset()

            self_sets = (i & in_both for i in self_sets)
            self_sets = frozenset({i for i in self_sets if len(i) > 1})

            other_sets = (i & in_both for i in other_sets)
            other_sets = frozenset({i for i in other_sets if len(i) > 1})

        total_subsets = len(self_sets) + len(other_sets)
        intersection_length = len(self_sets & other_sets)

        if not total_subsets:  # no common subsets after filtering, so max dist
            return 1

        return 1 - (2 * intersection_length / float(total_subsets))

    @experimental(as_of="0.4.0")
    def compare_tip_distances(self, other, sample=None, dist_f=distance_from_r,
                              shuffle_f=np.random.shuffle):
        """Compares self to other using tip-to-tip distance matrices.

        Value returned is `dist_f(m1, m2)` for the two matrices. Default is
        to use the Pearson correlation coefficient, with +1 giving a distance
        of 0 and -1 giving a distance of +1 (the maximum possible value).
        Depending on the application, you might instead want to use
        distance_from_r_squared, which counts correlations of both +1 and -1
        as identical (0 distance).

        Note: automatically strips out the names that don't match (this is
        necessary for this method because the distance between non-matching
        names and matching names is undefined in the tree where they don't
        match, and because we need to reorder the names in the two trees to
        match up the distance matrices).
        Parameters
        ----------
        other : TreeNode
            The tree to compare
        sample : int or None
            Randomly subsample the tips in common between the trees to
            compare. This is useful when comparing very large trees.
        dist_f : function
            The distance function used to compare two the tip-tip distance
            matrices
        shuffle_f : function
            The shuffling function used if `sample` is not None

        Returns
        -------
        float
            The distance between the trees

        Raises
        ------
        ValueError
            A ValueError is raised if there does not exist common tips
            between the trees

        See Also
        --------
        compare_subsets
        compare_rfd

        Examples
        --------
        >>> from skbio import TreeNode
        >>> # note, only three common taxa between the trees
        >>> tree1 = TreeNode.read(["((a:1,b:1):2,(c:0.5,X:0.7):3);"])
        >>> tree2 = TreeNode.read(["(((a:1,b:1,Y:1):2,c:3):1,Z:4);"])
        >>> dist = tree1.compare_tip_distances(tree2)
        >>> print("%.9f" % dist)
        0.000133446

        """
        self_names = {i.name: i for i in self.tips()}
        other_names = {i.name: i for i in other.tips()}
        common_names = frozenset(self_names) & frozenset(other_names)
        common_names = list(common_names)

        if not common_names:
            raise ValueError("No tip names in common between the two trees.")

        if len(common_names) <= 2:
            return 1  # the two trees must match by definition in this case

        if sample is not None:
            # Shuffle then truncate: a uniform random subsample of tips.
            shuffle_f(common_names)
            common_names = common_names[:sample]

        self_nodes = [self_names[k] for k in common_names]
        other_nodes = [other_names[k] for k in common_names]

        # Both matrices are built over the same name order, so they are
        # directly comparable element-wise.
        self_matrix = self.tip_tip_distances(endpoints=self_nodes)
        other_matrix = other.tip_tip_distances(endpoints=other_nodes)

        return dist_f(self_matrix, other_matrix)

    @experimental(as_of="0.4.2")
    def bifurcate(self, insert_length=None):
        r"""Reorders the tree into a bifurcating tree.

        All nodes that have more than 2 children will have additional
        intermediate nodes inserted to ensure that every node has only 2
        children.

        Parameters
        ----------
        insert_length : int, optional
            The branch length assigned to all inserted nodes.
        See Also
        --------
        prune

        Notes
        -----
        Any nodes that have a single child can be collapsed using the prune
        method to create strictly bifurcating trees.

        Examples
        --------
        >>> from skbio import TreeNode
        >>> tree = TreeNode.read(["((a,b,g,h)c,(d,e)f)root;"])
        >>> print(tree.ascii_art())
                            /-a
                           |
                           |--b
                  /c-------|
                 |         |--g
                 |         |
        -root----|          \-h
                 |
                 |          /-d
                  \f-------|
                            \-e
        >>> tree.bifurcate()
        >>> print(tree.ascii_art())
                            /-h
                  /c-------|
                 |         |          /-g
                 |          \--------|
                 |                   |          /-a
        -root----|                    \--------|
                 |                              \-b
                 |
                 |          /-d
                  \f-------|
                            \-e

        """
        for n in self.traverse(include_self=True):
            if len(n.children) > 2:
                # NOTE: `stack` aliases n.children, so pops/extends below
                # mutate the node's child list as they go.
                stack = n.children
                while len(stack) > 2:
                    # Peel one child off and push the rest under a new
                    # intermediate node, reducing the child count by one
                    # per iteration.
                    ind = stack.pop()
                    intermediate = self.__class__()
                    intermediate.length = insert_length
                    intermediate.extend(stack)
                    n.append(intermediate)
                    for k in stack:
                        n.remove(k)
                    n.extend([ind, intermediate])

    @experimental(as_of="0.4.0")
    def index_tree(self):
        """Index a tree for rapid lookups within a tree array

        Indexes nodes in-place as `n._leaf_index`.

        Returns
        -------
        dict
            A mapping {node_id: TreeNode}
        np.array of ints
            This arrays describes the IDs of every internal node, and the ID
            range of the immediate descendents. The first column in the array
            corresponds to node_id. The second column is the left most
            descendent's ID. The third column is the right most descendent's
            ID.

        """
        self.assign_ids()

        id_index = {}
        child_index = []

        for n in self.postorder():
            for c in n.children:
                id_index[c.id] = c

                if c:
                    # c has children itself, so need to add to result
                    child_index.append((c.id,
                                        c.children[0].id,
                                        c.children[-1].id))

        # handle root, which should be t itself
        id_index[self.id] = self

        # only want to add to the child_index if self has children...
        if self.children:
            child_index.append((self.id,
                                self.children[0].id,
                                self.children[-1].id))

        # atleast_2d guards the degenerate single-row (or empty) case so the
        # returned array always has a consistent shape.
        child_index = np.asarray(child_index, dtype=np.int64)
        child_index = np.atleast_2d(child_index)

        return id_index, child_index

    @experimental(as_of="0.4.0")
    def assign_ids(self):
        """Assign topologically stable unique ids to self

        Following the call, all nodes in the tree will have their id
        attribute set
        """
        # Postorder numbering: children are numbered before their parents,
        # and the root (self) receives the largest id.
        curr_index = 0
        for n in self.postorder():
            for c in n.children:
                c.id = curr_index
                curr_index += 1

        self.id = curr_index

    @experimental(as_of="0.4.0")
    def descending_branch_length(self, tip_subset=None):
        """Find total descending branch length from self or subset of self
        tips

        Parameters
        ----------
        tip_subset : Iterable, or None
            If None, the total descending branch length for all tips in the
            tree will be returned. If a list of tips is provided then only
            the total descending branch length associated with those tips
            will be returned.

        Returns
        -------
        float
            The total descending branch length for the specified set of tips.

        Raises
        ------
        ValueError
            A ValueError is raised if the list of tips supplied to tip_subset
            contains internal nodes or non-tips.

        Notes
        -----
        This function replicates cogent's totalDescendingBranch Length method
        and extends that method to allow the calculation of total descending
        branch length of a subset of the tips if requested. The postorder
        guarantees that the function will always be able to add the
        descending branch length if the node is not a tip.

        Nodes with no length will have their length set to 0. The root length
        (if it exists) is ignored.

        Examples
        --------
        >>> from skbio import TreeNode
        >>> tr = TreeNode.read(["(((A:.1,B:1.2)C:.6,(D:.9,E:.6)F:.9)G:2.4,"
        ...                     "(H:.4,I:.5)J:1.3)K;"])
        >>> tdbl = tr.descending_branch_length()
        >>> sdbl = tr.descending_branch_length(['A','E'])
        >>> print(round(tdbl, 1), round(sdbl, 1))
        8.9 2.2

        """
        self.assign_ids()
        if tip_subset is not None:
            all_tips = self.subset()
            if not set(tip_subset).issubset(all_tips):
                raise ValueError('tip_subset contains ids that aren\'t tip '
                                 'names.')

            lca = self.lowest_common_ancestor(tip_subset)
            ancestors = {}
            for tip in tip_subset:
                curr = self.find(tip)
                # Walk from each tip up to the LCA; keying by node id means
                # shared ancestors are counted only once.
                while curr is not lca:
                    ancestors[curr.id] = curr.length if curr.length is not \
                        None else 0.0
                    curr = curr.parent
            return sum(ancestors.values())

        else:
            return sum(n.length for n in self.postorder(include_self=True)
                       if n.length is not None)

    @experimental(as_of="0.4.0")
    def cache_attr(self, func, cache_attrname, cache_type=list):
        """Cache attributes on internal nodes of the tree

        Parameters
        ----------
        func : function
            func will be provided the node currently being evaluated and
            must return a list of item (or items) to cache from that node,
            or an empty list.
        cache_attrname : str
            Name of the attribute to decorate on containing the cached values
        cache_type : {set, frozenset, list}
            The type of the cache

        Notes
        -----
        This method is particularly useful if you need to frequently look up
        attributes that would normally require a traversal of the tree.

        WARNING: any cache created by this method will be invalidated if the
        topology of the tree changes (e.g., if `TreeNode.invalidate_caches`
        is called).

        Raises
        ------
        TypeError
            If an cache_type that is not a `set` or a `list` is specified.

        Examples
        --------
        Cache the tip names of the tree on its internal nodes

        >>> from skbio import TreeNode
        >>> tree = TreeNode.read(["((a,b,(c,d)e)f,(g,h)i)root;"])
        >>> f = lambda n: [n.name] if n.is_tip() else []
        >>> tree.cache_attr(f, 'tip_names')
        >>> for n in tree.traverse(include_self=True):
        ...
print("Node name: %s, cache: %r" % (n.name, n.tip_names)) Node name: root, cache: ['a', 'b', 'c', 'd', 'g', 'h'] Node name: f, cache: ['a', 'b', 'c', 'd'] Node name: a, cache: ['a'] Node name: b, cache: ['b'] Node name: e, cache: ['c', 'd'] Node name: c, cache: ['c'] Node name: d, cache: ['d'] Node name: i, cache: ['g', 'h'] Node name: g, cache: ['g'] Node name: h, cache: ['h'] """ if cache_type in [set, frozenset]: def reduce_f(a, b): return a | b elif cache_type == list: def reduce_f(a, b): return a + b else: raise TypeError("Only list, set and frozenset are supported.") for node in self.postorder(include_self=True): node._registered_caches.add(cache_attrname) cached = [getattr(c, cache_attrname) for c in node.children] cached.append(cache_type(func(node))) setattr(node, cache_attrname, reduce(reduce_f, cached)) @experimental(as_of="0.4.0") def shuffle(self, k=None, names=None, shuffle_f=np.random.shuffle, n=1): """Yield trees with shuffled tip names Parameters ---------- k : int, optional The number of tips to shuffle. If k is not `None`, k tips are randomly selected, and only those names will be shuffled. names : list, optional The specific tip names to shuffle. k and names cannot be specified at the same time. shuffle_f : func Shuffle method, this function must accept a list and modify inplace. n : int, optional The number of iterations to perform. Value must be > 0 and `np.inf` can be specified for an infinite number of iterations. Notes ----- Tip names are shuffled inplace. If neither `k` nor `names` are provided, all tips are shuffled. Yields ------ TreeNode Tree with shuffled tip names. Raises ------ ValueError If `k` is < 2 If `n` is < 1 ValueError If both `k` and `names` are specified MissingNodeError If `names` is specified but one of the names cannot be found Examples -------- Alternate the names on two of the tips, 'a', and 'b', and do this 5 times. 
>>> from skbio import TreeNode >>> tree = TreeNode.read(["((a,b),(c,d));"]) >>> rev = lambda items: items.reverse() >>> shuffler = tree.shuffle(names=['a', 'b'], shuffle_f=rev, n=5) >>> for shuffled_tree in shuffler: ... print(shuffled_tree) ((b,a),(c,d)); <BLANKLINE> ((a,b),(c,d)); <BLANKLINE> ((b,a),(c,d)); <BLANKLINE> ((a,b),(c,d)); <BLANKLINE> ((b,a),(c,d)); <BLANKLINE> """ if k is not None and k < 2: raise ValueError("k must be None or >= 2") if k is not None and names is not None: raise ValueError("n and names cannot be specified at the sametime") if n < 1: raise ValueError("n must be > 0") self.assign_ids() if names is None: all_tips = list(self.tips()) if n is None: n = len(all_tips) shuffle_f(all_tips) names = [tip.name for tip in all_tips[:k]] nodes = [self.find(name) for name in names] # Since the names are being shuffled, the association between ID and # name is no longer reliable self.invalidate_caches() counter = 0 while counter < n: shuffle_f(names) for node, name in zip(nodes, names): node.name = name yield self counter += 1 @experimental(as_of="0.5.6") def _extract_support(self): """Extract the support value from a node label, if available. Returns ------- tuple of int, float or None The support value extracted from the node label str or None The node label with the support value stripped """ support, label = None, None if self.name: # separate support value from node name by the first colon left, _, right = self.name.partition(':') try: support = int(left) except ValueError: try: support = float(left) except ValueError: pass # strip support value from node name label = right or None if support is not None else self.name return support, label @experimental(as_of="0.5.6") def _node_label(self): """Generate a node label in the format of "support:name" if both exist, or "support" or "name" if either exists. 
        Returns
        -------
        str
            Generated node label

        """
        lblst = []
        if self.support is not None:  # prevents support of NoneType
            lblst.append(str(self.support))
        if self.name:  # prevents name of NoneType
            lblst.append(self.name)
        return ':'.join(lblst)

    @experimental(as_of="0.5.6")
    def assign_supports(self):
        """Extract support values from internal node labels of a tree.

        Notes
        -----
        A "support value" measures the confidence or frequency of the
        incoming branch (the branch from parent to self) of an internal node
        in a tree. Roots and tips do not have support values. To extract a
        support value from a node label, this method reads from left and
        stops at the first ":" (if any), and attempts to convert it to a
        number.

        For examples: "(a,b)1.0", "(a,b)1.0:2.5", and "(a,b)'1.0:species_A'".
        In these cases the support values are all 1.0.

        For examples: "(a,b):1.0" and "(a,b)species_A". In these cases there
        are no support values.

        If a support value is successfully extracted, it will be stripped
        from the node label and assigned to the `support` property.

        IMPORTANT: mathematically, "support value" is a property of a branch,
        not a node. Because of historical reasons, support values are usually
        attached to nodes in a typical tree file [1].

        [1] Czech, Lucas, Jaime Huerta-Cepas, and Alexandros Stamatakis. "A
            Critical Review on the Use of Support Values in Tree Viewers and
            Bioinformatics Toolkits." Molecular biology and evolution 34.6
            (2017): 1535-1542.

        Examples
        --------
        >>> from skbio import TreeNode
        >>> newick = "((a,b)95,(c,d):1.1,(e,f)'80:speciesA':1.0);"
        >>> tree = TreeNode.read([newick])
        >>> tree.assign_supports()
        >>> tree.lca(['a', 'b']).support
        95
        >>> tree.lca(['c', 'd']).support is None
        True
        >>> tree.lca(['e', 'f']).support
        80
        >>> tree.lca(['e', 'f']).name
        'speciesA'

        """
        for node in self.traverse():
            if node.is_root() or node.is_tip():
                # Roots and tips never carry support values.
                node.support = None
            else:
                node.support, node.name = node._extract_support()

    @experimental(as_of="0.5.3")
    def unpack(self):
        """Unpack an internal node in place.

        Notes
        -----
        This function sequentially: 1) elongates child nodes by branch length
        of self (omit if there is no branch length), 2) removes self from
        parent node, and 3) grafts child nodes to parent node.

        Raises
        ------
        TreeError
            if input node is root or tip
            (doc fix: the code raises ``TreeError``, not ``ValueError``)

        See also
        --------
        unpack_by_func
        prune

        Examples
        --------
        >>> from skbio import TreeNode
        >>> tree = TreeNode.read(['((c:2.0,d:3.0)a:1.0,(e:2.0,f:1.0)b:2.0);'])
        >>> tree.find('b').unpack()
        >>> print(tree)
        ((c:2.0,d:3.0)a:1.0,e:4.0,f:3.0);
        <BLANKLINE>

        """
        if self.is_root():
            raise TreeError('Cannot unpack root.')
        if self.is_tip():
            raise TreeError('Cannot unpack tip.')
        parent = self.parent
        blen = (self.length or 0.0)
        for child in self.children:
            clen = (child.length or 0.0)
            # `or None` converts a 0.0 total back to "no length" -- a
            # length-less node absorbed into length-less children stays
            # length-less.
            child.length = (clen + blen or None)
        parent.remove(self)
        parent.extend(self.children)

    @experimental(as_of="0.5.3")
    def unpack_by_func(self, func):
        """Unpack internal nodes of a tree that meet certain criteria.

        Parameters
        ----------
        func : function
            a function that accepts a TreeNode and returns `True` or `False`,
            where `True` indicates the node is to be unpacked

        See also
        --------
        unpack
        prune

        Examples
        --------
        >>> from skbio import TreeNode
        >>> tree = TreeNode.read(['((c:2,d:3)a:1,(e:1,f:2)b:2);'])
        >>> tree.unpack_by_func(lambda x: x.length <= 1)
        >>> print(tree)
        ((e:1.0,f:2.0)b:2.0,c:3.0,d:4.0);
        <BLANKLINE>
        >>> tree = TreeNode.read(['(((a,b)85,(c,d)78)75,(e,(f,g)64)80);'])
        >>> tree.assign_supports()
        >>> tree.unpack_by_func(lambda x: x.support < 75)
        >>> print(tree)
        (((a,b)85,(c,d)78)75,(e,f,g)80);
        <BLANKLINE>

        """
        # Collect first, then unpack: unpacking mutates the topology, so the
        # traversal must not be iterated while modifying it.
        nodes_to_unpack = []
        for node in self.non_tips(include_self=False):
            if func(node):
                nodes_to_unpack.append(node)
        for node in nodes_to_unpack:
            node.unpack()
codeparrot/github-code-clean
# -*- coding: utf-8 -*-
# NOTE(review): Python 2 module (urlparse, unicode, __unicode__); part of
# the zamboni/mkt "webapps" app models.
import datetime
import hashlib
import itertools
import json
import os
import re
import time
import urlparse
import uuid

from django.conf import settings
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django.core.files.storage import default_storage as storage
from django.core.urlresolvers import reverse
from django.db import models, transaction
from django.db.models import signals as dbsignals, Max, Q
from django.dispatch import receiver
from django.utils.translation import trans_real as translation

import caching.base as caching
import commonware.log
import json_field
from cache_nuggets.lib import memoize, memoize_key
from jinja2.filters import do_dictsort
from tower import ugettext as _
from tower import ugettext_lazy as _lazy

import amo
import mkt
from amo.utils import (JSONEncoder, slugify, smart_path, sorted_groupby,
                       urlparams)
from lib.crypto import packaged
from lib.iarc.client import get_iarc_client
from lib.iarc.utils import get_iarc_app_title, render_xml
from lib.utils import static_url
from mkt.access import acl
from mkt.constants import APP_FEATURES, apps, iarc_mappings
from mkt.constants.applications import DEVICE_TYPES
from mkt.constants.payments import PROVIDER_CHOICES
from mkt.files.models import File, nfd_str
from mkt.files.utils import parse_addon, WebAppParser
from mkt.prices.models import AddonPremium, Price
from mkt.ratings.models import Review
from mkt.regions.utils import parse_region
from mkt.site.decorators import skip_cache, use_master, write
from mkt.site.helpers import absolutify
from mkt.site.mail import send_mail
from mkt.site.models import (DynamicBoolFieldsMixin, ManagerBase, ModelBase,
                             OnChangeMixin)
from mkt.site.storage_utils import copy_stored_file
from mkt.tags.models import Tag
from mkt.translations.fields import (PurifiedField, save_signal,
                                     TranslatedField, Translation)
from mkt.translations.models import attach_trans_dict
from mkt.translations.utils import find_language, to_language
from mkt.users.models import UserForeignKey, UserProfile
from mkt.versions.models import Version
from mkt.webapps import query, signals
from mkt.webapps.indexers import WebappIndexer
from mkt.webapps.utils import (dehydrate_content_rating,
                               get_locale_properties, get_supported_locales)

log = commonware.log.getLogger('z.addons')


def clean_slug(instance, slug_field='app_slug'):
    """Cleans a model instance slug.

    This strives to be as generic as possible as it's used by Webapps
    and maybe less in the future. :-D
    """
    slug = getattr(instance, slug_field, None) or instance.name

    if not slug:
        # Initialize the slug with what we have available: a name translation,
        # or the id of the instance, or in last resort the model name.
        translations = Translation.objects.filter(id=instance.name_id)
        if translations.exists():
            slug = translations[0]
        elif instance.id:
            slug = str(instance.id)
        else:
            slug = instance.__class__.__name__

    max_length = instance._meta.get_field_by_name(slug_field)[0].max_length
    slug = slugify(slug)[:max_length]

    if BlacklistedSlug.blocked(slug):
        # A trailing "~" marks the slug as a deliberate near-miss of the
        # blacklisted name while keeping it within max_length.
        slug = slug[:max_length - 1] + '~'

    # The following trick makes sure we are using a manager that returns
    # all the objects, as otherwise we could have a slug clash on our hands.
    # Eg with the "Addon.objects" manager, which doesn't list deleted addons,
    # we could have a "clean" slug which is in fact already assigned to an
    # already existing (deleted) addon.
    # Also, make sure we use the base class (eg Webapp, which inherits from
    # Addon, shouldn't clash with addons). This is extra paranoid, as webapps
    # have a different slug field, but just in case we need this in the
    # future.
    manager = models.Manager()
    manager.model = instance._meta.proxy_for_model or instance.__class__

    qs = manager.values_list(slug_field, flat=True)  # Get list of all slugs.
    if instance.id:
        qs = qs.exclude(pk=instance.id)  # Can't clash with itself.

    # We first need to make sure there's a clash, before trying to find a
    # suffix that is available. Eg, if there's a "foo-bar" slug, "foo" is
    # still available.
    clash = qs.filter(**{slug_field: slug})
    if clash.exists():
        # Leave space for "-" and 99 clashes.
        slug = slugify(slug)[:max_length - 3]

        # There is a clash, so find a suffix that will make this slug unique.
        prefix = '%s-' % slug
        lookup = {'%s__startswith' % slug_field: prefix}
        clashes = qs.filter(**lookup)

        # Try numbers between 1 and the number of clashes + 1 (+ 1 because we
        # start the range at 1, not 0):
        # if we have two clashes "foo-1" and "foo-2", we need to try "foo-x"
        # for x between 1 and 3 to be absolutely sure to find an available
        # one.
        for idx in range(1, len(clashes) + 2):
            new = ('%s%s' % (prefix, idx))[:max_length]
            if new not in clashes:
                slug = new
                break
        else:
            # This could happen. The current implementation (using
            # ``[:max_length -3]``) only works for the first 100 clashes in
            # the worst case (if the slug is equal to or longuer than
            # ``max_length - 3`` chars).
            # After that, {verylongslug}-100 will be trimmed down to
            # {verylongslug}-10, which is already assigned, but it's the last
            # solution tested.
            raise RuntimeError

    setattr(instance, slug_field, slug)

    return instance


class AddonDeviceType(ModelBase):
    # Join table mapping a Webapp to the device types it supports.
    addon = models.ForeignKey('Webapp', db_constraint=False)
    device_type = models.PositiveIntegerField(
        default=amo.DEVICE_DESKTOP, choices=do_dictsort(amo.DEVICE_TYPES),
        db_index=True)

    class Meta:
        db_table = 'addons_devicetypes'
        unique_together = ('addon', 'device_type')

    def __unicode__(self):
        return u'%s: %s' % (self.addon.name, self.device.name)

    @property
    def device(self):
        # Resolve the stored integer to the DEVICE_TYPES entry.
        return amo.DEVICE_TYPES[self.device_type]


@receiver(signals.version_changed, dispatch_uid='version_changed')
def version_changed(sender, **kw):
    from .
import tasks
    tasks.version_changed.delay(sender.id)


def attach_devices(addons):
    """Attach a ``device_ids`` list to each addon, using a single query."""
    addon_dict = dict((a.id, a) for a in addons)
    devices = (AddonDeviceType.objects.filter(addon__in=addon_dict)
               .values_list('addon', 'device_type'))
    for addon, device_types in sorted_groupby(devices, lambda x: x[0]):
        addon_dict[addon].device_ids = [d[1] for d in device_types]


def attach_prices(addons):
    """Attach a ``price`` attribute to each premium addon, in one query.

    Free addons are left untouched (no ``price`` attribute is set).
    """
    addon_dict = dict((a.id, a) for a in addons)
    prices = (AddonPremium.objects
              .filter(addon__in=addon_dict,
                      addon__premium_type__in=amo.ADDON_PREMIUMS)
              .values_list('addon', 'price__price'))
    for addon, price in prices:
        addon_dict[addon].price = price


def attach_translations(addons):
    """Put all translations into a translations dict."""
    attach_trans_dict(Webapp, addons)


def attach_tags(addons):
    """Attach a ``tag_list`` of tag strings to each addon, in one query."""
    addon_dict = dict((a.id, a) for a in addons)
    qs = (Tag.objects.not_blacklisted().filter(addons__in=addon_dict)
          .values_list('addons__id', 'tag_text'))
    for addon, tags in sorted_groupby(qs, lambda x: x[0]):
        addon_dict[addon].tag_list = [t[1] for t in tags]


class AddonUser(caching.CachingMixin, models.Model):
    # Authorship join row between a Webapp and a UserProfile, carrying the
    # author's role, listing flag and display position.
    addon = models.ForeignKey('Webapp')
    user = UserForeignKey()
    role = models.SmallIntegerField(default=amo.AUTHOR_ROLE_OWNER,
                                    choices=amo.AUTHOR_CHOICES)
    listed = models.BooleanField(_lazy(u'Listed'), default=True)
    position = models.IntegerField(default=0)

    objects = caching.CachingManager()

    def __init__(self, *args, **kwargs):
        super(AddonUser, self).__init__(*args, **kwargs)
        # Snapshot the loaded values -- presumably consumed elsewhere to
        # detect role/user changes at save time; not referenced in this file.
        self._original_role = self.role
        self._original_user_id = self.user_id

    class Meta:
        db_table = 'addons_users'
        unique_together = (('addon', 'user'), )


class Preview(ModelBase):
    # A screenshot or video attached to a Webapp.  ``sizes`` stores the
    # pixel dimensions of the full image and its thumbnail as JSON.
    addon = models.ForeignKey('Webapp', related_name='previews')
    filetype = models.CharField(max_length=25)
    thumbtype = models.CharField(max_length=25)
    caption = TranslatedField()
    position = models.IntegerField(default=0)
    sizes = json_field.JSONField(max_length=25, default={})

    class Meta:
        db_table = 'previews'
        ordering = ('position', 'created')

    def _image_url(self,
url_template):
        """Fill ``url_template`` with (shard, id[, ext], mtime cache-buster)."""
        if self.modified is not None:
            # ``modified`` may arrive as a unicode ISO-8601 string (e.g. when
            # deserialized); normalize to a datetime before timestamping.
            if isinstance(self.modified, unicode):
                self.modified = datetime.datetime.strptime(
                    self.modified, '%Y-%m-%dT%H:%M:%S')
            modified = int(time.mktime(self.modified.timetuple()))
        else:
            modified = 0
        # id / 1000 shards files into per-thousand directories (Python 2
        # integer division).
        args = [self.id / 1000, self.id, modified]
        if '.png' not in url_template:
            args.insert(2, self.file_extension)
        return url_template % tuple(args)

    def _image_path(self, url_template):
        """Filesystem counterpart of _image_url (no cache-buster arg)."""
        args = [self.id / 1000, self.id]
        if '.png' not in url_template:
            args.append(self.file_extension)
        return url_template % tuple(args)

    def as_dict(self, src=None):
        """Serializable dict of preview URLs, tagged with an optional src."""
        d = {'full': urlparams(self.image_url, src=src),
             'thumbnail': urlparams(self.thumbnail_url, src=src),
             'caption': unicode(self.caption)}
        return d

    @property
    def is_landscape(self):
        # Wider than tall; False when no image size has been recorded.
        size = self.image_size
        if not size:
            return False
        return size[0] > size[1]

    @property
    def file_extension(self):
        # Assume that blank is an image.
        if not self.filetype:
            return 'png'
        # Take the subtype of the stored mimetype, e.g. 'image/png' -> 'png'.
        return self.filetype.split('/')[1]

    @property
    def thumbnail_url(self):
        return self._image_url(static_url('PREVIEW_THUMBNAIL_URL'))

    @property
    def image_url(self):
        return self._image_url(static_url('PREVIEW_FULL_URL'))

    @property
    def thumbnail_path(self):
        return self._image_path(settings.PREVIEW_THUMBNAIL_PATH)

    @property
    def image_path(self):
        return self._image_path(settings.PREVIEW_FULL_PATH)

    @property
    def thumbnail_size(self):
        return self.sizes.get('thumbnail', []) if self.sizes else []

    @property
    def image_size(self):
        return self.sizes.get('image', []) if self.sizes else []

# NOTE(review): presumably keeps the translated ``caption`` field saved
# alongside the Preview row -- see the translations app's save_signal.
dbsignals.pre_save.connect(save_signal, sender=Preview,
                           dispatch_uid='preview_translations')


class BlacklistedSlug(ModelBase):
    # Slug values that apps may not use verbatim (see clean_slug()).
    name = models.CharField(max_length=255, unique=True, default='')

    class Meta:
        db_table = 'addons_blacklistedslug'

    def __unicode__(self):
        return self.name

    @classmethod
    def blocked(cls, slug):
        """True for all-digit slugs or names present in the blacklist."""
        return slug.isdigit() or cls.objects.filter(name=slug).exists()


def reverse_version(version):
    """
    The try/except AttributeError allows this to be used where the input
is ambiguous, and could be either an already-reversed URL or a Version
    object.
    """
    if version:
        try:
            return reverse('version-detail', kwargs={'pk': version.pk})
        except AttributeError:
            # No .pk attribute: assume it's already a reversed URL string.
            return version
    # Falsy input (None/empty): nothing to reverse.
    return


class WebappManager(ManagerBase):
    """Default Webapp manager; optionally includes soft-deleted apps."""

    def __init__(self, include_deleted=False):
        ManagerBase.__init__(self)
        self.include_deleted = include_deleted

    def get_query_set(self):
        qs = super(WebappManager, self).get_query_set()
        qs = qs._clone(klass=query.IndexQuerySet)
        if not self.include_deleted:
            qs = qs.exclude(status=amo.STATUS_DELETED)
        # Webapp.transformer batch-attaches versions, previews, prices and
        # device types to avoid per-row queries.
        return qs.transform(Webapp.transformer)

    def valid(self):
        # NOTE(review): currently identical to visible() below.
        return self.filter(status__in=amo.LISTED_STATUSES,
                           disabled_by_user=False)

    def visible(self):
        return self.filter(status__in=amo.LISTED_STATUSES,
                           disabled_by_user=False)

    @skip_cache
    def pending_in_region(self, region):
        """
        Apps that have been approved by reviewers but unapproved by
        reviewers in special regions (e.g., China).
        """
        region = parse_region(region)
        column_prefix = '_geodata__region_%s' % region.slug
        return self.filter(**{
            # Only nominated apps should show up.
            '%s_nominated__isnull' % column_prefix: False,
            'status__in': amo.WEBAPPS_APPROVED_STATUSES,
            'disabled_by_user': False,
            'escalationqueue__isnull': True,
            '%s_status' % column_prefix: amo.STATUS_PENDING,
        }).order_by('-%s_nominated' % column_prefix)

    def rated(self):
        """IARC."""
        return self.exclude(content_ratings__isnull=True)

    def by_identifier(self, identifier):
        """
        Look up a single app by its `id` or `app_slug`.

        If the identifier is coercable into an integer, we first check
        for an ID match, falling back to a slug check (probably not
        necessary, as there is validation preventing numeric slugs).
        Otherwise, we only look for a slug match.
        """
        try:
            return self.get(id=identifier)
        except (ObjectDoesNotExist, ValueError):
            return self.get(app_slug=identifier)


class UUIDModelMixin(object):
    """
    A mixin responsible for assigning a uniquely generated UUID at save time.
""" def save(self, *args, **kwargs): self.assign_uuid() return super(UUIDModelMixin, self).save(*args, **kwargs) def assign_uuid(self): """Generates a UUID if self.guid is not already set.""" if not hasattr(self, 'guid'): raise AttributeError( 'A UUIDModel must contain a charfield called guid') if not self.guid: max_tries = 10 tried = 1 guid = str(uuid.uuid4()) while tried <= max_tries: if not type(self).objects.filter(guid=guid).exists(): self.guid = guid break else: guid = str(uuid.uuid4()) tried += 1 else: raise ValueError('Could not auto-generate a unique UUID') class Webapp(UUIDModelMixin, OnChangeMixin, ModelBase): STATUS_CHOICES = amo.STATUS_CHOICES.items() guid = models.CharField(max_length=255, unique=True, null=True) slug = models.CharField(max_length=30, unique=True, null=True) # This column is only used for webapps, so they can have a slug namespace # separate from addons and personas. app_slug = models.CharField(max_length=30, unique=True, null=True, blank=True) name = TranslatedField(default=None) default_locale = models.CharField(max_length=10, default=settings.LANGUAGE_CODE, db_column='defaultlocale') status = models.PositiveIntegerField( choices=STATUS_CHOICES, db_index=True, default=0) highest_status = models.PositiveIntegerField( choices=STATUS_CHOICES, default=0, help_text='An upper limit for what an author can change.', db_column='higheststatus') icon_type = models.CharField(max_length=25, blank=True, db_column='icontype') icon_hash = models.CharField(max_length=8, blank=True, null=True) homepage = TranslatedField() support_email = TranslatedField(db_column='supportemail') support_url = TranslatedField(db_column='supporturl') description = PurifiedField(short=False) privacy_policy = PurifiedField(db_column='privacypolicy') average_rating = models.FloatField(max_length=255, default=0, null=True, db_column='averagerating') bayesian_rating = models.FloatField(default=0, db_index=True, db_column='bayesianrating') total_reviews = 
models.PositiveIntegerField(default=0, db_column='totalreviews') weekly_downloads = models.PositiveIntegerField( default=0, db_column='weeklydownloads', db_index=True) total_downloads = models.PositiveIntegerField( default=0, db_column='totaldownloads') last_updated = models.DateTimeField( db_index=True, null=True, help_text='Last time this add-on had a file/version update') disabled_by_user = models.BooleanField(default=False, db_index=True, db_column='inactive') public_stats = models.BooleanField(default=False, db_column='publicstats') authors = models.ManyToManyField('users.UserProfile', through='AddonUser', related_name='addons') categories = json_field.JSONField(default=None) premium_type = models.PositiveIntegerField( choices=amo.ADDON_PREMIUM_TYPES.items(), default=amo.ADDON_FREE) manifest_url = models.URLField(max_length=255, blank=True, null=True) app_domain = models.CharField(max_length=255, blank=True, null=True, db_index=True) _current_version = models.ForeignKey(Version, db_column='current_version', related_name='+', null=True, on_delete=models.SET_NULL) _latest_version = models.ForeignKey(Version, db_column='latest_version', on_delete=models.SET_NULL, null=True, related_name='+') publish_type = models.PositiveIntegerField(default=0) mozilla_contact = models.EmailField(blank=True) vip_app = models.BooleanField(default=False) priority_review = models.BooleanField(default=False) # Whether the app is packaged or not (aka hosted). is_packaged = models.BooleanField(default=False, db_index=True) enable_new_regions = models.BooleanField(default=True, db_index=True) # Annotates disabled apps from the Great IARC purge for auto-reapprove. # Note: for currently PUBLIC apps only. 
iarc_purged = models.BooleanField(default=False) # This is the public_id to a Generic Solitude Product solitude_public_id = models.CharField(max_length=255, null=True, blank=True) objects = WebappManager() with_deleted = WebappManager(include_deleted=True) class PayAccountDoesNotExist(Exception): """The app has no payment account for the query.""" class Meta: db_table = 'addons' def __unicode__(self): return u'%s: %s' % (self.id, self.name) def save(self, **kw): self.clean_slug(slug_field='app_slug') creating = not self.id super(Webapp, self).save(**kw) if creating: # Set the slug once we have an id to keep things in order. # This breaks test_change_called_on_new_instance_save self.update(slug='app-%s' % self.id) # Create Geodata object (a 1-to-1 relationship). if not hasattr(self, '_geodata'): Geodata.objects.create(addon=self) @transaction.commit_on_success def delete(self, msg='', reason=''): # To avoid a circular import. from . import tasks if self.status == amo.STATUS_DELETED: return # We're already done. id = self.id # Tell IARC this app is delisted from the set_iarc_storefront_data. tasks.set_storefront_data.delay(self.pk, disable=True) # Fetch previews before deleting the addon instance, so that we can # pass the list of files to delete to the delete_preview_files task # after the addon is deleted. previews = list(Preview.objects.filter(addon__id=id) .values_list('id', flat=True)) log.debug('Deleting app: %s' % self.id) to = [settings.FLIGTAR] user = amo.get_user() context = { 'atype': 'App', 'authors': [u.email for u in self.authors.all()], 'guid': self.guid, 'id': self.id, 'msg': msg, 'reason': reason, 'name': self.name, 'slug': self.app_slug, 'total_downloads': self.total_downloads, 'url': absolutify(self.get_url_path()), 'user_str': ("%s, %s (%s)" % (user.display_name or user.username, user.email, user.id) if user else "Unknown"), } email_msg = u""" The following %(atype)s was deleted. 
%(atype)s: %(name)s URL: %(url)s DELETED BY: %(user_str)s ID: %(id)s GUID: %(guid)s AUTHORS: %(authors)s TOTAL DOWNLOADS: %(total_downloads)s NOTES: %(msg)s REASON GIVEN BY USER FOR DELETION: %(reason)s """ % context log.debug('Sending delete email for %(atype)s %(id)s' % context) subject = 'Deleting %(atype)s %(slug)s (%(id)d)' % context # Update or NULL out various fields. models.signals.pre_delete.send(sender=Webapp, instance=self) self.update(status=amo.STATUS_DELETED, slug=None, app_slug=None, app_domain=None, _current_version=None) models.signals.post_delete.send(sender=Webapp, instance=self) send_mail(subject, email_msg, recipient_list=to) for preview in previews: tasks.delete_preview_files.delay(preview) return True @use_master def clean_slug(self, slug_field='app_slug'): if self.status == amo.STATUS_DELETED: return clean_slug(self, slug_field) @staticmethod def attach_related_versions(addons, addon_dict=None): if addon_dict is None: addon_dict = dict((a.id, a) for a in addons) current_ids = filter(None, (a._current_version_id for a in addons)) latest_ids = filter(None, (a._latest_version_id for a in addons)) all_ids = set(current_ids) | set(latest_ids) versions = list(Version.objects.filter(id__in=all_ids).order_by()) for version in versions: try: addon = addon_dict[version.addon_id] except KeyError: log.debug('Version %s has an invalid add-on id.' 
% version.id) continue if addon._current_version_id == version.id: addon._current_version = version if addon._latest_version_id == version.id: addon._latest_version = version version.addon = addon @classmethod def get_indexer(cls): return WebappIndexer @classmethod def from_upload(cls, upload, is_packaged=False): data = parse_addon(upload) fields = cls._meta.get_all_field_names() addon = Webapp(**dict((k, v) for k, v in data.items() if k in fields)) addon.status = amo.STATUS_NULL locale_is_set = (addon.default_locale and addon.default_locale in ( settings.AMO_LANGUAGES + settings.HIDDEN_LANGUAGES) and data.get('default_locale') == addon.default_locale) if not locale_is_set: addon.default_locale = to_language(translation.get_language()) addon.is_packaged = is_packaged if is_packaged: addon.app_domain = data.get('origin') else: addon.manifest_url = upload.name addon.app_domain = addon.domain_from_url(addon.manifest_url) addon.save() Version.from_upload(upload, addon) amo.log(amo.LOG.CREATE_ADDON, addon) log.debug('New addon %r from %r' % (addon, upload)) return addon @staticmethod def attach_previews(addons, addon_dict=None, no_transforms=False): if addon_dict is None: addon_dict = dict((a.id, a) for a in addons) qs = Preview.objects.filter(addon__in=addons, position__gte=0).order_by() if no_transforms: qs = qs.no_transforms() qs = sorted(qs, key=lambda x: (x.addon_id, x.position, x.created)) for addon, previews in itertools.groupby(qs, lambda x: x.addon_id): addon_dict[addon].all_previews = list(previews) # FIXME: set all_previews to empty list on addons without previews. @staticmethod def attach_prices(addons, addon_dict=None): # FIXME: merge with attach_prices transformer below. if addon_dict is None: addon_dict = dict((a.id, a) for a in addons) # There's a constrained amount of price tiers, may as well load # them all and let cache machine keep them cached. prices = dict((p.id, p) for p in Price.objects.all()) # Attach premium addons. 
qs = AddonPremium.objects.filter(addon__in=addons) premium_dict = dict((ap.addon_id, ap) for ap in qs) # Attach premiums to addons, making sure to attach None to free addons # or addons where the corresponding AddonPremium is missing. for addon in addons: if addon.is_premium(): addon_p = premium_dict.get(addon.id) if addon_p: price = prices.get(addon_p.price_id) if price: addon_p.price = price addon_p.addon = addon addon._premium = addon_p else: addon._premium = None def is_public(self): """ True if the app is not disabled and the status is either STATUS_PUBLIC or STATUS_UNLISTED. Both statuses are "public" in that they should result in a 200 to the app detail page. """ return (not self.disabled_by_user and self.status in (amo.STATUS_PUBLIC, amo.STATUS_UNLISTED)) def is_approved(self): """ True if the app has status equal to amo.STATUS_APPROVED. This app has been approved by a reviewer but is currently private and only visitble to the app authors. """ return not self.disabled_by_user and self.status == amo.STATUS_APPROVED def is_published(self): """ True if the app status is amo.STATUS_PUBLIC. This means we can display the app in listing pages and index it in our search backend. """ return not self.disabled_by_user and self.status == amo.STATUS_PUBLIC def is_incomplete(self): return self.status == amo.STATUS_NULL def is_pending(self): return self.status == amo.STATUS_PENDING def is_rejected(self): return self.status == amo.STATUS_REJECTED @property def is_deleted(self): return self.status == amo.STATUS_DELETED @property def is_disabled(self): """True if this Addon is disabled. It could be disabled by an admin or disabled by the developer """ return self.status == amo.STATUS_DISABLED or self.disabled_by_user def can_become_premium(self): if self.upsell or self.is_premium(): return False return True def is_premium(self): """ If the addon is premium. Will include addons that are premium and have a price of zero. 
Primarily of use in the devhub to determine if an app is intending to be premium. """ return self.premium_type in amo.ADDON_PREMIUMS def is_free(self): """ This is the opposite of is_premium. Will not include apps that have a price of zero. Primarily of use in the devhub to determine if an app is intending to be free. """ return not (self.is_premium() and self.premium and self.premium.price) def is_free_inapp(self): return self.premium_type == amo.ADDON_FREE_INAPP def needs_payment(self): return (self.premium_type not in (amo.ADDON_FREE, amo.ADDON_OTHER_INAPP)) def can_be_deleted(self): return not self.is_deleted @classmethod def _last_updated_queries(cls): """ Get the queries used to calculate addon.last_updated. """ return (Webapp.objects.no_cache() .filter(status=amo.STATUS_PUBLIC, versions__files__status=amo.STATUS_PUBLIC) .values('id') .annotate(last_updated=Max('versions__created'))) @amo.cached_property(writable=True) def all_previews(self): return list(self.get_previews()) def get_previews(self): """Exclude promo graphics.""" return self.previews.exclude(position=-1) def remove_locale(self, locale): """NULLify strings in this locale for the add-on and versions.""" for o in itertools.chain([self], self.versions.all()): Translation.objects.remove_for(o, locale) def get_mozilla_contacts(self): return [x.strip() for x in self.mozilla_contact.split(',')] @amo.cached_property def upsell(self): """Return the upsell or add-on, or None if there isn't one.""" try: # We set unique_together on the model, so there will only be one. return self._upsell_from.all()[0] except IndexError: pass def has_author(self, user, roles=None): """True if ``user`` is an author with any of the specified ``roles``. ``roles`` should be a list of valid roles (see amo.AUTHOR_ROLE_*). If not specified, has_author will return true if the user has any role. 
""" if user is None or user.is_anonymous(): return False if roles is None: roles = dict(amo.AUTHOR_CHOICES).keys() return AddonUser.objects.filter(addon=self, user=user, role__in=roles).exists() @property def thumbnail_url(self): """ Returns the addon's thumbnail url or a default. """ try: preview = self.all_previews[0] return preview.thumbnail_url except IndexError: return settings.MEDIA_URL + '/img/icons/no-preview.png' def get_purchase_type(self, user): if user and isinstance(user, UserProfile): try: return self.addonpurchase_set.get(user=user).type except models.ObjectDoesNotExist: pass def has_purchased(self, user): return self.get_purchase_type(user) == amo.CONTRIB_PURCHASE def is_refunded(self, user): return self.get_purchase_type(user) == amo.CONTRIB_REFUND def is_chargeback(self, user): return self.get_purchase_type(user) == amo.CONTRIB_CHARGEBACK def can_review(self, user): if user and self.has_author(user): return False else: return (not self.is_premium() or self.has_purchased(user) or self.is_refunded(user)) def get_latest_file(self): """Get the latest file from the current version.""" cur = self.current_version if cur: res = cur.files.order_by('-created') if res: return res[0] @property def uses_flash(self): """ Convenience property until more sophisticated per-version checking is done for packaged apps. """ f = self.get_latest_file() if not f: return False return f.uses_flash def in_escalation_queue(self): return self.escalationqueue_set.exists() def update_names(self, new_names): """ Adds, edits, or removes names to match the passed in new_names dict. Will not remove the translation of the default_locale. `new_names` is a dictionary mapping of locales to names. Returns a message that can be used in logs showing what names were added or updated. Note: This method doesn't save the changes made to the addon object. Don't forget to call save() in your calling method. 
""" updated_locales = {} locales = dict(Translation.objects.filter(id=self.name_id) .values_list('locale', 'localized_string')) msg_c = [] # For names that were created. msg_d = [] # For deletes. msg_u = [] # For updates. # Normalize locales. names = {} for locale, name in new_names.iteritems(): loc = find_language(locale) if loc and loc not in names: names[loc] = name # Null out names no longer in `names` but exist in the database. for locale in set(locales) - set(names): names[locale] = None for locale, name in names.iteritems(): if locale in locales: if not name and locale.lower() == self.default_locale.lower(): pass # We never want to delete the default locale. elif not name: # A deletion. updated_locales[locale] = None msg_d.append(u'"%s" (%s).' % (locales.get(locale), locale)) elif name != locales[locale]: updated_locales[locale] = name msg_u.append(u'"%s" -> "%s" (%s).' % ( locales[locale], name, locale)) else: updated_locales[locale] = names.get(locale) msg_c.append(u'"%s" (%s).' % (name, locale)) if locales != updated_locales: self.name = updated_locales return { 'added': ' '.join(msg_c), 'deleted': ' '.join(msg_d), 'updated': ' '.join(msg_u), } def update_default_locale(self, locale): """ Updates default_locale if it's different and matches one of our supported locales. Returns tuple of (old_locale, new_locale) if updated. Otherwise None. """ old_locale = self.default_locale locale = find_language(locale) if locale and locale != old_locale: self.update(default_locale=locale) return old_locale, locale return None @property def premium(self): """ Returns the premium object which will be gotten by the transformer, if its not there, try and get it. Will return None if there's nothing there. 
""" if not hasattr(self, '_premium'): try: self._premium = self.addonpremium except AddonPremium.DoesNotExist: self._premium = None return self._premium def has_installed(self, user): if not user or not isinstance(user, UserProfile): return False return self.installed.filter(user=user).exists() @amo.cached_property def upsold(self): """ Return what this is going to upsold from, or None if there isn't one. """ try: return self._upsell_to.all()[0] except IndexError: pass @property def icon_url(self): return self.get_icon_url(32) @classmethod def get_fallback(cls): return cls._meta.get_field('default_locale') @amo.cached_property(writable=True) def listed_authors(self): return UserProfile.objects.filter( addons=self, addonuser__listed=True).order_by('addonuser__position') @property def reviews(self): return Review.objects.filter(addon=self, reply_to=None) def get_icon_dir(self): return os.path.join(settings.ADDON_ICONS_PATH, str(self.id / 1000)) def get_icon_url(self, size): """ Returns either the icon URL or a default icon. """ icon_type_split = [] if self.icon_type: icon_type_split = self.icon_type.split('/') # Get the closest allowed size without going over. if (size not in amo.APP_ICON_SIZES and size >= amo.APP_ICON_SIZES[0]): size = [s for s in amo.APP_ICON_SIZES if s < size][-1] elif size < amo.APP_ICON_SIZES[0]: size = amo.APP_ICON_SIZES[0] # Figure out what to return for an image URL. if not self.icon_type: return '%s/%s-%s.png' % (static_url('ADDON_ICONS_DEFAULT_URL'), 'default', size) elif icon_type_split[0] == 'icon': return '%s/%s-%s.png' % (static_url('ADDON_ICONS_DEFAULT_URL'), icon_type_split[1], size) else: # [1] is the whole ID, [2] is the directory. split_id = re.match(r'((\d*?)\d{1,3})$', str(self.id)) # If we don't have the icon_hash set to a dummy string ("never"), # when the icon is eventually changed, icon_hash will be updated. 
suffix = getattr(self, 'icon_hash', None) or 'never' return static_url('ADDON_ICON_URL') % ( split_id.group(2) or 0, self.id, size, suffix) @staticmethod def transformer(apps): if not apps: return apps_dict = dict((a.id, a) for a in apps) # Set _latest_version, _current_version Webapp.attach_related_versions(apps, apps_dict) # Attach previews. Don't use transforms, the only one present is for # translations and Previews don't have captions in the Marketplace, and # therefore don't have translations. Webapp.attach_previews(apps, apps_dict, no_transforms=True) # Attach prices. Webapp.attach_prices(apps, apps_dict) # FIXME: re-use attach_devices instead ? for adt in AddonDeviceType.objects.filter(addon__in=apps_dict): if not getattr(apps_dict[adt.addon_id], '_device_types', None): apps_dict[adt.addon_id]._device_types = [] apps_dict[adt.addon_id]._device_types.append( DEVICE_TYPES[adt.device_type]) # FIXME: attach geodata and content ratings. Maybe in a different # transformer that would then be called automatically for the API ? @staticmethod def version_and_file_transformer(apps): """Attach all the versions and files to the apps.""" # Don't just return an empty list, it will break code that expects # a query object if not len(apps): return apps ids = set(app.id for app in apps) versions = (Version.objects.no_cache().filter(addon__in=ids) .select_related('addon')) vids = [v.id for v in versions] files = (File.objects.no_cache().filter(version__in=vids) .select_related('version')) # Attach the files to the versions. f_dict = dict((k, list(vs)) for k, vs in amo.utils.sorted_groupby(files, 'version_id')) for version in versions: version.all_files = f_dict.get(version.id, []) # Attach the versions to the apps. 
v_dict = dict((k, list(vs)) for k, vs in amo.utils.sorted_groupby(versions, 'addon_id')) for app in apps: app.all_versions = v_dict.get(app.id, []) return apps def get_public_version(self): """Retrieves the latest PUBLIC version of an addon.""" if self.status not in amo.WEBAPPS_APPROVED_STATUSES: # Apps that aren't in an approved status have no current version. return None try: return (self.versions.no_cache() .filter(files__status=amo.STATUS_PUBLIC) .extra(where=[ """ NOT EXISTS ( SELECT 1 FROM versions as v2 INNER JOIN files AS f2 ON (f2.version_id = v2.id) WHERE v2.id = versions.id AND f2.status != %s)""" % amo.STATUS_PUBLIC])[0]) except (IndexError, Version.DoesNotExist): return None @write def update_version(self, ignore=None, _signal=True): """ Returns true if we updated the field. The optional ``ignore`` parameter, if present, is a version to not consider as part of the update, since it may be in the process of being deleted. Pass ``_signal=False`` if you want to no signals fired at all. """ current = self.get_public_version() try: latest_qs = self.versions.all() if ignore is not None: latest_qs = latest_qs.exclude(pk=ignore.pk) latest = latest_qs.latest() except Version.DoesNotExist: latest = None latest_id = latest and latest.id diff = [self._current_version, current] # Sometimes the DB is in an inconsistent state when this # signal is dispatched. try: if self._latest_version: # Make sure stringifying this does not trigger # Version.DoesNotExist before trying to use it for # logging. unicode(self._latest_version) diff += [self._latest_version, latest] except Version.DoesNotExist: diff += [self._latest_version_id, latest_id] updated = {} send_signal = False if self._current_version != current: updated.update({'_current_version': current}) send_signal = True # Don't use self.latest_version here. It may throw Version.DoesNotExist # if we're called from a post_delete signal. 
We also don't set # send_signal since we only want this fired if the public version # changes. if self._latest_version_id != latest_id: updated.update({'_latest_version': latest}) # update_version can be called by a post_delete signal (such # as File's) when deleting a version. If so, we should avoid putting # that version-being-deleted in any fields. if ignore is not None: updated = dict([(k, v) for (k, v) in updated.iteritems() if v != ignore]) if updated: # Pass along _signal to the .update() to prevent it from firing # signals if we don't want them. updated['_signal'] = _signal try: self.update(**updated) if send_signal and _signal: signals.version_changed.send(sender=self) log.info(u'Version changed from current: %s to %s, ' u'latest: %s to %s for addon %s' % tuple(diff + [self])) except Exception, e: log.error(u'Could not save version changes ' u'current: %s to %s, latest: %s to %s ' u'for addon %s (%s)' % tuple(diff + [self, e])) return bool(updated) @property def current_version(self): """Returns the current_version or None if the app is deleted or not created yet""" if not self.id or self.status == amo.STATUS_DELETED: return None try: return self._current_version except ObjectDoesNotExist: pass return None @property def latest_version(self): """Returns the latest_version or None if the app is deleted or not created yet""" if not self.id or self.status == amo.STATUS_DELETED: return None try: return self._latest_version except ObjectDoesNotExist: pass return None @property def geodata(self): if hasattr(self, '_geodata'): return self._geodata return Geodata.objects.get_or_create(addon=self)[0] def get_api_url(self, action=None, api=None, resource=None, pk=False): """Reverse a URL for the API.""" if pk: key = self.pk else: key = self.app_slug return reverse('app-detail', kwargs={'pk': key}) def get_url_path(self, src=None): url_ = reverse('detail', args=[self.app_slug]) if src is not None: return urlparams(url_, src=src) return url_ def get_detail_url(self, 
action=None): """Reverse URLs for 'detail', 'details.record', etc.""" return reverse(('detail.%s' % action) if action else 'detail', args=[self.app_slug]) def get_purchase_url(self, action=None, args=None): """Reverse URLs for 'purchase', 'purchase.done', etc.""" return reverse(('purchase.%s' % action) if action else 'purchase', args=[self.app_slug] + (args or [])) def get_dev_url(self, action='edit', args=None, prefix_only=False): # Either link to the "new" Marketplace Developer Hub or the old one. args = args or [] prefix = 'mkt.developers' view_name = ('%s.%s' if prefix_only else '%s.apps.%s') return reverse(view_name % (prefix, action), args=[self.app_slug] + args) def get_ratings_url(self, action='list', args=None): """Reverse URLs for 'ratings.list', 'ratings.add', etc.""" return reverse(('ratings.%s' % action), args=[self.app_slug] + (args or [])) def get_stats_url(self): return reverse('commonplace.stats.app_dashboard', args=[self.app_slug]) def get_comm_thread_url(self): return reverse('commonplace.commbadge.app_dashboard', args=[self.app_slug]) @staticmethod def domain_from_url(url, allow_none=False): if not url: if allow_none: return raise ValueError('URL was empty') pieces = urlparse.urlparse(url) return '%s://%s' % (pieces.scheme, pieces.netloc.lower()) @property def punycode_app_domain(self): return self.app_domain.encode('idna') @property def parsed_app_domain(self): if self.is_packaged: raise ValueError('Packaged apps do not have a domain') return urlparse.urlparse(self.app_domain) @property def device_types(self): # If the transformer attached something, use it. 
if hasattr(self, '_device_types'): return self._device_types return [DEVICE_TYPES[d.device_type] for d in self.addondevicetype_set.order_by('device_type')] @property def origin(self): if self.is_packaged: return self.app_domain parsed = urlparse.urlparse(self.get_manifest_url()) return '%s://%s' % (parsed.scheme, parsed.netloc) def language_ascii(self): lang = translation.to_language(self.default_locale) return settings.LANGUAGES.get(lang) def get_manifest_url(self, reviewer=False): """ Hosted apps: a URI to an external manifest. Packaged apps: a URI to a mini manifest on m.m.o. If reviewer, the mini-manifest behind reviewer auth pointing to the reviewer-signed package. """ if self.is_packaged: if reviewer and self.latest_version: # Get latest version and return reviewer manifest URL. version = self.latest_version return absolutify(reverse('reviewers.mini_manifest', args=[self.app_slug, version.id])) elif self.current_version: return absolutify(reverse('detail.manifest', args=[self.guid])) else: return '' # No valid version. else: return self.manifest_url def has_icon_in_manifest(self): data = self.get_manifest_json() return 'icons' in data def get_manifest_json(self, file_obj=None): file_ = file_obj or self.get_latest_file() if not file_: return {} try: return file_.version.manifest except AppManifest.DoesNotExist: # TODO: Remove this when we're satisified the above is working. log.info('Falling back to loading manifest from file system. ' 'Webapp:%s File:%s' % (self.id, file_.id)) if file_.status == amo.STATUS_DISABLED: file_path = file_.guarded_file_path else: file_path = file_.file_path return WebAppParser().get_json_data(file_path) def manifest_updated(self, manifest, upload): """The manifest has updated, update the version and file. This is intended to be used for hosted apps only, which have only a single version and a single file. 
""" data = parse_addon(upload, self) manifest = WebAppParser().get_json_data(upload) version = self.versions.latest() max_ = Version._meta.get_field_by_name('_developer_name')[0].max_length version.update(version=data['version'], _developer_name=data['developer_name'][:max_]) try: version.manifest_json.update(manifest=json.dumps(manifest)) except AppManifest.DoesNotExist: AppManifest.objects.create(version=version, manifest=json.dumps(manifest)) path = smart_path(nfd_str(upload.path)) file = version.files.latest() file.filename = file.generate_filename(extension='.webapp') file.size = storage.size(path) file.hash = file.generate_hash(path) log.info('Updated file hash to %s' % file.hash) file.save() # Move the uploaded file from the temp location. copy_stored_file(path, os.path.join(version.path_prefix, nfd_str(file.filename))) log.info('[Webapp:%s] Copied updated manifest to %s' % ( self, version.path_prefix)) amo.log(amo.LOG.MANIFEST_UPDATED, self) def has_incomplete_status(self): return self.is_incomplete() def details_errors(self): """ See if initial app submission is complete (details). Returns list of reasons app may not be complete. """ reasons = [] if not self.support_email: reasons.append(_('You must provide a support email.')) if not self.name: reasons.append(_('You must provide an app name.')) if not self.device_types: reasons.append(_('You must provide at least one device type.')) if not self.categories: reasons.append(_('You must provide at least one category.')) if not self.previews.count(): reasons.append(_('You must upload at least one screenshot or ' 'video.')) return reasons def details_complete(self): """ Checks if app detail submission is complete (first step of submit). """ return not self.details_errors() def is_rated(self): return self.content_ratings.exists() def all_payment_accounts(self): # TODO: cache this somehow. Using @cached_property was hard because # there's no easy way to invalidate something that should be # recalculated. 
return (self.app_payment_accounts.select_related('payment_account') .all()) def payment_account(self, provider_id): from mkt.developers.models import AddonPaymentAccount qs = (self.app_payment_accounts.select_related('payment_account') .filter(payment_account__provider=provider_id)) try: return qs.get() except AddonPaymentAccount.DoesNotExist, exc: log.info('non-existant payment account for app {app}: ' '{exc.__class__.__name__}: {exc}' .format(app=self, exc=exc)) raise self.PayAccountDoesNotExist( 'No payment account for {app} named {pr}. ' 'Choices: {all}' .format(app=self, pr=PROVIDER_CHOICES[provider_id], all=[PROVIDER_CHOICES[a.payment_account.provider] for a in self.all_payment_accounts()])) def has_payment_account(self): """True if app has at least one payment account.""" return bool(self.all_payment_accounts().count()) def has_multiple_payment_accounts(self): """True if the app has more than one payment account.""" return self.all_payment_accounts().count() > 1 def payments_complete(self): """Also returns True if the app doesn't needs payments.""" return not self.needs_payment() or self.has_payment_account() def completion_errors(self, ignore_ratings=False): """ Compiles all submission steps into a single error report. ignore_ratings -- doesn't check for content_ratings for cases in which content ratings were just created. """ errors = {} if not self.details_complete(): errors['details'] = self.details_errors() if not ignore_ratings and not self.is_rated(): errors['content_ratings'] = _('You must set up content ratings.') if not self.payments_complete(): errors['payments'] = _('You must set up a payment account.') return errors def completion_error_msgs(self): """Returns submission error messages as a flat list.""" errors = self.completion_errors() # details is a list of msgs instead of a string like others. 
detail_errors = errors.pop('details', []) or [] return detail_errors + errors.values() def is_fully_complete(self, ignore_ratings=False): """ Wrapper to submission errors for readability and testability (mocking). """ return not self.completion_errors(ignore_ratings) def next_step(self): """ Gets the next step to fully complete app submission. """ if self.has_incomplete_status() and not self.details_complete(): # Some old public apps may have some missing detail fields. return { 'name': _('Details'), 'description': _('This app\'s submission process has not been ' 'fully completed.'), 'url': self.get_dev_url(), } elif not self.is_rated(): return { 'name': _('Content Ratings'), 'description': _('This app needs to get a content rating.'), 'url': self.get_dev_url('ratings'), } elif not self.payments_complete(): return { 'name': _('Payments'), 'description': _('This app needs a payment account set up.'), 'url': self.get_dev_url('payments'), } @amo.cached_property(writable=True) def is_offline(self): """ Returns a boolean of whether this is an app that degrades gracefully offline (i.e., is a packaged app or has an `appcache_path` defined in its manifest). """ if self.is_packaged: return True manifest = self.get_manifest_json() return bool(manifest and 'appcache_path' in manifest) def mark_done(self): """When the submission process is done, update status accordingly.""" self.update(status=amo.WEBAPPS_UNREVIEWED_STATUS) def update_status(self, **kwargs): if self.is_deleted or self.status == amo.STATUS_BLOCKED: return def _log(reason, old=self.status): log.info(u'Update app status [%s]: %s => %s (%s).' % ( self.id, old, self.status, reason)) amo.log(amo.LOG.CHANGE_STATUS, self.get_status_display(), self) # Handle the case of no versions. if not self.versions.exists(): self.update(status=amo.STATUS_NULL) _log('no versions') return # Handle the case of versions with no files. 
if not self.versions.filter(files__isnull=False).exists(): self.update(status=amo.STATUS_NULL) _log('no versions with files') return # If the app is incomplete, don't update status. if not self.is_fully_complete(): return # If there are no public versions and at least one pending, set status # to pending. has_public = self.versions.filter( files__status=amo.STATUS_PUBLIC).exists() has_approved = self.versions.filter( files__status=amo.STATUS_APPROVED).exists() has_pending = self.versions.filter( files__status=amo.STATUS_PENDING).exists() # If no public versions but there are approved versions, set app to # approved. if not has_public and has_approved: _log('has approved but no public files') self.update(status=amo.STATUS_APPROVED) return # If no public versions but there are pending versions, set app to # pending. if not has_public and has_pending and not self.is_pending(): self.update(status=amo.STATUS_PENDING) _log('has pending but no public files') return def authors_other_addons(self, app=None): """Return other apps by the same author.""" return (self.__class__.objects.visible() .exclude(id=self.id).distinct() .filter(addonuser__listed=True, authors__in=self.listed_authors)) def can_be_purchased(self): return self.is_premium() and self.status in amo.REVIEWED_STATUSES def can_purchase(self): return self.is_premium() and self.premium and self.is_public() def is_purchased(self, user): return user and self.id in user.purchase_ids() def has_premium(self): """If the app is premium status and has a premium object.""" return bool(self.is_premium() and self.premium) def get_price(self, carrier=None, region=None, provider=None): """ A shortcut to get the price as decimal. Returns None if their is no price for the app. :param optional carrier: an int for the carrier. :param optional region: an int for the region. Defaults to restofworld. :param optional provider: an int for the provider. Defaults to bango. 
""" if self.has_premium() and self.premium.price: return self.premium.price.get_price(carrier=carrier, region=region, provider=provider) def get_price_locale(self, carrier=None, region=None, provider=None): """ A shortcut to get the localised price with currency. Returns None if their is no price for the app. :param optional carrier: an int for the carrier. :param optional region: an int for the region. Defaults to restofworld. :param optional provider: an int for the provider. Defaults to bango. """ if self.has_premium() and self.premium.price: return self.premium.price.get_price_locale( carrier=carrier, region=region, provider=provider) def get_tier(self): """ Returns the price tier object. """ if self.has_premium(): return self.premium.price def get_tier_name(self): """ Returns the price tier for showing prices in the reviewer tools and developer hub. """ tier = self.get_tier() if tier: return tier.tier_locale() @amo.cached_property def promo(self): return self.get_promo() def get_promo(self): try: return self.previews.filter(position=-1)[0] except IndexError: pass def get_region_ids(self, restofworld=False, excluded=None): """ Return IDs of regions in which this app is listed. If `excluded` is provided we'll use that instead of doing our own excluded lookup. """ if restofworld: all_ids = mkt.regions.ALL_REGION_IDS else: all_ids = mkt.regions.REGION_IDS if excluded is None: excluded = self.get_excluded_region_ids() return sorted(set(all_ids) - set(excluded or [])) def get_excluded_region_ids(self): """ Return IDs of regions for which this app is excluded. This will be all the addon excluded regions. If the app is premium, this will also exclude any region that does not have the price tier set. Note: free and in-app are not included in this. 
""" excluded = set(self.addonexcludedregion .values_list('region', flat=True)) if self.is_premium(): all_regions = set(mkt.regions.ALL_REGION_IDS) # Find every region that does not have payments supported # and add that into the exclusions. excluded = excluded.union( all_regions.difference(self.get_price_region_ids())) geo = self.geodata if geo.region_de_iarc_exclude or geo.region_de_usk_exclude: excluded.add(mkt.regions.DE.id) if geo.region_br_iarc_exclude: excluded.add(mkt.regions.BR.id) return sorted(list(excluded)) def get_price_region_ids(self): tier = self.get_tier() if tier: return sorted(p['region'] for p in tier.prices() if p['paid']) return [] def get_regions(self, regions=None): """ Return a list of regions objects the app is available in, e.g.: [<class 'mkt.constants.regions.BR'>, <class 'mkt.constants.regions.CA'>, <class 'mkt.constants.regions.UK'>, <class 'mkt.constants.regions.US'>, <class 'mkt.constants.regions.RESTOFWORLD'>] if `regions` is provided we'll use that instead of calling self.get_region_ids() """ regions_ids = regions or self.get_region_ids(restofworld=True) _regions = map(mkt.regions.REGIONS_CHOICES_ID_DICT.get, regions_ids) return sorted(_regions, key=lambda x: x.slug) def listed_in(self, region=None, category=None): listed = [] if region: listed.append(region.id in self.get_region_ids(restofworld=True)) if category: listed.append(category in (self.categories or [])) return all(listed or [False]) def content_ratings_in(self, region, category=None): """ Get all content ratings for this app in REGION for CATEGORY. (e.g. give me the content ratings for a game listed in a Brazil.) """ # If we want to find games in Brazil with content ratings, then # make sure it's actually listed in Brazil and it's a game. if category and not self.listed_in(region, category): return [] rb = [] if not region.ratingsbody: # If a region doesn't specify a ratings body, default to GENERIC. 
rb = mkt.ratingsbodies.GENERIC.id else: rb = region.ratingsbody.id return list(self.content_ratings.filter(ratings_body=rb) .order_by('rating')) @classmethod def now(cls): return datetime.date.today() def in_rereview_queue(self): return self.rereviewqueue_set.exists() def in_tarako_queue(self): from mkt.reviewers.models import QUEUE_TARAKO return self.additionalreview_set.unreviewed(queue=QUEUE_TARAKO) def get_package_path(self): """Returns the `package_path` if the app is packaged.""" if not self.is_packaged: return version = self.current_version if not version: return try: file_obj = version.all_files[0] except IndexError: return else: return absolutify( os.path.join(reverse('downloads.file', args=[file_obj.id]), file_obj.filename)) def get_cached_manifest(self, force=False): """ Creates the "mini" manifest for packaged apps and caches it. Call this with `force=True` whenever we need to update the cached version of this manifest, e.g., when a new version of the packaged app is approved. If the addon is not a packaged app, this will not cache anything. """ if not self.is_packaged: return key = 'webapp:{0}:manifest'.format(self.pk) if not force: data = cache.get(key) if data: return data version = self.current_version if not version: # There's no valid version so we return an empty mini-manifest. # Note: We want to avoid caching this so when a version does become # available it can get picked up correctly. return '{}' else: # This will sign the package if it isn't already. # # Ensure that the calling method checks various permissions if # needed. E.g. see mkt/detail/views.py. This is also called as a # task after reviewer approval so we can't perform some checks # here. 
signed_file_path = packaged.sign(version.pk) file_obj = version.all_files[0] manifest = self.get_manifest_json(file_obj) package_path = absolutify( os.path.join(reverse('downloads.file', args=[file_obj.id]), file_obj.filename)) data = { 'name': manifest['name'], 'version': version.version, 'size': storage.size(signed_file_path), 'release_notes': version.releasenotes, 'package_path': package_path, } for key in ['developer', 'icons', 'locales']: if key in manifest: data[key] = manifest[key] data = json.dumps(data, cls=JSONEncoder) cache.set(key, data, None) return data def sign_if_packaged(self, version_pk, reviewer=False): if not self.is_packaged: return return packaged.sign(version_pk, reviewer=reviewer) def is_premium_type_upgrade(self, premium_type): """ Returns True if changing self.premium_type from current value to passed in value is considered an upgrade that should trigger a re-review. """ ALL = set(amo.ADDON_FREES + amo.ADDON_PREMIUMS) free_upgrade = ALL - set([amo.ADDON_FREE]) free_inapp_upgrade = ALL - set([amo.ADDON_FREE, amo.ADDON_FREE_INAPP]) if (self.premium_type == amo.ADDON_FREE and premium_type in free_upgrade): return True if (self.premium_type == amo.ADDON_FREE_INAPP and premium_type in free_inapp_upgrade): return True return False def create_blocklisted_version(self): """ Creates a new version who's file is the blocklisted app found in /media and sets status to STATUS_BLOCKLISTED. 
""" blocklisted_path = os.path.join(settings.MEDIA_ROOT, 'packaged-apps', 'blocklisted.zip') v = Version.objects.create(addon=self, version='blocklisted') f = File(version=v, status=amo.STATUS_BLOCKED) f.filename = f.generate_filename() copy_stored_file(blocklisted_path, f.file_path) log.info(u'[Webapp:%s] Copied blocklisted app from %s to %s' % ( self.id, blocklisted_path, f.file_path)) f.size = storage.size(f.file_path) f.hash = f.generate_hash(f.file_path) f.save() mf = WebAppParser().get_json_data(f.file_path) AppManifest.objects.create(version=v, manifest=json.dumps(mf)) self.sign_if_packaged(v.pk) self.status = amo.STATUS_BLOCKED self._current_version = v self.save() def update_name_from_package_manifest(self): """ Looks at the manifest.webapp inside the current version's file and updates the app's name and translated names. Note: Make sure the correct version is in place before calling this. """ if not self.is_packaged: return None file_ = self.current_version.all_files[0] mf = self.get_manifest_json(file_) # Get names in "locales" as {locale: name}. locale_names = get_locale_properties(mf, 'name', self.default_locale) # Check changes to default_locale. locale_changed = self.update_default_locale(mf.get('default_locale')) if locale_changed: log.info(u'[Webapp:%s] Default locale changed from "%s" to "%s".' % (self.pk, locale_changed[0], locale_changed[1])) # Update names crud = self.update_names(locale_names) if any(crud.values()): self.save() def update_supported_locales(self, latest=False, manifest=None): """ Loads the manifest (for either hosted or packaged) and updates Version.supported_locales for the current version or latest version if latest=True. 
""" version = self.versions.latest() if latest else self.current_version if not manifest: file_ = version.all_files[0] manifest = self.get_manifest_json(file_) updated = False supported_locales = ','.join(get_supported_locales(manifest)) if version.supported_locales != supported_locales: updated = True version.update(supported_locales=supported_locales, _signal=False) return updated @property def app_type_id(self): """ Returns int of `1` (hosted), `2` (packaged), or `3` (privileged). Used by ES. """ if self.latest_version and self.latest_version.is_privileged: return amo.ADDON_WEBAPP_PRIVILEGED elif self.is_packaged: return amo.ADDON_WEBAPP_PACKAGED return amo.ADDON_WEBAPP_HOSTED @property def app_type(self): """ Returns string of 'hosted', 'packaged', or 'privileged'. Used in the API. """ return amo.ADDON_WEBAPP_TYPES[self.app_type_id] def check_ownership(self, request, require_owner, require_author, ignore_disabled, admin): """ Used by acl.check_ownership to see if request.user has permissions for the addon. """ if require_author: require_owner = False ignore_disabled = True admin = False return acl.check_addon_ownership(request, self, admin=admin, viewer=(not require_owner), ignore_disabled=ignore_disabled) @property def supported_locales(self): """ Returns a tuple of the form: (localized default_locale, list of localized supported locales) for the current public version. """ languages = [] version = self.current_version or self.latest_version if version: for locale in version.supported_locales.split(','): if locale: language = settings.LANGUAGES.get(locale.lower()) if language: languages.append(language) return ( settings.LANGUAGES.get(self.default_locale.lower()), sorted(languages) ) @property def developer_name(self): """This is the developer name extracted from the manifest.""" version = self.current_version or self.latest_version if version: return version.developer_name def get_trending(self, region=None): """ Returns trending value. 
If no region, uses global value. If region and region is not mature, uses global value. Otherwise uses regional trending value. """ if region and not region.adolescent: by_region = region.id else: by_region = 0 try: return self.trending.get(region=by_region).value except ObjectDoesNotExist: return 0 def iarc_token(self): """ Simple hash to verify token in pingback API. """ return hashlib.sha512(settings.SECRET_KEY + str(self.id)).hexdigest() def get_content_ratings_by_body(self, es=False): """ Gets content ratings on this app keyed by bodies. es -- denotes whether to return ES-friendly results (just the IDs of rating classes) to fetch and translate later. """ content_ratings = {} for cr in self.content_ratings.all(): body = cr.get_body() rating_serialized = { 'body': body.id, 'rating': cr.get_rating().id } if not es: rating_serialized = dehydrate_content_rating(rating_serialized) content_ratings[body.label] = rating_serialized return content_ratings def set_iarc_info(self, submission_id, security_code): """ Sets the iarc_info for this app. """ data = {'submission_id': submission_id, 'security_code': security_code} info, created = IARCInfo.objects.safer_get_or_create( addon=self, defaults=data) if not created: info.update(**data) @write def set_content_ratings(self, data): """ Central method for setting content ratings. This overwrites or creates ratings, it doesn't delete and expects data of the form:: {<ratingsbodies class>: <rating class>, ...} """ from . 
import tasks if not data: return log.info('IARC setting content ratings for app:%s:%s' % (self.id, self.app_slug)) for ratings_body, rating in data.items(): cr, created = self.content_ratings.safer_get_or_create( ratings_body=ratings_body.id, defaults={'rating': rating.id}) if not created: cr.update(rating=rating.id, modified=datetime.datetime.now()) log.info('IARC content ratings set for app:%s:%s' % (self.id, self.app_slug)) geodata, c = Geodata.objects.get_or_create(addon=self) save = False # If app gets USK Rating Refused, exclude it from Germany. has_usk_refused = self.content_ratings.filter( ratings_body=mkt.ratingsbodies.USK.id, rating=mkt.ratingsbodies.USK_REJECTED.id).exists() save = geodata.region_de_usk_exclude != has_usk_refused geodata.region_de_usk_exclude = has_usk_refused # Un-exclude games in Brazil/Germany once they get a content rating. save = (save or geodata.region_br_iarc_exclude or geodata.region_de_iarc_exclude) geodata.region_br_iarc_exclude = False geodata.region_de_iarc_exclude = False # Un-disable apps that were disabled by the great IARC purge. if (self.status == amo.STATUS_DISABLED and self.iarc_purged): self.update(status=amo.STATUS_PUBLIC, iarc_purged=False) if save: geodata.save() log.info('Un-excluding IARC-excluded app:%s from br/de') tasks.index_webapps.delay([self.id]) @write def set_descriptors(self, data): """ Sets IARC rating descriptors on this app. data -- list of database flags ('has_usk_lang') """ create_kwargs = {} for body in mkt.iarc_mappings.DESCS: for desc, db_flag in mkt.iarc_mappings.DESCS[body].items(): create_kwargs[db_flag] = db_flag in data rd, created = RatingDescriptors.objects.get_or_create( addon=self, defaults=create_kwargs) if not created: rd.update(modified=datetime.datetime.now(), **create_kwargs) @write def set_interactives(self, data): """ Sets IARC interactive elements on this app. 
data -- list of database flags ('has_users_interact') """ create_kwargs = {} for interactive, db_flag in mkt.iarc_mappings.INTERACTIVES.items(): create_kwargs[db_flag] = db_flag in data ri, created = RatingInteractives.objects.get_or_create( addon=self, defaults=create_kwargs) if not created: ri.update(**create_kwargs) def set_iarc_storefront_data(self, disable=False): """Send app data to IARC for them to verify.""" try: iarc_info = self.iarc_info except IARCInfo.DoesNotExist: # App wasn't rated by IARC, return. return release_date = datetime.date.today() if self.status in amo.WEBAPPS_APPROVED_STATUSES: version = self.current_version if version and version.reviewed: release_date = version.reviewed elif self.status in amo.WEBAPPS_EXCLUDED_STATUSES: # Using `_latest_version` since the property returns None when # deleted. version = self._latest_version # Send an empty string to signify the app was removed. release_date = '' else: # If not approved or one of the disabled statuses, we shouldn't be # calling SET_STOREFRONT_DATA. Ignore this call. 
return log.debug('Calling SET_STOREFRONT_DATA for app:%s' % self.id) xmls = [] for cr in self.content_ratings.all(): xmls.append(render_xml('set_storefront_data.xml', { 'app_url': self.get_url_path(), 'submission_id': iarc_info.submission_id, 'security_code': iarc_info.security_code, 'rating_system': cr.get_body().iarc_name, 'release_date': '' if disable else release_date, 'title': get_iarc_app_title(self), 'company': version.developer_name if version else '', 'rating': cr.get_rating().iarc_name, 'descriptors': self.rating_descriptors.iarc_deserialize( body=cr.get_body()), 'interactive_elements': self.rating_interactives.iarc_deserialize(), })) for xml in xmls: r = get_iarc_client('services').Set_Storefront_Data(XMLString=xml) log.debug('IARC result app:%s, rating_body:%s: %s' % ( self.id, cr.get_body().iarc_name, r)) def last_rated_time(self): """Most recent content rating modified time or None if not rated.""" if self.is_rated(): return self.content_ratings.order_by('-modified')[0].modified class AddonUpsell(ModelBase): free = models.ForeignKey(Webapp, related_name='_upsell_from') premium = models.ForeignKey(Webapp, related_name='_upsell_to') class Meta: db_table = 'addon_upsell' unique_together = ('free', 'premium') def __unicode__(self): return u'Free: %s to Premium: %s' % (self.free, self.premium) @amo.cached_property def premium_addon(self): """ Return the premium version, or None if there isn't one. """ try: return self.premium except Webapp.DoesNotExist: pass def cleanup(self): try: # Just accessing these may raise an error. 
assert self.free and self.premium except ObjectDoesNotExist: log.info('Deleted upsell: from %s, to %s' % (self.free_id, self.premium_id)) self.delete() def cleanup_upsell(sender, instance, **kw): if 'raw' in kw: return both = Q(free=instance) | Q(premium=instance) for upsell in list(AddonUpsell.objects.filter(both)): upsell.cleanup() dbsignals.post_delete.connect(cleanup_upsell, sender=Webapp, dispatch_uid='addon_upsell') class Trending(ModelBase): addon = models.ForeignKey(Webapp, related_name='trending') value = models.FloatField(default=0.0) # When region=0, it's trending using install counts across all regions. region = models.PositiveIntegerField(null=False, default=0, db_index=True) class Meta: db_table = 'addons_trending' unique_together = ('addon', 'region') # Set translated_fields manually to avoid querying translations for addon # fields we don't use. Webapp._meta.translated_fields = [ Webapp._meta.get_field('homepage'), Webapp._meta.get_field('privacy_policy'), Webapp._meta.get_field('name'), Webapp._meta.get_field('description'), Webapp._meta.get_field('support_email'), Webapp._meta.get_field('support_url'), ] @receiver(dbsignals.post_save, sender=Webapp, dispatch_uid='webapp.search.index') def update_search_index(sender, instance, **kw): from . import tasks if not kw.get('raw'): if instance.upsold and instance.upsold.free_id: tasks.index_webapps.delay([instance.upsold.free_id]) tasks.index_webapps.delay([instance.id]) @receiver(dbsignals.post_save, sender=AddonUpsell, dispatch_uid='addonupsell.search.index') def update_search_index_upsell(sender, instance, **kw): # When saving an AddonUpsell instance, reindex both apps to update their # upsell/upsold properties in ES. from . 
import tasks if instance.free: tasks.index_webapps.delay([instance.free.id]) if instance.premium: tasks.index_webapps.delay([instance.premium.id]) models.signals.pre_save.connect(save_signal, sender=Webapp, dispatch_uid='webapp_translations') @receiver(signals.version_changed, dispatch_uid='update_cached_manifests') def update_cached_manifests(sender, **kw): if not kw.get('raw') and sender.is_packaged: from mkt.webapps.tasks import update_cached_manifests update_cached_manifests.delay(sender.id) @Webapp.on_change def watch_status(old_attr={}, new_attr={}, instance=None, sender=None, **kw): """Set nomination date when app is pending review.""" new_status = new_attr.get('status') if not new_status: return addon = instance old_status = old_attr['status'] # Log all status changes. if old_status != new_status: log.info( '[Webapp:{id}] Status changed from {old_status}:{old_status_name} ' 'to {new_status}:{new_status_name}'.format( id=addon.id, old_status=old_status, old_status_name=amo.STATUS_CHOICES_API.get(old_status, 'unknown'), new_status=new_status, new_status_name=amo.STATUS_CHOICES_API[new_status])) if new_status == amo.STATUS_PENDING and old_status != new_status: # We always set nomination date when app switches to PENDING, even if # previously rejected. try: latest = addon.versions.latest() log.debug('[Webapp:%s] Setting nomination date to now.' % addon.id) latest.update(nomination=datetime.datetime.now()) except Version.DoesNotExist: log.debug('[Webapp:%s] Missing version, no nomination set.' 
% addon.id) @Webapp.on_change def watch_disabled(old_attr={}, new_attr={}, instance=None, sender=None, **kw): attrs = dict((k, v) for k, v in old_attr.items() if k in ('disabled_by_user', 'status')) qs = (File.objects.filter(version__addon=instance.id) .exclude(version__deleted=True)) if Webapp(**attrs).is_disabled and not instance.is_disabled: for f in qs: f.unhide_disabled_file() if instance.is_disabled and not Webapp(**attrs).is_disabled: for f in qs: f.hide_disabled_file() @receiver(dbsignals.post_save, sender=Webapp, dispatch_uid='webapp.pre_generate_apk') def pre_generate_apk(sender=None, instance=None, **kw): """ Pre-generate an Android APK for a public app. """ if kw.get('raw'): return if not getattr(settings, 'PRE_GENERATE_APKS', False): log.info('[Webapp:{a}] APK pre-generation is disabled.' .format(a=instance.id)) return from . import tasks generated = False if instance.status in amo.WEBAPPS_APPROVED_STATUSES: app_devs = set(d.id for d in instance.device_types) if (amo.DEVICE_MOBILE.id in app_devs or amo.DEVICE_TABLET.id in app_devs): tasks.pre_generate_apk.delay(instance.id) generated = True log.info('[Webapp:{a}] APK pre-generated? {result}' .format(a=instance.id, result='YES' if generated else 'NO')) class Installed(ModelBase): """Track WebApp installations.""" addon = models.ForeignKey(Webapp, related_name='installed') user = models.ForeignKey('users.UserProfile') uuid = models.CharField(max_length=255, db_index=True, unique=True) # Because the addon could change between free and premium, # we need to store the state at time of install here. 
    # Premium type of the addon at install time; the app may later flip
    # between free and premium, so the value is frozen per-install.
    premium_type = models.PositiveIntegerField(
        null=True, default=None, choices=amo.ADDON_PREMIUM_TYPES.items())
    # How the install happened (user, developer, reviewer, ...).
    install_type = models.PositiveIntegerField(
        db_index=True, default=apps.INSTALL_TYPE_USER,
        choices=apps.INSTALL_TYPES.items())

    class Meta:
        db_table = 'users_install'
        unique_together = ('addon', 'user', 'install_type')


@receiver(models.signals.post_save, sender=Installed)
def add_uuid(sender, **kw):
    # Post-save hook: stamp each new Installed row with a unique uuid and
    # freeze the addon's premium_type at install time. Only fires once per
    # row (guarded by `not install.uuid and premium_type is None`) and is
    # skipped for fixture loads (`raw`).
    if not kw.get('raw'):
        install = kw['instance']
        if not install.uuid and install.premium_type is None:
            install.uuid = ('%s-%s' % (install.pk, str(uuid.uuid4())))
            install.premium_type = install.addon.premium_type
            install.save()


class AddonExcludedRegion(ModelBase):
    """
    Apps are listed in all regions by default.
    When regions are unchecked, we remember those excluded regions.
    """
    addon = models.ForeignKey(Webapp, related_name='addonexcludedregion')
    region = models.PositiveIntegerField(
        choices=mkt.regions.REGIONS_CHOICES_ID)

    class Meta:
        db_table = 'addons_excluded_regions'
        unique_together = ('addon', 'region')

    def __unicode__(self):
        region = self.get_region()
        return u'%s: %s' % (self.addon, region.slug if region else None)

    def get_region(self):
        # May return None if the stored id no longer maps to a known region.
        return mkt.regions.REGIONS_CHOICES_ID_DICT.get(self.region)


@memoize(prefix='get_excluded_in')
def get_excluded_in(region_id):
    """
    Return IDs of Webapp objects excluded from a particular region or excluded
    due to Geodata flags.
    """
    aers = list(AddonExcludedRegion.objects.filter(region=region_id)
                .values_list('addon', flat=True))

    # For pre-IARC unrated games in Brazil/Germany.
    geodata_qs = Q()
    region = parse_region(region_id)
    if region in (mkt.regions.BR, mkt.regions.DE):
        geodata_qs |= Q(**{'region_%s_iarc_exclude' % region.slug: True})

    # For USK_RATING_REFUSED apps in Germany.
if region == mkt.regions.DE: geodata_qs |= Q(**{'region_de_usk_exclude': True}) geodata_exclusions = [] if geodata_qs: geodata_exclusions = list(Geodata.objects.filter(geodata_qs) .values_list('addon', flat=True)) return set(aers + geodata_exclusions) @receiver(models.signals.post_save, sender=AddonExcludedRegion, dispatch_uid='clean_memoized_exclusions') def clean_memoized_exclusions(sender, **kw): if not kw.get('raw'): for k in mkt.regions.ALL_REGION_IDS: cache.delete_many([memoize_key('get_excluded_in', k) for k in mkt.regions.ALL_REGION_IDS]) class IARCInfo(ModelBase): """ Stored data for IARC. """ addon = models.OneToOneField(Webapp, related_name='iarc_info') submission_id = models.PositiveIntegerField(null=False) security_code = models.CharField(max_length=10) class Meta: db_table = 'webapps_iarc_info' def __unicode__(self): return u'app:%s' % self.addon.app_slug class ContentRating(ModelBase): """ Ratings body information about an app. """ addon = models.ForeignKey(Webapp, related_name='content_ratings') ratings_body = models.PositiveIntegerField( choices=[(k, rb.name) for k, rb in mkt.ratingsbodies.RATINGS_BODIES.items()], null=False) rating = models.PositiveIntegerField(null=False) class Meta: db_table = 'webapps_contentrating' unique_together = ('addon', 'ratings_body') def __unicode__(self): return u'%s: %s' % (self.addon, self.get_label()) def get_regions(self): """Gives us a list of Region classes that use this rating body.""" # All regions w/o specified ratings bodies fallback to Generic. 
generic_regions = [] if self.get_body_class() == mkt.ratingsbodies.GENERIC: generic_regions = mkt.regions.ALL_REGIONS_WITHOUT_CONTENT_RATINGS() return ([x for x in mkt.regions.ALL_REGIONS_WITH_CONTENT_RATINGS() if self.get_body_class() == x.ratingsbody] + list(generic_regions)) def get_region_slugs(self): """Gives us the region slugs that use this rating body.""" if self.get_body_class() == mkt.ratingsbodies.GENERIC: # For the generic rating body, we just pigeonhole all of the misc. # regions into one region slug, GENERIC. Reduces redundancy in the # final data structure. Rather than # {'pe': {generic_rating}, 'ar': {generic_rating}, etc}, generic # regions will just use single {'generic': {generic rating}} return [mkt.regions.GENERIC_RATING_REGION_SLUG] return [x.slug for x in self.get_regions()] def get_body_class(self): return mkt.ratingsbodies.RATINGS_BODIES[self.ratings_body] def get_body(self): """Ratings body instance with translated strings attached.""" return mkt.ratingsbodies.dehydrate_ratings_body(self.get_body_class()) def get_rating_class(self): return self.get_body_class().ratings[self.rating] def get_rating(self): """Ratings instance with translated strings attached.""" return mkt.ratingsbodies.dehydrate_rating(self.get_rating_class()) def get_label(self): """Gives us the name to be used for the form options.""" return u'%s - %s' % (self.get_body().name, self.get_rating().name) def update_status_content_ratings(sender, instance, **kw): # Flips the app's status from NULL if it has everything else together. if (instance.addon.has_incomplete_status() and instance.addon.is_fully_complete()): instance.addon.update(status=amo.STATUS_PENDING) models.signals.post_save.connect(update_status_content_ratings, sender=ContentRating, dispatch_uid='c_rating_update_app_status') # The RatingDescriptors table is created with dynamic fields based on # mkt.constants.ratingdescriptors. 
class RatingDescriptors(ModelBase, DynamicBoolFieldsMixin):
    """
    A dynamically generated model that contains a set of boolean values
    stating if an app is rated with a particular descriptor.
    """
    addon = models.OneToOneField(Webapp, related_name='rating_descriptors')

    class Meta:
        db_table = 'webapps_rating_descriptors'

    def __unicode__(self):
        return u'%s: %s' % (self.id, self.addon.name)

    def to_keys_by_body(self, body):
        # Keep only the flag keys belonging to a single ratings body;
        # keys are of the form 'has_<body>_<descriptor>'.
        prefix = 'has_%s' % body
        return [key for key in self.to_keys() if key.startswith(prefix)]

    def iarc_deserialize(self, body=None):
        """Map our descriptor strings back to the IARC ones (comma-sep.)."""
        keys = self.to_keys()
        if body:
            # Restrict to the descriptors of a single ratings body.
            keys = [key for key in keys if body.iarc_name.lower() in key]
        return ', '.join(iarc_mappings.REVERSE_DESCS.get(desc)
                         for desc in keys)

# Add a dynamic field to `RatingDescriptors` model for each rating descriptor.
for db_flag, desc in mkt.iarc_mappings.REVERSE_DESCS.items():
    field = models.BooleanField(default=False, help_text=desc)
    field.contribute_to_class(RatingDescriptors, db_flag)


# The RatingInteractives table is created with dynamic fields based on
# mkt.constants.ratinginteractives.
class RatingInteractives(ModelBase, DynamicBoolFieldsMixin):
    """
    A dynamically generated model that contains a set of boolean values
    stating if an app features a particular interactive element.
    """
    addon = models.OneToOneField(Webapp, related_name='rating_interactives')

    class Meta:
        db_table = 'webapps_rating_interactives'

    def __unicode__(self):
        return u'%s: %s' % (self.id, self.addon.name)

    def iarc_deserialize(self):
        """Map our interactive-element strings back to IARC (comma-sep.)."""
        return ', '.join(iarc_mappings.REVERSE_INTERACTIVES.get(inter)
                         for inter in self.to_keys())


# Add a dynamic field to `RatingInteractives` model for each interactive
# element.
for interactive, db_flag in mkt.iarc_mappings.INTERACTIVES.items():
    field = models.BooleanField(default=False, help_text=interactive)
    field.contribute_to_class(RatingInteractives, db_flag)


def iarc_cleanup(*args, **kwargs):
    """Purge all IARC-related rows for a deleted app.

    Removing the data frees the IARC certificate for re-use later.
    """
    instance = kwargs.get('instance')
    IARCInfo.objects.filter(addon=instance).delete()
    ContentRating.objects.filter(addon=instance).delete()
    RatingDescriptors.objects.filter(addon=instance).delete()
    RatingInteractives.objects.filter(addon=instance).delete()


# When an app is deleted we need to remove the IARC data so the certificate
# can be re-used later.
models.signals.post_delete.connect(iarc_cleanup, sender=Webapp,
                                   dispatch_uid='webapps_iarc_cleanup')


# The AppFeatures table is created with dynamic fields based on
# mkt.constants.features, which requires some setup work before we call
# `type`.
class AppFeatures(ModelBase, DynamicBoolFieldsMixin):
    """
    A dynamically generated model that contains a set of boolean values
    stating if an app requires a particular feature.
    """
    version = models.OneToOneField(Version, related_name='features')
    field_source = APP_FEATURES

    class Meta:
        db_table = 'addons_features'

    def __unicode__(self):
        return u'Version: %s: %s' % (self.version.id, self.to_signature())

    def set_flags(self, signature):
        """
        Sets flags given the signature.

        This takes the reverse steps in `to_signature`: the hex profile at
        the front of the signature is parsed back to an integer and each
        flag is set from its bit.  Bitwise math is used because "0.23.1"
        is a valid signature but does not produce a string of the required
        length for string indexing.
        """
        fields = self._fields()
        # Grab the profile part of the signature and parse it as hex.
        # (The previous bin(...).lstrip('0b') round-trip stripped
        # *characters*, so an all-zero profile became '' and crashed.)
        try:
            profile = int(signature.split('.')[0], 16)
        except ValueError as e:
            log.error(u'ValueError converting %s. %s' % (signature, e))
            return
        n = len(fields) - 1
        for i, f in enumerate(fields):
            # Field i corresponds to bit (n - i) of the profile.
            setattr(self, f, bool(profile & 2 ** (n - i)))

    def to_signature(self):
        """
        This converts the boolean values of the flags to a signature string.

        For example, all the flags in APP_FEATURES order produce a string of
        binary digits that is then converted to a hexadecimal string with the
        length of the features list plus a version appended. E.g.::

            >>> profile = '10001010111111010101011'
            >>> int(profile, 2)
            4554411
            >>> '%x' % int(profile, 2)
            '457eab'
            >>> '%x.%s.%s' % (int(profile, 2), len(profile), 1)
            '457eab.23.1'

        """
        profile = ''.join('1' if getattr(self, f) else '0'
                          for f in self._fields())
        return '%x.%s.%s' % (int(profile, 2), len(profile),
                             settings.APP_FEATURES_VERSION)

    def to_list(self):
        keys = self.to_keys()
        # Strip `has_` from each feature.
        field_names = [self.field_source[key[4:].upper()]['name']
                       for key in keys]
        return sorted(field_names)


# Add a dynamic field to `AppFeatures` model for each buchet feature.
for k, v in APP_FEATURES.iteritems():
    field = models.BooleanField(default=False, help_text=v['name'])
    field.contribute_to_class(AppFeatures, 'has_%s' % k.lower())


class AppManifest(ModelBase):
    """
    Storage for manifests.

    Tied to version since they change between versions. This stores both
    hosted and packaged apps manifests for easy access.
    """
    version = models.OneToOneField(Version, related_name='manifest_json')
    manifest = models.TextField()

    class Meta:
        db_table = 'app_manifest'


class RegionListField(json_field.JSONField):
    """JSON list column whose members are coerced to ints on load."""

    def to_python(self, value):
        value = super(RegionListField, self).to_python(value)
        if value:
            value = [int(v) for v in value]
        return value


class Geodata(ModelBase):
    """TODO: Forgo AER and use bool columns for every region and carrier."""
    addon = models.OneToOneField(Webapp, related_name='_geodata')
    restricted = models.BooleanField(default=False)
    popular_region = models.CharField(max_length=10, null=True)
    banner_regions = RegionListField(default=None, null=True)
    banner_message = PurifiedField()
    # Exclude apps with USK_RATING_REFUSED in Germany.
    region_de_usk_exclude = models.BooleanField(default=False)

    class Meta:
        db_table = 'webapps_geodata'

    def __unicode__(self):
        return u'%s (%s): <Webapp %s>' % (
            self.id, 'restricted' if self.restricted else 'unrestricted',
            self.addon.id)

    def get_status(self, region):
        """
        Return the status of listing in a given region (e.g., China).
        """
        return getattr(self, 'region_%s_status' % parse_region(region).slug,
                       amo.STATUS_PUBLIC)

    def set_status(self, region, status, save=False):
        """Return a tuple of `(value, changed)`.

        `value` is the status that was set (None if this region has no
        status column), `changed` is whether it differed from the stored
        value.  With `save=True` the row is saved only when it changed.
        """
        value, changed = None, False

        attr = 'region_%s_status' % parse_region(region).slug
        if hasattr(self, attr):
            # Compare against the *previous* value.  (setattr() returns
            # None, so the old `value = setattr(...)` comparison made
            # `changed` unconditionally true.)
            previous = getattr(self, attr)
            setattr(self, attr, status)
            value = status
            changed = previous != status
            # Save only if the value is different.
            if changed and save:
                self.save()

        return value, changed

    def get_status_slug(self, region):
        return {
            amo.STATUS_PENDING: 'pending',
            amo.STATUS_PUBLIC: 'public',
            amo.STATUS_REJECTED: 'rejected',
        }.get(self.get_status(region), 'unavailable')

    @classmethod
    def get_status_messages(cls):
        return {
            # L10n: An app is awaiting approval for a particular region.
            'pending': _('awaiting approval'),
            # L10n: An app is rejected for a particular region.
            'rejected': _('rejected'),
            # L10n: An app requires additional review for a particular region.
            'unavailable': _('requires additional review')
        }

    def banner_regions_names(self):
        if self.banner_regions is None:
            return []
        return sorted(unicode(mkt.regions.REGIONS_CHOICES_ID_DICT.get(k).name)
                      for k in self.banner_regions)

    def banner_regions_slugs(self):
        if self.banner_regions is None:
            return []
        return sorted(unicode(mkt.regions.REGIONS_CHOICES_ID_DICT.get(k).slug)
                      for k in self.banner_regions)

    def get_nominated_date(self, region):
        """
        Return the timestamp of when the app was approved in a region.
        """
        return getattr(self,
                       'region_%s_nominated' % parse_region(region).slug)

    def set_nominated_date(self, region, timestamp=None, save=False):
        """Return a tuple of `(value, changed)`.

        `value` is the timestamp that was set (defaults to now; None if
        this region has no nominated column), `changed` is whether it
        differed from the stored value.  With `save=True` the row is
        saved only when it changed.
        """
        value, changed = None, False

        attr = 'region_%s_nominated' % parse_region(region).slug
        if hasattr(self, attr):
            if timestamp is None:
                timestamp = datetime.datetime.now()
            # See set_status: compare against the previous value, not the
            # None that setattr() returns.
            previous = getattr(self, attr)
            setattr(self, attr, timestamp)
            value = timestamp
            changed = previous != timestamp
            # Save only if the value is different.
            if changed and save:
                self.save()

        return value, changed


# (1) Add a dynamic status field to `Geodata` model for each special region:
# - 0: STATUS_NULL (Unavailable)
# - 2: STATUS_PENDING (Pending)
# - 4: STATUS_PUBLIC (Public)
# - 12: STATUS_REJECTED (Rejected)
#
# (2) Add a dynamic nominated field to keep track of timestamp for when
# the developer requested approval for each region.
for region in mkt.regions.SPECIAL_REGIONS:
    help_text = _('{region} approval status').format(region=region.name)
    field = models.PositiveIntegerField(help_text=help_text,
                                        choices=amo.STATUS_CHOICES.items(),
                                        db_index=True,
                                        default=amo.STATUS_PENDING)
    field.contribute_to_class(Geodata, 'region_%s_status' % region.slug)

    help_text = _('{region} nomination date').format(region=region.name)
    field = models.DateTimeField(help_text=help_text, null=True)
    field.contribute_to_class(Geodata, 'region_%s_nominated' % region.slug)

# Add a dynamic field to `Geodata` model to exclude pre-IARC public unrated
# Brazil and Germany games.
for region in (mkt.regions.BR, mkt.regions.DE):
    field = models.BooleanField(default=False)
    field.contribute_to_class(Geodata, 'region_%s_iarc_exclude' % region.slug)

# Save geodata translations when a Geodata instance is saved.
models.signals.pre_save.connect(save_signal, sender=Geodata,
                                dispatch_uid='geodata_translations')
codeparrot/github-code-clean
"""Tests for base_events.py""" import errno import logging import math import os import socket import sys import threading import time import unittest from unittest import mock import asyncio from asyncio import base_events from asyncio import constants from asyncio import events from test.test_asyncio import utils as test_utils from test import support from test.support.script_helper import assert_python_ok MOCK_ANY = mock.ANY PY34 = sys.version_info >= (3, 4) def mock_socket_module(): m_socket = mock.MagicMock(spec=socket) for name in ( 'AF_INET', 'AF_INET6', 'AF_UNSPEC', 'IPPROTO_TCP', 'IPPROTO_UDP', 'SOCK_STREAM', 'SOCK_DGRAM', 'SOL_SOCKET', 'SO_REUSEADDR', 'inet_pton' ): if hasattr(socket, name): setattr(m_socket, name, getattr(socket, name)) else: delattr(m_socket, name) m_socket.socket = mock.MagicMock() m_socket.socket.return_value = test_utils.mock_nonblocking_socket() m_socket.getaddrinfo._is_coroutine = False return m_socket def patch_socket(f): return mock.patch('asyncio.base_events.socket', new_callable=mock_socket_module)(f) class BaseEventTests(test_utils.TestCase): def test_ipaddr_info(self): UNSPEC = socket.AF_UNSPEC INET = socket.AF_INET INET6 = socket.AF_INET6 STREAM = socket.SOCK_STREAM DGRAM = socket.SOCK_DGRAM TCP = socket.IPPROTO_TCP UDP = socket.IPPROTO_UDP self.assertEqual( (INET, STREAM, TCP, '', ('1.2.3.4', 1)), base_events._ipaddr_info('1.2.3.4', 1, INET, STREAM, TCP)) self.assertEqual( (INET, STREAM, TCP, '', ('1.2.3.4', 1)), base_events._ipaddr_info(b'1.2.3.4', 1, INET, STREAM, TCP)) self.assertEqual( (INET, STREAM, TCP, '', ('1.2.3.4', 1)), base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, STREAM, TCP)) self.assertEqual( (INET, DGRAM, UDP, '', ('1.2.3.4', 1)), base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, DGRAM, UDP)) # Socket type STREAM implies TCP protocol. self.assertEqual( (INET, STREAM, TCP, '', ('1.2.3.4', 1)), base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, STREAM, 0)) # Socket type DGRAM implies UDP protocol. 
self.assertEqual( (INET, DGRAM, UDP, '', ('1.2.3.4', 1)), base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, DGRAM, 0)) # No socket type. self.assertIsNone( base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, 0, 0)) # IPv4 address with family IPv6. self.assertIsNone( base_events._ipaddr_info('1.2.3.4', 1, INET6, STREAM, TCP)) self.assertEqual( (INET6, STREAM, TCP, '', ('::3', 1, 0, 0)), base_events._ipaddr_info('::3', 1, INET6, STREAM, TCP)) self.assertEqual( (INET6, STREAM, TCP, '', ('::3', 1, 0, 0)), base_events._ipaddr_info('::3', 1, UNSPEC, STREAM, TCP)) # IPv6 address with family IPv4. self.assertIsNone( base_events._ipaddr_info('::3', 1, INET, STREAM, TCP)) # IPv6 address with zone index. self.assertIsNone( base_events._ipaddr_info('::3%lo0', 1, INET6, STREAM, TCP)) def test_port_parameter_types(self): # Test obscure kinds of arguments for "port". INET = socket.AF_INET STREAM = socket.SOCK_STREAM TCP = socket.IPPROTO_TCP self.assertEqual( (INET, STREAM, TCP, '', ('1.2.3.4', 0)), base_events._ipaddr_info('1.2.3.4', None, INET, STREAM, TCP)) self.assertEqual( (INET, STREAM, TCP, '', ('1.2.3.4', 0)), base_events._ipaddr_info('1.2.3.4', b'', INET, STREAM, TCP)) self.assertEqual( (INET, STREAM, TCP, '', ('1.2.3.4', 0)), base_events._ipaddr_info('1.2.3.4', '', INET, STREAM, TCP)) self.assertEqual( (INET, STREAM, TCP, '', ('1.2.3.4', 1)), base_events._ipaddr_info('1.2.3.4', '1', INET, STREAM, TCP)) self.assertEqual( (INET, STREAM, TCP, '', ('1.2.3.4', 1)), base_events._ipaddr_info('1.2.3.4', b'1', INET, STREAM, TCP)) @patch_socket def test_ipaddr_info_no_inet_pton(self, m_socket): del m_socket.inet_pton self.assertIsNone(base_events._ipaddr_info('1.2.3.4', 1, socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)) class BaseEventLoopTests(test_utils.TestCase): def setUp(self): super().setUp() self.loop = base_events.BaseEventLoop() self.loop._selector = mock.Mock() self.loop._selector.select.return_value = () self.set_event_loop(self.loop) def test_not_implemented(self): m 
= mock.Mock() self.assertRaises( NotImplementedError, self.loop._make_socket_transport, m, m) self.assertRaises( NotImplementedError, self.loop._make_ssl_transport, m, m, m, m) self.assertRaises( NotImplementedError, self.loop._make_datagram_transport, m, m) self.assertRaises( NotImplementedError, self.loop._process_events, []) self.assertRaises( NotImplementedError, self.loop._write_to_self) self.assertRaises( NotImplementedError, self.loop._make_read_pipe_transport, m, m) self.assertRaises( NotImplementedError, self.loop._make_write_pipe_transport, m, m) gen = self.loop._make_subprocess_transport(m, m, m, m, m, m, m) with self.assertRaises(NotImplementedError): gen.send(None) def test_close(self): self.assertFalse(self.loop.is_closed()) self.loop.close() self.assertTrue(self.loop.is_closed()) # it should be possible to call close() more than once self.loop.close() self.loop.close() # operation blocked when the loop is closed f = asyncio.Future(loop=self.loop) self.assertRaises(RuntimeError, self.loop.run_forever) self.assertRaises(RuntimeError, self.loop.run_until_complete, f) def test__add_callback_handle(self): h = asyncio.Handle(lambda: False, (), self.loop, None) self.loop._add_callback(h) self.assertFalse(self.loop._scheduled) self.assertIn(h, self.loop._ready) def test__add_callback_cancelled_handle(self): h = asyncio.Handle(lambda: False, (), self.loop, None) h.cancel() self.loop._add_callback(h) self.assertFalse(self.loop._scheduled) self.assertFalse(self.loop._ready) def test_set_default_executor(self): executor = mock.Mock() self.loop.set_default_executor(executor) self.assertIs(executor, self.loop._default_executor) def test_call_soon(self): def cb(): pass h = self.loop.call_soon(cb) self.assertEqual(h._callback, cb) self.assertIsInstance(h, asyncio.Handle) self.assertIn(h, self.loop._ready) def test_call_soon_non_callable(self): self.loop.set_debug(True) with self.assertRaisesRegex(TypeError, 'a callable object'): self.loop.call_soon(1) def 
test_call_later(self): def cb(): pass h = self.loop.call_later(10.0, cb) self.assertIsInstance(h, asyncio.TimerHandle) self.assertIn(h, self.loop._scheduled) self.assertNotIn(h, self.loop._ready) def test_call_later_negative_delays(self): calls = [] def cb(arg): calls.append(arg) self.loop._process_events = mock.Mock() self.loop.call_later(-1, cb, 'a') self.loop.call_later(-2, cb, 'b') test_utils.run_briefly(self.loop) self.assertEqual(calls, ['b', 'a']) def test_time_and_call_at(self): def cb(): self.loop.stop() self.loop._process_events = mock.Mock() delay = 0.1 when = self.loop.time() + delay self.loop.call_at(when, cb) t0 = self.loop.time() self.loop.run_forever() dt = self.loop.time() - t0 # 50 ms: maximum granularity of the event loop self.assertGreaterEqual(dt, delay - 0.050, dt) # tolerate a difference of +800 ms because some Python buildbots # are really slow self.assertLessEqual(dt, 0.9, dt) def check_thread(self, loop, debug): def cb(): pass loop.set_debug(debug) if debug: msg = ("Non-thread-safe operation invoked on an event loop other " "than the current one") with self.assertRaisesRegex(RuntimeError, msg): loop.call_soon(cb) with self.assertRaisesRegex(RuntimeError, msg): loop.call_later(60, cb) with self.assertRaisesRegex(RuntimeError, msg): loop.call_at(loop.time() + 60, cb) else: loop.call_soon(cb) loop.call_later(60, cb) loop.call_at(loop.time() + 60, cb) def test_check_thread(self): def check_in_thread(loop, event, debug, create_loop, fut): # wait until the event loop is running event.wait() try: if create_loop: loop2 = base_events.BaseEventLoop() try: asyncio.set_event_loop(loop2) self.check_thread(loop, debug) finally: asyncio.set_event_loop(None) loop2.close() else: self.check_thread(loop, debug) except Exception as exc: loop.call_soon_threadsafe(fut.set_exception, exc) else: loop.call_soon_threadsafe(fut.set_result, None) def test_thread(loop, debug, create_loop=False): event = threading.Event() fut = asyncio.Future(loop=loop) 
loop.call_soon(event.set) args = (loop, event, debug, create_loop, fut) thread = threading.Thread(target=check_in_thread, args=args) thread.start() loop.run_until_complete(fut) thread.join() self.loop._process_events = mock.Mock() self.loop._write_to_self = mock.Mock() # raise RuntimeError if the thread has no event loop test_thread(self.loop, True) # check disabled if debug mode is disabled test_thread(self.loop, False) # raise RuntimeError if the event loop of the thread is not the called # event loop test_thread(self.loop, True, create_loop=True) # check disabled if debug mode is disabled test_thread(self.loop, False, create_loop=True) def test__run_once(self): h1 = asyncio.TimerHandle(time.monotonic() + 5.0, lambda: True, (), self.loop, None) h2 = asyncio.TimerHandle(time.monotonic() + 10.0, lambda: True, (), self.loop, None) h1.cancel() self.loop._process_events = mock.Mock() self.loop._scheduled.append(h1) self.loop._scheduled.append(h2) self.loop._run_once() t = self.loop._selector.select.call_args[0][0] self.assertTrue(9.5 < t < 10.5, t) self.assertEqual([h2], self.loop._scheduled) self.assertTrue(self.loop._process_events.called) def test_set_debug(self): self.loop.set_debug(True) self.assertTrue(self.loop.get_debug()) self.loop.set_debug(False) self.assertFalse(self.loop.get_debug()) @mock.patch('asyncio.base_events.logger') def test__run_once_logging(self, m_logger): def slow_select(timeout): # Sleep a bit longer than a second to avoid timer resolution # issues. time.sleep(1.1) return [] # logging needs debug flag self.loop.set_debug(True) # Log to INFO level if timeout > 1.0 sec. 
self.loop._selector.select = slow_select self.loop._process_events = mock.Mock() self.loop._run_once() self.assertEqual(logging.INFO, m_logger.log.call_args[0][0]) def fast_select(timeout): time.sleep(0.001) return [] self.loop._selector.select = fast_select self.loop._run_once() self.assertEqual(logging.DEBUG, m_logger.log.call_args[0][0]) def test__run_once_schedule_handle(self): handle = None processed = False def cb(loop): nonlocal processed, handle processed = True handle = loop.call_soon(lambda: True) h = asyncio.TimerHandle(time.monotonic() - 1, cb, (self.loop,), self.loop, None) self.loop._process_events = mock.Mock() self.loop._scheduled.append(h) self.loop._run_once() self.assertTrue(processed) self.assertEqual([handle], list(self.loop._ready)) def test__run_once_cancelled_event_cleanup(self): self.loop._process_events = mock.Mock() self.assertTrue( 0 < base_events._MIN_CANCELLED_TIMER_HANDLES_FRACTION < 1.0) def cb(): pass # Set up one "blocking" event that will not be cancelled to # ensure later cancelled events do not make it to the head # of the queue and get cleaned. 
not_cancelled_count = 1 self.loop.call_later(3000, cb) # Add less than threshold (base_events._MIN_SCHEDULED_TIMER_HANDLES) # cancelled handles, ensure they aren't removed cancelled_count = 2 for x in range(2): h = self.loop.call_later(3600, cb) h.cancel() # Add some cancelled events that will be at head and removed cancelled_count += 2 for x in range(2): h = self.loop.call_later(100, cb) h.cancel() # This test is invalid if _MIN_SCHEDULED_TIMER_HANDLES is too low self.assertLessEqual(cancelled_count + not_cancelled_count, base_events._MIN_SCHEDULED_TIMER_HANDLES) self.assertEqual(self.loop._timer_cancelled_count, cancelled_count) self.loop._run_once() cancelled_count -= 2 self.assertEqual(self.loop._timer_cancelled_count, cancelled_count) self.assertEqual(len(self.loop._scheduled), cancelled_count + not_cancelled_count) # Need enough events to pass _MIN_CANCELLED_TIMER_HANDLES_FRACTION # so that deletion of cancelled events will occur on next _run_once add_cancel_count = int(math.ceil( base_events._MIN_SCHEDULED_TIMER_HANDLES * base_events._MIN_CANCELLED_TIMER_HANDLES_FRACTION)) + 1 add_not_cancel_count = max(base_events._MIN_SCHEDULED_TIMER_HANDLES - add_cancel_count, 0) # Add some events that will not be cancelled not_cancelled_count += add_not_cancel_count for x in range(add_not_cancel_count): self.loop.call_later(3600, cb) # Add enough cancelled events cancelled_count += add_cancel_count for x in range(add_cancel_count): h = self.loop.call_later(3600, cb) h.cancel() # Ensure all handles are still scheduled self.assertEqual(len(self.loop._scheduled), cancelled_count + not_cancelled_count) self.loop._run_once() # Ensure cancelled events were removed self.assertEqual(len(self.loop._scheduled), not_cancelled_count) # Ensure only uncancelled events remain scheduled self.assertTrue(all([not x._cancelled for x in self.loop._scheduled])) def test_run_until_complete_type_error(self): self.assertRaises(TypeError, self.loop.run_until_complete, 'blah') def 
test_run_until_complete_loop(self): task = asyncio.Future(loop=self.loop) other_loop = self.new_test_loop() self.addCleanup(other_loop.close) self.assertRaises(ValueError, other_loop.run_until_complete, task) def test_run_until_complete_loop_orphan_future_close_loop(self): class ShowStopper(BaseException): pass async def foo(delay): await asyncio.sleep(delay, loop=self.loop) def throw(): raise ShowStopper self.loop._process_events = mock.Mock() self.loop.call_soon(throw) try: self.loop.run_until_complete(foo(0.1)) except ShowStopper: pass # This call fails if run_until_complete does not clean up # done-callback for the previous future. self.loop.run_until_complete(foo(0.2)) def test_subprocess_exec_invalid_args(self): args = [sys.executable, '-c', 'pass'] # missing program parameter (empty args) self.assertRaises(TypeError, self.loop.run_until_complete, self.loop.subprocess_exec, asyncio.SubprocessProtocol) # expected multiple arguments, not a list self.assertRaises(TypeError, self.loop.run_until_complete, self.loop.subprocess_exec, asyncio.SubprocessProtocol, args) # program arguments must be strings, not int self.assertRaises(TypeError, self.loop.run_until_complete, self.loop.subprocess_exec, asyncio.SubprocessProtocol, sys.executable, 123) # universal_newlines, shell, bufsize must not be set self.assertRaises(TypeError, self.loop.run_until_complete, self.loop.subprocess_exec, asyncio.SubprocessProtocol, *args, universal_newlines=True) self.assertRaises(TypeError, self.loop.run_until_complete, self.loop.subprocess_exec, asyncio.SubprocessProtocol, *args, shell=True) self.assertRaises(TypeError, self.loop.run_until_complete, self.loop.subprocess_exec, asyncio.SubprocessProtocol, *args, bufsize=4096) def test_subprocess_shell_invalid_args(self): # expected a string, not an int or a list self.assertRaises(TypeError, self.loop.run_until_complete, self.loop.subprocess_shell, asyncio.SubprocessProtocol, 123) self.assertRaises(TypeError, self.loop.run_until_complete, 
self.loop.subprocess_shell, asyncio.SubprocessProtocol, [sys.executable, '-c', 'pass']) # universal_newlines, shell, bufsize must not be set self.assertRaises(TypeError, self.loop.run_until_complete, self.loop.subprocess_shell, asyncio.SubprocessProtocol, 'exit 0', universal_newlines=True) self.assertRaises(TypeError, self.loop.run_until_complete, self.loop.subprocess_shell, asyncio.SubprocessProtocol, 'exit 0', shell=True) self.assertRaises(TypeError, self.loop.run_until_complete, self.loop.subprocess_shell, asyncio.SubprocessProtocol, 'exit 0', bufsize=4096) def test_default_exc_handler_callback(self): self.loop._process_events = mock.Mock() def zero_error(fut): fut.set_result(True) 1/0 # Test call_soon (events.Handle) with mock.patch('asyncio.base_events.logger') as log: fut = asyncio.Future(loop=self.loop) self.loop.call_soon(zero_error, fut) fut.add_done_callback(lambda fut: self.loop.stop()) self.loop.run_forever() log.error.assert_called_with( test_utils.MockPattern('Exception in callback.*zero'), exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY)) # Test call_later (events.TimerHandle) with mock.patch('asyncio.base_events.logger') as log: fut = asyncio.Future(loop=self.loop) self.loop.call_later(0.01, zero_error, fut) fut.add_done_callback(lambda fut: self.loop.stop()) self.loop.run_forever() log.error.assert_called_with( test_utils.MockPattern('Exception in callback.*zero'), exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY)) def test_default_exc_handler_coro(self): self.loop._process_events = mock.Mock() @asyncio.coroutine def zero_error_coro(): yield from asyncio.sleep(0.01, loop=self.loop) 1/0 # Test Future.__del__ with mock.patch('asyncio.base_events.logger') as log: fut = asyncio.ensure_future(zero_error_coro(), loop=self.loop) fut.add_done_callback(lambda *args: self.loop.stop()) self.loop.run_forever() fut = None # Trigger Future.__del__ or futures._TracebackLogger support.gc_collect() if PY34: # Future.__del__ in Python 3.4 logs error with # an actual 
exception context log.error.assert_called_with( test_utils.MockPattern('.*exception was never retrieved'), exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY)) else: # futures._TracebackLogger logs only textual traceback log.error.assert_called_with( test_utils.MockPattern( '.*exception was never retrieved.*ZeroDiv'), exc_info=False) def test_set_exc_handler_invalid(self): with self.assertRaisesRegex(TypeError, 'A callable object or None'): self.loop.set_exception_handler('spam') def test_set_exc_handler_custom(self): def zero_error(): 1/0 def run_loop(): handle = self.loop.call_soon(zero_error) self.loop._run_once() return handle self.loop.set_debug(True) self.loop._process_events = mock.Mock() self.assertIsNone(self.loop.get_exception_handler()) mock_handler = mock.Mock() self.loop.set_exception_handler(mock_handler) self.assertIs(self.loop.get_exception_handler(), mock_handler) handle = run_loop() mock_handler.assert_called_with(self.loop, { 'exception': MOCK_ANY, 'message': test_utils.MockPattern( 'Exception in callback.*zero_error'), 'handle': handle, 'source_traceback': handle._source_traceback, }) mock_handler.reset_mock() self.loop.set_exception_handler(None) with mock.patch('asyncio.base_events.logger') as log: run_loop() log.error.assert_called_with( test_utils.MockPattern( 'Exception in callback.*zero'), exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY)) assert not mock_handler.called def test_set_exc_handler_broken(self): def run_loop(): def zero_error(): 1/0 self.loop.call_soon(zero_error) self.loop._run_once() def handler(loop, context): raise AttributeError('spam') self.loop._process_events = mock.Mock() self.loop.set_exception_handler(handler) with mock.patch('asyncio.base_events.logger') as log: run_loop() log.error.assert_called_with( test_utils.MockPattern( 'Unhandled error in exception handler'), exc_info=(AttributeError, MOCK_ANY, MOCK_ANY)) def test_default_exc_handler_broken(self): _context = None class Loop(base_events.BaseEventLoop): _selector 
= mock.Mock() _process_events = mock.Mock() def default_exception_handler(self, context): nonlocal _context _context = context # Simulates custom buggy "default_exception_handler" raise ValueError('spam') loop = Loop() self.addCleanup(loop.close) asyncio.set_event_loop(loop) def run_loop(): def zero_error(): 1/0 loop.call_soon(zero_error) loop._run_once() with mock.patch('asyncio.base_events.logger') as log: run_loop() log.error.assert_called_with( 'Exception in default exception handler', exc_info=True) def custom_handler(loop, context): raise ValueError('ham') _context = None loop.set_exception_handler(custom_handler) with mock.patch('asyncio.base_events.logger') as log: run_loop() log.error.assert_called_with( test_utils.MockPattern('Exception in default exception.*' 'while handling.*in custom'), exc_info=True) # Check that original context was passed to default # exception handler. self.assertIn('context', _context) self.assertIs(type(_context['context']['exception']), ZeroDivisionError) def test_set_task_factory_invalid(self): with self.assertRaisesRegex( TypeError, 'task factory must be a callable or None'): self.loop.set_task_factory(1) self.assertIsNone(self.loop.get_task_factory()) def test_set_task_factory(self): self.loop._process_events = mock.Mock() class MyTask(asyncio.Task): pass @asyncio.coroutine def coro(): pass factory = lambda loop, coro: MyTask(coro, loop=loop) self.assertIsNone(self.loop.get_task_factory()) self.loop.set_task_factory(factory) self.assertIs(self.loop.get_task_factory(), factory) task = self.loop.create_task(coro()) self.assertTrue(isinstance(task, MyTask)) self.loop.run_until_complete(task) self.loop.set_task_factory(None) self.assertIsNone(self.loop.get_task_factory()) task = self.loop.create_task(coro()) self.assertTrue(isinstance(task, asyncio.Task)) self.assertFalse(isinstance(task, MyTask)) self.loop.run_until_complete(task) def test_env_var_debug(self): code = '\n'.join(( 'import asyncio', 'loop = 
asyncio.get_event_loop()', 'print(loop.get_debug())')) # Test with -E to not fail if the unit test was run with # PYTHONASYNCIODEBUG set to a non-empty string sts, stdout, stderr = assert_python_ok('-E', '-c', code) self.assertEqual(stdout.rstrip(), b'False') sts, stdout, stderr = assert_python_ok('-c', code, PYTHONASYNCIODEBUG='', PYTHONDEVMODE='') self.assertEqual(stdout.rstrip(), b'False') sts, stdout, stderr = assert_python_ok('-c', code, PYTHONASYNCIODEBUG='1', PYTHONDEVMODE='') self.assertEqual(stdout.rstrip(), b'True') sts, stdout, stderr = assert_python_ok('-E', '-c', code, PYTHONASYNCIODEBUG='1') self.assertEqual(stdout.rstrip(), b'False') # -X dev sts, stdout, stderr = assert_python_ok('-E', '-X', 'dev', '-c', code) self.assertEqual(stdout.rstrip(), b'True') def test_create_task(self): class MyTask(asyncio.Task): pass @asyncio.coroutine def test(): pass class EventLoop(base_events.BaseEventLoop): def create_task(self, coro): return MyTask(coro, loop=loop) loop = EventLoop() self.set_event_loop(loop) coro = test() task = asyncio.ensure_future(coro, loop=loop) self.assertIsInstance(task, MyTask) # make warnings quiet task._log_destroy_pending = False coro.close() def test_run_forever_keyboard_interrupt(self): # Python issue #22601: ensure that the temporary task created by # run_forever() consumes the KeyboardInterrupt and so don't log # a warning @asyncio.coroutine def raise_keyboard_interrupt(): raise KeyboardInterrupt self.loop._process_events = mock.Mock() self.loop.call_exception_handler = mock.Mock() try: self.loop.run_until_complete(raise_keyboard_interrupt()) except KeyboardInterrupt: pass self.loop.close() support.gc_collect() self.assertFalse(self.loop.call_exception_handler.called) def test_run_until_complete_baseexception(self): # Python issue #22429: run_until_complete() must not schedule a pending # call to stop() if the future raised a BaseException @asyncio.coroutine def raise_keyboard_interrupt(): raise KeyboardInterrupt 
self.loop._process_events = mock.Mock() try: self.loop.run_until_complete(raise_keyboard_interrupt()) except KeyboardInterrupt: pass def func(): self.loop.stop() func.called = True func.called = False try: self.loop.call_soon(func) self.loop.run_forever() except KeyboardInterrupt: pass self.assertTrue(func.called) def test_single_selecter_event_callback_after_stopping(self): # Python issue #25593: A stopped event loop may cause event callbacks # to run more than once. event_sentinel = object() callcount = 0 doer = None def proc_events(event_list): nonlocal doer if event_sentinel in event_list: doer = self.loop.call_soon(do_event) def do_event(): nonlocal callcount callcount += 1 self.loop.call_soon(clear_selector) def clear_selector(): doer.cancel() self.loop._selector.select.return_value = () self.loop._process_events = proc_events self.loop._selector.select.return_value = (event_sentinel,) for i in range(1, 3): with self.subTest('Loop %d/2' % i): self.loop.call_soon(self.loop.stop) self.loop.run_forever() self.assertEqual(callcount, 1) def test_run_once(self): # Simple test for test_utils.run_once(). It may seem strange # to have a test for this (the function isn't even used!) but # it's a de-factor standard API for library tests. This tests # the idiom: loop.call_soon(loop.stop); loop.run_forever(). count = 0 def callback(): nonlocal count count += 1 self.loop._process_events = mock.Mock() self.loop.call_soon(callback) test_utils.run_once(self.loop) self.assertEqual(count, 1) def test_run_forever_pre_stopped(self): # Test that the old idiom for pre-stopping the loop works. self.loop._process_events = mock.Mock() self.loop.stop() self.loop.run_forever() self.loop._selector.select.assert_called_once_with(0) async def leave_unfinalized_asyncgen(self): # Create an async generator, iterate it partially, and leave it # to be garbage collected. # Used in async generator finalization tests. # Depends on implementation details of garbage collector. 
Changes # in gc may break this function. status = {'started': False, 'stopped': False, 'finalized': False} async def agen(): status['started'] = True try: for item in ['ZERO', 'ONE', 'TWO', 'THREE', 'FOUR']: yield item finally: status['finalized'] = True ag = agen() ai = ag.__aiter__() async def iter_one(): try: item = await ai.__anext__() except StopAsyncIteration: return if item == 'THREE': status['stopped'] = True return asyncio.create_task(iter_one()) asyncio.create_task(iter_one()) return status def test_asyncgen_finalization_by_gc(self): # Async generators should be finalized when garbage collected. self.loop._process_events = mock.Mock() self.loop._write_to_self = mock.Mock() with support.disable_gc(): status = self.loop.run_until_complete(self.leave_unfinalized_asyncgen()) while not status['stopped']: test_utils.run_briefly(self.loop) self.assertTrue(status['started']) self.assertTrue(status['stopped']) self.assertFalse(status['finalized']) support.gc_collect() test_utils.run_briefly(self.loop) self.assertTrue(status['finalized']) def test_asyncgen_finalization_by_gc_in_other_thread(self): # Python issue 34769: If garbage collector runs in another # thread, async generators will not finalize in debug # mode. 
self.loop._process_events = mock.Mock() self.loop._write_to_self = mock.Mock() self.loop.set_debug(True) with support.disable_gc(): status = self.loop.run_until_complete(self.leave_unfinalized_asyncgen()) while not status['stopped']: test_utils.run_briefly(self.loop) self.assertTrue(status['started']) self.assertTrue(status['stopped']) self.assertFalse(status['finalized']) self.loop.run_until_complete( self.loop.run_in_executor(None, support.gc_collect)) test_utils.run_briefly(self.loop) self.assertTrue(status['finalized']) class MyProto(asyncio.Protocol): done = None def __init__(self, create_future=False): self.state = 'INITIAL' self.nbytes = 0 if create_future: self.done = asyncio.Future() def connection_made(self, transport): self.transport = transport assert self.state == 'INITIAL', self.state self.state = 'CONNECTED' transport.write(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n') def data_received(self, data): assert self.state == 'CONNECTED', self.state self.nbytes += len(data) def eof_received(self): assert self.state == 'CONNECTED', self.state self.state = 'EOF' def connection_lost(self, exc): assert self.state in ('CONNECTED', 'EOF'), self.state self.state = 'CLOSED' if self.done: self.done.set_result(None) class MyDatagramProto(asyncio.DatagramProtocol): done = None def __init__(self, create_future=False, loop=None): self.state = 'INITIAL' self.nbytes = 0 if create_future: self.done = asyncio.Future(loop=loop) def connection_made(self, transport): self.transport = transport assert self.state == 'INITIAL', self.state self.state = 'INITIALIZED' def datagram_received(self, data, addr): assert self.state == 'INITIALIZED', self.state self.nbytes += len(data) def error_received(self, exc): assert self.state == 'INITIALIZED', self.state def connection_lost(self, exc): assert self.state == 'INITIALIZED', self.state self.state = 'CLOSED' if self.done: self.done.set_result(None) class BaseEventLoopWithSelectorTests(test_utils.TestCase): def setUp(self): 
super().setUp() self.loop = asyncio.new_event_loop() self.set_event_loop(self.loop) @mock.patch('socket.getnameinfo') def test_getnameinfo(self, m_gai): m_gai.side_effect = lambda *args: 42 r = self.loop.run_until_complete(self.loop.getnameinfo(('abc', 123))) self.assertEqual(r, 42) @patch_socket def test_create_connection_multiple_errors(self, m_socket): class MyProto(asyncio.Protocol): pass @asyncio.coroutine def getaddrinfo(*args, **kw): yield from [] return [(2, 1, 6, '', ('107.6.106.82', 80)), (2, 1, 6, '', ('107.6.106.82', 80))] def getaddrinfo_task(*args, **kwds): return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop) idx = -1 errors = ['err1', 'err2'] def _socket(*args, **kw): nonlocal idx, errors idx += 1 raise OSError(errors[idx]) m_socket.socket = _socket self.loop.getaddrinfo = getaddrinfo_task coro = self.loop.create_connection(MyProto, 'example.com', 80) with self.assertRaises(OSError) as cm: self.loop.run_until_complete(coro) self.assertEqual(str(cm.exception), 'Multiple exceptions: err1, err2') @patch_socket def test_create_connection_timeout(self, m_socket): # Ensure that the socket is closed on timeout sock = mock.Mock() m_socket.socket.return_value = sock def getaddrinfo(*args, **kw): fut = asyncio.Future(loop=self.loop) addr = (socket.AF_INET, socket.SOCK_STREAM, 0, '', ('127.0.0.1', 80)) fut.set_result([addr]) return fut self.loop.getaddrinfo = getaddrinfo with mock.patch.object(self.loop, 'sock_connect', side_effect=asyncio.TimeoutError): coro = self.loop.create_connection(MyProto, '127.0.0.1', 80) with self.assertRaises(asyncio.TimeoutError): self.loop.run_until_complete(coro) self.assertTrue(sock.close.called) def test_create_connection_host_port_sock(self): coro = self.loop.create_connection( MyProto, 'example.com', 80, sock=object()) self.assertRaises(ValueError, self.loop.run_until_complete, coro) def test_create_connection_wrong_sock(self): sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) with sock: coro = 
self.loop.create_connection(MyProto, sock=sock) with self.assertRaisesRegex(ValueError, 'A Stream Socket was expected'): self.loop.run_until_complete(coro) def test_create_server_wrong_sock(self): sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) with sock: coro = self.loop.create_server(MyProto, sock=sock) with self.assertRaisesRegex(ValueError, 'A Stream Socket was expected'): self.loop.run_until_complete(coro) def test_create_server_ssl_timeout_for_plain_socket(self): coro = self.loop.create_server( MyProto, 'example.com', 80, ssl_handshake_timeout=1) with self.assertRaisesRegex( ValueError, 'ssl_handshake_timeout is only meaningful with ssl'): self.loop.run_until_complete(coro) @unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'), 'no socket.SOCK_NONBLOCK (linux only)') def test_create_server_stream_bittype(self): sock = socket.socket( socket.AF_INET, socket.SOCK_STREAM | socket.SOCK_NONBLOCK) with sock: coro = self.loop.create_server(lambda: None, sock=sock) srv = self.loop.run_until_complete(coro) srv.close() self.loop.run_until_complete(srv.wait_closed()) @unittest.skipUnless(hasattr(socket, 'AF_INET6'), 'no IPv6 support') def test_create_server_ipv6(self): async def main(): srv = await asyncio.start_server( lambda: None, '::1', 0, loop=self.loop) try: self.assertGreater(len(srv.sockets), 0) finally: srv.close() await srv.wait_closed() try: self.loop.run_until_complete(main()) except OSError as ex: if (hasattr(errno, 'EADDRNOTAVAIL') and ex.errno == errno.EADDRNOTAVAIL): self.skipTest('failed to bind to ::1') else: raise def test_create_datagram_endpoint_wrong_sock(self): sock = socket.socket(socket.AF_INET) with sock: coro = self.loop.create_datagram_endpoint(MyProto, sock=sock) with self.assertRaisesRegex(ValueError, 'A UDP Socket was expected'): self.loop.run_until_complete(coro) def test_create_connection_no_host_port_sock(self): coro = self.loop.create_connection(MyProto) self.assertRaises(ValueError, self.loop.run_until_complete, coro) def 
test_create_connection_no_getaddrinfo(self): @asyncio.coroutine def getaddrinfo(*args, **kw): yield from [] def getaddrinfo_task(*args, **kwds): return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop) self.loop.getaddrinfo = getaddrinfo_task coro = self.loop.create_connection(MyProto, 'example.com', 80) self.assertRaises( OSError, self.loop.run_until_complete, coro) def test_create_connection_connect_err(self): async def getaddrinfo(*args, **kw): return [(2, 1, 6, '', ('107.6.106.82', 80))] def getaddrinfo_task(*args, **kwds): return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop) self.loop.getaddrinfo = getaddrinfo_task self.loop.sock_connect = mock.Mock() self.loop.sock_connect.side_effect = OSError coro = self.loop.create_connection(MyProto, 'example.com', 80) self.assertRaises( OSError, self.loop.run_until_complete, coro) def test_create_connection_multiple(self): @asyncio.coroutine def getaddrinfo(*args, **kw): return [(2, 1, 6, '', ('0.0.0.1', 80)), (2, 1, 6, '', ('0.0.0.2', 80))] def getaddrinfo_task(*args, **kwds): return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop) self.loop.getaddrinfo = getaddrinfo_task self.loop.sock_connect = mock.Mock() self.loop.sock_connect.side_effect = OSError coro = self.loop.create_connection( MyProto, 'example.com', 80, family=socket.AF_INET) with self.assertRaises(OSError): self.loop.run_until_complete(coro) @patch_socket def test_create_connection_multiple_errors_local_addr(self, m_socket): def bind(addr): if addr[0] == '0.0.0.1': err = OSError('Err') err.strerror = 'Err' raise err m_socket.socket.return_value.bind = bind @asyncio.coroutine def getaddrinfo(*args, **kw): return [(2, 1, 6, '', ('0.0.0.1', 80)), (2, 1, 6, '', ('0.0.0.2', 80))] def getaddrinfo_task(*args, **kwds): return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop) self.loop.getaddrinfo = getaddrinfo_task self.loop.sock_connect = mock.Mock() self.loop.sock_connect.side_effect = OSError('Err2') coro = 
self.loop.create_connection( MyProto, 'example.com', 80, family=socket.AF_INET, local_addr=(None, 8080)) with self.assertRaises(OSError) as cm: self.loop.run_until_complete(coro) self.assertTrue(str(cm.exception).startswith('Multiple exceptions: ')) self.assertTrue(m_socket.socket.return_value.close.called) def _test_create_connection_ip_addr(self, m_socket, allow_inet_pton): # Test the fallback code, even if this system has inet_pton. if not allow_inet_pton: del m_socket.inet_pton m_socket.getaddrinfo = socket.getaddrinfo sock = m_socket.socket.return_value self.loop._add_reader = mock.Mock() self.loop._add_reader._is_coroutine = False self.loop._add_writer = mock.Mock() self.loop._add_writer._is_coroutine = False coro = self.loop.create_connection(asyncio.Protocol, '1.2.3.4', 80) t, p = self.loop.run_until_complete(coro) try: sock.connect.assert_called_with(('1.2.3.4', 80)) _, kwargs = m_socket.socket.call_args self.assertEqual(kwargs['family'], m_socket.AF_INET) self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM) finally: t.close() test_utils.run_briefly(self.loop) # allow transport to close sock.family = socket.AF_INET6 coro = self.loop.create_connection(asyncio.Protocol, '::1', 80) t, p = self.loop.run_until_complete(coro) try: # Without inet_pton we use getaddrinfo, which transforms ('::1', 80) # to ('::1', 80, 0, 0). The last 0s are flow info, scope id. 
[address] = sock.connect.call_args[0] host, port = address[:2] self.assertRegex(host, r'::(0\.)*1') self.assertEqual(port, 80) _, kwargs = m_socket.socket.call_args self.assertEqual(kwargs['family'], m_socket.AF_INET6) self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM) finally: t.close() test_utils.run_briefly(self.loop) # allow transport to close @patch_socket def test_create_connection_ip_addr(self, m_socket): self._test_create_connection_ip_addr(m_socket, True) @patch_socket def test_create_connection_no_inet_pton(self, m_socket): self._test_create_connection_ip_addr(m_socket, False) @patch_socket def test_create_connection_service_name(self, m_socket): m_socket.getaddrinfo = socket.getaddrinfo sock = m_socket.socket.return_value self.loop._add_reader = mock.Mock() self.loop._add_reader._is_coroutine = False self.loop._add_writer = mock.Mock() self.loop._add_writer._is_coroutine = False for service, port in ('http', 80), (b'http', 80): coro = self.loop.create_connection(asyncio.Protocol, '127.0.0.1', service) t, p = self.loop.run_until_complete(coro) try: sock.connect.assert_called_with(('127.0.0.1', port)) _, kwargs = m_socket.socket.call_args self.assertEqual(kwargs['family'], m_socket.AF_INET) self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM) finally: t.close() test_utils.run_briefly(self.loop) # allow transport to close for service in 'nonsense', b'nonsense': coro = self.loop.create_connection(asyncio.Protocol, '127.0.0.1', service) with self.assertRaises(OSError): self.loop.run_until_complete(coro) def test_create_connection_no_local_addr(self): @asyncio.coroutine def getaddrinfo(host, *args, **kw): if host == 'example.com': return [(2, 1, 6, '', ('107.6.106.82', 80)), (2, 1, 6, '', ('107.6.106.82', 80))] else: return [] def getaddrinfo_task(*args, **kwds): return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop) self.loop.getaddrinfo = getaddrinfo_task coro = self.loop.create_connection( MyProto, 'example.com', 80, family=socket.AF_INET, 
local_addr=(None, 8080)) self.assertRaises( OSError, self.loop.run_until_complete, coro) @patch_socket def test_create_connection_bluetooth(self, m_socket): # See http://bugs.python.org/issue27136, fallback to getaddrinfo when # we can't recognize an address is resolved, e.g. a Bluetooth address. addr = ('00:01:02:03:04:05', 1) def getaddrinfo(host, port, *args, **kw): assert (host, port) == addr return [(999, 1, 999, '', (addr, 1))] m_socket.getaddrinfo = getaddrinfo sock = m_socket.socket() coro = self.loop.sock_connect(sock, addr) self.loop.run_until_complete(coro) def test_create_connection_ssl_server_hostname_default(self): self.loop.getaddrinfo = mock.Mock() def mock_getaddrinfo(*args, **kwds): f = asyncio.Future(loop=self.loop) f.set_result([(socket.AF_INET, socket.SOCK_STREAM, socket.SOL_TCP, '', ('1.2.3.4', 80))]) return f self.loop.getaddrinfo.side_effect = mock_getaddrinfo self.loop.sock_connect = mock.Mock() self.loop.sock_connect.return_value = self.loop.create_future() self.loop.sock_connect.return_value.set_result(None) self.loop._make_ssl_transport = mock.Mock() class _SelectorTransportMock: _sock = None def get_extra_info(self, key): return mock.Mock() def close(self): self._sock.close() def mock_make_ssl_transport(sock, protocol, sslcontext, waiter, **kwds): waiter.set_result(None) transport = _SelectorTransportMock() transport._sock = sock return transport self.loop._make_ssl_transport.side_effect = mock_make_ssl_transport ANY = mock.ANY handshake_timeout = object() # First try the default server_hostname. self.loop._make_ssl_transport.reset_mock() coro = self.loop.create_connection( MyProto, 'python.org', 80, ssl=True, ssl_handshake_timeout=handshake_timeout) transport, _ = self.loop.run_until_complete(coro) transport.close() self.loop._make_ssl_transport.assert_called_with( ANY, ANY, ANY, ANY, server_side=False, server_hostname='python.org', ssl_handshake_timeout=handshake_timeout) # Next try an explicit server_hostname. 
self.loop._make_ssl_transport.reset_mock() coro = self.loop.create_connection( MyProto, 'python.org', 80, ssl=True, server_hostname='perl.com', ssl_handshake_timeout=handshake_timeout) transport, _ = self.loop.run_until_complete(coro) transport.close() self.loop._make_ssl_transport.assert_called_with( ANY, ANY, ANY, ANY, server_side=False, server_hostname='perl.com', ssl_handshake_timeout=handshake_timeout) # Finally try an explicit empty server_hostname. self.loop._make_ssl_transport.reset_mock() coro = self.loop.create_connection( MyProto, 'python.org', 80, ssl=True, server_hostname='', ssl_handshake_timeout=handshake_timeout) transport, _ = self.loop.run_until_complete(coro) transport.close() self.loop._make_ssl_transport.assert_called_with( ANY, ANY, ANY, ANY, server_side=False, server_hostname='', ssl_handshake_timeout=handshake_timeout) def test_create_connection_no_ssl_server_hostname_errors(self): # When not using ssl, server_hostname must be None. coro = self.loop.create_connection(MyProto, 'python.org', 80, server_hostname='') self.assertRaises(ValueError, self.loop.run_until_complete, coro) coro = self.loop.create_connection(MyProto, 'python.org', 80, server_hostname='python.org') self.assertRaises(ValueError, self.loop.run_until_complete, coro) def test_create_connection_ssl_server_hostname_errors(self): # When using ssl, server_hostname may be None if host is non-empty. 
coro = self.loop.create_connection(MyProto, '', 80, ssl=True) self.assertRaises(ValueError, self.loop.run_until_complete, coro) coro = self.loop.create_connection(MyProto, None, 80, ssl=True) self.assertRaises(ValueError, self.loop.run_until_complete, coro) sock = socket.socket() coro = self.loop.create_connection(MyProto, None, None, ssl=True, sock=sock) self.addCleanup(sock.close) self.assertRaises(ValueError, self.loop.run_until_complete, coro) def test_create_connection_ssl_timeout_for_plain_socket(self): coro = self.loop.create_connection( MyProto, 'example.com', 80, ssl_handshake_timeout=1) with self.assertRaisesRegex( ValueError, 'ssl_handshake_timeout is only meaningful with ssl'): self.loop.run_until_complete(coro) def test_create_server_empty_host(self): # if host is empty string use None instead host = object() @asyncio.coroutine def getaddrinfo(*args, **kw): nonlocal host host = args[0] yield from [] def getaddrinfo_task(*args, **kwds): return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop) self.loop.getaddrinfo = getaddrinfo_task fut = self.loop.create_server(MyProto, '', 0) self.assertRaises(OSError, self.loop.run_until_complete, fut) self.assertIsNone(host) def test_create_server_host_port_sock(self): fut = self.loop.create_server( MyProto, '0.0.0.0', 0, sock=object()) self.assertRaises(ValueError, self.loop.run_until_complete, fut) def test_create_server_no_host_port_sock(self): fut = self.loop.create_server(MyProto) self.assertRaises(ValueError, self.loop.run_until_complete, fut) def test_create_server_no_getaddrinfo(self): getaddrinfo = self.loop.getaddrinfo = mock.Mock() getaddrinfo.return_value = self.loop.create_future() getaddrinfo.return_value.set_result(None) f = self.loop.create_server(MyProto, 'python.org', 0) self.assertRaises(OSError, self.loop.run_until_complete, f) @patch_socket def test_create_server_nosoreuseport(self, m_socket): m_socket.getaddrinfo = socket.getaddrinfo del m_socket.SO_REUSEPORT m_socket.socket.return_value 
= mock.Mock() f = self.loop.create_server( MyProto, '0.0.0.0', 0, reuse_port=True) self.assertRaises(ValueError, self.loop.run_until_complete, f) @patch_socket def test_create_server_soreuseport_only_defined(self, m_socket): m_socket.getaddrinfo = socket.getaddrinfo m_socket.socket.return_value = mock.Mock() m_socket.SO_REUSEPORT = -1 f = self.loop.create_server( MyProto, '0.0.0.0', 0, reuse_port=True) self.assertRaises(ValueError, self.loop.run_until_complete, f) @patch_socket def test_create_server_cant_bind(self, m_socket): class Err(OSError): strerror = 'error' m_socket.getaddrinfo.return_value = [ (2, 1, 6, '', ('127.0.0.1', 10100))] m_socket.getaddrinfo._is_coroutine = False m_sock = m_socket.socket.return_value = mock.Mock() m_sock.bind.side_effect = Err fut = self.loop.create_server(MyProto, '0.0.0.0', 0) self.assertRaises(OSError, self.loop.run_until_complete, fut) self.assertTrue(m_sock.close.called) @patch_socket def test_create_datagram_endpoint_no_addrinfo(self, m_socket): m_socket.getaddrinfo.return_value = [] m_socket.getaddrinfo._is_coroutine = False coro = self.loop.create_datagram_endpoint( MyDatagramProto, local_addr=('localhost', 0)) self.assertRaises( OSError, self.loop.run_until_complete, coro) def test_create_datagram_endpoint_addr_error(self): coro = self.loop.create_datagram_endpoint( MyDatagramProto, local_addr='localhost') self.assertRaises( AssertionError, self.loop.run_until_complete, coro) coro = self.loop.create_datagram_endpoint( MyDatagramProto, local_addr=('localhost', 1, 2, 3)) self.assertRaises( AssertionError, self.loop.run_until_complete, coro) def test_create_datagram_endpoint_connect_err(self): self.loop.sock_connect = mock.Mock() self.loop.sock_connect.side_effect = OSError coro = self.loop.create_datagram_endpoint( asyncio.DatagramProtocol, remote_addr=('127.0.0.1', 0)) self.assertRaises( OSError, self.loop.run_until_complete, coro) @patch_socket def test_create_datagram_endpoint_socket_err(self, m_socket): 
m_socket.getaddrinfo = socket.getaddrinfo m_socket.socket.side_effect = OSError coro = self.loop.create_datagram_endpoint( asyncio.DatagramProtocol, family=socket.AF_INET) self.assertRaises( OSError, self.loop.run_until_complete, coro) coro = self.loop.create_datagram_endpoint( asyncio.DatagramProtocol, local_addr=('127.0.0.1', 0)) self.assertRaises( OSError, self.loop.run_until_complete, coro) @unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 not supported or enabled') def test_create_datagram_endpoint_no_matching_family(self): coro = self.loop.create_datagram_endpoint( asyncio.DatagramProtocol, remote_addr=('127.0.0.1', 0), local_addr=('::1', 0)) self.assertRaises( ValueError, self.loop.run_until_complete, coro) @patch_socket def test_create_datagram_endpoint_setblk_err(self, m_socket): m_socket.socket.return_value.setblocking.side_effect = OSError coro = self.loop.create_datagram_endpoint( asyncio.DatagramProtocol, family=socket.AF_INET) self.assertRaises( OSError, self.loop.run_until_complete, coro) self.assertTrue( m_socket.socket.return_value.close.called) def test_create_datagram_endpoint_noaddr_nofamily(self): coro = self.loop.create_datagram_endpoint( asyncio.DatagramProtocol) self.assertRaises(ValueError, self.loop.run_until_complete, coro) @patch_socket def test_create_datagram_endpoint_cant_bind(self, m_socket): class Err(OSError): pass m_socket.getaddrinfo = socket.getaddrinfo m_sock = m_socket.socket.return_value = mock.Mock() m_sock.bind.side_effect = Err fut = self.loop.create_datagram_endpoint( MyDatagramProto, local_addr=('127.0.0.1', 0), family=socket.AF_INET) self.assertRaises(Err, self.loop.run_until_complete, fut) self.assertTrue(m_sock.close.called) def test_create_datagram_endpoint_sock(self): sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) sock.bind(('127.0.0.1', 0)) fut = self.loop.create_datagram_endpoint( lambda: MyDatagramProto(create_future=True, loop=self.loop), sock=sock) transport, protocol = self.loop.run_until_complete(fut) 
transport.close() self.loop.run_until_complete(protocol.done) self.assertEqual('CLOSED', protocol.state) @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets') def test_create_datagram_endpoint_sock_unix(self): fut = self.loop.create_datagram_endpoint( lambda: MyDatagramProto(create_future=True, loop=self.loop), family=socket.AF_UNIX) transport, protocol = self.loop.run_until_complete(fut) assert transport._sock.family == socket.AF_UNIX transport.close() self.loop.run_until_complete(protocol.done) self.assertEqual('CLOSED', protocol.state) def test_create_datagram_endpoint_sock_sockopts(self): class FakeSock: type = socket.SOCK_DGRAM fut = self.loop.create_datagram_endpoint( MyDatagramProto, local_addr=('127.0.0.1', 0), sock=FakeSock()) self.assertRaises(ValueError, self.loop.run_until_complete, fut) fut = self.loop.create_datagram_endpoint( MyDatagramProto, remote_addr=('127.0.0.1', 0), sock=FakeSock()) self.assertRaises(ValueError, self.loop.run_until_complete, fut) fut = self.loop.create_datagram_endpoint( MyDatagramProto, family=1, sock=FakeSock()) self.assertRaises(ValueError, self.loop.run_until_complete, fut) fut = self.loop.create_datagram_endpoint( MyDatagramProto, proto=1, sock=FakeSock()) self.assertRaises(ValueError, self.loop.run_until_complete, fut) fut = self.loop.create_datagram_endpoint( MyDatagramProto, flags=1, sock=FakeSock()) self.assertRaises(ValueError, self.loop.run_until_complete, fut) fut = self.loop.create_datagram_endpoint( MyDatagramProto, reuse_address=True, sock=FakeSock()) self.assertRaises(ValueError, self.loop.run_until_complete, fut) fut = self.loop.create_datagram_endpoint( MyDatagramProto, reuse_port=True, sock=FakeSock()) self.assertRaises(ValueError, self.loop.run_until_complete, fut) fut = self.loop.create_datagram_endpoint( MyDatagramProto, allow_broadcast=True, sock=FakeSock()) self.assertRaises(ValueError, self.loop.run_until_complete, fut) def test_create_datagram_endpoint_sockopts(self): # Socket options 
should not be applied unless asked for. # SO_REUSEADDR defaults to on for UNIX. # SO_REUSEPORT is not available on all platforms. coro = self.loop.create_datagram_endpoint( lambda: MyDatagramProto(create_future=True, loop=self.loop), local_addr=('127.0.0.1', 0)) transport, protocol = self.loop.run_until_complete(coro) sock = transport.get_extra_info('socket') reuse_address_default_on = ( os.name == 'posix' and sys.platform != 'cygwin') reuseport_supported = hasattr(socket, 'SO_REUSEPORT') if reuse_address_default_on: self.assertTrue( sock.getsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR)) else: self.assertFalse( sock.getsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR)) if reuseport_supported: self.assertFalse( sock.getsockopt( socket.SOL_SOCKET, socket.SO_REUSEPORT)) self.assertFalse( sock.getsockopt( socket.SOL_SOCKET, socket.SO_BROADCAST)) transport.close() self.loop.run_until_complete(protocol.done) self.assertEqual('CLOSED', protocol.state) coro = self.loop.create_datagram_endpoint( lambda: MyDatagramProto(create_future=True, loop=self.loop), local_addr=('127.0.0.1', 0), reuse_address=True, reuse_port=reuseport_supported, allow_broadcast=True) transport, protocol = self.loop.run_until_complete(coro) sock = transport.get_extra_info('socket') self.assertTrue( sock.getsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR)) if reuseport_supported: self.assertTrue( sock.getsockopt( socket.SOL_SOCKET, socket.SO_REUSEPORT)) self.assertTrue( sock.getsockopt( socket.SOL_SOCKET, socket.SO_BROADCAST)) transport.close() self.loop.run_until_complete(protocol.done) self.assertEqual('CLOSED', protocol.state) @patch_socket def test_create_datagram_endpoint_nosoreuseport(self, m_socket): del m_socket.SO_REUSEPORT m_socket.socket.return_value = mock.Mock() coro = self.loop.create_datagram_endpoint( lambda: MyDatagramProto(loop=self.loop), local_addr=('127.0.0.1', 0), reuse_address=False, reuse_port=True) self.assertRaises(ValueError, self.loop.run_until_complete, coro) @patch_socket 
def test_create_datagram_endpoint_ip_addr(self, m_socket): def getaddrinfo(*args, **kw): self.fail('should not have called getaddrinfo') m_socket.getaddrinfo = getaddrinfo m_socket.socket.return_value.bind = bind = mock.Mock() self.loop._add_reader = mock.Mock() self.loop._add_reader._is_coroutine = False reuseport_supported = hasattr(socket, 'SO_REUSEPORT') coro = self.loop.create_datagram_endpoint( lambda: MyDatagramProto(loop=self.loop), local_addr=('1.2.3.4', 0), reuse_address=False, reuse_port=reuseport_supported) t, p = self.loop.run_until_complete(coro) try: bind.assert_called_with(('1.2.3.4', 0)) m_socket.socket.assert_called_with(family=m_socket.AF_INET, proto=m_socket.IPPROTO_UDP, type=m_socket.SOCK_DGRAM) finally: t.close() test_utils.run_briefly(self.loop) # allow transport to close def test_accept_connection_retry(self): sock = mock.Mock() sock.accept.side_effect = BlockingIOError() self.loop._accept_connection(MyProto, sock) self.assertFalse(sock.close.called) @mock.patch('asyncio.base_events.logger') def test_accept_connection_exception(self, m_log): sock = mock.Mock() sock.fileno.return_value = 10 sock.accept.side_effect = OSError(errno.EMFILE, 'Too many open files') self.loop._remove_reader = mock.Mock() self.loop.call_later = mock.Mock() self.loop._accept_connection(MyProto, sock) self.assertTrue(m_log.error.called) self.assertFalse(sock.close.called) self.loop._remove_reader.assert_called_with(10) self.loop.call_later.assert_called_with( constants.ACCEPT_RETRY_DELAY, # self.loop._start_serving mock.ANY, MyProto, sock, None, None, mock.ANY, mock.ANY) def test_call_coroutine(self): @asyncio.coroutine def simple_coroutine(): pass self.loop.set_debug(True) coro_func = simple_coroutine coro_obj = coro_func() self.addCleanup(coro_obj.close) for func in (coro_func, coro_obj): with self.assertRaises(TypeError): self.loop.call_soon(func) with self.assertRaises(TypeError): self.loop.call_soon_threadsafe(func) with self.assertRaises(TypeError): 
self.loop.call_later(60, func) with self.assertRaises(TypeError): self.loop.call_at(self.loop.time() + 60, func) with self.assertRaises(TypeError): self.loop.run_until_complete( self.loop.run_in_executor(None, func)) @mock.patch('asyncio.base_events.logger') def test_log_slow_callbacks(self, m_logger): def stop_loop_cb(loop): loop.stop() @asyncio.coroutine def stop_loop_coro(loop): yield from () loop.stop() asyncio.set_event_loop(self.loop) self.loop.set_debug(True) self.loop.slow_callback_duration = 0.0 # slow callback self.loop.call_soon(stop_loop_cb, self.loop) self.loop.run_forever() fmt, *args = m_logger.warning.call_args[0] self.assertRegex(fmt % tuple(args), "^Executing <Handle.*stop_loop_cb.*> " "took .* seconds$") # slow task asyncio.ensure_future(stop_loop_coro(self.loop), loop=self.loop) self.loop.run_forever() fmt, *args = m_logger.warning.call_args[0] self.assertRegex(fmt % tuple(args), "^Executing <Task.*stop_loop_coro.*> " "took .* seconds$") class RunningLoopTests(unittest.TestCase): def test_running_loop_within_a_loop(self): @asyncio.coroutine def runner(loop): loop.run_forever() loop = asyncio.new_event_loop() outer_loop = asyncio.new_event_loop() try: with self.assertRaisesRegex(RuntimeError, 'while another loop is running'): outer_loop.run_until_complete(runner(loop)) finally: loop.close() outer_loop.close() class BaseLoopSockSendfileTests(test_utils.TestCase): DATA = b"12345abcde" * 16 * 1024 # 160 KiB class MyProto(asyncio.Protocol): def __init__(self, loop): self.started = False self.closed = False self.data = bytearray() self.fut = loop.create_future() self.transport = None def connection_made(self, transport): self.started = True self.transport = transport def data_received(self, data): self.data.extend(data) def connection_lost(self, exc): self.closed = True self.fut.set_result(None) self.transport = None async def wait_closed(self): await self.fut @classmethod def setUpClass(cls): cls.__old_bufsize = 
constants.SENDFILE_FALLBACK_READBUFFER_SIZE constants.SENDFILE_FALLBACK_READBUFFER_SIZE = 1024 * 16 with open(support.TESTFN, 'wb') as fp: fp.write(cls.DATA) super().setUpClass() @classmethod def tearDownClass(cls): constants.SENDFILE_FALLBACK_READBUFFER_SIZE = cls.__old_bufsize support.unlink(support.TESTFN) super().tearDownClass() def setUp(self): from asyncio.selector_events import BaseSelectorEventLoop # BaseSelectorEventLoop() has no native implementation self.loop = BaseSelectorEventLoop() self.set_event_loop(self.loop) self.file = open(support.TESTFN, 'rb') self.addCleanup(self.file.close) super().setUp() def make_socket(self, blocking=False): sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.setblocking(blocking) self.addCleanup(sock.close) return sock def run_loop(self, coro): return self.loop.run_until_complete(coro) def prepare(self): sock = self.make_socket() proto = self.MyProto(self.loop) server = self.run_loop(self.loop.create_server( lambda: proto, support.HOST, 0, family=socket.AF_INET)) addr = server.sockets[0].getsockname() for _ in range(10): try: self.run_loop(self.loop.sock_connect(sock, addr)) except OSError: self.run_loop(asyncio.sleep(0.5)) continue else: break else: # One last try, so we get the exception self.run_loop(self.loop.sock_connect(sock, addr)) def cleanup(): server.close() self.run_loop(server.wait_closed()) sock.close() if proto.transport is not None: proto.transport.close() self.run_loop(proto.wait_closed()) self.addCleanup(cleanup) return sock, proto def test__sock_sendfile_native_failure(self): sock, proto = self.prepare() with self.assertRaisesRegex(events.SendfileNotAvailableError, "sendfile is not available"): self.run_loop(self.loop._sock_sendfile_native(sock, self.file, 0, None)) self.assertEqual(proto.data, b'') self.assertEqual(self.file.tell(), 0) def test_sock_sendfile_no_fallback(self): sock, proto = self.prepare() with self.assertRaisesRegex(events.SendfileNotAvailableError, "sendfile is not 
available"): self.run_loop(self.loop.sock_sendfile(sock, self.file, fallback=False)) self.assertEqual(self.file.tell(), 0) self.assertEqual(proto.data, b'') def test_sock_sendfile_fallback(self): sock, proto = self.prepare() ret = self.run_loop(self.loop.sock_sendfile(sock, self.file)) sock.close() self.run_loop(proto.wait_closed()) self.assertEqual(ret, len(self.DATA)) self.assertEqual(self.file.tell(), len(self.DATA)) self.assertEqual(proto.data, self.DATA) def test_sock_sendfile_fallback_offset_and_count(self): sock, proto = self.prepare() ret = self.run_loop(self.loop.sock_sendfile(sock, self.file, 1000, 2000)) sock.close() self.run_loop(proto.wait_closed()) self.assertEqual(ret, 2000) self.assertEqual(self.file.tell(), 3000) self.assertEqual(proto.data, self.DATA[1000:3000]) def test_blocking_socket(self): self.loop.set_debug(True) sock = self.make_socket(blocking=True) with self.assertRaisesRegex(ValueError, "must be non-blocking"): self.run_loop(self.loop.sock_sendfile(sock, self.file)) def test_nonbinary_file(self): sock = self.make_socket() with open(support.TESTFN, 'r') as f: with self.assertRaisesRegex(ValueError, "binary mode"): self.run_loop(self.loop.sock_sendfile(sock, f)) def test_nonstream_socket(self): sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) sock.setblocking(False) self.addCleanup(sock.close) with self.assertRaisesRegex(ValueError, "only SOCK_STREAM type"): self.run_loop(self.loop.sock_sendfile(sock, self.file)) def test_notint_count(self): sock = self.make_socket() with self.assertRaisesRegex(TypeError, "count must be a positive integer"): self.run_loop(self.loop.sock_sendfile(sock, self.file, 0, 'count')) def test_negative_count(self): sock = self.make_socket() with self.assertRaisesRegex(ValueError, "count must be a positive integer"): self.run_loop(self.loop.sock_sendfile(sock, self.file, 0, -1)) def test_notint_offset(self): sock = self.make_socket() with self.assertRaisesRegex(TypeError, "offset must be a non-negative 
integer"): self.run_loop(self.loop.sock_sendfile(sock, self.file, 'offset')) def test_negative_offset(self): sock = self.make_socket() with self.assertRaisesRegex(ValueError, "offset must be a non-negative integer"): self.run_loop(self.loop.sock_sendfile(sock, self.file, -1)) class TestSelectorUtils(test_utils.TestCase): def check_set_nodelay(self, sock): opt = sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY) self.assertFalse(opt) base_events._set_nodelay(sock) opt = sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY) self.assertTrue(opt) @unittest.skipUnless(hasattr(socket, 'TCP_NODELAY'), 'need socket.TCP_NODELAY') def test_set_nodelay(self): sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM, proto=socket.IPPROTO_TCP) with sock: self.check_set_nodelay(sock) sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM, proto=socket.IPPROTO_TCP) with sock: sock.setblocking(False) self.check_set_nodelay(sock) if __name__ == '__main__': unittest.main()
codeparrot/github-code-clean
# -*- coding: utf-8 -*- # Resource object code # # Created: Thu May 2 15:49:03 2013 # by: The Resource Compiler for PyQt (Qt v5.0.2) # # WARNING! All changes made in this file will be lost! from PyQt5 import QtCore qt_resource_data = b"\ \x00\x00\x36\xe2\ \x89\ \x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\ \x00\x00\x64\x00\x00\x00\x64\x08\x06\x00\x00\x00\x70\xe2\x95\x54\ \x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\ \x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\xa7\x93\x00\ \x00\x00\x09\x70\x48\x59\x73\x00\x00\x0e\xc4\x00\x00\x0e\xc4\x01\ \x95\x2b\x0e\x1b\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xd9\x03\x03\ \x0e\x1c\x24\x7c\x1a\xa6\xff\x00\x00\x20\x00\x49\x44\x41\x54\x78\ \xda\xed\x9d\x77\x98\x9d\x65\x9d\xf7\x3f\xf7\x53\x4e\x9f\x99\x33\ \x33\x99\x4c\x66\x12\x32\xa9\xa4\x03\x91\x90\x10\x6a\xe8\x48\xd9\ \x05\xa5\x88\xc8\xba\xeb\xcb\xb2\xae\x04\x51\x57\x5f\x57\x5d\x5d\ \x76\x5d\x45\x51\x56\x50\x71\x59\x75\xa5\x59\xc0\x12\x3a\x16\x88\ \x94\x00\x09\x29\xa4\x4d\xea\xb4\x4c\xa6\xcf\x9c\x39\xbd\x3d\xf5\ \x7e\xff\x38\xf3\x3c\x99\x41\x94\x34\x7d\xdf\xeb\xbd\xb8\xaf\x2b\ \x57\x32\xc9\xc9\x79\xca\xf7\xfe\xb5\xef\xaf\xdc\xf0\xee\x7a\x77\ \xbd\xbb\xde\x5d\xef\xae\x77\xd7\xbb\xeb\xdd\xf5\xee\x7a\x77\xfd\ \x7f\xbf\xc4\xb1\xfc\xe7\xba\xba\xba\xab\x0c\xc3\x08\x4e\x99\x32\ \xe5\x73\x2b\x57\xae\x3c\x51\x51\x14\xb6\x6d\xdb\x46\x6d\x6d\xed\ \xa1\x0b\xb8\x02\x09\x20\xe4\x1f\x5c\x4d\x55\xd5\x94\xa2\x28\xf9\ \x72\xb9\xac\xbf\xfa\xea\xab\xb3\xfe\x6f\xbf\x8c\xda\xda\xda\x9f\ \x5d\x77\xdd\x75\x57\x97\xcb\x65\x17\x40\x41\xe0\x48\x89\xf8\x23\ \x6f\xc9\x71\x1c\x02\x81\x00\xaf\xbf\xfe\xba\xb3\x60\xc1\x82\xc2\ \xc0\xc0\x40\x6c\xcb\x96\x2d\xd3\x2d\xcb\x4a\x1e\xed\x3d\x68\xc7\ \xf2\x00\x8b\x16\x2d\xfa\xe1\xcc\x99\x33\xeb\x37\x6f\xde\x4c\xa9\ \x54\xc2\xb6\x6d\xea\xeb\xeb\x69\x68\x68\x40\x08\x41\xbf\x95\x62\ \xda\x05\x45\xaa\xa2\x3a\x4e\x3a\x88\x93\xd1\x71\x73\x3a\x76\x4a\ \x43\x16\x35\x80\xb0\x94\xf2\x0f\xbe\x77\xda\xb4\x69\xa2\xb7\xb7\ 
\x57\xfe\x25\x40\xa8\xaa\xaa\xba\x60\xc5\x8a\x15\x6b\x74\x5d\x37\ \x1d\xc7\xa9\xe9\xef\xed\xd3\x6d\x0b\x84\x22\x31\x30\x09\xc5\x04\ \xb2\xa0\x4d\xd8\x4c\x42\x08\xf6\xee\xdd\xcb\xd2\xa5\x4b\xbd\x8d\ \x85\xa6\x69\xb1\x58\x2c\x86\xeb\xba\xa7\x85\x42\xa1\x6c\x6d\x6d\ \xed\xae\x81\x81\x81\xec\x9f\x1d\x90\x45\x8b\x16\xb1\x77\xef\xde\ \x90\xe3\x38\x28\x8a\xe2\x94\x4a\x25\x5c\xd7\xc5\x34\x4d\x6c\xdb\ \xc6\xb2\x2c\xca\xe5\x32\x42\x08\x8a\xdd\x09\xba\xba\xc3\x48\x69\ \x22\x30\x91\xaa\x8b\x40\x80\xa8\x3c\xf0\x38\x19\x95\x67\x9c\x71\ \x86\x04\xc8\x66\xb3\xb4\xb6\xb6\x8a\xbf\x94\x54\x94\xcb\xe5\xe6\ \xc9\x93\x27\x57\x9b\xa6\x89\x82\xe0\x60\xb8\x87\x85\x17\x2a\xd8\ \xc3\x41\xec\x44\x10\x67\x34\x88\x8c\x0a\xa4\xa9\x20\x2d\xa5\xf2\ \xd2\x34\x0d\x55\x55\x31\x4d\x13\x45\x51\x70\x5d\x17\xc3\x30\x30\ \x4d\x53\x5e\x75\xd5\x55\xbf\x11\x42\xf0\xda\x6b\xaf\x5d\x00\xfc\ \xfe\xcf\x0e\x48\x47\x47\xc7\x9c\xd3\x4e\x3b\xad\xad\xb6\xb6\x96\ \x42\xa1\x80\x69\x9a\x13\x00\xf1\x6e\x4e\x51\x14\xa6\x4c\x6d\x22\ \x1c\x0e\xa3\x08\x41\xab\x71\x80\xe5\x1f\x82\x40\x40\xc1\x1e\xd5\ \xb1\x7a\xa3\x98\x83\x01\x9c\xe1\x20\x42\xa9\x28\x05\x29\x25\xc1\ \x60\xf0\x2f\xab\xb3\x85\xf0\x37\x92\x86\x8a\x3b\x3f\x43\x20\x5e\ \x43\x20\x5e\x82\x13\x4b\x13\x3e\x3b\xf4\xe8\x24\xdc\xa2\x8a\xeb\ \xba\x48\x29\x31\x0c\x03\x21\x04\x52\x4a\x2c\xcb\xc2\x75\x5d\x61\ \x18\x06\x9a\xa6\x01\xb8\x7f\x11\x95\x55\x2e\x97\x1b\x1a\x1a\x1a\ \x08\x85\x42\xe4\xf3\x79\x0c\xc3\xf0\x6f\xce\x71\x1c\x1c\xc7\xf1\ \x6f\x34\x10\x08\x54\xc0\x11\x82\x5e\x7b\x84\xf3\x62\x4d\xb8\x46\ \x90\x25\x4b\x4e\x61\xe7\xe4\x57\x89\x08\x89\xa2\xe8\xa4\x5f\xae\ \xa2\xb0\x27\x8c\x94\x12\xd7\x75\xff\xec\x20\x44\xa3\xd1\xb3\xae\ \xb9\xe6\x9a\x75\xa5\x52\xa9\xf2\x42\x5d\x29\x43\xf5\x42\xb8\x29\ \x9d\x19\xbb\x4e\x61\x74\x97\xac\x48\xf0\x5b\xe4\xb4\x6d\xef\x2e\ \x66\xcc\x98\xe1\x03\x32\x5e\x42\xca\xe5\xb2\xbf\x31\xa5\x94\x94\ \xcb\xe5\x8f\x57\x55\x55\x5d\x57\x55\x55\xf5\x40\x7f\x7f\xff\xa6\ \x3f\xab\x0d\xb1\x6d\x1b\xc3\x30\x7c\x69\xf0\x6e\xc4\x71\x1c\x7f\ 
\xb7\x09\x21\x50\x55\x15\xcb\xb2\x90\xae\x4b\x7d\x3a\xc4\xc1\xa7\ \x15\x4e\x08\x2e\x26\x57\x3f\x05\x3b\x31\x17\xac\x30\x49\x3a\x29\ \x0c\x98\x38\x79\xc7\x07\xe4\xec\xb3\xcf\xfe\x1f\x29\x65\x20\x99\ \x4c\x86\x7b\x7b\x7b\xff\x36\x9b\xcd\xe6\x8f\x27\x20\x52\xca\xb0\ \xaa\xaa\x15\x89\x96\x92\x5d\x93\x77\x8a\xf3\xaf\xac\xc3\x95\x36\ \x76\x56\x43\x64\xc3\xd8\x59\x0d\x27\xab\x51\xde\x1f\x41\x3a\xc2\ \x97\xa6\x72\xb9\x8c\xa2\x54\x54\x97\x07\x88\x27\x21\x8e\xe3\x54\ \x9e\x57\x4a\xb9\x6a\xd5\xaa\xab\x9b\x9b\x9b\x79\xf0\xc1\x07\xd7\ \x03\x7f\x1e\x40\xa6\x4d\x9b\xb6\xb0\x54\x2a\xcd\xb7\x6d\x1b\x29\ \x25\x8e\xe3\x4c\x50\x59\x8e\xe3\x00\xf8\x2a\x4b\xd3\x34\xff\x33\ \x0b\x1b\x66\x13\x4d\x55\x91\x24\x41\x77\x7f\x37\xf1\xf7\x14\xd0\ \xcc\x00\xd1\x44\x9c\x48\xbd\x86\xac\x11\x48\x43\x01\x57\x80\xe0\ \x23\x00\xe1\x70\x98\xdd\xbb\x77\x7f\x14\x38\x26\x40\x9a\x9a\x9a\ \x18\x18\x18\x18\x0f\x88\x34\x4d\x13\xd3\x34\x31\x5d\x9b\x58\xb5\ \x0e\xb6\xc2\xf2\xc9\xef\xa7\xaa\x25\xc4\x4b\x9d\xbf\x62\xfa\xbc\ \x26\x86\x8c\x2e\xf2\x1d\x2a\x4e\xa9\x02\x80\xf7\x9c\xe3\xed\x86\ \xf7\x67\x4f\x32\xc6\xde\x87\xc8\x66\xb3\x44\xa3\x51\x14\x45\x91\ \x7f\x36\x95\x55\x5b\x5b\xbb\x66\xd9\xb2\x65\xf3\xd2\xe9\xb4\x6f\ \x2f\x1c\xa7\xb2\xb3\x3d\x2f\x2b\x18\x0c\x32\x66\xf0\x09\x06\x83\ \xb8\xae\x8b\x65\x59\xbe\xb1\x57\x51\xd8\x17\xee\xe0\xc2\x05\xd5\ \x40\xf1\x2d\x0a\x5d\x92\x5e\x57\x43\xb9\x2d\x02\x80\x65\x59\xc7\ \x45\x22\x06\x06\x06\x68\x6a\x6a\x9a\x04\x2c\x01\x1c\xc3\x30\x4e\ \x4e\x24\x12\x64\xb3\x59\x6c\x5c\xec\x11\x9b\x9d\xcf\x17\xb8\xf2\ \x7d\xf3\x79\xfa\xd5\x35\xcc\x6b\xbe\x82\xf7\xd4\xaf\xe4\xbf\xb6\ \x7f\x8a\x6c\x2a\x87\x2e\xc3\xbe\x9b\xeb\x01\x02\x50\x28\x14\x70\ \x1c\x07\xd7\x75\x29\x95\x4a\x9e\xed\xc0\xf3\x1c\x3d\x4d\xf1\x67\ \x03\x44\x4a\xa9\x14\x0a\x05\x1f\x08\xdb\xb6\xc9\xe5\x72\xd8\xb6\ \x4d\x3e\x9f\xf7\x75\x6b\xa9\x54\x42\x51\x14\xc2\xe1\x30\xb6\x6d\ \x63\xdb\xf6\xa1\x87\x91\x02\xc2\x15\xf1\xff\xc8\xc2\xbb\x91\x8a\ \x4b\xda\xed\x64\xe3\xd6\x2e\x92\x55\xeb\x18\x1d\xfb\x9c\xb7\x23\ 
\x8f\xd7\x2a\x95\x4a\x37\x9f\x7f\xfe\xf9\x77\xaa\xaa\x8a\x40\x90\ \x30\x33\x4c\x3f\xbd\x1a\x77\x38\x8c\x33\xda\x82\x48\xc3\x8f\xbe\ \xf3\x53\x14\x55\xf0\x1a\x5b\x78\x55\x6e\x26\xa0\xcd\x27\x9f\x6c\ \x27\x1a\x55\x91\x52\x7a\xb6\xc1\x07\xa4\x58\x2c\xfa\xef\x22\x9b\ \xcd\x12\x8b\xc5\x7c\x30\xc6\x49\x4b\x63\x2c\x16\x9b\x95\xcf\xe7\ \x3b\xff\x1c\x80\xf8\x2f\xd8\xb2\x2c\x6c\xdb\xf6\x5f\x9e\xa6\x69\ \x3e\x10\xaa\xaa\xfa\xde\x4b\xb1\x58\xf4\x8d\xbd\x27\x21\x58\x92\ \xd1\x5e\x83\xa9\xef\x99\xcb\x75\x3f\x5a\xca\xcf\x3e\xb2\x81\xa6\ \x93\xd2\xac\xed\xc9\xb2\x27\xfb\x2a\xf9\x9c\xe9\xab\x3e\xa0\xce\ \xbb\xfe\xf4\xe9\xd3\x93\x07\x0f\x1e\x74\x8f\xd2\x6e\x44\xbd\x0d\ \x53\x74\x4d\xb4\x73\xfa\xa9\x9d\x1f\xc1\x92\x83\x48\x1b\x44\x36\ \x86\x9d\x51\x71\x52\x3a\xf9\x2d\xd5\xa0\x48\x1c\x69\xfb\xb6\xc2\ \x93\x90\x54\x2a\x85\xaa\xaa\x38\x8e\xe3\xc5\x1f\x08\x21\xd0\x34\ \x0d\x45\x51\xc8\xe5\x72\x04\x83\x41\x22\x91\x08\xa6\x69\xca\x93\ \x4f\x3e\xf9\x9b\xaa\xaa\xde\xf1\xca\x2b\xaf\x54\xfd\x39\x00\x91\ \x1e\x10\xde\x4e\x51\x14\x05\xdb\xb6\x99\x34\x69\x12\x37\xdd\x74\ \x13\xdf\xf9\xce\x77\xa8\xad\xad\x65\xfa\xf4\xe9\x74\x75\x75\x91\ \xcd\x66\x7d\x63\xed\x38\x0e\x96\x10\xa8\xdb\x5c\x8a\xf9\xa9\xdc\ \xf4\xab\x0f\x50\x1b\x38\x95\x9b\xd6\xdd\x88\xae\xa9\xa8\x76\x08\ \x4d\x6b\x24\x5e\xe3\xc7\x28\xf2\xca\x2b\xaf\xdc\xe7\xfd\xff\xf5\ \xeb\xd7\xcf\x07\xf6\x1d\xad\x94\x78\x2a\xc4\x74\x4d\xe2\xe1\x00\ \x85\x8c\xcb\x4d\x27\xdf\x81\xab\x16\x59\xb3\xf7\x5e\x26\x37\x34\ \x32\x9c\xef\x65\xf4\xd5\x30\x4a\x40\x4e\x50\xc9\xde\x86\x54\x55\ \xd5\x07\x24\x16\x8b\x21\x84\xf0\x37\xa1\xa2\x28\xfe\x35\x02\x81\ \x00\x96\x65\x09\x21\x04\xe1\x70\xb8\x70\xb8\xf7\xa8\x1c\xce\x87\ \xa6\x4c\x99\x32\x69\xca\x94\x29\x2f\xb9\xae\x3b\xcd\xb3\x05\xd9\ \x6c\x25\x08\xbd\xea\xaa\xab\x68\x6c\x6c\xe4\x57\xbf\xfa\x15\xcd\ \xcd\xcd\x00\xac\x5d\xbb\x96\xdb\x6f\xbf\x9d\x50\x28\x84\xa6\x69\ \xbe\x71\x37\x0c\x83\x72\xa9\x84\xd0\x04\xa1\x58\x90\xda\xda\x5a\ \xba\xd5\x21\x16\x7c\xbc\xcc\xac\x8f\x16\x69\xb9\x75\x94\xc9\xd7\ 
\x24\x89\x9f\x6e\x10\x69\x16\x04\x83\x01\x11\x0c\x06\x09\x06\x83\ \xd4\xd7\xd7\xe3\x38\xce\x31\xe9\x30\xc3\x30\xbc\x00\x0e\x45\x28\ \x9c\x14\x7e\x3f\xa1\x6a\x9b\xe7\xdf\x78\x9d\x45\xda\xf5\xac\x3e\ \xe3\x6e\x6a\x42\xf5\x94\xcb\xe5\xca\xbd\x96\xcb\x98\xa6\x49\x36\ \x9b\xf5\x37\x56\x20\x10\xf0\xa5\xe1\xb2\xcb\x2e\x23\x12\x89\x50\ \x5f\x5f\xcf\xdc\xb9\x73\x7d\x60\x34\x4d\xf3\xb5\x87\xe7\x7d\x1d\ \x57\x40\x92\xc9\x64\xed\x05\x17\x5c\x70\x6e\x28\x14\x8a\x58\x96\ \xe5\x5f\xb8\x5c\x2e\xf3\xe1\x0f\x7f\x18\xc3\x30\xb8\xff\xfe\xfb\ \x59\xbe\x7c\xb9\xaf\x3b\xbd\x1b\x5f\xb0\x60\x01\xb7\xdd\x76\xdb\ \x04\xe3\xee\xc5\x2d\xa6\x61\xe2\x04\x4c\x34\x4d\x25\x9b\x74\xb8\ \x6a\xe1\xc7\xa8\x9b\x1a\x23\xb2\x38\x4b\xed\x15\xc3\xe8\x0b\x52\ \x18\xa5\x31\x6f\xc8\x34\x8f\xd8\x40\x36\x34\x34\x5c\x1e\x8d\x46\ \x6f\x8a\xc5\x62\x37\x4a\x29\x97\x0c\x0d\x0d\x31\x38\x38\x48\x62\ \x68\x84\x81\x37\xb3\xf4\xed\x18\xe0\xb9\x35\x6b\xd9\xf1\xfa\x0e\ \xf6\x6d\xda\xc3\xfd\x0f\x7c\x97\x9d\x2f\x76\x91\x2f\x64\x7d\xe0\ \x3c\x0f\xca\xf3\xa2\xce\x3e\xfb\x6c\xaa\xab\xab\x91\x52\x92\x48\ \x24\x7c\xa3\x9e\x48\x24\x18\x93\x06\x5f\x75\x65\xb3\x59\x5f\x5d\ \x1f\x57\xa3\xee\xf9\xdf\xde\xc5\x15\x45\xe1\xfc\xf3\xcf\xa7\xb5\ \xb5\x95\xc7\x1f\x7f\x9c\xba\xba\x3a\x9e\x7a\xea\x29\x9e\x79\xe6\ \x19\x42\xa1\x10\x57\x5d\x75\x15\xa7\x9f\x7e\x3a\xa9\x54\x8a\xfb\ \xef\xbf\x9f\xbe\xbe\x3e\x0c\xc3\x20\x99\x4c\xfa\xd1\xb8\x69\x9a\ \x20\x41\x6a\x92\x72\xc9\xe1\xae\xf7\x3e\xc5\xa3\xad\xdf\xe0\xa6\ \xf9\x5f\x65\xed\x8e\x17\x50\x1b\xfa\x78\xcd\xde\x56\xb1\x23\x4a\ \x65\x67\x1e\xe9\x9a\x3c\x79\xf2\xb7\x97\x2d\x5b\x36\xcb\xb2\x2c\ \x54\xa1\x50\xac\x4e\xcb\x58\xb5\x26\xdc\xac\x8e\x9d\xd4\xe9\xdb\ \x36\x0c\x02\xea\xb5\x69\x8c\xa6\x73\x24\x64\x96\xa6\xc0\x0a\xf6\ \x8f\xbe\x44\x53\xa0\xc9\x7f\x91\x9e\x4d\xb4\x2c\x8b\x48\x24\xe2\ \x7b\x91\x1b\x37\x6e\x24\x16\x8b\x91\x48\x24\xa8\xae\xae\x46\x08\ \xc1\xb4\x69\xd3\x18\x1e\x1e\x26\x9f\xcf\xfb\x31\xdb\x91\x48\xb6\ \x72\xb8\x80\x78\x60\x14\x8b\x45\xa6\x4d\x9b\xc6\xbf\xfc\xcb\xbf\ 
\xa0\x69\x1a\xcf\x3d\xf7\x1c\x7d\x7d\x7d\xc4\x62\x31\x82\xc1\xa0\ \xef\xee\x6e\xdb\xb6\x0d\x4d\xd3\x26\xf8\xff\x9e\xcd\x39\x24\x21\ \x95\x28\x3f\xa0\x46\x98\xd6\x38\x85\x84\xd8\x85\xb4\x42\x48\x09\ \x8e\xb4\x70\x1d\x07\xc3\x34\x7c\xd1\xb7\xde\xe2\x07\x37\x35\x35\ \xbd\xd3\x7d\x3b\xe5\x72\x99\x72\xb9\xcc\xee\x6c\x17\x91\x73\x07\ \x45\xe8\xac\x7e\x42\x97\x75\x50\xf5\xe1\x7d\xd4\x7c\xf0\x00\xa1\ \x73\xfb\x11\x33\x93\x95\xeb\x58\x26\x96\x7b\x28\x9e\xf0\x24\xbd\ \xbe\xbe\x1e\x4d\xd3\xd0\x75\x9d\x97\x5e\x7a\xc9\xb7\x29\xba\xae\ \xfb\x52\x14\x0e\x87\x31\x0c\xc3\x07\xcf\xb3\x35\x63\x80\x34\xad\ \x5c\xb9\x52\xd6\xd7\xd7\xcf\x3f\x6e\x46\xdd\x8b\x3b\x4c\xd3\xa4\ \x54\x2a\x31\x30\x30\x40\x24\x12\x61\x68\x68\x08\xc7\x71\xe8\xef\ \xef\xf7\xe9\xe8\xd1\xd1\xd1\x0a\x97\x35\x65\x0a\x9f\xfd\xec\x67\ \x71\x1c\x87\x60\x30\x88\xaa\x1e\x72\x1f\xbd\x77\xeb\xb4\x97\xd8\ \xf0\xef\x2a\xa7\xfe\xdb\xa9\x00\x3c\xcf\x87\xde\xf6\xfa\xd9\x6c\ \x96\xf9\xf3\xe7\x77\x79\xee\xf0\xd0\xd0\xd0\xbf\x0d\x0c\x0c\xdc\ \xf1\x4e\x5e\xa1\x07\xa6\x29\x2d\x14\x14\xc8\x4c\xe6\x8e\xcb\xbf\ \x47\x7b\x7a\x33\xdf\x7b\xf9\xdf\x58\xbc\x64\x0e\xdd\x75\x9d\x64\ \x77\x69\xbe\x34\x18\x86\x41\x2a\x95\xf2\x8d\xf3\xf5\xd7\x5f\xcf\ \x9a\x35\x6b\xc8\x64\x32\xd4\xd5\x55\x9c\xbe\xd1\xd1\x51\xff\x1a\ \xf5\xf5\xf5\x98\xa6\x49\x38\x1c\x66\x68\x68\x08\x55\x55\x89\xc7\ \xe3\xe8\xba\x4e\x2e\x97\x03\x60\xee\xdc\xb9\xec\xda\xb5\xcb\x39\ \x2e\x80\x58\x96\x55\xf0\x5e\xa0\xa2\x28\x14\x8b\x45\x3e\xf3\x99\ \xcf\x50\x5d\x5d\xed\xe7\x3e\xc6\x7b\x22\xde\x9f\x93\xc9\x24\x81\ \x40\xc0\x07\xe4\x87\x3f\xfc\x21\x86\x61\xf0\xf1\x8f\x7f\xdc\xe7\ \xbb\x54\x54\x96\x2c\x59\x52\x51\x89\xd2\x21\x39\xf7\x00\x73\x16\ \x46\x70\x72\x2a\xd6\x70\x08\x7b\x28\x88\x35\x18\x44\x48\x05\xa1\ \xbb\xfe\x35\x86\x86\x86\x0e\xdb\x90\xdb\x96\x8d\xab\x3a\x08\x45\ \x70\xdd\x82\x4f\xb3\xbe\xf7\x49\x9c\x91\x69\x5c\x37\xf7\x4b\x9c\ \x7f\xea\x99\xdc\xfe\xe8\xe5\xbe\x6b\xab\xaa\xaa\x1f\x03\x79\x6c\ \xc3\x96\x2d\x5b\x2a\xe4\xe3\x98\x73\x32\x69\xd2\x24\xa4\x94\x08\ 
\x21\x28\x95\x4a\xfe\xc6\x54\x55\x95\x50\x28\x44\xb1\x58\x24\x12\ \x89\xf8\xa1\x80\x65\x59\xfe\xf7\x1f\x33\x20\xb5\xb5\xb5\xb7\x2c\ \x5d\xba\xf4\x2e\x8f\x2e\x39\xe1\x84\x13\x48\xa5\x52\x04\x02\x01\ \xd2\xe9\xb4\xaf\x82\xa4\x94\x28\x8a\xe2\xb3\x9f\xde\xee\x1a\xa3\ \x16\x5c\x65\x2c\x9a\x9a\x36\x6d\x1a\x8e\xe3\x50\x28\x14\x10\x08\ \x6c\x69\x51\x2c\x16\x29\x97\xcb\xf4\x59\x49\x5a\xa6\x3b\xc8\x48\ \x89\xc6\x49\xd3\x49\x4d\xed\x43\x0a\x07\xd3\x35\x51\x4a\x61\x86\ \x1e\x99\x5c\x49\x74\x1d\x01\x45\x62\x59\x16\xb6\x65\x21\x15\x89\ \xa2\x09\x1a\xaa\x9b\x39\x58\xfa\x3d\xc9\x24\x64\xdd\x7e\xea\x4e\ \x30\x08\x07\x23\x5e\xcc\x33\x41\xe5\x78\x2a\xb6\xa7\xa7\x87\x50\ \x28\x84\xeb\xba\xac\x5e\xbd\x9a\xad\x5b\xb7\xd2\xd9\xd9\xe9\x7b\ \x52\xaa\x5a\x61\x80\x6b\x6a\x6a\xc8\x66\xb3\xa8\xaa\x4a\x2c\x16\ \xf3\xd5\x9e\x17\xbb\x1d\x8e\x53\xf2\x8e\x80\x98\xa6\x39\xb5\xa9\ \xa9\xa9\xa6\xa7\xa7\x07\xd3\x34\xb9\xe7\x9e\x7b\xd8\xb3\x67\x0f\ \x37\xde\x78\x23\x3f\xf8\xc1\x0f\x48\x24\x12\xd4\xd6\xd6\x12\x0c\ \x06\x79\xe9\xa5\x97\x58\xb8\x70\x21\xbb\x76\xed\xa2\xa7\xa7\x87\ \xb9\x73\xe7\xb2\x6d\xdb\x36\xf6\xef\xdf\x7f\xfb\x82\x05\x0b\xbe\ \xf3\xec\xb3\xcf\x72\xd1\x45\x17\x51\x2e\x97\x7d\x95\x25\x74\xc5\ \xf7\xbe\x5c\xc7\x45\x55\x04\xa1\xec\x7c\x3e\x71\xd1\x9d\x68\x32\ \xca\x17\x9e\xf8\x08\x1f\x3e\xfb\x46\x7e\xb6\xfd\x6e\x2c\xcb\xac\ \xe4\x52\xfe\xc8\x83\x0d\x0c\x0c\x30\x6b\xd6\xac\x3b\xa3\xd1\xe8\ \x54\x00\xe9\xca\x29\xc9\xd1\x14\xd2\x75\x41\xb7\xe9\xfc\xa5\xc2\ \x77\x5f\xf8\x16\x69\xf7\x20\x46\x36\x88\x4d\x89\xee\x75\x9d\x74\ \x0d\xe6\x30\xcd\xb0\x0f\x48\x38\x1c\x66\xc6\x8c\x19\x0c\x0e\x0e\ \x62\x9a\x66\xaa\x54\x2a\xf5\x38\x8e\xb3\x44\x51\x14\x71\xd7\x5d\ \x77\x49\xd7\x75\x85\xb7\x01\x3d\x1b\x0b\xf8\x86\x1c\x20\x95\x4a\ \x11\x8b\xc5\xfc\x4d\x7a\xb8\x34\x8a\x76\x38\x06\xdd\x34\x4d\x0a\ \x85\x02\xaa\xaa\xf2\xa3\x1f\xfd\x88\x6f\x7f\xfb\xdb\xfc\xf4\xa7\ \x3f\x65\xfd\xfa\xf5\xa8\xaa\x8a\xae\xeb\x68\x9a\x46\x2a\x95\x62\ \xef\xde\xbd\xe4\xf3\x79\xca\xe5\x32\x89\x44\xc2\x53\x5f\x21\x55\ 
\x55\xd9\xb0\x61\x03\xaf\xbf\xfe\xba\xef\xab\x8f\xbd\x34\xdf\xdf\ \xb7\xb1\x71\x71\x59\xd6\x70\x19\x8f\x6c\xfb\x2a\xb3\xb4\xf3\xb9\ \xf6\xa4\xd5\xb4\xd4\x4c\x41\xa0\x54\xd4\x9c\x22\xde\xc9\xb3\xfa\ \xfb\xe9\xd3\xa7\xd7\x0b\x21\x28\x38\x65\x58\x38\x4c\x7d\x3c\x8c\ \xd3\x1f\xc1\xea\x0b\x93\x19\x4d\x22\x88\x11\xaa\x98\x65\x8a\x43\ \x92\x46\x71\x22\x1d\x66\x87\xcf\x38\x54\x57\x57\x13\x8f\xc7\x49\ \xa5\x52\x98\xa6\xf9\xec\x9e\x3d\x7b\x6e\x8a\xc7\xe3\xd7\x5a\x96\ \x35\xcb\xb2\xac\xaf\x5d\x70\xc1\x05\xac\x58\xb1\x82\x47\x1f\x7d\ \xd4\xb7\xab\x5e\x10\xe9\x51\x4a\x8a\xa2\x30\x3c\x3c\x4c\x34\x1a\ \x3d\x22\x5e\xeb\xb0\x00\xf1\xbe\xcc\x73\xf5\x4e\x3b\xed\x34\x5f\ \x2d\x8d\xb7\x1f\x42\x08\x7a\x7b\x7b\xfd\x3f\x1f\x38\x70\xc0\xf3\ \xbc\xd2\xfb\xf7\xef\xff\xab\xb1\xe8\x95\x50\x28\xf4\xf8\x89\x27\ \x9e\xa8\x7e\xf1\x8b\x5f\xe4\x73\x9f\xfb\x1c\xa6\x69\x56\xf2\x29\ \xaa\x8d\x10\x10\xd0\x22\x38\xae\x8d\x61\x9a\x64\x64\x8a\xae\x4c\ \xe2\x10\x85\x21\xf0\x77\xe6\xdb\x2d\x8f\x85\x05\x78\x23\xbf\x9f\ \xeb\x4f\xaf\xc1\xb2\x73\x94\x5a\x06\xa9\xd6\xc2\x08\x47\x45\x1a\ \x2a\xc5\x3d\x51\xf2\xdb\x62\x20\x2a\x3b\x3d\x95\x4a\xf9\xd7\x28\ \x95\x4a\xa4\x52\xa9\x09\xcf\x98\x4e\xa7\x7f\x01\x9c\xb8\x6a\xd5\ \xaa\xaf\xdd\x79\xe7\x9d\xe8\xba\x8e\x61\x18\x74\x75\x75\xb1\x74\ \xe9\x52\x7e\xf9\xcb\x5f\xf2\xbe\xf7\xbd\x8f\x2d\x5b\xb6\xb0\x67\ \xcf\x1e\x2e\xbd\xf4\x52\x86\x86\x86\x58\xbb\x76\xad\xbf\xa9\x17\ \x2c\x58\xf0\x66\x6b\x6b\xeb\xaa\x6c\x36\xbb\xe5\x98\x25\xc4\x33\ \x70\x93\x27\x4f\x66\x68\x68\xc8\xb7\x15\xde\x4d\xbb\xae\xeb\xff\ \xf2\x78\xac\xb1\x07\x7c\x42\xd7\xf5\xbd\xc3\xc3\xc3\xaf\x7a\xdf\ \xd9\xd2\xd2\x22\x9f\x7b\xee\x39\x5e\x7c\xf1\x45\x96\x2f\x5f\xce\ \x86\x0d\x1b\x2a\xbb\x2c\xe4\x32\xb4\xce\xe2\xf9\xf8\xd3\xc8\xc8\ \x28\xfd\xca\x1b\x18\x56\x89\xaa\x1a\x9d\x83\x43\x06\x85\xa2\xed\ \xdf\x53\x2c\x16\x3b\x77\xf1\xe2\xc5\x5f\x02\x84\x61\x18\x76\x5b\ \x5b\xdb\x57\x3c\xbe\xc9\xf7\x8e\x85\x44\x22\x59\x20\xae\x67\xc5\ \x7b\x4e\x21\x6d\xf6\xb3\x61\xdf\x6b\x64\x42\xfb\x29\x06\x6c\x2c\ 
\xcb\x44\x22\xd1\x34\xcd\x27\x32\x5d\xd7\x95\xaa\xaa\x0a\xc7\x71\ \x88\xc7\xe3\x24\x12\x89\xf1\xe8\x2b\x43\x43\x43\xec\xde\xbd\x9b\ \xe6\xe6\x66\x3c\x97\x7a\xe3\xc6\x8d\x00\xbc\xf6\xda\x6b\x94\xcb\ \x65\x74\x5d\xe7\xb5\xd7\x5e\xa3\xa1\xa1\xc1\xbf\x5f\xc3\x30\xa8\ \xa9\xa9\x89\x59\x96\xa5\x1f\xab\x97\x25\x6d\xdb\x46\x55\x55\x1e\ \x7e\xf8\x61\xa6\x4e\x9d\xca\x23\x8f\x3c\xc2\xce\x9d\x3b\x09\x85\ \x42\x24\x93\x49\xa2\xd1\x28\x83\x83\x83\x04\x02\x01\x62\xb1\x18\ \x23\x23\x23\x64\x32\x19\xd2\xe9\x34\x6d\x6d\x6d\x57\xbf\xdd\x2e\ \x7e\xf6\xd9\x67\xa9\xad\xad\x25\x97\xcb\xf9\x34\x45\xa9\x54\xa4\ \x2e\xb9\x08\x07\x0b\x91\xa9\xa1\xcf\x19\x22\x34\xbd\x8c\x99\x0d\ \xd0\xc8\x3c\x26\xaf\xa8\xe4\xb5\xa5\x25\x40\x61\x95\x10\xac\x1a\ \x8b\x69\x4a\x1e\x20\xe3\x25\x44\x00\x03\xfd\x79\xfe\xf3\x6f\x3e\ \xce\xc7\x7f\x7d\x36\x5f\x3e\xe7\x69\x6a\xec\x79\xa8\x35\x29\xbe\ \xb5\xe9\xab\x18\x86\x09\xa2\xc2\x59\x79\x2a\x54\xd3\x34\x31\x38\ \x38\xd8\x39\x38\x38\xb8\x4d\x08\xe1\x06\x02\x81\xb5\x6f\xdd\xa0\ \xab\x57\xaf\xc6\xb6\x6d\x42\xa1\xd0\x04\x35\x74\xf0\xe0\xc1\x09\ \x3f\x1f\x38\x70\xc0\xb7\x21\x85\x42\xe1\x0f\x3e\x7f\x54\x80\x28\ \x8a\x32\xc7\xb2\x2c\x54\x55\xa5\x54\x2a\x51\x2c\x16\x99\x3b\x77\ \x2e\x6f\xbe\xf9\x26\x4b\x96\x2c\x61\x78\x78\x98\xda\xda\x5a\xb2\ \xd9\x2c\xe5\x72\x99\x48\x24\xc2\x39\xe7\x9c\xc3\x33\xcf\x3c\xf3\ \x47\x2f\xae\x69\x1a\x5f\xfa\xd2\x97\x7c\x43\xe8\xc5\x0b\x05\xa5\ \x4c\xa9\x54\x42\xa4\x05\x42\x08\x76\xe8\xfb\x78\xef\x19\x35\xd8\ \xd6\x78\xcf\x4a\xe2\x18\x0a\x99\x17\xea\x30\x87\x03\x80\xfc\x03\ \xb0\x7d\x9a\x45\x07\xc7\x86\x80\xae\xa3\x29\x01\x10\x90\xc8\x0f\ \xf2\x46\xff\x03\x48\x57\x8e\xa9\xc0\x8a\x94\x37\x34\x34\xf8\x36\ \x40\x51\x94\x5f\x67\x32\x99\xd5\x6f\x17\x8e\x79\xae\xbc\xb7\xeb\ \x0f\x33\xed\xed\x7f\xff\xb1\x02\x32\xbd\xb6\xb6\xf6\x46\xc3\x30\ \xa4\xa6\x69\xe2\xee\xbb\xef\xa6\xb1\xb1\x91\xbd\x7b\xf7\xe2\xba\ \x2e\x83\x83\x83\x18\x86\x41\x38\x1c\xa6\x50\x28\x20\xa5\x64\x74\ \x74\x94\x5d\xbb\x76\xfd\x49\xee\xa9\x58\x2c\xe6\xa5\x94\xde\xb5\ 
\x83\xe1\x70\x58\x77\x1c\x07\xdd\x52\x71\xdc\x43\x2a\x47\x09\x09\ \x2c\xc7\x66\x45\xf4\x16\x16\xcf\x5c\xc0\x96\xe4\xe3\xe4\x92\x1a\ \xbb\xdd\xe7\x70\xb4\x18\x96\x09\x12\x39\xc1\xa6\xb8\xae\x2b\x7d\ \x03\x1a\x12\xc4\x27\x85\x28\x18\x65\x02\x21\x95\x9a\xaa\x5a\x4c\ \xa5\x88\x16\xd2\x40\xa9\x00\x22\x91\xbe\xfa\x6d\x6e\x6e\x66\x68\ \x68\x88\xb7\x2b\x4d\x02\x08\x04\x02\xed\x1d\x1d\x1d\x8d\xf3\xe7\ \xcf\x1f\x9a\x32\x65\x0a\x37\xdc\x70\x03\x8f\x3d\xf6\x98\x6f\x7b\ \xbc\x80\xd7\xdb\x60\x1e\xc3\xa1\xeb\xba\xcf\x8a\x1f\x2b\x20\x11\ \x21\x04\xb9\x5c\x4e\xd4\xd7\xd7\x33\x3c\x3c\xcc\xe0\xc0\x10\x20\ \x10\x02\x1f\xf1\x72\xb9\xec\x07\x54\xde\x36\x8e\xc5\x62\xa2\x54\ \x2a\xbd\x6d\xf4\x2c\x84\xf0\x2b\xe9\x66\xce\x9c\xf9\xa0\xaa\xaa\ \x1f\xbe\xfc\xf2\xcb\xfd\x00\xec\x90\x2b\x29\x49\x0d\x0a\xfe\xee\ \xe6\x9b\xb9\xe9\x67\x4b\xf9\x97\xb3\x1f\xa3\x18\x2f\x30\xd3\x6c\ \xe6\x27\xbf\x79\x02\xc3\x90\x9e\x0d\x08\x5d\x74\xd1\x45\x23\x80\ \x14\x8e\xa8\xb3\x2c\x89\xa2\xc0\x22\x3b\xc2\xf0\xc3\x0a\xd7\x3f\ \x74\x2d\x52\x09\x71\xfd\x7f\x5d\x83\x65\x5b\x04\xb5\x08\xdb\x5b\ \x7b\xa9\x8a\xc4\x7d\x4a\xdd\x23\x3f\xff\x18\x18\xe3\xf2\x22\x32\ \x16\x8b\xf1\xe2\x8b\x2f\xb2\x71\xe3\x46\x5a\x5a\x5a\xd8\xba\x75\ \x2b\xe1\x70\x98\xf5\xeb\xd7\xfb\x36\x31\x12\x89\xd0\xd2\xd2\xc2\ \xae\x5d\xbb\x18\x1d\x1d\xf5\x93\x74\xc7\xac\xb2\xa4\x94\xe8\xba\ \xce\xbe\x7d\xfb\x90\xae\xa4\x66\xb1\x4e\x24\xac\x92\xeb\xb7\x29\ \x0e\x39\x48\xf7\x6d\xd5\x9c\x28\x97\xcb\x9d\x8a\xa2\x7c\xf3\xed\ \x9c\x84\xb7\xb0\x00\xfa\x07\x3e\xf0\x01\xe6\xcc\x99\xc3\x07\x3f\ \xf8\x41\x6e\xbd\xf5\x56\x3f\xcf\x20\x10\x54\x05\xe3\xd8\xb2\x44\ \xbc\xaa\x8a\xe6\xda\xa9\x3c\xbf\xe3\xd7\x3c\xd1\x77\x1f\xb6\xd5\ \x88\x69\xb9\x50\xd9\xe1\xa2\xa6\xa6\x66\x92\x40\xd0\x35\x6d\x0f\ \xa7\x5d\x14\xc6\x18\xac\xd4\x55\xd9\x83\x01\x9c\xac\x06\x96\x8e\ \x5b\x52\x50\x75\x0d\x4d\x53\x29\xe5\x4d\x6c\x23\xe5\x25\xdd\x64\ \x34\x1a\x15\x23\x23\x23\x84\x42\xa1\x77\xe4\xf8\x1c\xc7\xe1\x95\ \x57\x5e\x21\x1e\x8f\xe3\xba\x2e\xfd\xfd\xfd\x7e\xa2\xca\x4b\xc8\ 
\x79\xec\x6f\x5d\x5d\x1d\x03\x03\x03\x68\x9a\xe6\x05\x91\xb1\xe3\ \x02\xc8\xec\xd9\xb3\x2b\x14\x40\xb2\x22\x9a\x51\x29\x89\xd4\xcb\ \x09\xa5\x3b\x9e\x97\x35\xb6\xe3\x7a\x06\x06\x06\xfe\xeb\x70\xe9\ \x8d\xb9\x73\xe7\xfa\x51\xed\x21\x75\x27\xb0\x5d\x13\x45\x68\x80\ \xa0\x64\x15\xd1\x54\x8d\xa0\x16\xc5\x71\x5c\x4c\xd3\xf2\x83\x39\ \xcb\xb2\x28\x38\x65\x6a\xa7\x29\x48\x5b\x25\x5a\x1f\x46\x99\x62\ \x61\xcc\x4f\xf8\x9b\x60\xe0\xa1\x49\x48\x5b\xf1\x3d\x41\x8f\x5b\ \x53\x55\x55\x74\x75\x75\x3d\xe0\xed\x11\x5d\xd7\x7f\xfd\xa7\xee\ \xd7\x75\x5d\x3e\xf9\xc9\x4f\xfa\xdf\x33\x7e\xed\xdb\x77\x28\x7f\ \xd6\xdf\xdf\xef\x7b\xa1\xf1\x78\x1c\xc7\x71\xa8\xaa\xaa\xfa\x72\ \x3a\x9d\x7e\xe1\x98\x00\x59\xb1\x62\x05\x97\x5d\x76\x19\x67\x9f\ \x7d\x36\xdf\xfb\xde\xf7\x68\x6d\x6d\xe5\xe4\x93\x4f\xe6\xf5\xd7\ \x5f\x67\xee\xdc\xb9\x64\x32\x19\x5a\x5b\x5b\x09\x87\xc3\xcc\x9c\ \x39\x93\x37\xde\x78\xe3\xb0\x29\xf2\x40\x20\xc0\x0b\x2f\xbc\xc0\ \xde\xbd\x7b\xe9\xed\xed\xf5\x19\x54\x21\x04\x85\xd6\x1c\x43\x46\ \x23\xff\xeb\xa5\x5b\x18\xcc\xea\x7c\xea\xb1\x4f\x60\x18\x36\x79\ \x27\x8e\x5d\x14\x44\x22\x87\xa4\xae\xa2\x1e\x25\xca\xeb\x93\x39\ \xf8\x86\xe0\xc1\x07\x7e\xc4\xfe\x9e\x2e\x7e\xbb\xf7\xa7\xfc\xed\ \xaa\x8f\x72\xf7\xba\x5b\x31\x4d\x0b\x69\x1f\x02\x42\xd7\x75\x9f\ \x4a\x1f\x19\x19\xf9\xc8\xe1\xde\x73\x3e\x9f\x27\x12\x89\x1c\x02\ \xc3\x01\xcb\x74\xc7\xee\x05\x84\x2a\x50\xd4\x31\x6d\x30\xf6\x91\ \xd1\xd1\x51\x5f\x3d\x1e\x8b\x84\xb8\xde\x2e\x88\xc7\xe3\x84\xc3\ \x61\x9f\x74\x0b\x04\x02\x2c\x5d\xba\x94\xee\xee\x6e\xaa\xab\xab\ \x7d\xa3\xa5\xeb\xfa\x3b\x5e\xf4\x2d\x2a\x2b\xa0\x28\x0a\x07\x0f\ \x1e\xf4\x8a\x06\x64\xb9\x5c\x16\x00\xe5\x62\x91\x69\x2b\x66\x92\ \xcb\xe5\x88\x69\x75\xa4\x67\x1c\xa0\x69\x46\x80\xaa\xb4\x8e\x93\ \x1d\xfb\x95\xd6\x70\x73\x1a\x62\xec\x49\xc2\xe1\x30\xe9\x54\x9a\ \xf5\x3d\xcf\xb2\xe9\xcd\x2e\x2e\x3e\xf9\x06\x1a\x63\xd3\x11\x28\ \x95\x58\xc7\xaa\x48\xd4\xbc\x79\xf3\xfc\x6c\xe6\xe1\x12\x95\x00\ \x35\x35\x35\x23\x1d\x1d\x1d\x4d\xa7\x9c\x72\xca\x40\x2a\x95\x42\ 
\x48\x70\x67\x0b\x96\x5f\x59\x43\x7e\xc0\xa1\x38\x6c\x93\x1b\x70\ \x28\x0e\xd9\x18\x19\x17\xcf\x24\x79\xef\xf0\x4f\xd9\xa8\x77\x04\ \x24\x1a\x8d\x2e\x07\xd8\xb6\x6d\x1b\x89\x44\x82\xe1\xe1\x61\x8a\ \xc5\x4a\xe9\xce\xee\xdd\xbb\xfd\xc0\xd0\x03\xcd\x75\x5d\x5e\x7e\ \xf9\xe5\x23\x4a\x26\x25\x93\xc9\x2f\x0e\x0d\x0d\x7d\x77\xcc\x8d\ \x9c\xbe\x60\xc1\x82\x87\x0f\x79\x23\x95\x0a\x96\x72\xb9\x4c\xca\ \xcd\x31\x75\xae\x89\xa8\xb2\x69\x9c\xd6\x44\xd9\xcd\x51\x72\x12\ \x28\x0a\xd8\x79\x95\xe1\x9f\x4e\xf6\x1f\x5c\x4a\x89\x57\xb2\x9e\ \x29\x8d\x92\x35\x12\x08\x45\x4c\x00\x24\x93\xc9\x70\xf5\xd5\x57\ \xf3\xf4\xd3\x4f\xfb\x31\xc8\xe1\xac\x4c\x26\x83\xae\xeb\x52\xd3\ \x34\x9a\x9a\x9a\x2a\x36\xa2\x7f\x80\x8e\x07\x15\x2c\xc7\x45\x28\ \x3a\xae\xab\xa2\x11\x40\x09\xbb\x7e\x4e\xc4\x53\xe5\xc7\x04\x88\ \xae\xeb\x7f\xe3\x65\xf8\xfa\xfa\xfa\xfc\xa4\xcc\xf8\xdf\x8f\x75\ \x15\x0a\x85\xbd\xc0\xde\xb1\x1f\xe7\x7b\x8c\x80\x10\x02\xa5\x50\ \x79\x89\xa6\x61\x62\x2b\x0e\xaa\xaa\xd0\xa2\xae\xe2\xef\x56\xde\ \x4a\x4d\xa0\x9e\x0f\x7c\xef\x7c\x6e\xbb\xea\x56\x7e\xb1\xfe\xfb\ \x13\xe8\x73\xdb\xb1\x59\x30\xe9\x34\x92\x53\x54\x06\x72\xdd\x04\ \xd5\xd3\x51\x50\x30\xcd\x32\xae\x55\x89\x83\x32\x99\x0c\x4f\x3f\ \xfd\xb4\xef\x96\x1e\xe9\x52\x14\x85\x07\x1e\x78\x80\xe1\xe1\x61\ \x84\x10\x3c\xfe\xf8\xe3\xc4\xe3\x71\x3a\x3b\x3b\x69\x6e\x6e\x66\ \xcb\x96\x2d\xcc\x9a\x35\x0b\xc7\x71\xd8\xb7\x6f\x1f\xcd\xcd\xcd\ \x74\x74\x74\x1c\x1b\x97\x25\xa5\x54\x63\xb1\x18\x7b\xf6\xec\x41\ \x08\x41\xf4\x7c\x9d\x15\x17\xd5\x20\x01\x23\x27\x49\xee\x35\x18\ \x6d\xb7\xc8\x74\x98\x98\x85\x43\xf9\x90\x9a\x9a\x9a\x11\xe0\x7d\ \x47\x5b\xa6\xfa\xd3\x9f\xfe\x94\x42\xa1\xc0\xe7\x3f\xff\x79\xbf\ \x30\xc1\x52\x4d\x84\x90\x9c\x3c\xe5\x6c\x7e\xd7\xfe\x00\xa9\xce\ \x06\xae\x58\xf8\xb7\xcc\xa8\x59\x84\xed\x54\xaa\x60\x3c\x09\xe9\ \xee\xee\xe6\x6b\xff\xfb\xbb\x95\xc4\x56\x29\xc5\xc6\x9f\xec\xe2\ \xc0\x20\x64\x53\x79\x8f\x02\x92\x8a\xa2\x08\xaf\x8e\xea\x9d\x76\ \xed\xdb\xd9\x55\x8f\x5d\x9e\x3f\x7f\x3e\x5d\x5d\x5d\x8c\x8c\x8c\ 
\x78\xcf\xce\xc9\x27\x9f\xcc\xe6\xcd\x9b\x19\x19\x19\x61\xd1\xa2\ \x45\xa4\x52\x29\x46\x47\x47\xd1\x34\xed\xd8\x24\xc4\x8b\xaa\x3d\ \xef\xc9\xd9\xa9\xf1\x66\x47\x96\x7c\xbf\x83\x55\x94\x28\x9e\xf1\ \x52\x55\x54\x55\xfa\xb9\x91\x40\x20\xe0\x0e\x0c\x0c\x1c\x55\xd3\ \xca\xbc\x79\xf3\x68\x6e\x6e\x66\x74\x74\x94\xe6\xe6\xe6\x4a\x85\ \xbd\x65\xe2\xe0\x00\x02\x4d\x09\x60\x4b\x0b\xdb\xb1\x18\x48\x0f\ \xd3\x95\x0e\xa3\x09\xdd\x8f\x5f\x34\x4d\x03\xcd\xa1\xa4\xa6\x70\ \xb2\x1a\x8a\xd4\xc9\xe6\xb2\xc4\xf4\x38\xb9\x5c\x07\x8a\xa2\xa0\ \xeb\xba\x28\x14\x0a\x3b\x37\x6d\xda\xf4\x4f\x42\x08\x55\x51\x94\ \x23\x2e\x91\x54\x55\x95\xaf\x7c\xe5\x2b\xcc\x9f\x3f\x9f\xcd\x9b\ \x37\xa3\xeb\x3a\x1d\x1d\x1d\xb8\xae\xcb\xab\xaf\xbe\x8a\xa6\x69\ \x24\x93\x49\xda\xdb\xdb\xc7\x9b\x00\xa4\x94\x35\xc7\x22\x21\x48\ \x29\xf9\xd1\x8f\x7e\xc4\xac\x59\xb3\xb8\xf7\xde\x7b\x2b\x65\x93\ \x73\x4a\x7e\x92\xaa\x58\x2c\x22\xa5\x64\xd2\xa4\x49\x74\x75\x75\ \x91\xc9\x64\x8e\xb8\x3a\x64\x42\x85\x79\x5b\x1b\x2f\xbd\xf4\x12\ \x2d\x2d\x2d\x1c\x38\x70\x80\x78\x3c\x5e\x49\xc3\x62\xa3\x6b\x21\ \x52\xc6\x20\xd3\xa7\xcc\xa3\x7e\x5a\x1d\x9b\x7a\xba\xa9\x8d\x4c\ \x1e\x8b\xba\x0f\xb9\xc0\x7a\x5c\x52\xf7\xfe\x01\x46\x9e\xae\xc3\ \xe8\xad\xd0\x2b\x8e\xe3\xf8\xe5\x3b\x63\xbf\x27\x8b\xc5\xe2\xf3\ \x47\x73\x8f\x63\x84\xab\xab\xeb\xba\xb2\x73\xe7\xce\x4a\x0b\x85\ \x00\x55\x55\x90\x12\x62\x91\x18\x96\x6b\x55\x1c\x2c\x81\x1f\xab\ \x8d\xd5\x78\x2d\x38\x26\x09\x99\x37\x6f\x1e\xb3\x67\xcf\xe6\x84\ \x13\x4e\x60\xd9\xb2\x65\x6c\xdd\xba\x95\x65\xcb\x96\xf1\xe4\x93\ \x4f\x72\xf1\xc5\x17\xb3\x7b\xf7\x6e\x36\x6c\xd8\xc0\x99\x67\x9e\ \x49\x73\x73\x33\xcf\x3e\xfb\xec\x9f\xa4\xc7\xdf\x69\x45\x22\x11\ \xbe\xf5\xad\x6f\x21\x84\x60\x68\x68\xa8\x52\x01\x68\x98\xd8\xaa\ \xcb\x86\xff\x1e\x64\x93\xf9\x55\xe6\x35\x2e\xc1\x95\x2e\x3b\xfa\ \x36\xd1\xfa\x68\x1b\x1d\x23\xbd\x14\x86\x0e\xbd\x2c\x4d\x53\x59\ \xf7\xb5\x61\x8c\xd1\x51\x9c\x92\x64\xc6\x8c\x19\xe8\xba\xce\xd1\ \xaa\xa8\xb7\xf1\x0c\x87\x76\xee\xdc\x79\xf1\xac\x59\xb3\x5e\x28\ 
\x95\x4a\xb8\x2a\x4c\xbd\x21\xca\xc9\xcb\x63\xd8\x8e\x43\x53\x78\ \x1e\xc3\x46\x3b\x8e\xe1\x72\xf0\xd5\x12\xfb\x9e\xc8\xa3\xa8\x15\ \xa7\x27\xe2\xf9\xea\x47\x0b\x48\x67\x67\x27\xbf\xfc\xe5\x2f\x39\ \xe5\x94\x53\x78\xe5\x95\x57\xc8\xe5\x72\xf4\xf4\xf4\x60\x18\x06\ \x4f\x3e\xf9\xa4\x5f\x99\xf1\x9b\xdf\xfc\x86\x40\x20\xe0\xa7\x3e\ \x8f\x72\x8d\x0e\x0d\x0d\x7d\x43\xd7\x75\x6b\xec\xc1\x3f\x98\x4a\ \xa5\x66\xe4\xf3\x79\x2c\x53\x32\x2b\x3c\x83\x19\x33\xa6\x57\xa4\ \x54\x1a\x2c\xbb\x7e\x16\x55\x91\x32\x0d\xc3\x27\x63\x0d\x06\x71\ \x46\x2b\x69\x27\xa1\x48\x50\x24\x4c\x83\x5c\x2e\xc7\xc8\xc8\x08\ \xd1\x68\x94\x78\x3c\xce\x69\xa7\x9d\xc6\xcb\x2f\xbf\x7c\x5c\x1c\ \x12\x3f\xcd\x2b\xa1\xd4\x6a\xb3\x63\x7f\x8e\x7c\xaf\x83\x6d\x6c\ \xc4\x2e\x4a\xec\x72\xa5\x3f\x51\x0f\x68\xbe\x23\x70\xac\xd4\x49\ \x34\x1a\x8d\xf2\xf4\xd3\x4f\xf3\xc4\xe3\x4f\x1c\xf2\x46\x84\xc7\ \x49\x29\x7e\x4f\x8b\x50\x2a\x89\xac\xea\xea\x6a\x69\xdb\xf6\xd1\ \xea\xac\x91\xde\xde\xde\xff\xed\xfd\x50\x5f\x5f\xbf\x5c\x51\x94\ \x19\xaa\xa2\x80\x03\xae\xe3\x92\xcf\xe7\x11\xc0\x66\xab\x8d\x4b\ \x5a\xa2\xb8\x4a\x8e\xe0\xd4\x22\x8a\x2c\xa1\x6a\x0a\x76\x5a\xa7\ \xbc\x2f\x46\x7e\x5b\xd4\x67\x01\xbc\xea\xc1\x7c\x3e\x5f\xc9\xe5\ \x8b\xe3\xd3\x31\x67\x59\x16\xb7\xdc\x72\x0b\x00\x2f\xae\x7d\x11\ \x51\x56\xd0\xb4\x02\x42\x17\x18\xba\x81\xac\x96\x7e\x6c\x94\x48\ \x24\x0e\x2b\x6b\xa8\xbd\x83\x41\xaf\xf7\x92\xf9\xae\x0a\x93\xe6\ \x05\xd0\x75\x81\xb4\x75\xe6\x4e\x5e\x48\x47\x6a\x07\x28\x50\x4e\ \x39\xa4\xbb\x7c\x1d\x2e\x6c\xdb\xee\xe0\xf8\x2c\x25\x10\x08\x30\ \x7d\xe1\xc2\x4a\x46\xcf\x1a\xa3\xd6\xbd\x88\x58\x11\xac\xac\xbe\ \x85\xf7\x9d\x7e\x2d\x3b\x86\x5f\xe6\x85\x2d\x2f\xa1\x35\x0f\xd3\ \xd6\x37\xe0\x13\x85\x5e\xc2\xca\x8b\x05\xd6\xaf\x5f\xef\xf1\x55\ \xc7\xbc\xce\x3e\xfb\x6c\x2e\xb8\xe0\x02\xe6\xcc\x99\x43\xa1\x50\ \xa0\xb7\xb7\x97\x8b\x2f\xbe\x98\xc7\x1f\x7f\x9c\xf3\xce\x3b\x8f\ \x62\xb1\xc8\xab\xaf\xbe\xca\xfc\xf9\xf3\x89\xc5\x62\x3c\xf5\xd4\ \x53\x7e\x09\xee\x51\x01\xa2\x28\x8a\xa8\xaf\xaf\xa7\x90\x2f\x60\ 
\x45\x24\x4b\x3f\x52\x47\x75\x8d\x86\x74\x25\xc9\xb6\x7e\xea\xf6\ \x55\x31\xbc\xdd\xc0\x1a\x75\x88\xc5\x82\xbe\x27\x31\x34\x34\xf4\ \xe6\xf1\x78\x60\xd7\x75\xb9\xfd\xf6\xdb\xa9\xae\xae\xe6\x8a\x2b\ \xae\xe0\xa2\x8b\x2e\xaa\xe4\x20\x24\x10\x82\x4c\xba\xc4\x65\x67\ \x5c\xcf\x2d\xbf\x3c\x93\x9f\xff\xcd\x2e\x4e\x08\x2d\x25\xe1\xec\ \x67\xf7\xfa\x3b\x30\x0c\xcd\x0f\x5a\xcb\xe5\x32\xd1\x68\x94\x60\ \x30\x78\x58\xae\xe7\xe1\xae\x6d\xdb\xb6\x31\x75\xea\x54\x06\x06\ \x06\x50\x55\x95\xda\xda\x5a\x76\xec\xd8\x81\x10\x82\x8e\x8e\x0e\ \xbf\x16\x78\xe7\xce\x9d\xac\x5c\xb9\xf2\xd8\xe9\x77\x8f\x7b\x29\ \x95\x4b\xe8\x8e\xce\x9b\x77\xa7\x91\x16\x58\xc5\x43\x39\x74\xa1\ \x4c\xd4\x8d\x47\x42\x9b\x1c\xce\x32\x4d\x93\xfa\xfa\x7a\xbf\xe2\ \xdc\xb2\xac\x4a\x27\x6f\x08\x0c\xc3\x25\x14\x0c\xe0\x52\xa9\x3f\ \xeb\x1e\x3a\xc8\xc6\xfc\x63\x28\xa8\x13\x92\x47\x1e\xf7\xd4\xd6\ \xd6\x36\xb7\x58\x2c\x8e\x00\xb2\xa1\xa1\xc1\x38\x1e\x36\xe4\xea\ \xab\xaf\x46\xd3\xb4\x3f\x48\x56\xb5\xb7\xb7\xfb\xc4\xa2\x94\x92\ \x5f\xfc\xe2\x17\x95\xf7\x75\xac\xf4\xfb\x25\x97\x5c\xc2\x87\x3e\ \xf4\x21\x5c\xd7\xe5\xc9\x27\x9f\xa4\xab\xab\x8b\x68\x34\xca\xc1\ \x83\x07\x89\xc5\x62\x54\x55\x55\x71\xe0\xc0\x01\x72\xb9\x1c\x8d\ \x8d\x8d\xf4\xf4\xf4\x1c\xb7\x1d\x28\x84\xe0\x87\x3f\xfc\x21\xcb\ \x96\x2d\xe3\x3f\xfe\xe3\x3f\x0e\x05\x7f\x08\xa4\x0b\xf1\x78\x88\ \x64\x26\x4d\x4d\xa0\xbe\xe2\x56\x4a\x9b\x80\x88\x20\x5d\x89\x65\ \xd9\x7e\xad\x98\x57\xd6\xa9\xeb\x7a\xce\x75\xdd\x0c\x70\x44\xfc\ \xd5\x1f\x91\x5e\x73\x7c\xbe\xc7\x09\x40\x75\x5c\xc5\x71\x5d\x26\ \x87\x67\x50\x72\xd3\x14\xac\x2c\x42\x48\x8c\xac\x8b\x5b\x92\x7e\ \xf6\xf0\xa8\x01\x19\x6b\xc0\xa4\xb5\xb5\x95\x1b\x6e\xb8\x81\x6d\ \xdb\xb6\x51\x2a\x95\xa8\xab\xab\x63\xd9\xb2\x65\x6c\xdc\xb8\x91\ \x60\x30\x48\x38\x1c\x26\x93\xc9\x1c\xf7\xb6\x66\x29\xa5\x5b\x55\ \x55\x25\x77\xec\xd8\x51\x29\x59\xf0\x4a\x50\x25\x48\x21\x89\x45\ \xc2\x3c\xbe\xfb\x7e\xbe\x75\xe5\x6f\xf8\xcd\xfe\x87\x78\xa9\xe3\ \x65\xce\x3a\xe9\x74\x5e\x73\x37\x62\x18\xa6\xcf\xe8\x7a\x54\x8c\ 
\x6d\xdb\xb3\xea\xeb\xeb\x03\xa3\xa3\xa3\x3d\xc7\x7a\x6f\xb6\x6d\ \xb7\x09\x21\x2a\xc5\x82\x08\x46\x96\x48\xfe\xee\xf6\x66\x24\x2e\ \x51\x33\x42\x50\x0f\x72\x70\xb8\x80\x55\x90\xbc\x7e\x57\x0a\xe9\ \x54\x34\xce\x31\xb9\xbd\x42\x08\xbe\xf9\xcd\x6f\x72\xfb\xed\xb7\ \xf3\xc9\x4f\x7e\x92\xb6\xb6\x36\x5c\xd7\x9d\xd0\x5f\x38\xd6\x9f\ \x8d\x94\x92\x9e\x9e\x9e\xe3\x0a\x48\x2a\x95\xba\xe8\xf9\xe7\x9f\ \xa7\xa5\xa5\xa5\x7d\xce\x9c\x39\xb3\xb3\xd9\x6c\xa5\x6e\x58\x28\ \x14\x6a\x24\x25\x33\xca\x7e\xf9\x1c\xdf\x79\x56\x67\xd5\xe9\xa7\ \x90\x50\x76\xf0\xcc\xbe\x5d\x0c\xb5\xe5\x48\xa5\x2a\x01\x6b\x28\ \x14\xa2\xa5\xa5\x85\x52\xa9\xc4\xb5\xd7\x5e\xfb\x7a\x5b\x5b\xdb\ \x86\x75\xeb\xd6\xad\x3c\x1e\x0e\x87\xef\xfa\x0a\x85\xba\x83\x92\ \x7d\x4f\x15\x18\xda\x6e\x90\xef\xdb\x38\x41\x35\xa9\x42\x03\x8d\ \xb7\xcd\x9f\x1c\xb1\x0d\x89\xc7\xe3\xdc\x77\xdf\x7d\x7e\xc2\xc8\ \x2b\x34\x00\xd0\x35\x1d\x5d\x3b\xe4\x0a\x7b\xe5\xa3\xfb\xf7\xef\ \x9f\x71\x3c\xed\x88\x57\x2d\xef\xbd\x00\x81\x40\x35\x5c\xb4\x30\ \xe4\x7b\xe1\xf5\xce\x9f\xf3\xe4\x4f\x7e\x4c\x79\x44\x62\x95\x5c\ \xca\x69\xd7\x8f\x85\x54\x55\x25\x9d\x4e\x13\x0a\x85\x18\x19\x19\ \x39\xa2\x5e\x8d\xc3\x21\x18\x2f\xbc\xf0\x42\x6e\xbc\xf1\x46\xbe\ \xf5\xad\x6f\xa1\xf6\xa9\x10\xcc\xd1\x38\x1b\xbf\x7f\xbf\x5c\x2e\ \x53\x55\x55\x45\x2a\x95\xc2\xeb\x8b\x3f\x6a\x40\x62\xb1\x98\xdc\ \xb4\x69\x53\xa5\x92\x23\x06\x0b\xcf\x8f\xa1\x07\x04\x4a\xa9\x9e\ \x0b\x16\x5f\xca\xef\x3a\x1f\x41\x0b\xa8\x8c\xee\x33\x19\xd8\x5c\ \xf6\x12\x4e\x52\xd7\xf5\x29\xc7\x13\x10\xd7\x75\x1d\x2f\x69\xa5\ \xaa\x2a\x8b\x17\x2f\xa6\xb3\xbd\x83\xd7\xfe\xa9\x5c\xd9\x1b\x0a\ \xa0\x54\x28\x12\xe1\xba\x84\x42\xd2\xa7\x2a\xbc\x11\x18\x63\x3d\ \x7f\xc7\xb5\x91\xb4\x58\x2c\xf2\xe9\x4f\x7f\x9a\x54\x2a\xc5\x27\ \x3f\xf9\x49\x1e\x7a\xe8\x21\x4e\x5f\x79\x3a\xdd\xdd\xdd\xcc\x9b\ \x37\x8f\xdf\xfe\xf6\xb7\xac\x5c\xb9\x12\x5d\xd7\xc9\x66\xb3\xec\ \xd8\xb1\xc3\xaf\x9a\x3f\x2a\x40\x42\x63\x4f\x66\xdb\x36\xb2\xda\ \x65\xc6\x39\x35\x68\x08\xac\xb2\xc1\xeb\x23\x3f\x43\xb5\xc3\x24\ 
\x5a\x0d\x92\xfb\x2b\xc1\xcf\x98\xdb\x2b\x86\x87\x87\x8f\xc9\xaa\ \xc7\xe3\xf1\x96\xb9\x73\xe7\x1e\x08\x04\x02\x15\x4f\x4f\x51\x71\ \x84\x4d\xa9\x54\xe2\xdb\xdf\xfe\x36\x4d\x4d\x4d\x24\x12\x09\x7e\ \xf7\xbb\xdf\xf9\x05\x04\x00\x23\x23\x23\x4c\x9d\x3a\x95\x64\x32\ \xc9\xc8\xc8\x08\xaa\xaa\x92\x4c\x26\x09\x85\x42\x94\xcb\x65\x02\ \x81\xc0\x71\x05\x24\x14\x0a\xb1\x63\xc7\x0e\x66\xcf\x9e\xcd\x0b\ \x2f\xbc\xe0\xf7\xd0\x8c\x8e\x8e\xfa\x95\x90\x83\x83\x83\xc4\xe3\ \x71\xbf\x78\xf0\x9d\x58\x8c\x77\xb4\x21\x9e\x1b\x5b\x9d\x83\x0d\ \x77\x64\xdf\x32\xaa\xa8\xa2\x46\x82\x11\x7d\x82\x18\x1f\x87\x9d\ \x17\x98\x3d\x7b\x36\xb6\x6d\xa3\x0a\x95\x56\x75\x3f\x67\xdd\x10\ \x64\xfd\x7f\x38\xbe\x2b\x3c\x6f\xde\x3c\x1e\x7b\xec\x31\x9a\x9b\ \x9b\xa9\xad\xad\x65\xde\xbc\x79\xdc\x73\xcf\x3d\x68\x9a\xc6\xc2\ \x85\x0b\xd1\x75\x9d\x5f\xff\xfa\xd7\x7e\x56\xd0\xab\x5e\x3f\x9e\ \x80\x84\xc3\x61\xbe\xfa\xd5\xaf\x4e\x50\x43\xad\xad\xad\x00\xec\ \xdf\xbf\xdf\xa7\xe8\xc7\xab\xde\x63\x02\x44\x4a\xc9\x3f\xfe\xe3\ \x3f\x32\x77\xee\x5c\x6a\x6b\x6b\x79\xe2\x89\x27\x38\x78\xf0\x20\ \x9a\xa6\xf9\x85\xd4\x73\xe7\xce\xa5\xbd\xbd\x9d\x44\x22\x41\x73\ \x73\x33\x07\x0e\x1c\x38\x2e\xee\xae\xd7\x68\xa3\x09\x95\x4c\x3c\ \x03\x72\x32\x91\x48\x84\xaf\x7f\xfd\xeb\x9c\x7a\xea\xa9\x7e\xed\ \x97\xc7\x24\xe8\xba\x4e\xb1\x58\x64\xd7\xae\x5d\x7e\x4f\xbc\xe7\ \xf7\x8f\xa7\x2c\x8e\x17\x20\xf1\x78\xfc\x1b\x5e\x40\x08\x20\x15\ \x89\xa6\x09\x5c\x17\xa2\xa1\x28\x05\x23\x57\x69\x10\x12\xe0\xda\ \x87\x18\xdf\x77\xba\xbe\xf6\x4e\x5c\xcd\xfc\xf9\xf3\xc9\xe5\x72\ \x5c\x7a\xe9\xa5\xfc\xfe\xf7\xbf\x67\xce\x9c\x39\xd4\xd7\xd7\x33\ \x73\xe6\x4c\x1e\x7a\xe8\x21\x74\x5d\x67\xd2\xa4\x49\x78\xcd\xa0\ \xc7\xc3\xcb\x1a\x3f\xa1\x47\x52\x29\x42\x28\x17\x25\xf9\xb4\x41\ \x44\x53\x59\xbb\xb6\x52\xdd\xa9\xab\x41\x14\xad\x52\x23\xe6\x35\ \x00\x79\x76\xc2\x8b\xc8\xbd\xde\x72\x2f\x4d\x6b\x59\xd6\xa4\x50\ \x28\x74\x9d\x10\x22\x00\x88\xba\xba\xba\x57\xfb\xfa\xfa\xba\x8e\ \xc2\xae\x4d\xaf\xaf\xaf\xaf\xd8\x04\x45\xa0\x9d\xaf\x73\xe9\xb5\ 
\x93\x28\x96\x0d\x2e\x3b\xf1\x66\xb6\x8c\x3e\x45\x62\x28\x49\xae\ \xdf\x61\xc3\x37\x93\x3e\xcb\x1c\x8d\x46\x87\x8f\x1a\x90\x60\x30\ \xc8\xbd\xf7\xde\xcb\xe7\x3e\xf7\x39\x3e\xf6\xb1\x8f\xd1\xdf\xdf\ \xef\x8b\xbd\x37\xdb\x24\x95\x4a\x61\x59\x16\xa5\x52\xc9\xcf\x85\ \xc8\xb1\xc8\xf0\xad\x0d\x34\x47\x0a\x88\x61\x18\x38\x28\xa8\xaa\ \x42\x6f\xa7\xc1\xdd\xf7\x7e\x95\x6f\x3c\xfb\x2f\x7c\xe1\xfc\x2f\ \x13\x8f\xc4\xf9\x9f\xad\x5f\xa4\xfd\x37\x39\x8c\x11\xc5\xaf\xec\ \x38\xe1\x84\x13\x68\x6c\x6c\x64\xfb\xf6\xed\xd4\x35\x55\x71\xc2\ \xcc\x46\x96\x4f\x5a\x86\xea\xea\x15\x07\x40\x30\x47\x22\x1f\xf3\ \xd2\xd0\x3f\xfc\xe1\x0f\xff\x06\xe8\x3a\x8a\x7b\x94\xe3\x55\xb4\ \x18\x81\xe1\x5d\x65\x12\x7b\x2d\xbe\xfa\xdd\x7b\x49\xee\xb3\x70\ \x4d\x89\xa2\x0b\x14\x4d\xf1\x35\x8e\x10\x42\x39\x26\x1b\x92\x48\ \x24\xb8\xfd\xf6\xdb\x0f\xeb\x26\xbd\x1e\x43\x21\xc4\xa2\xa6\x43\ \x9e\x9f\xf6\x00\x00\x16\x56\x49\x44\x41\x54\xa6\xa6\x53\x06\x06\ \x06\xb6\x1d\x43\xe0\x55\x01\x5d\x2a\xa8\x42\x05\x14\x96\x2e\x3b\ \x85\x86\xb6\x2a\x96\xaf\x38\x8d\x29\xb5\x4d\xac\x29\xa9\x54\x6d\ \x8c\xa0\xe4\x85\xaf\xd3\x67\xcc\x98\xe1\x79\x88\x4c\x3a\xdd\xa5\ \xf9\xec\x34\xd2\x4d\xe3\x00\xc6\x40\x00\xa3\x2d\x42\xb9\x33\x82\ \x10\x15\xde\x4d\x55\xd5\xa3\x76\x40\x54\x55\x65\xe1\xc2\x85\x9c\ \x74\xd2\x49\x6c\xd8\xb0\x81\x81\x35\x15\xa3\x1e\x70\x1c\x26\x37\ \xba\x7e\xac\xe6\x6d\x62\x6f\xb0\xdb\x51\x03\x32\x3c\x3c\xdc\xe5\ \xba\xee\x5c\xaf\x80\xa0\xe9\xac\x30\x91\x88\x82\xb4\x75\xde\x77\ \xd2\x2d\x3c\xb3\xef\x87\xd8\x18\xd8\x86\xe4\xc0\xef\x8b\x7e\x0a\ \xb5\xaa\xaa\x2a\x52\x2c\x16\xe3\x47\xf2\x70\x53\xa6\x4c\x79\x5f\ \xb1\x58\xbc\x10\x70\xa5\x94\xf1\xce\xce\xce\x8a\xee\x97\x82\xb2\ \x61\x32\xd0\x53\xe2\xab\x89\x6f\xb2\xa7\x6d\x98\x3b\xb6\xfd\x2b\ \xb1\x50\x35\x1b\x0e\x1e\xc4\xee\x8b\xa2\x58\x41\x7f\x07\xb6\xb6\ \xb6\xfa\x6d\x69\x81\x62\x33\x77\x5e\xf4\x23\x36\xf6\xfe\x96\x1f\ \xbc\xf8\x9f\xcc\x9c\x51\xc7\x50\x39\x4f\x76\x77\xa5\xb6\x57\xd7\ \xf5\x63\xa2\xe2\x83\xc1\x20\xdf\xfc\xe6\x37\x89\xc5\x62\x34\x35\ 
\x35\xb1\x63\xc7\x0e\xce\x3d\xf7\x5c\xd6\xac\x59\xc3\xca\x95\x2b\ \x69\x6b\x6b\x43\x08\xc1\xac\x59\xb3\xc8\x64\x32\xbc\xf2\xca\x2b\ \xc7\xc6\xf6\x5a\x96\xd5\x16\x0c\x06\x2f\xf6\xf2\xd5\xd3\x2f\x09\ \x13\x8f\x6b\xe4\xfa\x6d\x7e\x33\x70\x1f\xb6\x22\x28\x0c\x38\xa4\ \xbb\x0f\x19\x4d\x5d\xd7\x7d\x37\xf4\x08\x49\xc4\x5b\xaf\xbb\xee\ \xba\xf3\xa5\x94\xb8\x48\x32\x55\xa3\x34\x4d\x0f\xe0\x66\xbc\xfa\ \x2b\x9d\x74\x22\xc7\xa9\x0d\xe7\x60\x16\x6d\x92\xc5\x24\xa7\x37\ \x5d\xc9\xb3\x5b\x9f\x21\x18\x74\x7c\x09\x55\x14\x05\xe9\x4a\xa4\ \x25\xb8\x76\xf9\x87\xf8\xef\x0d\xff\xca\xf2\xd8\x87\x58\xd9\x70\ \x23\x2b\x17\x2c\xe0\x3b\xad\x5f\x99\xd0\xfb\x7e\xb4\x80\x48\x29\ \x9d\xf1\x05\xe5\x9e\x93\xe3\xb9\xd9\x5e\xc7\x59\x77\x77\x37\x81\ \x40\x80\xda\xda\x5a\x7f\x9c\xd3\xb1\x90\x8b\x0a\x40\x75\x75\x35\ \xb6\x6d\xb3\xe3\x6b\x45\xac\x9c\x44\x68\x20\x14\x10\xaa\x57\x81\ \xa1\xa2\x69\x87\x22\xe3\xa3\x1c\xab\xa4\x27\x93\x49\x5c\xd7\x25\ \x63\x17\x99\x73\x45\x06\x5b\xad\xa8\x7d\x29\x6c\x74\x4d\x85\xa4\ \x82\x71\x20\x4a\xfe\x8d\xb1\xe1\x30\x63\xde\xd4\xf8\x12\xa0\x48\ \x38\x4c\x32\x98\xe3\xcc\x8f\x45\x38\xff\xfc\x0b\xf8\xd1\xce\xe7\ \x18\x4a\x8d\xb0\xb7\x7f\x27\xc5\x9a\xed\xa8\x54\x6a\x6c\x85\x7a\ \x74\xe3\x93\x00\x1a\x1b\x1b\x4f\x94\x52\xae\x08\x85\x42\x5c\x73\ \xcd\x35\xc4\x62\x31\x7f\x13\x6e\xdd\xba\x15\x45\x51\xd8\xb3\x67\ \x8f\x5f\xf4\x31\x30\x30\xe0\x17\x18\x1e\x2b\x20\x9c\x7c\xf2\xc9\ \x7c\xe9\x4b\x5f\xc2\xb6\x6d\x1e\x7e\xf8\x61\x0e\x1c\x38\x30\x61\ \x8e\x47\x3c\x1e\x67\x70\x70\xd0\x8f\x46\xbd\xc1\x34\x47\x9b\x81\ \x73\x5d\x17\xcb\xb1\x51\x84\x42\xba\x27\xcc\x23\x7f\xff\x3b\xfa\ \x0b\xfb\xb9\xe3\xb7\x7f\xc7\xec\xc6\x79\x74\x96\xdb\x30\xca\x21\ \x14\xfd\xd0\x44\x37\xdf\xf5\xf4\x8a\xf6\x14\x85\x40\x40\xc7\xb2\ \x0c\x34\x25\x88\xaa\x68\x04\xb4\x20\x21\xad\x42\x85\x1b\xa6\x81\ \xa2\x8a\xa3\x9e\xc7\x55\x2a\x95\x5a\x9a\x9b\x9b\xc3\x85\x42\x81\ \xaa\xaa\x2a\x9f\x32\x1a\xbf\xfe\x58\xb1\xa0\x17\x30\x1e\x35\x20\ \x2b\x56\xac\x60\xf7\xee\xdd\xfc\xf5\x5f\xff\x35\x67\x9c\x71\x06\ 
\x42\x08\x22\x91\x4a\x1b\xb1\x97\x0e\x2d\x16\x8b\x0c\x0f\x0f\xd3\ \xdc\xdc\xcc\xe0\xe0\xa0\x6f\x97\x8f\xf4\x41\xbd\xb9\x8d\x16\x15\ \x55\xb0\x7a\xd5\xbf\xf2\xf3\x3d\x5f\x27\x79\xa0\x9a\x05\xda\x55\ \xdc\x72\xc6\xdf\xf3\x99\x9e\xf7\x57\x5c\x5c\xa7\xa2\x72\x82\xc1\ \xa0\x3f\xdb\xd1\x75\x5d\x0c\xd3\xc4\x2a\x18\x64\xd7\x2c\xe6\x8b\ \xbf\xfa\x22\x8e\xb4\xd9\xcb\xe3\x94\x0d\x8b\xcc\xcb\x02\xe4\x0c\ \x66\xcf\x96\x20\x2a\xb9\x8a\xcb\x2f\xbf\xfc\x7f\x84\x10\xdf\x07\ \xd8\xbc\x79\x73\xc7\xde\xbd\x7b\x97\x1c\xae\x27\x98\x4e\xa7\x2b\ \x1d\xc4\x01\xb8\xe4\xdf\x1a\x50\x54\x01\xa8\x34\xd7\x9f\x40\x7f\ \xea\x00\xa3\xfb\x4c\x0e\xbc\x58\x24\xd5\x5e\x91\xe0\xaa\xaa\x2a\ \xf2\xf9\xfc\xfd\xc7\x14\x18\xfe\xfc\xe7\x3f\xe7\x03\x1f\xf8\x00\ \x77\xdf\x7d\x37\xeb\xd7\xaf\xf7\xfd\xfd\x7c\x3e\xef\xd3\x25\xa9\ \x54\x6a\x42\xaa\x54\x4a\x49\x63\x63\xe3\x97\x14\x45\xb9\xa5\xaf\ \xaf\xef\xc0\x91\x4a\x88\x83\x83\x22\x55\xa6\xd4\x4d\x61\xff\x40\ \x91\xa2\x05\x1d\xc3\xfb\x79\x72\xef\x7d\xd8\xb6\x85\x65\x55\x74\ \xa9\x69\x9a\x04\x83\x41\x4e\x3d\xf5\x54\xf6\xee\xdd\x4b\x22\x91\ \x20\x9d\x4e\x63\xba\x26\x56\xd9\xc6\x30\x2a\x53\x91\x76\xd7\x6e\ \xe6\x9c\x6b\x63\x38\xb9\xca\x1c\x45\x37\x13\xc4\xc9\x6a\x38\x19\ \x0d\x6b\x20\x18\x40\xfa\x33\x15\x83\x47\x60\x43\xd0\xf5\x4a\x3d\ \x98\xee\x2a\x0c\x6c\x36\x29\x0e\x3b\x14\x86\x1c\x5e\x1d\x48\x50\ \x4e\xb9\x95\xba\x35\x4d\xf8\xb5\x08\x63\x36\xcb\x3e\x26\x1b\xe2\ \xba\x2e\x0f\x3c\xf0\xc0\xa1\x1b\xf1\x02\x4d\x01\x99\x74\x06\xcf\ \xad\x16\xa2\xc2\xdb\x78\x63\xfd\xe6\xcc\x99\x73\x51\x26\x93\xa9\ \x07\xfe\x28\x20\xf3\xe6\xcd\x7b\x8f\xae\xeb\x42\x51\x14\x67\x78\ \x78\x38\x56\x2e\x55\x26\x7b\x5a\x8a\x4d\xbe\x3f\xc0\xae\x5d\xbb\ \x48\xe4\x0a\xa4\x07\x6c\xb2\x89\x2c\xa3\x07\x8b\xe4\x07\x2d\x4c\ \x4b\xa2\xc8\x0a\x80\x86\x61\xd0\xdc\xdc\xcc\x8e\x1d\x3b\x50\x94\ \x4a\xeb\xb4\x23\x0f\x4d\x1e\x32\x5c\x8b\x70\x2d\x28\x8e\xc6\xb4\ \xfa\x05\xc4\x67\x54\xb1\x73\x70\x03\x75\x91\xc9\x64\xe5\x10\x03\ \x0f\x36\xe0\x14\x0e\xa9\xbc\x23\x49\x2f\x57\x55\x55\xb1\x7a\xf5\ 
\x6a\xf6\xec\xd9\xc3\x81\x7d\x07\x08\x18\x65\x54\xc7\x21\x56\xef\ \x62\xd7\xd8\xfe\x98\x0e\x2f\x6e\xf3\xbc\xbf\x63\x89\x43\xfe\xfb\ \xe0\xc1\x83\x17\xdb\xb6\x3d\x53\x08\x41\x31\x26\x39\xf5\xf2\x6a\ \xf4\x80\xa0\x30\x52\xc5\xfb\xcf\xb8\x9e\x67\xf6\xfd\x00\x4d\xd5\ \x19\xda\x6e\x30\xb8\xb5\x84\x50\x84\x3f\xed\xe7\x1d\x1e\xb0\xe1\ \xb4\xd3\x4e\xdb\xac\xaa\xaa\x10\x42\x20\x91\x50\x5b\x82\x6c\x10\ \xe1\xa8\xb0\x11\x9e\xd8\xf8\xd4\x21\xde\x88\x1a\x76\x1c\x18\x64\ \xf4\x60\x0c\x47\xe6\xb0\x4d\xe9\x1b\x73\xaf\x16\xcc\x9b\xec\xe6\ \x9a\xae\xdf\x6a\x6d\x49\x07\x45\x55\xe8\xde\x6f\xf3\x95\x5b\xbf\ \xc2\x0f\xd6\x7e\x97\x4b\x5a\x6e\xe1\x8a\x25\x1f\xe4\x53\xcf\x5f\ \x80\x65\x5a\xd8\xa6\x73\x44\x80\x78\x69\xd9\x6f\x7f\xfb\xdb\x24\ \x12\x09\x6e\xbe\xf9\x66\x9e\x7b\xee\x39\xbf\x72\xb1\xbd\xbd\x9d\ \x65\xcb\x96\x11\x0e\x87\xd9\xb6\x6d\x1b\xdb\xb6\x6d\x63\xe9\xd2\ \xa5\x6c\xd8\xb0\xe1\x1d\xbf\xfb\x4f\x02\x92\xcb\xe5\xb6\x36\x37\ \x37\x0f\x1b\x86\x31\x53\x08\x81\xa1\x3b\xb4\x9c\x57\x8d\x9d\x92\ \xa4\x3b\x0c\x1e\xfd\xed\x03\x14\x07\x04\x56\xd1\xa2\x9c\x92\x84\ \xc2\x21\xdf\xf5\x7d\xa7\xf6\x30\x40\x2f\x97\xcb\xc2\xb6\x6d\x10\ \xb0\x35\xb2\x8b\xf7\x9e\x5f\x87\x2b\x5d\x1c\xc3\x45\xb1\x83\x48\ \x4b\xc1\xc9\xab\xe4\x5e\xab\xc1\x29\xaa\x3e\xb1\x98\x4c\x26\xfd\ \xe0\xd1\xa3\x47\x5c\xd7\xa5\xb3\xb3\x53\xbe\xf1\xc6\x1b\xe2\xd1\ \x47\x1f\x65\xdb\xb6\x6d\x15\xc7\x43\x3a\xa8\x9a\xc2\x09\xd5\x4b\ \x38\x98\xdf\xcd\x48\xf9\x20\xff\xeb\xc4\xdb\x09\x6a\x21\x42\xa1\ \x20\x96\x63\x63\x99\xbe\x1a\x7a\x47\x44\x6a\x6a\x6a\x3e\x71\xde\ \x79\xe7\x7d\xa3\xb5\xb5\x95\x47\x1e\x79\x84\x2f\x7f\xf9\xcb\xf4\ \xf5\xf5\xb1\x77\xef\x5e\x72\xb9\x9c\x4f\xd3\xac\x5d\xbb\x96\x99\ \x33\x67\xd2\xd3\xd3\x83\xe3\x38\x8c\x8e\x8e\x1e\x9f\xc1\x01\x95\ \x0d\x21\x38\xe9\xa4\x93\xe8\xee\xea\x66\xe3\xe7\xca\x48\x7b\xcc\ \x21\x16\x1a\x08\x15\xe9\x38\x84\x42\xba\xff\x72\xbc\x12\xfc\x77\ \xba\x01\xbf\x3c\x07\x49\xa0\xae\xd2\xd9\x74\xd6\xa4\x0f\xd3\x32\ \xb9\x85\xa7\x3b\xbe\xc7\xcc\xaa\x53\xd8\x9a\xfa\x0d\xf6\xc6\x30\ 
\xb6\x75\xa8\x8a\x64\x7c\x1b\x44\xa1\x50\xb8\x63\xff\xfe\xfd\xff\ \x3e\x69\xd2\xa4\x81\x91\x91\x91\xc6\xb5\x6b\xd7\xb2\x68\xd1\x22\ \x36\x6e\xdc\x58\xe9\xc8\x52\x5d\x92\xbf\x2f\xd3\x3b\x75\x90\x7b\ \x36\x3f\x40\x32\x97\xe0\x0b\xbf\xfb\x12\xaa\xa2\xd2\x9d\x05\x2b\ \x7f\x48\x32\x26\x4f\x9e\x7c\xc2\xb4\x69\xd3\x5e\x05\x54\xc7\x71\ \xd8\xb9\x73\xe7\x75\xc9\x64\xb2\xe7\x2d\x6c\x44\xad\xa2\x28\x9a\ \x94\x92\x4d\x9b\x36\x71\xd1\x45\x17\xf9\xe4\xe6\x5b\xd7\xfe\xfd\ \xfb\x7d\x82\x73\xeb\xd6\xad\xc4\xe3\xf1\x63\x07\xc4\xb2\x2c\x6e\ \xbd\xf5\x56\xce\x3b\xef\x3c\x0c\xc3\xe0\xb1\xc7\x1e\xf3\x07\x41\ \x7a\x9e\xd6\x9c\x39\x73\xd8\xbb\x77\xaf\x4f\x0d\x0c\x0c\x0c\x78\ \x37\xb9\x70\xda\xb4\x69\xc9\xde\xde\xde\xb7\xe3\x8a\x1c\x8f\xaf\ \x92\x02\x14\x45\xd0\xbd\xcf\xe6\xdb\x7f\x75\x13\xff\xf0\xe3\x2b\ \xf8\xe2\xc5\xdf\x27\x1e\x6e\x60\x74\x7b\x27\x83\xd6\x08\x86\xe1\ \xf8\x80\x7b\xc4\xe1\xb8\x2a\x49\xa9\xaa\x2a\xb7\xde\x7a\x2b\x9f\ \xff\xfc\xe7\xb9\xff\xfe\xfb\xfd\x09\x3c\xb6\x70\xd1\x8a\x1a\xb1\ \x13\xaa\x28\x65\x2c\x0a\x96\x60\x64\xd9\x56\xe2\xb1\x10\x93\x46\ \xc2\xd8\xf5\x41\xdc\xb2\x8a\x34\x14\xa4\xa9\x84\x91\xe2\x4c\x2f\ \xf7\xfd\xe6\x9b\x6f\x86\xde\x4e\x5d\x8d\x4f\x5b\x2b\xaa\x4a\xb8\ \x4a\x23\x10\x14\x87\x3a\xa6\x04\xd8\x25\x17\xbb\xa4\xf8\x36\x57\ \x51\x14\x4a\xa5\x92\x65\x9a\x66\xe2\x98\x00\x71\x1c\x87\xd9\xb3\ \x67\x93\x4c\x26\x39\xe5\x94\x53\x08\x85\x42\x9c\x72\xca\x29\x44\ \xa3\x51\xa2\xd1\x28\x3f\xf9\xc9\x4f\xa8\xa9\xa9\xe1\xcc\x33\xcf\ \x24\x9d\x4e\xb3\x6e\xdd\x3a\x02\x81\x00\xb6\x6d\xf3\x9e\xf7\xbc\ \xe7\xe1\xf6\xf6\xf6\x1f\xf7\xf6\xf6\xde\x34\x46\x36\x36\x01\xab\ \x01\xd3\x75\xdd\xea\xde\xde\xde\x4a\x01\x32\x60\x5a\x26\xf9\x6c\ \x8c\x3b\xef\xfc\x1a\x3d\xed\x59\x1e\x1f\x7a\x86\x4c\x21\xc3\x2b\ \x7d\x6f\x52\xe8\x0a\x13\xd3\x6b\x3d\xb6\x94\xaa\xaa\x2a\x72\xb9\ \xdc\x84\xd1\x17\x42\x08\xda\xda\xda\xb8\xe9\xa6\x9b\xc8\xe5\x72\ \x54\x55\x55\x55\x66\x09\x0b\x17\xdc\xb1\x39\xed\xc0\x90\x36\xc2\ \xc2\x85\x01\x6c\xcb\x84\xa6\x89\x23\x93\xb2\x9b\xc7\x2a\x1e\x85\ 
\xf4\x0b\x23\xfe\x18\xf1\xd9\xd0\xd0\x30\xd6\x82\x00\xb5\xe7\x04\ \x59\x7c\x5a\x8c\xa1\xed\x65\x46\xf7\x59\x8c\xee\x36\x71\xed\xb7\ \x75\x04\x0e\x0c\x0e\x0e\xde\x77\x4c\x80\x44\x22\x11\xee\xbc\xf3\ \x4e\xce\x3c\xf3\x4c\xee\xb9\xe7\x1e\x8a\xc5\xe2\x84\xec\x97\x69\ \x9a\xac\x5d\xbb\x16\xdb\xae\x64\xf4\x82\xc1\xa0\xff\xef\xde\xf0\ \x2e\x6f\x8d\x8c\x8c\xac\xba\xf2\xca\x2b\x3f\xef\x51\xf4\x19\xb3\ \x28\x6b\x67\x3b\x42\x26\xc3\xb8\xc9\x00\xd8\x92\x3d\x6f\x76\x32\ \x59\x59\xc0\xa6\x8d\x95\xe9\xdc\xd3\xb4\xe5\x74\xba\x1d\x7e\x8e\ \x3a\x12\x89\x10\x0c\x06\xfd\x91\x47\xe3\xa8\x97\x7b\x86\x87\x87\ \xab\x84\x10\x6e\xb9\x5c\x5e\x2e\x84\xb8\xb8\x5c\x2e\xe3\x28\x2e\ \xa1\x50\x78\xac\xe2\x51\x80\x2a\x31\x4d\x87\x0f\xcc\xbc\x93\xee\ \xec\x2e\x4e\x9a\xb5\x98\xff\x7a\xee\x3e\xec\xba\x76\x1c\x27\x82\ \x65\x5a\xa0\xc8\xb7\x8d\xe2\x03\x81\xc0\x6c\x29\x65\x9d\x37\x48\ \x61\xd1\xa2\x45\x04\x02\x01\xfa\x5e\xeb\x65\xe7\xcb\x12\x49\x00\ \x29\x75\x62\x91\x90\xaf\x52\xbd\x5f\x5e\x61\xc8\x31\xab\x2c\xdb\ \xb6\x83\x8a\xa2\xf0\xda\x6b\xaf\x55\xf4\xa1\x64\xc2\x64\x05\x21\ \xc0\xb6\x1c\x84\x52\x31\xe6\x5e\x2e\xa2\x50\x28\x4c\x18\x2c\xec\ \x3d\x93\x33\x6e\x50\x72\x76\x61\xa7\x98\x79\x46\x14\xcb\x35\x71\ \x1c\x17\x25\x17\xab\xc4\x07\xde\xec\xdc\x31\xf7\xd2\x9b\x17\x2c\ \xa5\xf4\xe9\xfe\xfa\xfa\x7a\x12\x89\x43\xd2\x9f\x4c\x26\xbf\xe6\ \x19\xfb\xc9\x93\x27\x7f\x4c\xd3\xb4\x8b\xa5\x94\x08\x1b\xb0\x5d\ \xff\x50\x00\xcb\x34\x48\xec\x93\x54\x35\x57\xf3\xfc\x9b\x3f\xc6\ \x3a\xf0\x31\x16\x28\x67\x12\x53\x56\xf0\x58\xef\x4f\xc9\x8d\x06\ \x40\x48\x6f\xf0\xd8\x29\x81\x40\xa0\x5e\x55\x55\x51\x2a\x95\x76\ \x5f\x72\xc9\x25\xed\x3d\x3d\x3d\x58\x96\x25\x6b\x6b\x6b\xc5\x3f\ \xff\xf3\x3f\xb3\x78\xf1\x62\xee\xbd\xf7\x5e\xf6\xed\xdb\xc7\xe2\ \xc5\x8b\x79\xf3\xcd\x37\x69\x69\x69\xa1\xbf\xbf\x9f\x74\x3a\xcd\ \x94\x29\x53\x90\x52\xb2\x7d\xfb\x76\xd2\xe9\xf4\xb1\x03\x72\xe0\ \xc0\x81\xbf\xcb\xe5\x72\x9b\xd4\x31\x7e\x22\x1f\x72\x39\xff\x96\ \x3a\xca\x59\x07\xbb\x24\x29\xa5\x5c\xd2\x5d\x16\x99\x03\x36\xee\ 
\x38\xa0\x26\x4d\x9a\xe4\xd1\x2b\x55\xb1\x58\x6c\x46\x3e\x9f\x3f\ \x00\xb8\xe3\x77\x4a\x38\xa2\x61\x14\x25\x97\xcf\xfc\x27\x26\xd7\ \xd5\xf2\xf3\x3d\xdf\x20\x56\xd7\x48\xa2\xf9\x00\xc9\xf5\x21\x5f\ \xf7\x3a\x8e\x43\x3a\x9d\x46\x4a\x49\x38\x1c\xc6\xb2\x2c\x06\x06\ \x06\xbc\x06\x98\xb7\xf3\x1c\x54\x4d\xd3\x58\xbd\x7a\x35\xf9\x7c\ \x9e\x1f\xff\xf8\xc7\x7e\x02\xad\xae\x5c\x45\x68\xeb\x1c\xee\xda\ \x7c\x17\xaa\x32\x87\xdf\x06\x9e\x20\xa4\xeb\x48\x43\x65\x8a\xb3\ \x98\xa6\x13\xbd\x9d\x26\xb9\xfa\xea\xab\x7f\xee\x55\x8e\xac\x59\ \xb3\x66\xaa\x57\x13\x6c\xdb\xb6\x37\x8f\xd7\x1f\x36\xe3\xba\x2e\ \x93\x27\x4f\x66\xfa\xf4\xe9\x94\xcb\x65\x66\xcc\x98\xc1\xba\x75\ \xeb\xfc\x64\xde\xce\x9d\x3b\x8f\x8f\x97\x65\x59\xd6\xd6\x40\x20\ \x20\x3d\x35\x13\x01\x76\xfe\xc0\x7c\x4b\x6e\x5d\x27\xa4\xeb\xa0\ \x1f\x8a\x62\x35\x4d\xc3\xb2\x2c\x1a\x1b\x1b\xff\x7a\xc9\x92\x25\ \x97\xbd\xf8\xe2\x8b\x5f\xb5\x2c\xeb\xe4\xbe\xbe\xbe\x43\x63\x9d\ \xde\x28\xa0\xb5\x57\xb3\xeb\xc4\x36\xbe\xb1\xf9\x19\x66\x4e\x99\ \x89\x1e\x9f\x42\xdb\xe0\x4e\xf2\x43\xae\x5f\x1c\xf0\x16\xd1\x2f\ \x77\x74\x74\x3c\x34\x96\xf1\x33\x34\x4d\x7b\xe5\xed\x72\x29\xef\ \x7b\xdf\xfb\xa8\xaf\xaf\xe7\xb6\xdb\x6e\xe3\xd1\x47\x1f\xf5\x53\ \xba\xae\x74\x29\x9b\x15\xb5\xbb\x21\xdd\xca\x7b\x6f\x0d\x4f\x18\ \x33\x65\x0d\x07\x30\x7b\xc2\x98\x07\xc3\x38\x39\x8d\xf1\xb3\x54\ \xbc\x2a\x7a\x2f\xc6\xb9\xed\xb6\xdb\x08\x06\x83\x94\x4a\x25\xa4\ \x94\x7c\xff\xfb\xdf\x9f\xd0\x7f\x22\xa5\x64\xf7\xee\xdd\xb4\xb6\ \xb6\x1e\x36\x89\x79\x58\xed\xa7\xae\xeb\xf2\xd1\x8f\x7e\x94\x73\ \xce\x39\x87\x8e\x8e\x0e\x76\xee\xdc\x49\x67\x67\xa7\x3f\xdb\xc3\ \x34\x4d\x4e\x3f\xfd\x74\xf6\xef\xdf\xcf\x9e\x3d\x7b\x98\x3d\x7b\ \x36\x1d\x1d\x1d\x7e\x26\xb1\x5c\x2e\xeb\xe7\x9e\x7b\xee\xbf\x2a\ \x42\x61\x58\x1b\x96\xd3\x4f\x12\xc2\x1c\x0c\x63\x0f\x07\x71\x53\ \x3a\x3b\x36\xee\xa5\x45\x3d\x11\x67\xc4\xa5\x77\x78\x90\x26\x6d\ \x29\xeb\x13\xaf\xfb\xbd\xe0\x5e\x71\xc0\x18\x91\x98\x4f\x24\x12\ \x1f\xfd\x93\x0f\xa5\x69\xbc\xf2\xca\x2b\xac\x59\xb3\x86\x91\x91\ 
\x11\x8a\xc5\xa2\x3f\x4f\xd7\x2b\x0b\x02\x28\x0a\x93\x68\x6d\x0c\ \x92\xd3\xb8\xfd\x92\x2f\xf1\x8b\xd6\xff\x64\x24\x32\x82\x3d\x37\ \x4b\x62\xb3\x45\xea\xe5\x2a\x54\xad\xd2\xb2\x7d\xd6\x59\x67\xad\ \xf3\x00\x29\x95\x4a\x84\xc3\x95\xf3\x4e\x8a\xb9\x4a\x39\xa9\x50\ \x15\x94\x3f\x5e\xd6\x2c\x01\x91\x4a\xa5\xa2\xc7\x05\x10\x29\x25\ \xd7\x5c\x73\x0d\x5b\xb6\x6c\x61\xd5\xaa\x55\x24\x12\x09\x16\x2d\ \x5a\xc4\xf6\xed\xdb\x69\x68\x68\xa0\xb3\xb3\x93\x9a\x9a\x1a\x16\ \x2e\x5c\x48\x20\x10\xa0\xbd\xbd\xdd\x1f\x00\xe9\xbd\x80\xfe\xfe\ \x7e\x1c\x29\xa9\xbe\x3e\x2f\xc2\x8b\x63\x84\x97\x66\x41\x91\x48\ \x53\xc1\x4e\x6b\xb8\x59\x9d\xd4\xf3\xb5\x38\x96\x8b\xe6\x1e\xaa\ \x2e\xf1\x06\xa7\xd5\xd5\xd5\x4d\x98\x35\xff\x4e\x91\x74\xb1\x58\ \xe4\x9c\x73\xce\xf1\xc1\xf4\xf4\x77\x5d\x5d\x9d\xff\xbd\x4d\x3d\ \x2a\x7b\xbf\x5c\x87\xed\x66\xf8\x9b\xff\xfc\x18\x02\x9d\x40\x40\ \x45\x1a\x51\x14\x5d\x22\x74\x13\xd5\x55\xbc\xd1\xb8\xb3\x0c\xc3\ \xc0\xb6\x6d\xaa\xab\xab\x2b\xc9\x27\x04\xb5\x4b\x82\xe8\x08\xf2\ \x03\x0e\x46\xc6\x9d\x30\x9c\xc8\x93\x0a\x4d\xd3\x44\x36\x9b\x7d\ \xc3\xb6\xed\xf7\x1e\x17\x40\x74\x5d\xe7\xfa\xeb\xaf\xe7\xae\xbb\ \xee\xe2\xeb\x5f\xff\x3a\xdd\xdd\xdd\x13\x4e\xd2\x29\x16\x8b\x74\ \x75\x75\x91\xcb\xe5\xfc\x29\x9d\xaa\xaa\x92\xcd\x66\xa9\xad\xad\ \xc5\x75\x5d\x7f\xc8\xc0\xc0\x03\x36\x6f\x28\xde\xb0\x49\x65\x1c\ \x65\x61\x52\xb4\xda\x68\x69\x69\x99\x90\xb7\xf7\x26\xea\x7c\xf4\ \xa3\x1f\xe5\xfe\xfb\xef\x3f\xac\x32\x23\xc3\x30\x72\xb6\x6d\x77\ \x7b\x75\x5d\x42\x88\xe9\xaa\xaa\x0a\x4f\xda\xfd\x89\xa8\x01\x8d\ \x9a\x49\x31\x2c\xcb\x62\xb4\x6c\x53\x7d\xe9\x10\xb5\x93\x74\x64\ \x49\xc3\x1c\x0a\x52\xec\x08\x40\x22\xea\x57\x4e\x7a\xf1\x47\x28\ \x14\xe2\x13\x9f\xf8\x04\xc9\x64\x92\xed\xdb\xb7\x57\x1a\x95\x6a\ \x2d\x64\xfc\x90\xf3\xe1\xfd\xee\x15\x5d\x94\x4a\xa5\x72\x22\x91\ \x48\x1d\x17\x40\xbc\xb9\x8a\x5f\xf8\xc2\x17\xfe\xe8\x67\xf2\xf9\ \xbc\x4f\xcd\x4b\x29\x19\x3b\xb1\xcc\x67\x81\x87\x86\x86\x2a\x85\ \xda\x93\xe3\xa8\xaa\x4a\x5b\x5b\x9b\x3f\xcb\x77\xf1\xe2\xc5\x63\ 
\xd4\x47\xd9\xcf\x71\xd8\xb6\xed\xe7\x0e\xaa\xaa\xaa\xfc\xf9\x86\ \x87\x03\x48\x3a\x9d\x7e\x70\xd3\xa6\x4d\x0f\x7a\x3f\x4f\x9f\x3e\ \xdd\x52\x55\x55\x3b\xf1\xc4\x13\xe9\xee\xee\xf6\x81\xf6\x5e\x98\ \x61\x18\x24\x7b\x46\x68\x5e\x7f\x12\xd9\x52\x89\x75\xeb\xd6\xa1\ \x69\x1a\xcb\x97\x2f\xa7\xa1\xa1\x81\x36\xa5\x7d\xc2\x09\x3a\x17\ \x5e\x78\x21\x93\x27\x4f\xe6\x33\x9f\xf9\x0c\x8f\x3c\xf2\x08\x2f\ \xbf\xfc\x32\x97\x5d\x76\x19\xbf\xff\xfd\xef\x99\x35\x6b\x16\xc3\ \xc3\xc3\x6c\xd8\xb0\x81\x58\x2c\xc6\xa9\xa7\x9e\xca\x73\xcf\x3d\ \x77\xd8\xa4\xe5\x61\x01\x92\x4a\xa5\xfe\x2a\x9b\xcd\x7e\xa3\xa6\ \xa6\x66\x51\x3e\x9f\xf7\xcb\xfb\xc7\x1b\xaa\xb7\x1a\x2d\xaf\xc8\ \xce\x03\xc5\xcb\x75\x54\x55\x55\x11\x08\x04\x68\x6a\x6a\x62\xe6\ \xcc\x99\x0c\x0c\x0c\x4c\x38\xbf\x6a\xfc\x31\x42\xe3\x8b\x93\xbd\ \xe1\x68\x47\x93\xfc\x92\x52\xb2\x6e\xdd\x3a\x86\x86\x86\x58\xbd\ \x7a\xb5\x1f\x4b\x79\x52\xe8\x79\x4a\xb6\x6d\x93\x4e\xa7\x39\xeb\ \xac\xb3\x88\xc5\x62\x24\x93\x49\x32\x99\xcc\xf8\x53\xd8\x30\x4d\ \x93\xbd\x7b\xf7\x72\xdb\x6d\xb7\x31\x32\x32\xc2\x8e\x1d\x3b\x70\ \x5d\x97\xed\xdb\xb7\xe3\xba\x2e\xfb\xf6\xed\xf3\x9f\xbd\x50\x28\ \x30\x38\x38\x78\x44\x2c\xf2\x61\x01\x92\x4c\x26\x7f\x5d\x57\x57\ \xf7\xcf\xcb\x97\x2f\x67\xf5\xea\xd5\x74\x77\x77\xb3\x79\xf3\x66\ \xba\xba\xba\x08\x04\x02\x7e\xe2\x7e\xea\xd4\xa9\xf4\xf5\xf5\x91\ \x4e\xa7\x99\x34\x69\x92\xcf\x7e\xfa\x25\x3d\xe3\x62\x10\xcb\xb2\ \xc8\xe7\xf3\x3e\x4d\x3e\xfe\x40\xb1\xf1\xb3\x73\xc7\x5e\x5c\x6a\ \x60\x60\xe0\xe1\xb1\xff\x37\x72\x34\x65\x45\x86\x61\xf8\x52\x9b\ \xcd\x66\xfd\xef\x1f\x3f\x74\xdf\x53\x4b\xa6\x69\xfa\x07\xd5\x78\ \xff\xe6\xfd\xbd\xe3\x38\x8c\x8c\x8c\xb0\x6a\xd5\x2a\x5f\x9d\x43\ \xa5\x9b\xca\x1f\xeb\x31\x6e\x83\xbe\xf1\xc6\x1b\x47\xd4\x42\x77\ \x24\xe7\x87\x88\x8f\x7f\xfc\xe3\x0c\x0e\x0e\x72\xed\xb5\xd7\x62\ \xdb\x36\xb3\x67\xcf\xa6\xaf\xaf\x0f\xdb\xb6\xa9\xab\xab\x43\x88\ \x4a\x32\xa6\xbd\xbd\x9d\x62\xb1\x48\x30\x18\xf4\x87\x24\x7b\xb6\ \xc0\x03\xc4\x2f\xf3\x19\x03\x61\xbc\x24\xa9\xaa\x4a\x20\x10\xa0\ 
\xa5\xa5\xc5\x9b\x3b\x35\xd2\xd9\xd9\xf9\x89\xf1\x31\xce\xf8\xa0\ \xf0\x70\x54\xee\xc5\x17\x5f\xcc\x45\x17\x5d\xc4\xf0\xf0\xb0\xaf\ \xb2\x42\xa1\x90\x9f\x8a\xf6\xd4\xd7\x78\x06\x60\xfc\xe1\x67\xde\ \xdc\xc9\xb1\x0d\x3a\x81\x4c\x1c\x5f\x72\xfb\x76\xd7\x96\x52\x76\ \x49\x29\xfb\x8e\x37\x20\x7c\xfa\xd3\x9f\xe6\xa1\x87\x1e\xe2\x67\ \x3f\xfb\x19\x2f\xbe\xf8\xe2\x84\xf3\x38\x0c\xc3\x20\x1a\x8d\xfa\ \x65\xf7\xde\x91\x0e\x9e\x6b\xec\xa9\xaf\xf1\xa7\xd5\x8c\x3f\xe9\ \xcd\x7b\x21\xc5\x62\xd1\xaf\x32\x9c\x3a\x75\x2a\xe9\x74\xfa\x0f\ \x28\x87\x23\x01\xc3\x53\x59\x4d\x4d\x4d\xec\xde\xbd\xdb\x57\xb7\ \x9e\x5a\x1c\x7f\x14\x85\xf7\x67\x4f\x5a\xc7\xd7\x54\x99\xa6\x89\ \xae\xeb\xdc\x71\xc7\x1d\x5c\x70\xc1\x05\xfc\xfb\xbf\xff\xbb\x97\ \x5f\xf7\x59\x6b\x4f\xda\xab\xaa\xaa\xfc\x41\x3d\x52\x4a\x76\xed\ \xda\x35\xeb\xb8\x4b\x88\xe7\x3e\x7e\xf0\x83\x1f\x9c\xd0\x53\x38\ \xde\x76\x78\x93\x9d\x3d\xfd\x59\x55\x55\xe5\x9f\xc4\xe3\xed\x4a\ \x8f\x23\xf2\xa4\xc5\xf3\x46\x3c\x8a\xc4\xa3\xab\xa5\x94\x13\x06\ \x80\x1d\xcb\x4a\xa7\xd3\xb3\x36\x6f\xde\x8c\x65\x59\xf9\xe9\xd3\ \xa7\x77\xac\x5a\xb5\xaa\xf6\x53\x9f\xfa\x14\x37\xdf\x7c\x33\xf9\ \x7c\xde\xef\xb5\xf7\xdc\x5a\x6f\x1c\xdf\x78\x9a\x3f\x93\xc9\x30\ \x7b\xf6\x6c\xce\x3d\xf7\x5c\xa2\xd1\x28\xe7\x9c\x73\x0e\x1b\x36\ \x6c\xe0\x92\x4b\x2e\x61\xcd\x9a\x35\x5c\x79\xe5\x95\x3c\xf9\xe4\ \x93\x2c\x5c\xb8\x90\xda\xda\x5a\x54\x55\xe5\xb9\xe7\x9e\x9b\x30\ \xed\xfa\x78\x03\xf2\x8b\xee\xee\xee\xed\x8a\xa2\xac\x7e\xef\x7b\ \xdf\x8b\xa2\x28\xb4\xb7\xb7\x63\x18\x86\x3f\xef\xdc\xd3\xb7\x35\ \x35\x35\x38\x8e\xe3\x0f\x94\x14\x42\x90\x4a\xa5\xfc\x01\xf5\xe3\ \x55\x96\x57\xd1\xe7\x7d\x6e\xac\x5a\x5d\x3a\x8e\x23\x6c\xdb\xa6\ \xa6\xa6\x86\x4c\x26\xa3\x1e\x23\x20\x7e\x4e\xa3\x58\x2c\xba\x77\ \xdd\x75\x17\xeb\xd7\xaf\xa7\xba\xba\x9a\x91\x91\x11\x7f\xf7\x8f\ \xb7\x67\x1e\xd5\x32\x3e\xd1\x96\xcb\xe5\xd8\xbd\x7b\x37\x0d\x0d\ \x0d\x78\xe7\xff\x7a\x6e\xef\xd6\xad\x5b\xb1\x6d\x9b\x9e\x9e\x1e\ \x7f\x10\x8d\x37\x92\xe9\x88\xec\xdd\xe1\x7c\xc8\x3b\xab\x0f\xd0\ 
\x96\x2c\x59\x62\x6d\xdd\xba\x95\x9d\x3b\x77\xd2\xd3\xd3\xc3\x96\ \x2d\x5b\x58\xb1\x62\x05\x3f\xf8\xc1\x0f\xb8\xf0\xc2\x0b\xe9\xec\ \xec\x24\x18\x0c\x32\x7f\xfe\x7c\x3a\x3b\x3b\x79\xf5\xd5\x57\xc9\ \x66\xb3\x0c\x0f\x0f\x13\x8b\xc5\xfc\x72\x53\xef\x04\x01\x29\xa5\ \xdf\x9f\x18\x8d\x46\xfd\x03\x51\x06\x07\x07\x13\xe5\x72\xf9\xe5\ \x31\xbb\xb4\x67\x64\x64\xe4\x8b\x1c\x87\x75\xe2\x89\x27\x26\x6e\ \xbc\xf1\xc6\xfa\x4b\x2f\xbd\x94\xcf\x7e\xf6\xb3\x0c\x0c\x0c\x50\ \x2a\x95\xfc\x39\xed\x85\x42\x81\x29\x53\xa6\xf8\x07\x9c\x79\xe3\ \x6f\x55\x55\x65\xe6\xcc\x99\x64\x32\x19\xff\xb4\x87\xb7\x73\x1e\ \xde\x9a\xe6\xb5\x6d\x9b\x37\xdf\x7c\x53\x1c\x57\x09\x19\xa7\xc3\ \x15\xcb\xb2\x78\xf8\xe1\x87\xb9\xec\xb2\xcb\x78\xf4\xd1\x47\x49\ \x26\x93\xbc\xf0\xc2\x0b\x48\x29\x19\x1c\x1c\x64\xdf\xbe\x7d\x95\ \xb3\xa5\xba\xbb\x27\x64\x10\xbd\xc1\x2f\xe9\x74\x9a\x60\x30\xe8\ \x0f\x4f\x8e\x46\xa3\xbe\xd4\x18\x86\xc1\xec\xd9\xb3\xc9\x64\x32\ \x28\x8a\xb2\x2b\x93\xc9\x5c\xc3\x71\x5e\xba\xae\xf3\xe4\x93\x4f\ \xf2\xd4\x53\x4f\x91\xcf\xe7\x99\x3c\x79\x32\x9f\xfd\xec\x67\xf9\ \xf4\xa7\x3f\xed\x0f\x4b\xf3\x46\x8d\xc7\xe3\x71\x9f\x36\xff\xfc\ \xe7\x3f\xcf\x7d\xf7\xdd\xe7\x8f\x05\xf4\xc8\x4e\x4f\x9d\xd7\xd5\ \xd5\x91\xcf\xe7\x49\xa5\x52\x34\x36\x36\x32\x3c\x3c\x8c\x65\x59\ \x7d\x8a\xa2\xe4\xff\x2c\x2a\xcb\x5b\xa1\x50\x88\x7b\xee\xb9\x87\ \xfb\xee\xbb\xcf\x8f\x13\x3c\x75\xd3\xdd\xdd\x8d\x10\xc2\x1f\x76\ \xe6\xdd\x6c\x55\x55\x95\x4f\xc5\x7b\x27\x2d\x7b\xb6\xc6\x3b\xef\ \xd0\x8b\x3f\xf6\xec\xd9\x73\x5c\xfb\x00\xdf\xba\x46\x47\x47\x5f\ \x09\x87\xc3\xd5\x63\xf7\xb6\xf2\x1b\xdf\xf8\x46\xe4\xb5\xd7\x5e\ \xf3\xef\xcd\xb3\x61\xe3\x8f\xde\x70\x5d\x97\x55\xab\x56\x31\x75\ \xea\x54\x96\x2d\x5b\xc6\xd3\x4f\x3f\x4d\x75\x75\x35\x5b\xb6\x6c\ \xa1\xb9\xb9\x99\xad\x5b\xb7\xb2\x72\xe5\x4a\x0a\x85\x02\xed\xed\ \xed\x1c\x3c\x78\x90\x68\x34\x4a\x6f\x6f\xef\xe5\xdd\xdd\xdd\xdb\ \x8f\xa4\x0b\xe0\x48\xad\xa5\x36\x73\xe6\xcc\x27\xa3\xd1\x68\x40\ \xd7\xf5\x0b\x4f\x3c\xf1\x44\x42\xa1\x10\xaa\xaa\xb2\x6f\xdf\x3e\ 
\xbf\x05\x39\x1c\x0e\xfb\xd3\x0d\x42\xa1\x90\x7f\x1c\x50\x26\x93\ \x21\x97\xcb\x4d\xe0\xb9\x82\xc1\x20\x17\x5e\x78\x21\x2f\xbf\xfc\ \x32\xde\xbc\x75\x21\x04\xed\xed\xed\xaf\x0f\x0c\x0c\x9c\x79\x3c\ \xc1\x18\xb3\x47\xfe\xcf\x73\xe7\xce\xdd\x7b\xed\xb5\xd7\xce\xbb\ \xf9\xe6\x9b\xb9\xfc\xf2\xcb\x59\xb6\x6c\x19\x2d\x2d\x2d\x3c\xf9\ \xe4\x93\xbe\x6a\xfd\x87\x7f\xf8\x07\x1e\x79\xe4\x11\x0c\xc3\xe0\ \x86\x1b\x6e\x60\xd3\xa6\x4d\x8c\x8e\x8e\xfa\xae\xb0\xd7\x93\xa2\ \xeb\xba\x7f\x0c\x47\x30\x18\x44\xd7\x75\xba\xbb\xbb\x97\x1d\x3c\ \x78\x70\xcb\x91\xdc\xe3\x91\xf6\x9f\xd9\x5d\x5d\x5d\x97\xb7\xb6\ \xb6\x5e\x17\x0a\x85\x38\xf3\xcc\x33\xf9\xc2\x17\xbe\x40\x34\x1a\ \xa5\xb9\xb9\x99\xa9\x53\xa7\x52\x55\x55\xc5\x8c\x19\x33\xa8\xa9\ \xa9\xa1\xae\xae\x8e\x48\x24\x82\xeb\xba\x7e\xa0\xb5\x72\xe5\x4a\ \x02\x81\x00\x0d\x0d\x0d\xc4\xe3\x71\x82\xc1\x20\xd1\x68\x94\xb1\ \x81\xcd\x6f\x6e\xdf\xbe\xfd\x9f\xb6\x6f\xdf\xfe\x19\xe0\x2b\xc7\ \x5b\x3a\xc6\x83\x31\x26\xa1\xca\x0b\x2f\xbc\xc0\x25\x97\x5c\x82\ \x10\x82\x7b\xee\xb9\xc7\x3f\x9f\x70\xc3\x86\x0d\x18\x86\xc1\x15\ \x57\x5c\x41\x22\x91\x40\xd7\x75\x7e\xf9\xcb\x5f\xd2\xdd\xdd\x4d\ \xa1\x50\xa0\x58\x2c\x4e\x38\x39\xc7\x73\xed\x43\xa1\x90\xf4\xcb\ \x91\x5c\xf7\x88\xdd\xc3\xa3\x3a\x2d\x5a\xd3\xb4\x80\x10\x82\x07\ \x1f\x7c\x90\x87\x1e\x7a\xe8\x0f\xfe\xfd\xe0\xc1\x83\x7f\xd4\x39\ \x78\xab\x1b\xeb\x38\x0e\xbf\xfd\xed\x6f\xbd\x49\x6f\xbb\x72\xb9\ \xdc\x7f\x02\xef\x58\xb6\x7f\x3c\x56\x36\x9b\x95\x9e\x8a\x12\x42\ \xf0\xfc\xf3\xcf\x73\xfa\xe9\xa7\xb3\x63\xc7\x0e\x36\x6c\xd8\x40\ \x7d\x7d\x3d\x0f\x3e\xf8\x20\x0f\x3e\xf8\x20\x77\xdf\x7d\xb7\xef\ \xba\x7b\x41\xaf\x77\xc2\x90\x97\xbe\x1e\x53\x6f\xa2\xad\xad\xed\ \x5c\xc7\x71\xf6\xd4\xd5\xd5\x25\x8e\xf4\x9e\x8e\xca\xc1\x0f\x85\ \x42\xb1\xba\xba\xba\xcf\x85\x42\x21\x17\xb8\x2a\x1c\x0e\x2f\x3e\ \x1c\xbd\xef\x4d\x7a\xf0\xa6\x3c\x7b\x99\x36\xcf\xdf\x4f\xa7\xd3\ \x6b\x7a\x7a\x7a\xde\xcf\x5f\x7e\x45\x16\x2f\x5e\x5c\xf0\xfa\x45\ \x74\x5d\x67\xe9\xd2\xa5\xec\xdb\xb7\xcf\x77\xc9\x0f\xc7\x7d\xf5\ 
\xf8\xb1\xed\xdb\xb7\xcf\x2d\x14\x0a\xed\x47\x73\x23\xc7\x3c\x38\ \xaa\xb9\xb9\xf9\xe7\x4d\x4d\x4d\xd7\x1e\xce\x5c\x73\x0f\x14\x8f\ \xce\xae\xa9\xa9\xa1\x50\x28\x14\x5b\x5b\x5b\xa3\xe3\xbe\x8f\xfe\ \xfe\xfe\xbf\x34\x20\xc1\x96\x96\x96\x6d\xf1\x78\x5c\xe4\xf3\xf9\ \x48\x63\x63\xe3\x09\x52\x4a\x0a\x85\xc2\x61\x8f\x90\x2d\x14\x0a\ \x98\xa6\x79\x9f\xeb\xba\x56\xb9\x5c\xfe\x8f\xa1\xa1\xa1\xd1\xff\ \x2b\x80\x00\x81\xb1\xef\x09\x86\x42
codeparrot/github-code-clean
from __future__ import print_function __author__ = "John Kirkham <kirkhamj@janelia.hhmi.org>" __date__ = "$Jul 30, 2014 19:35:11 EDT$" import imp import nose import nose.plugins import nose.plugins.attrib import numpy import scipy import scipy.spatial import scipy.spatial.distance import scipy.stats import nanshe.util.iters import nanshe.util.xnumpy import nanshe.imp.segment import nanshe.syn.data has_spams = False try: imp.find_module("spams") has_spams = True except ImportError: pass class TestSegment(object): def test_remove_zeroed_lines_1(self): a = numpy.ones((1, 100, 101)) erosion_shape = [21, 1] dilation_shape = [1, 3] r = numpy.array([[0, 0, 0], [a.shape[1]-2, 3, 4]]).T.copy() print(r) ar = a.copy() for each_r in r: nanshe.util.xnumpy.index_axis_at_pos(nanshe.util.xnumpy.index_axis_at_pos(ar, 0, each_r[0]), -1, each_r[-1])[:] = 0 b = nanshe.imp.segment.remove_zeroed_lines(ar, erosion_shape=erosion_shape, dilation_shape=dilation_shape) assert (a == b).all() def test_remove_zeroed_lines_2(self): a = numpy.ones((1, 100, 101)) erosion_shape = [21, 1] dilation_shape = [1, 3] r = numpy.array([[0, 0, 0], [1, 3, 4]]).T.copy() print(r) ar = a.copy() for each_r in r: nanshe.util.xnumpy.index_axis_at_pos(nanshe.util.xnumpy.index_axis_at_pos(ar, 0, each_r[0]), -1, each_r[-1])[:] = 0 b = nanshe.imp.segment.remove_zeroed_lines(ar, erosion_shape=erosion_shape, dilation_shape=dilation_shape) assert (a == b).all() def test_remove_zeroed_lines_3(self): a = numpy.ones((1, 100, 101)) p = 0.2 erosion_shape = [21, 1] dilation_shape = [1, 3] nr = numpy.random.geometric(p) r = numpy.array([numpy.repeat(0, nr), numpy.random.random_integers(1, a.shape[1] - 2, nr)]).T.copy() print(r) ar = a.copy() for each_r in r: nanshe.util.xnumpy.index_axis_at_pos(nanshe.util.xnumpy.index_axis_at_pos(ar, 0, each_r[0]), -1, each_r[-1])[:] = 0 b = nanshe.imp.segment.remove_zeroed_lines(ar, erosion_shape=erosion_shape, dilation_shape=dilation_shape) assert (a == b).all() def 
test_remove_zeroed_lines_4(self): a = numpy.ones((1, 100, 101)) erosion_shape = [21, 1] dilation_shape = [1, 3] r = numpy.array([[0, 0, 0], [a.shape[1], 0, 4]]).T.copy() print(r) ar = a.copy() for each_r in r: nanshe.util.xnumpy.index_axis_at_pos(nanshe.util.xnumpy.index_axis_at_pos(ar, 0, each_r[0]), -1, each_r[-1])[:] = 0 b = nanshe.imp.segment.remove_zeroed_lines(ar, dilation_shape=dilation_shape, erosion_shape=erosion_shape) assert (a == b).all() def test_remove_zeroed_lines_5(self): a = numpy.ones((1, 100, 101)) erosion_shape = [21, 1] dilation_shape = [1, 3] r = numpy.array([[0, 0, 0, 0], [a.shape[1], a.shape[1]-1, 0, 1]]).T.copy() print(r) ar = a.copy() for each_r in r: nanshe.util.xnumpy.index_axis_at_pos(nanshe.util.xnumpy.index_axis_at_pos(ar, 0, each_r[0]), -1, each_r[-1])[:] = 0 b = nanshe.imp.segment.remove_zeroed_lines(ar, dilation_shape=dilation_shape, erosion_shape=erosion_shape) assert (a == b).all() def test_remove_zeroed_lines_6(self): a = numpy.repeat(numpy.arange(100)[None].T, 101, axis=1)[None].astype(float) erosion_shape = [21, 1] dilation_shape = [1, 3] r = numpy.array([[0, 0, 0], [1, 3, 4]]).T.copy() print(r) ar = a.copy() for each_r in r: nanshe.util.xnumpy.index_axis_at_pos(nanshe.util.xnumpy.index_axis_at_pos(ar, 0, each_r[0]), -1, each_r[-1])[:] = 0 b = nanshe.imp.segment.remove_zeroed_lines(ar, erosion_shape=erosion_shape, dilation_shape=dilation_shape) assert numpy.allclose(a, b, rtol=0, atol=1e-13) def test_remove_zeroed_lines_7(self): a = numpy.repeat(numpy.arange(100)[None], 101, axis=0)[None].astype(float) a[0, :, 0] = 1 nanshe.util.xnumpy.index_axis_at_pos(nanshe.util.xnumpy.index_axis_at_pos(a, 0, 0), -1, 0)[:] = 1 erosion_shape = [21, 1] dilation_shape = [1, 3] r = numpy.array([[0, 0, 0, 0], [0, 2, 3, 4]]).T.copy() print(r) ar = a.copy() for each_r in r: nanshe.util.xnumpy.index_axis_at_pos(nanshe.util.xnumpy.index_axis_at_pos(ar, 0, each_r[0]), -1, each_r[-1])[:] = 0 b = nanshe.imp.segment.remove_zeroed_lines(ar, 
erosion_shape=erosion_shape, dilation_shape=dilation_shape) assert numpy.allclose(a, b, rtol=0, atol=1e-13) def test_remove_zeroed_lines_8(self): a = numpy.ones((1, 100, 101)) erosion_shape = [21, 1] dilation_shape = [1, 3] r = numpy.array([[0, 0, 0], [a.shape[1]-2, 3, 4]]).T.copy() print(r) ar = a.copy() for each_r in r: nanshe.util.xnumpy.index_axis_at_pos(nanshe.util.xnumpy.index_axis_at_pos(ar, 0, each_r[0]), -1, each_r[-1])[:] = 0 b = numpy.zeros_like(a) nanshe.imp.segment.remove_zeroed_lines(ar, erosion_shape=erosion_shape, dilation_shape=dilation_shape, out=b) assert (a == b).all() def test_remove_zeroed_lines_9(self): a = numpy.ones((1, 100, 101)) erosion_shape = [21, 1] dilation_shape = [1, 3] r = numpy.array([[0, 0, 0], [a.shape[1]-2, 3, 4]]).T.copy() print(r) ar = a.copy() for each_r in r: nanshe.util.xnumpy.index_axis_at_pos(nanshe.util.xnumpy.index_axis_at_pos(ar, 0, each_r[0]), -1, each_r[-1])[:] = 0 b = ar nanshe.imp.segment.remove_zeroed_lines(b, erosion_shape=erosion_shape, dilation_shape=dilation_shape, out=b) assert (a == b).all() @nose.plugins.attrib.attr("3D") def test_remove_zeroed_lines_10(self): a = numpy.ones((1, 100, 101, 102)) erosion_shape = [21, 1, 1] dilation_shape = [1, 3, 1] r = numpy.array([[0, 0, 0], [a.shape[1]-2, 3, 4], [0, 0, 0]]).T.copy() print(r) ar = a.copy() for each_r in r: nanshe.util.xnumpy.index_axis_at_pos(nanshe.util.xnumpy.index_axis_at_pos(nanshe.util.xnumpy.index_axis_at_pos(ar, 0, each_r[0]), -1, each_r[-1]), -1, each_r[-2])[:] = 0 b = ar nanshe.imp.segment.remove_zeroed_lines(b, erosion_shape=erosion_shape, dilation_shape=dilation_shape, out=b) assert (a == b).all() def test_estimate_f0_1(self): spatial_smoothing_gaussian_filter_stdev = 5.0 spatial_smoothing_gaussian_filter_window_size = 5.0 which_quantile = 0.5 temporal_smoothing_gaussian_filter_stdev = 5.0 temporal_smoothing_gaussian_filter_window_size = 5.0 half_window_size = 20 a = numpy.ones((100, 101, 102)) b = nanshe.imp.segment.estimate_f0( a, 
spatial_smoothing_gaussian_filter_stdev=spatial_smoothing_gaussian_filter_stdev, spatial_smoothing_gaussian_filter_window_size=spatial_smoothing_gaussian_filter_window_size, which_quantile=which_quantile, temporal_smoothing_gaussian_filter_stdev=temporal_smoothing_gaussian_filter_stdev, temporal_smoothing_gaussian_filter_window_size=temporal_smoothing_gaussian_filter_window_size, half_window_size=half_window_size ) assert (b == a).all() def test_estimate_f0_1b(self): spatial_smoothing_gaussian_filter_stdev = 5.0 spatial_smoothing_gaussian_filter_window_size = 5.0 which_quantile = 0.5 temporal_smoothing_gaussian_filter_stdev = 5.0 temporal_smoothing_gaussian_filter_window_size = 5.0 half_window_size = 20 a = numpy.ones((100, 101, 102)) b = a.copy() nanshe.imp.segment.estimate_f0( a, spatial_smoothing_gaussian_filter_stdev=spatial_smoothing_gaussian_filter_stdev, spatial_smoothing_gaussian_filter_window_size=spatial_smoothing_gaussian_filter_window_size, which_quantile=which_quantile, temporal_smoothing_gaussian_filter_stdev=temporal_smoothing_gaussian_filter_stdev, temporal_smoothing_gaussian_filter_window_size=temporal_smoothing_gaussian_filter_window_size, half_window_size=half_window_size, out=b ) assert (b == a).all() def test_estimate_f0_1c(self): spatial_smoothing_gaussian_filter_stdev = 5.0 spatial_smoothing_gaussian_filter_window_size = 5.0 which_quantile = 0.5 temporal_smoothing_gaussian_filter_stdev = 5.0 temporal_smoothing_gaussian_filter_window_size = 5.0 half_window_size = 20 a = numpy.ones((100, 101, 102)) b = a.copy() nanshe.imp.segment.estimate_f0( b, spatial_smoothing_gaussian_filter_stdev=spatial_smoothing_gaussian_filter_stdev, spatial_smoothing_gaussian_filter_window_size=spatial_smoothing_gaussian_filter_window_size, which_quantile=which_quantile, temporal_smoothing_gaussian_filter_stdev=temporal_smoothing_gaussian_filter_stdev, temporal_smoothing_gaussian_filter_window_size=temporal_smoothing_gaussian_filter_window_size, 
            half_window_size=half_window_size,
            out=b
        )

        assert (b == a).all()

    def test_estimate_f0_2(self):
        # Random input: F0 estimation should strongly smooth, shrinking the
        # standard deviation by roughly two orders of magnitude.
        spatial_smoothing_gaussian_filter_stdev = 5.0
        spatial_smoothing_gaussian_filter_window_size = 5.0
        which_quantile = 0.5
        temporal_smoothing_gaussian_filter_stdev = 5.0
        temporal_smoothing_gaussian_filter_window_size = 5.0
        half_window_size = 49

        mean = 0.0
        stdev = 1.0

        a = numpy.random.normal(mean, stdev, (100, 101, 102))

        b = nanshe.imp.segment.estimate_f0(
            a,
            spatial_smoothing_gaussian_filter_stdev=spatial_smoothing_gaussian_filter_stdev,
            spatial_smoothing_gaussian_filter_window_size=spatial_smoothing_gaussian_filter_window_size,
            which_quantile=which_quantile,
            temporal_smoothing_gaussian_filter_stdev=temporal_smoothing_gaussian_filter_stdev,
            temporal_smoothing_gaussian_filter_window_size=temporal_smoothing_gaussian_filter_window_size,
            half_window_size=half_window_size
        )

        # Seems to be basically 2 orders of magnitude in reduction. However, it may be a little above exactly two.
        # Hence, multiplication by 99 instead of 100.
        assert ((99.0*b.std()) < a.std())

    @nose.plugins.attrib.attr("3D")
    def test_estimate_f0_3(self):
        # 3D (volumetric) variant of test 1: constant input is unchanged.
        spatial_smoothing_gaussian_filter_stdev = 5.0
        spatial_smoothing_gaussian_filter_window_size = 5.0
        which_quantile = 0.5
        temporal_smoothing_gaussian_filter_stdev = 5.0
        temporal_smoothing_gaussian_filter_window_size = 5.0
        half_window_size = 20

        a = numpy.ones((100, 101, 102, 103))

        b = nanshe.imp.segment.estimate_f0(
            a,
            spatial_smoothing_gaussian_filter_stdev=spatial_smoothing_gaussian_filter_stdev,
            spatial_smoothing_gaussian_filter_window_size=spatial_smoothing_gaussian_filter_window_size,
            which_quantile=which_quantile,
            temporal_smoothing_gaussian_filter_stdev=temporal_smoothing_gaussian_filter_stdev,
            temporal_smoothing_gaussian_filter_window_size=temporal_smoothing_gaussian_filter_window_size,
            half_window_size=half_window_size
        )

        assert (b == a).all()

    @nose.plugins.attrib.attr("3D")
    def test_estimate_f0_4(self):
        # 3D variant of test 2: random input, large variance reduction.
        spatial_smoothing_gaussian_filter_stdev = 5.0
        spatial_smoothing_gaussian_filter_window_size = 5.0
        which_quantile = 0.5
        temporal_smoothing_gaussian_filter_stdev = 5.0
        temporal_smoothing_gaussian_filter_window_size = 5.0
        half_window_size = 49

        mean = 0.0
        stdev = 1.0

        a = numpy.random.normal(mean, stdev, (100, 101, 102, 103))

        b = nanshe.imp.segment.estimate_f0(
            a,
            spatial_smoothing_gaussian_filter_stdev=spatial_smoothing_gaussian_filter_stdev,
            spatial_smoothing_gaussian_filter_window_size=spatial_smoothing_gaussian_filter_window_size,
            which_quantile=which_quantile,
            temporal_smoothing_gaussian_filter_stdev=temporal_smoothing_gaussian_filter_stdev,
            temporal_smoothing_gaussian_filter_window_size=temporal_smoothing_gaussian_filter_window_size,
            half_window_size=half_window_size
        )

        # Seems to be basically 2 orders of magnitude in reduction. However, it may be a little above exactly two.
        # Hence, multiplication by 99 instead of 100.
        assert ((99.0*b.std()) < a.std())

    def test_extract_f0_1(self):
        # Constant input: dF/F is exactly zero everywhere.
        spatial_smoothing_gaussian_filter_stdev = 5.0
        spatial_smoothing_gaussian_filter_window_size = 5.0
        which_quantile = 0.5
        temporal_smoothing_gaussian_filter_stdev = 5.0
        temporal_smoothing_gaussian_filter_window_size = 5.0
        half_window_size = 20
        bias = 100

        a = numpy.ones((100, 101, 102))

        b = nanshe.imp.segment.extract_f0(
            a,
            spatial_smoothing_gaussian_filter_stdev=spatial_smoothing_gaussian_filter_stdev,
            spatial_smoothing_gaussian_filter_window_size=spatial_smoothing_gaussian_filter_window_size,
            which_quantile=which_quantile,
            temporal_smoothing_gaussian_filter_stdev=temporal_smoothing_gaussian_filter_stdev,
            temporal_smoothing_gaussian_filter_window_size=temporal_smoothing_gaussian_filter_window_size,
            half_window_size=half_window_size,
            bias=bias
        )

        assert (b == 0).all()

    def test_extract_f0_1b(self):
        # Same as test 1, writing the result through ``out``.
        spatial_smoothing_gaussian_filter_stdev = 5.0
        spatial_smoothing_gaussian_filter_window_size = 5.0
        which_quantile = 0.5
        temporal_smoothing_gaussian_filter_stdev = 5.0
        temporal_smoothing_gaussian_filter_window_size = 5.0
        half_window_size = 20
        bias = 100

        a = numpy.ones((100, 101, 102))
        b = a.copy()

        nanshe.imp.segment.extract_f0(
            a,
            spatial_smoothing_gaussian_filter_stdev=spatial_smoothing_gaussian_filter_stdev,
            spatial_smoothing_gaussian_filter_window_size=spatial_smoothing_gaussian_filter_window_size,
            which_quantile=which_quantile,
            temporal_smoothing_gaussian_filter_stdev=temporal_smoothing_gaussian_filter_stdev,
            temporal_smoothing_gaussian_filter_window_size=temporal_smoothing_gaussian_filter_window_size,
            half_window_size=half_window_size,
            bias=bias,
            out=b
        )

        assert (b == 0).all()

    def test_extract_f0_1c(self):
        # Same as test 1, fully in place.
        spatial_smoothing_gaussian_filter_stdev = 5.0
        spatial_smoothing_gaussian_filter_window_size = 5.0
        which_quantile = 0.5
        temporal_smoothing_gaussian_filter_stdev = 5.0
        temporal_smoothing_gaussian_filter_window_size = 5.0
        half_window_size = 20
        bias = 100

        a = numpy.ones((100, 101, 102))
        b = a.copy()

        nanshe.imp.segment.extract_f0(
            b,
            spatial_smoothing_gaussian_filter_stdev=spatial_smoothing_gaussian_filter_stdev,
            spatial_smoothing_gaussian_filter_window_size=spatial_smoothing_gaussian_filter_window_size,
            which_quantile=which_quantile,
            temporal_smoothing_gaussian_filter_stdev=temporal_smoothing_gaussian_filter_stdev,
            temporal_smoothing_gaussian_filter_window_size=temporal_smoothing_gaussian_filter_window_size,
            half_window_size=half_window_size,
            bias=bias,
            out=b
        )

        assert (b == 0).all()

    def test_extract_f0_2(self):
        # Random input: dF/F shrinks variance and stays close to a/bias.
        spatial_smoothing_gaussian_filter_stdev = 5.0
        spatial_smoothing_gaussian_filter_window_size = 5.0
        which_quantile = 0.5
        temporal_smoothing_gaussian_filter_stdev = 5.0
        temporal_smoothing_gaussian_filter_window_size = 5.0
        half_window_size = 49
        bias = 100

        mean = 0.0
        stdev = 1.0

        a = numpy.random.normal(mean, stdev, (100, 101, 102))

        b = nanshe.imp.segment.extract_f0(
            a,
            spatial_smoothing_gaussian_filter_stdev=spatial_smoothing_gaussian_filter_stdev,
            spatial_smoothing_gaussian_filter_window_size=spatial_smoothing_gaussian_filter_window_size,
            which_quantile=which_quantile,
            temporal_smoothing_gaussian_filter_stdev=temporal_smoothing_gaussian_filter_stdev,
            temporal_smoothing_gaussian_filter_window_size=temporal_smoothing_gaussian_filter_window_size,
            half_window_size=half_window_size,
            bias=bias
        )

        # Seems to be basically 2 orders of magnitude in reduction. However, it may be a little above exactly two.
        # Hence, multiplication by 99 instead of 100.
        assert ((99.0*b.std()) < a.std())

        # Turns out that a difference greater than 0.1 will be over 10 standard deviations away.
        assert (((a - 100.0*b) < 0.1).all())

    @nose.plugins.attrib.attr("3D")
    def test_extract_f0_3(self):
        # 3D variant of extract_f0 test 1.
        spatial_smoothing_gaussian_filter_stdev = 5.0
        spatial_smoothing_gaussian_filter_window_size = 5.0
        which_quantile = 0.5
        temporal_smoothing_gaussian_filter_stdev = 5.0
        temporal_smoothing_gaussian_filter_window_size = 5.0
        half_window_size = 20
        bias = 100

        a = numpy.ones((100, 101, 102, 103))

        b = nanshe.imp.segment.extract_f0(
            a,
            spatial_smoothing_gaussian_filter_stdev=spatial_smoothing_gaussian_filter_stdev,
            spatial_smoothing_gaussian_filter_window_size=spatial_smoothing_gaussian_filter_window_size,
            which_quantile=which_quantile,
            temporal_smoothing_gaussian_filter_stdev=temporal_smoothing_gaussian_filter_stdev,
            temporal_smoothing_gaussian_filter_window_size=temporal_smoothing_gaussian_filter_window_size,
            half_window_size=half_window_size,
            bias=bias
        )

        assert (b == 0).all()

    @nose.plugins.attrib.attr("3D")
    def test_extract_f0_4(self):
        # 3D variant of extract_f0 test 2.
        spatial_smoothing_gaussian_filter_stdev = 5.0
        spatial_smoothing_gaussian_filter_window_size = 5.0
        which_quantile = 0.5
        temporal_smoothing_gaussian_filter_stdev = 5.0
        temporal_smoothing_gaussian_filter_window_size = 5.0
        half_window_size = 49
        bias = 100

        mean = 0.0
        stdev = 1.0

        a = numpy.random.normal(mean, stdev, (100, 101, 102, 103))

        b = nanshe.imp.segment.extract_f0(
            a,
            spatial_smoothing_gaussian_filter_stdev=spatial_smoothing_gaussian_filter_stdev,
            spatial_smoothing_gaussian_filter_window_size=spatial_smoothing_gaussian_filter_window_size,
            which_quantile=which_quantile,
            temporal_smoothing_gaussian_filter_stdev=temporal_smoothing_gaussian_filter_stdev,
            temporal_smoothing_gaussian_filter_window_size=temporal_smoothing_gaussian_filter_window_size,
            half_window_size=half_window_size,
            bias=bias
        )

        # Seems to be basically 2 orders of magnitude in reduction. However, it may be a little above exactly two.
        # Hence, multiplication by 99 instead of 100.
        assert ((99.0*b.std()) < a.std())

        # Turns out that a difference greater than 0.1 will be over 10 standard deviations away.
        assert (((a - 100.0*b) < 0.1).all())

    def test_preprocess_data_1(self):
        # Smoke test of the full pipeline (all four stages enabled).
        ## Does NOT test accuracy.
        config = {
            "normalize_data" : {
                "renormalized_images" : {
                    "ord" : 2
                }
            },
            "extract_f0" : {
                "spatial_smoothing_gaussian_filter_stdev" : 5.0,
                "spatial_smoothing_gaussian_filter_window_size" : 5.0,
                "which_quantile" : 0.5,
                "temporal_smoothing_gaussian_filter_stdev" : 5.0,
                "temporal_smoothing_gaussian_filter_window_size" : 5.0,
                "half_window_size" : 20,
                "bias" : 100
            },
            "remove_zeroed_lines" : {
                "erosion_shape" : [ 21, 1 ],
                "dilation_shape" : [ 1, 3 ]
            },
            "wavelet.transform" : {
                "scale" : [ 3, 4, 4 ]
            }
        }

        space = numpy.array([100, 100, 100])
        radii = numpy.array([5, 6])
        magnitudes = numpy.array([15, 16])
        points = numpy.array([[20, 30, 24], [70, 59, 65]])

        masks = nanshe.syn.data.generate_hypersphere_masks(space, points, radii)
        images = nanshe.syn.data.generate_gaussian_images(space, points, radii/3.0, magnitudes) * masks
        image_stack = images.max(axis=0)

        nanshe.imp.segment.preprocess_data(image_stack, **config)

    def test_preprocess_data_2(self):
        # Smoke test without the extract_f0 stage.
        ## Does NOT test accuracy.
        config = {
            "normalize_data" : {
                "renormalized_images" : {
                    "ord" : 2
                }
            },
            "remove_zeroed_lines" : {
                "erosion_shape" : [ 21, 1 ],
                "dilation_shape" : [ 1, 3 ]
            },
            "wavelet.transform" : {
                "scale" : [ 3, 4, 4 ]
            }
        }

        space = numpy.array([100, 100, 100])
        radii = numpy.array([5, 6])
        magnitudes = numpy.array([15, 16])
        points = numpy.array([[20, 30, 24], [70, 59, 65]])

        masks = nanshe.syn.data.generate_hypersphere_masks(space, points, radii)
        images = nanshe.syn.data.generate_gaussian_images(space, points, radii/3.0, magnitudes) * masks
        image_stack = images.max(axis=0)

        nanshe.imp.segment.preprocess_data(image_stack, **config)

    def test_preprocess_data_3(self):
        # Smoke test without the remove_zeroed_lines stage.
        ## Does NOT test accuracy.
        config = {
            "normalize_data" : {
                "renormalized_images" : {
                    "ord" : 2
                }
            },
            "extract_f0" : {
                "spatial_smoothing_gaussian_filter_stdev" : 5.0,
                "spatial_smoothing_gaussian_filter_window_size" : 5.0,
                "which_quantile" : 0.5,
                "temporal_smoothing_gaussian_filter_stdev" : 5.0,
                "temporal_smoothing_gaussian_filter_window_size" : 5.0,
                "half_window_size" : 20,
                "bias" : 100
            },
            "wavelet.transform" : {
                "scale" : [ 3, 4, 4 ]
            }
        }

        space = numpy.array([100, 100, 100])
        radii = numpy.array([5, 6])
        magnitudes = numpy.array([15, 16])
        points = numpy.array([[20, 30, 24], [70, 59, 65]])

        masks = nanshe.syn.data.generate_hypersphere_masks(
            space,
            points,
            radii
        )
        images = nanshe.syn.data.generate_gaussian_images(
            space,
            points,
            radii/3.0,
            magnitudes
        ) * masks
        image_stack = images.max(axis=0)

        nanshe.imp.segment.preprocess_data(image_stack, **config)

    def test_preprocess_data_4(self):
        # Smoke test without the wavelet transform stage.
        ## Does NOT test accuracy.
        config = {
            "normalize_data" : {
                "renormalized_images" : {
                    "ord" : 2
                }
            },
            "extract_f0" : {
                "spatial_smoothing_gaussian_filter_stdev" : 5.0,
                "spatial_smoothing_gaussian_filter_window_size" : 5.0,
                "which_quantile" : 0.5,
                "temporal_smoothing_gaussian_filter_stdev" : 5.0,
                "temporal_smoothing_gaussian_filter_window_size" : 5.0,
                "half_window_size" : 20,
                "bias" : 100
            },
            "remove_zeroed_lines" : {
                "erosion_shape" : [ 21, 1 ],
                "dilation_shape" : [ 1, 3 ]
            }
        }

        space = numpy.array([100, 100, 100])
        radii = numpy.array([5, 6])
        magnitudes = numpy.array([15, 16])
        points = numpy.array([[20, 30, 24], [70, 59, 65]])

        masks = nanshe.syn.data.generate_hypersphere_masks(
            space,
            points,
            radii
        )
        images = nanshe.syn.data.generate_gaussian_images(
            space,
            points,
            radii/3.0,
            magnitudes
        ) * masks
        image_stack = images.max(axis=0)

        nanshe.imp.segment.preprocess_data(image_stack, **config)

    @nose.plugins.attrib.attr("3D")
    def test_preprocess_data_5(self):
        # 3D smoke test (normalize + extract_f0 + wavelet).
        ## Does NOT test accuracy.
        config = {
            "normalize_data" : {
                "renormalized_images" : {
                    "ord" : 2
                }
            },
            "extract_f0" : {
                "spatial_smoothing_gaussian_filter_stdev" : 5.0,
                "spatial_smoothing_gaussian_filter_window_size" : 5.0,
                "which_quantile" : 0.5,
                "temporal_smoothing_gaussian_filter_stdev" : 5.0,
                "temporal_smoothing_gaussian_filter_window_size" : 5.0,
                "half_window_size" : 20,
                "bias" : 100
            },
            "wavelet.transform" : {
                "scale" : [ 3, 4, 4, 4 ]
            }
        }

        space = numpy.array([100, 100, 100, 100])
        radii = numpy.array([5, 6])
        magnitudes = numpy.array([15, 16])
        points = numpy.array([[20, 30, 24, 85], [70, 59, 65, 17]])

        masks = nanshe.syn.data.generate_hypersphere_masks(
            space,
            points,
            radii
        )
        images = nanshe.syn.data.generate_gaussian_images(
            space,
            points,
            radii/3.0,
            magnitudes
        ) * masks
        image_stack = images.max(axis=0)

        nanshe.imp.segment.preprocess_data(image_stack, **config)

    @nose.plugins.attrib.attr("3D")
    def test_preprocess_data_6(self):
        # 3D smoke test (normalize + wavelet only).
        ## Does NOT test accuracy.
        config = {
            "normalize_data" : {
                "renormalized_images" : {
                    "ord" : 2
                }
            },
            "wavelet.transform" : {
                "scale" : [ 3, 4, 4, 4 ]
            }
        }

        space = numpy.array([100, 100, 100, 100])
        radii = numpy.array([5, 6])
        magnitudes = numpy.array([15, 16])
        points = numpy.array([[20, 30, 24, 85], [70, 59, 65, 17]])

        masks = nanshe.syn.data.generate_hypersphere_masks(
            space,
            points,
            radii
        )
        images = nanshe.syn.data.generate_gaussian_images(
            space,
            points,
            radii/3.0,
            magnitudes
        ) * masks
        image_stack = images.max(axis=0)

        nanshe.imp.segment.preprocess_data(image_stack, **config)

    @nose.plugins.attrib.attr("3D")
    def test_preprocess_data_7(self):
        # 3D smoke test (normalize + extract_f0 only).
        ## Does NOT test accuracy.
        config = {
            "normalize_data" : {
                "renormalized_images" : {
                    "ord" : 2
                }
            },
            "extract_f0" : {
                "spatial_smoothing_gaussian_filter_stdev" : 5.0,
                "spatial_smoothing_gaussian_filter_window_size" : 5.0,
                "which_quantile" : 0.5,
                "temporal_smoothing_gaussian_filter_stdev" : 5.0,
                "temporal_smoothing_gaussian_filter_window_size" : 5.0,
                "half_window_size" : 20,
                "bias" : 100
            }
        }

        space = numpy.array([100, 100, 100, 100])
        radii = numpy.array([5, 6])
        magnitudes = numpy.array([15, 16])
        points = numpy.array([[20, 30, 24, 85], [70, 59, 65, 17]])

        masks = nanshe.syn.data.generate_hypersphere_masks(
            space,
            points,
            radii
        )
        images = nanshe.syn.data.generate_gaussian_images(
            space,
            points,
            radii/3.0,
            magnitudes
        ) * masks
        image_stack = images.max(axis=0)

        nanshe.imp.segment.preprocess_data(image_stack, **config)

    def test_generate_dictionary_00(self):
        # Dictionary learning (SPAMS backend, float32 input) should recover
        # each hypersphere mask as one atom.
        if not has_spams:
            raise nose.SkipTest(
                "Cannot run this test without SPAMS being installed."
            )

        p = numpy.array([[27, 51], [66, 85], [77, 45]])
        space = numpy.array((100, 100))
        radii = numpy.array((5, 6, 7))

        g = nanshe.syn.data.generate_hypersphere_masks(space, p, radii)

        d = nanshe.imp.segment.generate_dictionary(
            g.astype(numpy.float32),
            **{
                "spams.trainDL" : {
                    "gamma2" : 0,
                    "gamma1" : 0,
                    "numThreads" : 1,
                    "K" : len(g),
                    "iter" : 10,
                    "modeD" : 0,
                    "posAlpha" : True,
                    "clean" : True,
                    "posD" : True,
                    "batchsize" : 256,
                    "lambda1" : 0.2,
                    "lambda2" : 0,
                    "mode" : 2
                }
            }
        )
        d = (d != 0)

        assert (g.shape == d.shape)

        assert (g.astype(bool).max(axis=0) == d.astype(bool).max(axis=0)).all()

        # Greedily match each dictionary atom to a ground-truth mask; every
        # mask must be matched by exactly one atom.
        unmatched_g = range(len(g))
        matched = dict()

        for i in nanshe.util.iters.irange(len(d)):
            new_unmatched_g = []
            for j in unmatched_g:
                if not (d[i] == g[j]).all():
                    new_unmatched_g.append(j)
                else:
                    matched[i] = j

            unmatched_g = new_unmatched_g

        print(unmatched_g)

        assert (len(unmatched_g) == 0)

    def test_generate_dictionary_01(self):
        # Same as test 00 with a float64 input.
        if not has_spams:
            raise nose.SkipTest(
                "Cannot run this test without SPAMS being installed."
) p = numpy.array([[27, 51], [66, 85], [77, 45]]) space = numpy.array((100, 100)) radii = numpy.array((5, 6, 7)) g = nanshe.syn.data.generate_hypersphere_masks(space, p, radii) d = nanshe.imp.segment.generate_dictionary( g.astype(float), **{ "spams.trainDL" : { "gamma2" : 0, "gamma1" : 0, "numThreads" : 1, "K" : len(g), "iter" : 10, "modeD" : 0, "posAlpha" : True, "clean" : True, "posD" : True, "batchsize" : 256, "lambda1" : 0.2, "lambda2" : 0, "mode" : 2 } } ) d = (d != 0) assert (g.shape == d.shape) assert (g.astype(bool).max(axis=0) == d.astype(bool).max(axis=0)).all() unmatched_g = range(len(g)) matched = dict() for i in nanshe.util.iters.irange(len(d)): new_unmatched_g = [] for j in unmatched_g: if not (d[i] == g[j]).all(): new_unmatched_g.append(j) else: matched[i] = j unmatched_g = new_unmatched_g print(unmatched_g) assert (len(unmatched_g) == 0) @nose.plugins.attrib.attr("3D") def test_generate_dictionary_02(self): if not has_spams: raise nose.SkipTest( "Cannot run this test without SPAMS being installed." 
) p = numpy.array([[27, 51, 87], [66, 85, 55], [77, 45, 26]]) space = numpy.array((100, 100, 100)) radii = numpy.array((5, 6, 7)) g = nanshe.syn.data.generate_hypersphere_masks(space, p, radii) d = nanshe.imp.segment.generate_dictionary( g.astype(numpy.float32), **{ "spams.trainDL" : { "gamma2" : 0, "gamma1" : 0, "numThreads" : 1, "K" : len(g), "iter" : 10, "modeD" : 0, "posAlpha" : True, "clean" : True, "posD" : True, "batchsize" : 256, "lambda1" : 0.2, "lambda2" : 0, "mode" : 2 } } ) d = (d != 0) assert (g.shape == d.shape) assert (g.astype(bool).max(axis=0) == d.astype(bool).max(axis=0)).all() unmatched_g = range(len(g)) matched = dict() for i in nanshe.util.iters.irange(len(d)): new_unmatched_g = [] for j in unmatched_g: if not (d[i] == g[j]).all(): new_unmatched_g.append(j) else: matched[i] = j unmatched_g = new_unmatched_g print(unmatched_g) assert (len(unmatched_g) == 0) @nose.plugins.attrib.attr("3D") def test_generate_dictionary_03(self): if not has_spams: raise nose.SkipTest( "Cannot run this test without SPAMS being installed." 
) p = numpy.array([[27, 51, 87], [66, 85, 55], [77, 45, 26]]) space = numpy.array((100, 100, 100)) radii = numpy.array((5, 6, 7)) g = nanshe.syn.data.generate_hypersphere_masks(space, p, radii) d = nanshe.imp.segment.generate_dictionary( g.astype(float), **{ "spams.trainDL" : { "gamma2" : 0, "gamma1" : 0, "numThreads" : 1, "K" : len(g), "iter" : 10, "modeD" : 0, "posAlpha" : True, "clean" : True, "posD" : True, "batchsize" : 256, "lambda1" : 0.2, "lambda2" : 0, "mode" : 2 } } ) d = (d != 0) assert (g.shape == d.shape) assert (g.astype(bool).max(axis=0) == d.astype(bool).max(axis=0)).all() unmatched_g = range(len(g)) matched = dict() for i in nanshe.util.iters.irange(len(d)): new_unmatched_g = [] for j in unmatched_g: if not (d[i] == g[j]).all(): new_unmatched_g.append(j) else: matched[i] = j unmatched_g = new_unmatched_g print(unmatched_g) assert (len(unmatched_g) == 0) def test_generate_dictionary_04(self): if not has_spams: raise nose.SkipTest( "Cannot run this test without SPAMS being installed." 
) p = numpy.array([[27, 51], [66, 85], [77, 45]]) space = numpy.array((100, 100)) radii = numpy.array((5, 6, 7)) g = nanshe.syn.data.generate_hypersphere_masks(space, p, radii) d = nanshe.imp.segment.generate_dictionary( g.astype(numpy.float32), g.astype(numpy.float32), len(g), **{ "spams.trainDL" : { "gamma2" : 0, "gamma1" : 0, "numThreads" : 1, "iter" : 10, "modeD" : 0, "posAlpha" : True, "clean" : True, "posD" : True, "batchsize" : 256, "lambda1" : 0.2, "lambda2" : 0, "mode" : 2 } } ) d = (d != 0) assert (g.shape == d.shape) assert (g.astype(bool).max(axis=0) == d.astype(bool).max(axis=0)).all() unmatched_g = range(len(g)) matched = dict() for i in nanshe.util.iters.irange(len(d)): new_unmatched_g = [] for j in unmatched_g: if not (d[i] == g[j]).all(): new_unmatched_g.append(j) else: matched[i] = j unmatched_g = new_unmatched_g print(unmatched_g) assert (len(unmatched_g) == 0) assert (g.astype(bool) == d.astype(bool)).all() def test_generate_dictionary_05(self): if not has_spams: raise nose.SkipTest( "Cannot run this test without SPAMS being installed." 
) p = numpy.array([[27, 51], [66, 85], [77, 45]]) space = numpy.array((100, 100)) radii = numpy.array((5, 6, 7)) g = nanshe.syn.data.generate_hypersphere_masks(space, p, radii) d = nanshe.imp.segment.generate_dictionary( g.astype(float), g.astype(float), len(g), **{ "spams.trainDL" : { "gamma2" : 0, "gamma1" : 0, "numThreads" : 1, "iter" : 10, "modeD" : 0, "posAlpha" : True, "clean" : True, "posD" : True, "batchsize" : 256, "lambda1" : 0.2, "lambda2" : 0, "mode" : 2 } } ) d = (d != 0) assert (g.shape == d.shape) assert (g.astype(bool).max(axis=0) == d.astype(bool).max(axis=0)).all() unmatched_g = range(len(g)) matched = dict() for i in nanshe.util.iters.irange(len(d)): new_unmatched_g = [] for j in unmatched_g: if not (d[i] == g[j]).all(): new_unmatched_g.append(j) else: matched[i] = j unmatched_g = new_unmatched_g print(unmatched_g) assert (len(unmatched_g) == 0) assert (g.astype(bool) == d.astype(bool)).all() @nose.plugins.attrib.attr("3D") def test_generate_dictionary_06(self): if not has_spams: raise nose.SkipTest( "Cannot run this test without SPAMS being installed." 
) p = numpy.array([[27, 51, 87], [66, 85, 55], [77, 45, 26]]) space = numpy.array((100, 100, 100)) radii = numpy.array((5, 6, 7)) g = nanshe.syn.data.generate_hypersphere_masks(space, p, radii) d = nanshe.imp.segment.generate_dictionary( g.astype(numpy.float32), g.astype(numpy.float32), len(g), **{ "spams.trainDL" : { "gamma2" : 0, "gamma1" : 0, "numThreads" : 1, "iter" : 10, "modeD" : 0, "posAlpha" : True, "clean" : True, "posD" : True, "batchsize" : 256, "lambda1" : 0.2, "lambda2" : 0, "mode" : 2 } } ) d = (d != 0) assert (g.shape == d.shape) assert (g.astype(bool).max(axis=0) == d.astype(bool).max(axis=0)).all() unmatched_g = range(len(g)) matched = dict() for i in nanshe.util.iters.irange(len(d)): new_unmatched_g = [] for j in unmatched_g: if not (d[i] == g[j]).all(): new_unmatched_g.append(j) else: matched[i] = j unmatched_g = new_unmatched_g print(unmatched_g) assert (len(unmatched_g) == 0) assert (g.astype(bool) == d.astype(bool)).all() @nose.plugins.attrib.attr("3D") def test_generate_dictionary_07(self): if not has_spams: raise nose.SkipTest( "Cannot run this test without SPAMS being installed." 
            )

        p = numpy.array([[27, 51, 87], [66, 85, 55], [77, 45, 26]])
        space = numpy.array((100, 100, 100))
        radii = numpy.array((5, 6, 7))

        g = nanshe.syn.data.generate_hypersphere_masks(space, p, radii)

        d = nanshe.imp.segment.generate_dictionary(
            g.astype(float),
            g.astype(float),
            len(g),
            **{
                "spams.trainDL" : {
                    "gamma2" : 0,
                    "gamma1" : 0,
                    "numThreads" : 1,
                    "iter" : 10,
                    "modeD" : 0,
                    "posAlpha" : True,
                    "clean" : True,
                    "posD" : True,
                    "batchsize" : 256,
                    "lambda1" : 0.2,
                    "lambda2" : 0,
                    "mode" : 2
                }
            }
        )
        d = (d != 0)

        assert (g.shape == d.shape)

        assert (g.astype(bool).max(axis=0) == d.astype(bool).max(axis=0)).all()

        # Greedily match atoms to ground-truth masks; all must be matched.
        unmatched_g = range(len(g))
        matched = dict()

        for i in nanshe.util.iters.irange(len(d)):
            new_unmatched_g = []
            for j in unmatched_g:
                if not (d[i] == g[j]).all():
                    new_unmatched_g.append(j)
                else:
                    matched[i] = j

            unmatched_g = new_unmatched_g

        print(unmatched_g)

        assert (len(unmatched_g) == 0)

        # Atoms must also match in their original order.
        assert (g.astype(bool) == d.astype(bool)).all()

    def test_generate_dictionary_08(self):
        # Dictionary learning with the scikit-learn backend (no SPAMS
        # dependency), 2D input.
        p = numpy.array([[27, 51], [66, 85], [77, 45]])
        space = numpy.array((100, 100))
        radii = numpy.array((5, 6, 7))

        g = nanshe.syn.data.generate_hypersphere_masks(space, p, radii)

        d = nanshe.imp.segment.generate_dictionary(
            g.astype(float),
            **{
                "sklearn.decomposition.dict_learning_online" : {
                    "n_jobs" : 1,
                    "n_components" : len(g),
                    "n_iter" : 20,
                    "batch_size" : 256,
                    "alpha" : 0.2
                }
            }
        )
        d = (d != 0)

        assert (g.shape == d.shape)

        assert (g.astype(bool).max(axis=0) == d.astype(bool).max(axis=0)).all()

        # Greedily match atoms to ground-truth masks; all must be matched.
        unmatched_g = range(len(g))
        matched = dict()

        for i in nanshe.util.iters.irange(len(d)):
            new_unmatched_g = []
            for j in unmatched_g:
                if not (d[i] == g[j]).all():
                    new_unmatched_g.append(j)
                else:
                    matched[i] = j

            unmatched_g = new_unmatched_g

        print(unmatched_g)

        assert (len(unmatched_g) == 0)

    @nose.plugins.attrib.attr("3D")
    def test_generate_dictionary_09(self):
        # 3D variant of test 08.
        p = numpy.array([[27, 51, 87], [66, 85, 55], [77, 45, 26]])
        space = numpy.array((100, 100, 100))
        radii = numpy.array((5, 6, 7))

        g = nanshe.syn.data.generate_hypersphere_masks(space, p, radii)

        d = nanshe.imp.segment.generate_dictionary(
            g.astype(float),
            **{
                "sklearn.decomposition.dict_learning_online" : {
                    "n_jobs" : 1,
                    "n_components" : len(g),
                    "n_iter" : 20,
                    "batch_size" : 256,
                    "alpha" : 0.2
                }
            }
        )
        d = (d != 0)

        assert (g.shape == d.shape)

        assert (g.astype(bool).max(axis=0) == d.astype(bool).max(axis=0)).all()

        # Greedily match atoms to ground-truth masks; all must be matched.
        unmatched_g = range(len(g))
        matched = dict()

        for i in nanshe.util.iters.irange(len(d)):
            new_unmatched_g = []
            for j in unmatched_g:
                if not (d[i] == g[j]).all():
                    new_unmatched_g.append(j)
                else:
                    matched[i] = j

            unmatched_g = new_unmatched_g

        print(unmatched_g)

        assert (len(unmatched_g) == 0)

    def test_generate_dictionary_10(self):
        # scikit-learn backend with an initial dictionary and component
        # count passed positionally.
        p = numpy.array([[27, 51], [66, 85], [77, 45]])
        space = numpy.array((100, 100))
        radii = numpy.array((5, 6, 7))

        g = nanshe.syn.data.generate_hypersphere_masks(space, p, radii)

        d = nanshe.imp.segment.generate_dictionary(
            g.astype(float),
            g.astype(float),
            len(g),
            **{
                "sklearn.decomposition.dict_learning_online" : {
                    "n_jobs" : 1,
                    "n_iter" : 20,
                    "batch_size" : 256,
                    "alpha" : 0.2
                }
            }
        )
        d = (d != 0)

        assert (g.shape == d.shape)

        assert (g.astype(bool).max(axis=0) == d.astype(bool).max(axis=0)).all()

        # Greedily match atoms to ground-truth masks; all must be matched.
        unmatched_g = range(len(g))
        matched = dict()

        for i in nanshe.util.iters.irange(len(d)):
            new_unmatched_g = []
            for j in unmatched_g:
                if not (d[i] == g[j]).all():
                    new_unmatched_g.append(j)
                else:
                    matched[i] = j

            unmatched_g = new_unmatched_g

        print(unmatched_g)

        assert (len(unmatched_g) == 0)

        # Atoms must also match in their original order.
        assert (g.astype(bool) == d.astype(bool)).all()

    @nose.plugins.attrib.attr("3D")
    def test_generate_dictionary_11(self):
        # 3D variant of test 10.
        p = numpy.array([[27, 51, 87], [66, 85, 55], [77, 45, 26]])
        space = numpy.array((100, 100, 100))
        radii = numpy.array((5, 6, 7))

        g = nanshe.syn.data.generate_hypersphere_masks(space, p, radii)

        d = nanshe.imp.segment.generate_dictionary(
            g.astype(float),
            g.astype(float),
            len(g),
            **{
                "sklearn.decomposition.dict_learning_online" : {
                    "n_jobs" : 1,
                    "n_iter" : 20,
                    "batch_size" : 256,
                    "alpha" :
0.2 } } ) d = (d != 0) assert (g.shape == d.shape) assert (g.astype(bool).max(axis=0) == d.astype(bool).max(axis=0)).all() unmatched_g = range(len(g)) matched = dict() for i in nanshe.util.iters.irange(len(d)): new_unmatched_g = [] for j in unmatched_g: if not (d[i] == g[j]).all(): new_unmatched_g.append(j) else: matched[i] = j unmatched_g = new_unmatched_g print(unmatched_g) assert (len(unmatched_g) == 0) assert (g.astype(bool) == d.astype(bool)).all() def test_generate_local_maxima_vigra_1(self): p = numpy.array([[27, 51], [66, 85], [77, 45]]) space = numpy.array((100, 100)) radii = numpy.array((5, 6, 7)) magnitudes = numpy.array((1, 1, 1), dtype=float) g = nanshe.syn.data.generate_gaussian_images( space, p, radii/3.0, magnitudes/3 ) m = nanshe.imp.segment.generate_local_maxima_vigra(g.max(axis=0)) assert (numpy.array(m.nonzero()) == p.T).all() @nose.plugins.attrib.attr("3D") def test_generate_local_maxima_vigra_2(self): p = numpy.array([[27, 51, 87], [66, 85, 55], [77, 45, 26]]) space = numpy.array((100, 100, 100)) radii = numpy.array((5, 6, 7)) magnitudes = numpy.array((1, 1, 1), dtype=float) g = nanshe.syn.data.generate_gaussian_images( space, p, radii/3.0, magnitudes/3 ) m = nanshe.imp.segment.generate_local_maxima_vigra(g.max(axis=0)) assert (numpy.array(m.nonzero()) == p.T).all() def test_generate_local_maxima_scikit_image_1(self): p = numpy.array([[27, 51], [66, 85], [77, 45]]) space = numpy.array((100, 100)) radii = numpy.array((5, 6, 7)) magnitudes = numpy.array((1, 1, 1), dtype=float) g = nanshe.syn.data.generate_gaussian_images( space, p, radii/3.0, magnitudes/3 ) m = nanshe.imp.segment.generate_local_maxima_scikit_image(g.max(axis=0)) @nose.plugins.attrib.attr("3D") def test_generate_local_maxima_scikit_image_2(self): p = numpy.array([[27, 51, 87], [66, 85, 55], [77, 45, 26]]) space = numpy.array((100, 100, 100)) radii = numpy.array((5, 6, 7)) magnitudes = numpy.array((1, 1, 1), dtype=float) g = nanshe.syn.data.generate_gaussian_images( space, p, 
radii/3.0, magnitudes/3 ) m = nanshe.imp.segment.generate_local_maxima_scikit_image(g.max(axis=0)) assert (numpy.array(m.nonzero()) == p.T).all() def test_generate_local_maxima_1(self): p = numpy.array([[27, 51], [66, 85], [77, 45]]) space = numpy.array((100, 100)) radii = numpy.array((5, 6, 7)) magnitudes = numpy.array((1, 1, 1), dtype=float) g = nanshe.syn.data.generate_gaussian_images( space, p, radii/3.0, magnitudes/3 ) m = nanshe.imp.segment.generate_local_maxima(g.max(axis=0)) assert (numpy.array(m.nonzero()) == p.T).all() @nose.plugins.attrib.attr("3D") def test_generate_local_maxima_2(self): p = numpy.array([[27, 51, 87], [66, 85, 55], [77, 45, 26]]) space = numpy.array((100, 100, 100)) radii = numpy.array((5, 6, 7)) magnitudes = numpy.array((1, 1, 1), dtype=float) g = nanshe.syn.data.generate_gaussian_images( space, p, radii/3.0, magnitudes/3 ) m = nanshe.imp.segment.generate_local_maxima(g.max(axis=0)) assert (numpy.array(m.nonzero()) == p.T).all() def test_extended_region_local_maxima_properties_1(self): p = numpy.array([[27, 51], [66, 85], [77, 45]]) space = numpy.array((100, 100)) radii = numpy.array((5, 6, 7)) magnitudes = numpy.array((1, 1, 1), dtype=float) g = nanshe.syn.data.generate_gaussian_images( space, p, radii/3.0, magnitudes/3 ) m = (g > 0.00065) g *= m e = nanshe.imp.segment.extended_region_local_maxima_properties( g.max(axis=0), nanshe.util.xnumpy.enumerate_masks_max(m, axis=0)[0] ) assert (numpy.bincount(e["label"])[1:] == 1).all() assert (len(e) == len(p)) assert (e["local_max"] == p).all() assert (e["area"] == numpy.apply_over_axes(numpy.sum, m, axes=range(1, m.ndim)).squeeze().astype(float)).all() assert (e["centroid"] == e["local_max"]).all() assert (e["intensity"] == g.max(axis=0)[tuple(p.T)]).all() def test_extended_region_local_maxima_properties_2(self): p = numpy.array([[27, 51], [32, 53], [77, 45]]) space = numpy.array((100, 100)) radii = numpy.array((5, 6, 7)) magnitudes = numpy.array((1, 1, 1), dtype=float) g = 
        nanshe.syn.data.generate_gaussian_images(
            space, p, radii/3.0, magnitudes/3
        )
        # Merge the two overlapping Gaussians into a single image plane.
        g = numpy.array([g[0] + g[1], g[2]])
        m = (g > 0.00065)
        g *= m

        e = nanshe.imp.segment.extended_region_local_maxima_properties(
            g.max(axis=0),
            nanshe.util.xnumpy.enumerate_masks_max(m, axis=0)[0]
        )

        # The merged region holds two local maxima; the isolated one holds one.
        assert (numpy.bincount(e["label"])[1:] == numpy.array([2, 1])).all()
        assert (len(e) == len(p))
        assert (e["local_max"] == p).all()
        assert (e["area"][[0, 2]] == numpy.apply_over_axes(numpy.sum, m, axes=range(1, m.ndim)).squeeze().astype(float)).all()

        # Not exactly equal due to floating point round off error
        assert ((e["centroid"][0] - numpy.array(m[0].nonzero()).mean(axis=1)) < 1e-14).all()
        # Not exactly equal due to floating point round off error
        assert ((e["centroid"][1] - numpy.array(m[0].nonzero()).mean(axis=1)) < 1e-14).all()

        assert (e["centroid"][2] == e["local_max"][2]).all()
        assert (e["intensity"] == g.max(axis=0)[tuple(p.T)]).all()

    @nose.plugins.attrib.attr("3D")
    def test_extended_region_local_maxima_properties_3(self):
        # 3D analogue of test_extended_region_local_maxima_properties_1.
        p = numpy.array([[27, 51, 87], [66, 85, 55], [77, 45, 26]])
        space = numpy.array((100, 100, 100))
        radii = numpy.array((5, 6, 7))
        magnitudes = numpy.array((1, 1, 1), dtype=float)

        g = nanshe.syn.data.generate_gaussian_images(
            space, p, radii/3.0, magnitudes/3
        )
        m = (g > 0.00065)
        g *= m

        e = nanshe.imp.segment.extended_region_local_maxima_properties(
            g.max(axis=0),
            nanshe.util.xnumpy.enumerate_masks_max(m, axis=0)[0]
        )

        assert (numpy.bincount(e["label"])[1:] == 1).all()
        assert (len(e) == len(p))
        assert (e["local_max"] == p).all()
        assert (e["area"] == numpy.apply_over_axes(numpy.sum, m, axes=range(1, m.ndim)).squeeze().astype(float)).all()
        assert (e["centroid"] == e["local_max"]).all()
        assert (e["intensity"] == g.max(axis=0)[tuple(p.T)]).all()

    @nose.plugins.attrib.attr("3D")
    def test_extended_region_local_maxima_properties_4(self):
        # 3D analogue of test_extended_region_local_maxima_properties_2
        # (two Gaussians merged into one region).
        p = numpy.array([[27, 51, 87], [66, 85, 55], [77, 45, 26]])
        space = numpy.array((100, 100, 100))
        radii = numpy.array((5, 6, 7))
        magnitudes = numpy.array((1, 1, 1), dtype=float)

        g = nanshe.syn.data.generate_gaussian_images(
            space, p, radii/3.0, magnitudes/3
        )
        g = numpy.array([g[0] + g[1], g[2]])
        m = (g > 0.00065)
        g *= m

        e = nanshe.imp.segment.extended_region_local_maxima_properties(
            g.max(axis=0),
            nanshe.util.xnumpy.enumerate_masks_max(m, axis=0)[0]
        )

        assert (numpy.bincount(e["label"])[1:] == numpy.array([2, 1])).all()
        assert (len(e) == len(p))
        assert (e["local_max"] == p).all()
        assert (e["area"][[0, 2]] == numpy.apply_over_axes(numpy.sum, m, axes=range(1, m.ndim)).squeeze().astype(float)).all()

        # Not exactly equal due to floating point round off error
        assert ((e["centroid"][0] - numpy.array(m[0].nonzero()).mean(axis=1)) < 1e-14).all()
        # Not exactly equal due to floating point round off error
        assert ((e["centroid"][1] - numpy.array(m[0].nonzero()).mean(axis=1)) < 1e-14).all()

        assert (e["centroid"][2] == e["local_max"][2]).all()
        assert (e["intensity"] == g.max(axis=0)[tuple(p.T)]).all()

    def test_remove_low_intensity_local_maxima_1(self):
        # 2D: a threshold of 1.0 (all pixels below max) removes every maximum.
        space = numpy.array((100, 100))
        radii = numpy.array((5, 10))
        magnitudes = numpy.array((1, 1), dtype=float)
        points = numpy.array([[23, 36],
                              [58, 64]])

        masks = nanshe.syn.data.generate_hypersphere_masks(
            space, points, radii
        )
        images = nanshe.syn.data.generate_gaussian_images(
            space, points, radii/3.0, magnitudes
        ) * masks
        labels = nanshe.util.xnumpy.enumerate_masks_max(masks, axis=0)[0]

        e = nanshe.imp.segment.ExtendedRegionProps(images.max(axis=0), labels)

        e2 = nanshe.imp.segment.remove_low_intensity_local_maxima(e, 1.0)

        assert (len(points) == len(e.props))
        assert (0 == len(e2.props))

    def test_remove_low_intensity_local_maxima_2(self):
        # 2D: a threshold at the smallest observed below-max fraction keeps
        # every maximum.
        space = numpy.array((100, 100))
        radii = numpy.array((5, 10))
        magnitudes = numpy.array((1, 1), dtype=float)
        points = numpy.array([[23, 36],
                              [58, 64]])

        masks = nanshe.syn.data.generate_hypersphere_masks(
            space, points, radii
        )
        images = nanshe.syn.data.generate_gaussian_images(
            space, points, radii/3.0, magnitudes
        ) * masks
        labels =
        nanshe.util.xnumpy.enumerate_masks_max(masks, axis=0)[0]

        e = nanshe.imp.segment.ExtendedRegionProps(images.max(axis=0), labels)

        # Per-region fraction of pixels strictly below the region's max.
        percentage_pixels_below_max = numpy.zeros((len(masks),), float)
        for i in nanshe.util.iters.irange(len(masks)):
            pixels_below_max = (images.max(axis=0)[masks[i].nonzero()] < images.max(axis=0)[masks[i]].max()).sum()
            pixels = masks[i].sum()
            percentage_pixels_below_max[i] = float(pixels_below_max) / float(pixels)
        percentage_pixels_below_max = numpy.sort(percentage_pixels_below_max)

        e2 = nanshe.imp.segment.remove_low_intensity_local_maxima(e, percentage_pixels_below_max[0])

        assert (len(points) == len(e.props))
        assert (len(e.props) == len(e2.props))

    def test_remove_low_intensity_local_maxima_3(self):
        # 2D: a threshold at the larger below-max fraction drops one maximum.
        space = numpy.array((100, 100))
        radii = numpy.array((5, 10))
        magnitudes = numpy.array((1, 1), dtype=float)
        points = numpy.array([[23, 36],
                              [58, 64]])

        masks = nanshe.syn.data.generate_hypersphere_masks(
            space, points, radii
        )
        images = nanshe.syn.data.generate_gaussian_images(
            space, points, radii/3.0, magnitudes
        ) * masks
        labels = nanshe.util.xnumpy.enumerate_masks_max(masks, axis=0)[0]

        e = nanshe.imp.segment.ExtendedRegionProps(images.max(axis=0), labels)

        percentage_pixels_below_max = numpy.zeros((len(masks),), float)
        for i in nanshe.util.iters.irange(len(masks)):
            pixels_below_max = (images.max(axis=0)[masks[i].nonzero()] < images.max(axis=0)[masks[i]].max()).sum()
            pixels = masks[i].sum()
            percentage_pixels_below_max[i] = float(pixels_below_max) / float(pixels)
        percentage_pixels_below_max = numpy.sort(percentage_pixels_below_max)

        e2 = nanshe.imp.segment.remove_low_intensity_local_maxima(e, percentage_pixels_below_max[1])

        assert (len(points) == len(e.props))
        assert ((len(e.props) - 1) == len(e2.props))

    def test_remove_low_intensity_local_maxima_4(self):
        # 2D: nudging the threshold one epsilon past the larger fraction
        # drops both maxima.
        space = numpy.array((100, 100))
        radii = numpy.array((5, 10))
        magnitudes = numpy.array((1, 1), dtype=float)
        points = numpy.array([[23, 36],
                              [58, 64]])

        masks = nanshe.syn.data.generate_hypersphere_masks(
            space, points, radii
        )
        images = nanshe.syn.data.generate_gaussian_images(
            space, points, radii/3.0, magnitudes
        ) * masks
        labels = nanshe.util.xnumpy.enumerate_masks_max(masks, axis=0)[0]

        e = nanshe.imp.segment.ExtendedRegionProps(images.max(axis=0), labels)

        percentage_pixels_below_max = numpy.zeros((len(masks),), float)
        for i in nanshe.util.iters.irange(len(masks)):
            pixels_below_max = (images.max(axis=0)[masks[i].nonzero()] < images.max(axis=0)[masks[i]].max()).sum()
            pixels = masks[i].sum()
            percentage_pixels_below_max[i] = float(pixels_below_max) / float(pixels)
        percentage_pixels_below_max = numpy.sort(percentage_pixels_below_max)

        e2 = nanshe.imp.segment.remove_low_intensity_local_maxima(e,
                                                                  percentage_pixels_below_max[1] + \
                                                                  numpy.finfo(float).eps)

        assert (len(points) == len(e.props))
        assert ((len(e.props) - 2) == len(e2.props))

    @nose.plugins.attrib.attr("3D")
    def test_remove_low_intensity_local_maxima_5(self):
        # 3D analogue of test_remove_low_intensity_local_maxima_1.
        space = numpy.array((100, 100, 100))
        radii = numpy.array((5, 10))
        magnitudes = numpy.array((1, 1), dtype=float)
        points = numpy.array([[23, 36, 21],
                              [58, 64, 62]])

        masks = nanshe.syn.data.generate_hypersphere_masks(
            space, points, radii
        )
        images = nanshe.syn.data.generate_gaussian_images(
            space, points, radii/3.0, magnitudes
        ) * masks
        labels = nanshe.util.xnumpy.enumerate_masks_max(masks, axis=0)[0]

        e = nanshe.imp.segment.ExtendedRegionProps(images.max(axis=0), labels)

        e2 = nanshe.imp.segment.remove_low_intensity_local_maxima(e, 1.0)

        assert (len(points) == len(e.props))
        assert (0 == len(e2.props))

    @nose.plugins.attrib.attr("3D")
    def test_remove_low_intensity_local_maxima_6(self):
        # 3D analogue of test_remove_low_intensity_local_maxima_2.
        space = numpy.array((100, 100, 100))
        radii = numpy.array((5, 10))
        magnitudes = numpy.array((1, 1), dtype=float)
        points = numpy.array([[23, 36, 21],
                              [58, 64, 62]])

        masks = nanshe.syn.data.generate_hypersphere_masks(
            space, points, radii
        )
        images = nanshe.syn.data.generate_gaussian_images(
            space, points, radii/3.0, magnitudes
        ) * masks
        labels = nanshe.util.xnumpy.enumerate_masks_max(masks, axis=0)[0]

        e = nanshe.imp.segment.ExtendedRegionProps(images.max(axis=0), labels)

        percentage_pixels_below_max = numpy.zeros((len(masks),), float)
        for i in nanshe.util.iters.irange(len(masks)):
            pixels_below_max = (images.max(axis=0)[masks[i].nonzero()] < images.max(axis=0)[masks[i]].max()).sum()
            pixels = masks[i].sum()
            percentage_pixels_below_max[i] = float(pixels_below_max) / float(pixels)
        percentage_pixels_below_max = numpy.sort(percentage_pixels_below_max)

        e2 = nanshe.imp.segment.remove_low_intensity_local_maxima(e, percentage_pixels_below_max[0])

        assert (len(points) == len(e.props))
        assert (len(e.props) == len(e2.props))

    @nose.plugins.attrib.attr("3D")
    def test_remove_low_intensity_local_maxima_7(self):
        # 3D analogue of test_remove_low_intensity_local_maxima_3.
        space = numpy.array((100, 100, 100))
        radii = numpy.array((5, 10))
        magnitudes = numpy.array((1, 1), dtype=float)
        points = numpy.array([[23, 36, 21],
                              [58, 64, 62]])

        masks = nanshe.syn.data.generate_hypersphere_masks(
            space, points, radii
        )
        images = nanshe.syn.data.generate_gaussian_images(
            space, points, radii/3.0, magnitudes
        ) * masks
        labels = nanshe.util.xnumpy.enumerate_masks_max(masks, axis=0)[0]

        e = nanshe.imp.segment.ExtendedRegionProps(images.max(axis=0), labels)

        percentage_pixels_below_max = numpy.zeros((len(masks),), float)
        for i in nanshe.util.iters.irange(len(masks)):
            pixels_below_max = (images.max(axis=0)[masks[i].nonzero()] < images.max(axis=0)[masks[i]].max()).sum()
            pixels = masks[i].sum()
            percentage_pixels_below_max[i] = float(pixels_below_max) / float(pixels)
        percentage_pixels_below_max = numpy.sort(percentage_pixels_below_max)

        e2 = nanshe.imp.segment.remove_low_intensity_local_maxima(e, percentage_pixels_below_max[1])

        assert (len(points) == len(e.props))
        assert ((len(e.props) - 1) == len(e2.props))

    @nose.plugins.attrib.attr("3D")
    def test_remove_low_intensity_local_maxima_8(self):
        # 3D analogue of test_remove_low_intensity_local_maxima_4.
        space = numpy.array((100, 100, 100))
        radii = numpy.array((5, 10))
        magnitudes = numpy.array((1, 1), dtype=float)
        points =
        numpy.array([[23, 36, 21],
                     [58, 64, 62]])

        masks = nanshe.syn.data.generate_hypersphere_masks(
            space, points, radii
        )
        images = nanshe.syn.data.generate_gaussian_images(
            space, points, radii/3.0, magnitudes
        ) * masks
        labels = nanshe.util.xnumpy.enumerate_masks_max(masks, axis=0)[0]

        e = nanshe.imp.segment.ExtendedRegionProps(images.max(axis=0), labels)

        # Per-region fraction of pixels strictly below the region's max.
        percentage_pixels_below_max = numpy.zeros((len(masks),), float)
        for i in nanshe.util.iters.irange(len(masks)):
            pixels_below_max = (images.max(axis=0)[masks[i].nonzero()] < images.max(axis=0)[masks[i]].max()).sum()
            pixels = masks[i].sum()
            percentage_pixels_below_max[i] = float(pixels_below_max) / float(pixels)
        percentage_pixels_below_max = numpy.sort(percentage_pixels_below_max)

        e2 = nanshe.imp.segment.remove_low_intensity_local_maxima(e,
                                                                  percentage_pixels_below_max[1] + \
                                                                  numpy.finfo(float).eps)

        assert (len(points) == len(e.props))
        assert ((len(e.props) - 2) == len(e2.props))

    def test_remove_too_close_local_maxima_1(self):
        # 2D: both maxima share one label, so a distance just above their
        # separation fuses them down to a single maximum.
        space = numpy.array((100, 100))
        radii = numpy.array((5, 5))
        magnitudes = numpy.array((1, 1), dtype=float)
        points = numpy.array([[63, 69],
                              [58, 64]])

        masks = nanshe.syn.data.generate_hypersphere_masks(
            space, points, radii
        )
        images = nanshe.syn.data.generate_gaussian_images(
            space, points, radii/3.0, magnitudes
        ) * masks
        labels = masks.max(axis=0).astype(int)

        e = nanshe.imp.segment.ExtendedRegionProps(images.max(axis=0), labels)

        # Bump the pairwise distance by the smallest representable amount
        # so the comparison is strictly greater than the true separation.
        dist = scipy.spatial.distance.pdist(points).max()
        i = 0
        while (dist + i * numpy.finfo(type(dist)).eps) == dist:
            i += 1
        dist += i * numpy.finfo(type(dist)).eps

        e2 = nanshe.imp.segment.remove_too_close_local_maxima(e, dist)

        assert (len(points) == len(e.props))
        assert (1 == len(e2.props))

    def test_remove_too_close_local_maxima_2(self):
        # 2D: with distinct labels per region nothing is removed.
        space = numpy.array((100, 100))
        radii = numpy.array((5, 5))
        magnitudes = numpy.array((1, 1), dtype=float)
        points = numpy.array([[63, 69],
                              [58, 64]])

        masks = nanshe.syn.data.generate_hypersphere_masks(
            space, points, radii
        )
        images = nanshe.syn.data.generate_gaussian_images(
            space, points, radii/3.0, magnitudes
        ) * masks
        labels = nanshe.util.xnumpy.enumerate_masks_max(masks, axis=0)[0]

        e = nanshe.imp.segment.ExtendedRegionProps(images.max(axis=0), labels)

        dist = scipy.spatial.distance.pdist(points).max()
        i = 0
        while (dist + i * numpy.finfo(type(dist)).eps) == dist:
            i += 1
        dist += i * numpy.finfo(type(dist)).eps

        e2 = nanshe.imp.segment.remove_too_close_local_maxima(e, dist)

        assert (len(points) == len(e.props))
        assert (len(points) == len(e2.props))

    def test_remove_too_close_local_maxima_3(self):
        # 2D: of two too-close maxima the brighter one (second point) survives.
        space = numpy.array((100, 100))
        radii = numpy.array((5, 5))
        magnitudes = numpy.array((1, 1.01), dtype=float)
        points = numpy.array([[63, 69],
                              [58, 64]])

        masks = nanshe.syn.data.generate_hypersphere_masks(
            space, points, radii
        )
        images = nanshe.syn.data.generate_gaussian_images(
            space, points, radii/3.0, magnitudes
        ) * masks
        labels = masks.max(axis=0).astype(int)

        e = nanshe.imp.segment.ExtendedRegionProps(images.max(axis=0), labels)

        dist = scipy.spatial.distance.pdist(points).max()
        i = 0
        while (dist + i * numpy.finfo(type(dist)).eps) == dist:
            i += 1
        dist += i * numpy.finfo(type(dist)).eps

        e2 = nanshe.imp.segment.remove_too_close_local_maxima(e, dist)

        assert (len(points) == len(e.props))
        assert (1 == len(e2.props))
        assert (points[magnitudes == magnitudes.max()] == e2.props["local_max"][0]).all()

    def test_remove_too_close_local_maxima_4(self):
        # 2D: same as _3 but with the first point brighter.
        space = numpy.array((100, 100))
        radii = numpy.array((5, 5))
        magnitudes = numpy.array((1.01, 1), dtype=float)
        points = numpy.array([[63, 69],
                              [58, 64]])

        masks = nanshe.syn.data.generate_hypersphere_masks(
            space, points, radii
        )
        images = nanshe.syn.data.generate_gaussian_images(
            space, points, radii/3.0, magnitudes
        ) * masks
        labels = masks.max(axis=0).astype(int)

        e = nanshe.imp.segment.ExtendedRegionProps(images.max(axis=0), labels)

        dist = scipy.spatial.distance.pdist(points).max()
        i = 0
        while (dist + i * numpy.finfo(type(dist)).eps) == dist:
            i += 1
        dist += i * numpy.finfo(type(dist)).eps

        e2 = nanshe.imp.segment.remove_too_close_local_maxima(e, dist)

        assert (len(points) == len(e.props))
        assert (1 == len(e2.props))
        assert (points[magnitudes == magnitudes.max()] == e2.props["local_max"][0]).all()

    @nose.plugins.attrib.attr("3D")
    def test_remove_too_close_local_maxima_5(self):
        # 3D analogue of test_remove_too_close_local_maxima_1.
        space = numpy.array((100, 100, 100))
        radii = numpy.array((5, 5))
        magnitudes = numpy.array((1, 1), dtype=float)
        points = numpy.array([[63, 69, 26],
                              [58, 64, 21]])

        masks = nanshe.syn.data.generate_hypersphere_masks(
            space, points, radii
        )
        images = nanshe.syn.data.generate_gaussian_images(
            space, points, radii/3.0, magnitudes
        ) * masks
        labels = masks.max(axis=0).astype(int)

        e = nanshe.imp.segment.ExtendedRegionProps(images.max(axis=0), labels)

        dist = scipy.spatial.distance.pdist(points).max()
        i = 0
        while (dist + i * numpy.finfo(type(dist)).eps) == dist:
            i += 1
        dist += i * numpy.finfo(type(dist)).eps

        e2 = nanshe.imp.segment.remove_too_close_local_maxima(e, dist)

        assert (len(points) == len(e.props))
        assert (1 == len(e2.props))

    @nose.plugins.attrib.attr("3D")
    def test_remove_too_close_local_maxima_6(self):
        # 3D analogue of test_remove_too_close_local_maxima_2.
        space = numpy.array((100, 100, 100))
        radii = numpy.array((5, 5))
        magnitudes = numpy.array((1, 1), dtype=float)
        points = numpy.array([[63, 69, 26],
                              [58, 64, 21]])

        masks = nanshe.syn.data.generate_hypersphere_masks(
            space, points, radii
        )
        images = nanshe.syn.data.generate_gaussian_images(
            space, points, radii/3.0, magnitudes
        ) * masks
        labels = nanshe.util.xnumpy.enumerate_masks_max(masks, axis=0)[0]

        e = nanshe.imp.segment.ExtendedRegionProps(images.max(axis=0), labels)

        dist = scipy.spatial.distance.pdist(points).max()
        i = 0
        while (dist + i * numpy.finfo(type(dist)).eps) == dist:
            i += 1
        dist += i * numpy.finfo(type(dist)).eps

        e2 = nanshe.imp.segment.remove_too_close_local_maxima(e, dist)

        assert (len(points) == len(e.props))
        assert (len(points) == len(e2.props))

    @nose.plugins.attrib.attr("3D")
    def test_remove_too_close_local_maxima_7(self):
        # 3D analogue of test_remove_too_close_local_maxima_3.
        space =
        numpy.array((100, 100, 100))
        radii = numpy.array((5, 5))
        magnitudes = numpy.array((1, 1.01), dtype=float)
        points = numpy.array([[63, 69, 26],
                              [58, 64, 21]])

        masks = nanshe.syn.data.generate_hypersphere_masks(
            space, points, radii
        )
        images = nanshe.syn.data.generate_gaussian_images(
            space, points, radii/3.0, magnitudes
        ) * masks
        labels = masks.max(axis=0).astype(int)

        e = nanshe.imp.segment.ExtendedRegionProps(images.max(axis=0), labels)

        # Bump the pairwise distance by the smallest representable amount.
        dist = scipy.spatial.distance.pdist(points).max()
        i = 0
        while (dist + i * numpy.finfo(type(dist)).eps) == dist:
            i += 1
        dist += i * numpy.finfo(type(dist)).eps

        e2 = nanshe.imp.segment.remove_too_close_local_maxima(e, dist)

        assert (len(points) == len(e.props))
        assert (1 == len(e2.props))
        assert (points[magnitudes == magnitudes.max()] == e2.props["local_max"][0]).all()

    @nose.plugins.attrib.attr("3D")
    def test_remove_too_close_local_maxima_8(self):
        # 3D analogue of test_remove_too_close_local_maxima_4.
        space = numpy.array((100, 100, 100))
        radii = numpy.array((5, 5))
        magnitudes = numpy.array((1.01, 1), dtype=float)
        points = numpy.array([[63, 69, 26],
                              [58, 64, 21]])

        masks = nanshe.syn.data.generate_hypersphere_masks(
            space, points, radii
        )
        images = nanshe.syn.data.generate_gaussian_images(
            space, points, radii/3.0, magnitudes
        ) * masks
        labels = masks.max(axis=0).astype(int)

        e = nanshe.imp.segment.ExtendedRegionProps(images.max(axis=0), labels)

        dist = scipy.spatial.distance.pdist(points).max()
        i = 0
        while (dist + i * numpy.finfo(type(dist)).eps) == dist:
            i += 1
        dist += i * numpy.finfo(type(dist)).eps

        e2 = nanshe.imp.segment.remove_too_close_local_maxima(e, dist)

        assert (len(points) == len(e.props))
        assert (1 == len(e2.props))
        assert (points[magnitudes == magnitudes.max()] == e2.props["local_max"][0]).all()

    def test_wavelet_thresholding_1(self):
        # Wavelet thresholding of a sum-of-Gaussians image: the second-to-last
        # scale's mask should equal the analytic mask, and the last scale's
        # mask should contain it.
        params = {
            "significance_threshold" : 3.0,
            "wavelet_scale" : 5,
            "noise_threshold" : 3.0
        }

        shape = numpy.array((500, 500))

        neuron_centers = numpy.array([[177, 52], [127, 202], [343, 271]])

        original_neurons_image = nanshe.syn.data.generate_gaussian_images(shape, neuron_centers, (50.0/3.0,)*len(neuron_centers), (1.0/3.0,)*len(neuron_centers)).sum(axis=0)
        # Analytic threshold marking the synthetic neurons' support.
        original_neurons_mask = (original_neurons_image >= 0.00014218114898827068)

        wtt_image, wtt_mask = nanshe.imp.segment.wavelet_thresholding(
            original_neurons_image, **params
        )

        assert (wtt_mask[-2] == original_neurons_mask).all()
        assert ((wtt_mask[-1] & original_neurons_mask) == original_neurons_mask).all()

    def test_match_regions_properties_1(self):
        # Area constraint [1990, 2000] matches the first and third records
        # only (the second has area 1988).
        props = numpy.array(
            [
                (1, 1990.0, [3.7402010050251255, 127.0, 202.0], 0.9990127357638044, 39.484721299262105),
                (2, 1988.0, [3.7399396378269616, 177.0, 52.021126760563384], 0.9990128314664918, 39.49948424388854),
                (3, 1990.0, [3.7402010050251255, 343.0, 271.0], 0.9990127357638044, 39.484721299262105)
            ],
            dtype=[
                ('label', '<i8'),
                ('area', '<f8'),
                ('centroid', '<f8', (3,)),
                ('eccentricity', '<f8'),
                ('major_axis_length', '<f8')
            ]
        )
        params = {
            "area": {
                "min" : 1990,
                "max" : 2000
            }
        }

        matches = nanshe.imp.segment.match_regions_properties(props, params)

        assert len(matches) == len(props)
        assert (matches == numpy.array([ True, False, True])).all()

    def test_wavelet_denoising_1(self):
        # Denoising three well-separated Gaussians recovers each neuron's
        # mask and image exactly.
        params = {
            "remove_low_intensity_local_maxima" : {
                "percentage_pixels_below_max" : 0
            },
            "wavelet.transform" : {
                "scale" : 5
            },
            "accepted_region_shape_constraints" : {
                "major_axis_length" : {
                    "max" : 25.0,
                    "min" : 0.0
                }
            },
            "accepted_neuron_shape_constraints" : {
                "eccentricity" : {
                    "max" : 0.9,
                    "min" : 0.0
                },
                "area" : {
                    "max" : 600,
                    "min" : 30
                }
            },
            "estimate_noise" : {
                "significance_threshold" : 3.0
            },
            "significant_mask" : {
                "noise_threshold" : 3.0
            },
            "remove_too_close_local_maxima" : {
                "min_local_max_distance" : 100.0
            },
            "use_watershed" : True
        }

        shape = numpy.array((500, 500))

        neuron_centers = numpy.array([[177, 52], [127, 202], [343, 271]])

        original_neuron_image = nanshe.syn.data.generate_gaussian_images(shape, neuron_centers, (50.0/3.0,)*len(neuron_centers), (1.0/3.0,)*len(neuron_centers)).sum(axis=0)
        # Analytic threshold marking the synthetic neurons' support.
        original_neurons_mask = (original_neuron_image >= 0.00014218114898827068)

        neurons = nanshe.imp.segment.wavelet_denoising(original_neuron_image, **params)

        assert (len(neuron_centers) == len(neurons))
        assert (original_neurons_mask == neurons["mask"].max(axis=0)).all()
        assert ((original_neurons_mask*original_neuron_image) == neurons["image"].max(axis=0)).all()

    def test_wavelet_denoising_2(self):
        # Larger neurons with relaxed shape constraints: results are matched
        # back to ground truth by nearest image distance, then compared
        # within a small tolerance.
        params = {
            "remove_low_intensity_local_maxima" : {
                "percentage_pixels_below_max" : 0
            },
            "wavelet.transform" : {
                "scale" : 5
            },
            "accepted_region_shape_constraints" : {
                "major_axis_length" : {
                    "max" : 150.0,
                    "min" : 0.0
                }
            },
            "accepted_neuron_shape_constraints" : {
                "eccentricity" : {
                    "max" : 0.9,
                    "min" : 0.0
                },
                "area" : {
                    "max" : 10000,
                    "min" : 0
                }
            },
            "estimate_noise" : {
                "significance_threshold" : 3.0
            },
            "significant_mask" : {
                "noise_threshold" : 3.0
            },
            "remove_too_close_local_maxima" : {
                "min_local_max_distance" : 100.0
            },
            "use_watershed" : True
        }

        shape = numpy.array((500, 500))

        neuron_centers = numpy.array([[127, 202], [177, 52], [343, 271]])
        neuron_radii = numpy.array((50.0,)*len(neuron_centers))
        neuron_magnitudes = numpy.array((1.0/3.0,)*len(neuron_centers))
        neuron_spreads = neuron_radii / 3.0

        neuron_images = nanshe.syn.data.generate_gaussian_images(shape, neuron_centers, neuron_spreads, neuron_magnitudes)
        # Support mask at three spreads from the center.
        neuron_masks = (neuron_images >= (neuron_magnitudes.max() * scipy.stats.norm.pdf(3 * neuron_spreads.max(), scale=neuron_spreads.max())**len(shape)))
        neuron_images *= neuron_masks

        neurons = nanshe.imp.segment.wavelet_denoising(neuron_images.max(axis=0), **params)

        # Resort neuron image order based on most similar.
        result_neurons_distance = scipy.spatial.distance.cdist(neuron_images.reshape(neurons.shape + (-1,)), neurons["image"].reshape(neurons.shape + (-1,)))

        neuron_centers_old = neuron_centers
        neuron_radii_old = neuron_radii
        neuron_magnitudes_old = neuron_magnitudes
        neuron_images_old = neuron_images
        neuron_masks_old = neuron_masks

        neuron_centers = numpy.zeros(neuron_centers_old.shape, dtype=neuron_centers_old.dtype)
        neuron_radii = numpy.zeros(neuron_radii_old.shape, dtype=neuron_radii_old.dtype)
        neuron_magnitudes = numpy.zeros(neuron_magnitudes_old.shape, dtype=neuron_magnitudes_old.dtype)
        neuron_images = numpy.zeros(neuron_images_old.shape, dtype=neuron_images_old.dtype)
        neuron_masks = numpy.zeros(neuron_masks_old.shape, dtype=neuron_masks_old.dtype)

        # Reorder ground truth so index i is the closest match to result i.
        for i1, i2 in enumerate(result_neurons_distance.argmin(axis=1)):
            neuron_centers[i1] = neuron_centers_old[i2]
            neuron_radii[i1] = neuron_radii_old[i2]
            neuron_magnitudes[i1] = neuron_magnitudes_old[i2]
            neuron_images[i1] = neuron_images_old[i2]
            neuron_masks[i1] = neuron_masks_old[i2]

        neuron_centers_old = None
        neuron_radii_old = None
        neuron_magnitudes_old = None
        neuron_images_old = None
        neuron_masks_old = None

        assert (len(neuron_centers) == len(neurons))
        assert (numpy.abs(neurons["image"].max(axis=0) - neuron_images.max(axis=0)).max() < 1.0e-4)
        assert (numpy.abs(neurons["image"] - neuron_images).max() < 1.0e-4)

    @nose.plugins.attrib.attr("3D")
    def test_wavelet_denoising_3(self):
        # 3D analogue of test_wavelet_denoising_2 with a tighter tolerance.
        params = {
            "remove_low_intensity_local_maxima" : {
                "percentage_pixels_below_max" : 0
            },
            "wavelet.transform" : {
                "scale" : 5
            },
            "accepted_region_shape_constraints" : {
                "major_axis_length" : {
                    "max" : 30.0,
                    "min" : 0.0
                }
            },
            "accepted_neuron_shape_constraints" : {
                "eccentricity" : {
                    "max" : 0.9,
                    "min" : 0.0
                },
                "area" : {
                    "max" : 30000,
                    "min" : 10000
                }
            },
            "estimate_noise" : {
                "significance_threshold" : 3.0
            },
            "significant_mask" : {
                "noise_threshold" : 3.0
            },
            "remove_too_close_local_maxima" : {
                "min_local_max_distance" : 100.0
            },
            "use_watershed" : True
        }

        shape = numpy.array((100, 100, 100))

        neuron_centers = numpy.array([[21, 17, 46], [46, 71, 83], [77, 52, 17]])
        neuron_radii = numpy.array((10.0,)*len(neuron_centers))
        neuron_magnitudes = numpy.array((1.0/3.0,)*len(neuron_centers))
        neuron_spreads = neuron_radii / 3.0

        neuron_images = nanshe.syn.data.generate_gaussian_images(shape, neuron_centers, neuron_spreads, neuron_magnitudes)
        neuron_masks = (neuron_images >= (neuron_magnitudes.max() * scipy.stats.norm.pdf(3 * neuron_spreads.max(), scale=neuron_spreads.max())**len(shape)))
        neuron_images *= neuron_masks

        neurons = nanshe.imp.segment.wavelet_denoising(neuron_images.max(axis=0), **params)

        # Resort neuron image order based on most similar.
        result_neurons_distance = scipy.spatial.distance.cdist(neuron_images.reshape(neurons.shape + (-1,)), neurons["image"].reshape(neurons.shape + (-1,)))

        neuron_centers_old = neuron_centers
        neuron_radii_old = neuron_radii
        neuron_magnitudes_old = neuron_magnitudes
        neuron_images_old = neuron_images
        neuron_masks_old = neuron_masks

        neuron_centers = numpy.zeros(neuron_centers_old.shape, dtype=neuron_centers_old.dtype)
        neuron_radii = numpy.zeros(neuron_radii_old.shape, dtype=neuron_radii_old.dtype)
        neuron_magnitudes = numpy.zeros(neuron_magnitudes_old.shape, dtype=neuron_magnitudes_old.dtype)
        neuron_images = numpy.zeros(neuron_images_old.shape, dtype=neuron_images_old.dtype)
        neuron_masks = numpy.zeros(neuron_masks_old.shape, dtype=neuron_masks_old.dtype)

        for i1, i2 in enumerate(result_neurons_distance.argmin(axis=1)):
            neuron_centers[i1] = neuron_centers_old[i2]
            neuron_radii[i1] = neuron_radii_old[i2]
            neuron_magnitudes[i1] = neuron_magnitudes_old[i2]
            neuron_images[i1] = neuron_images_old[i2]
            neuron_masks[i1] = neuron_masks_old[i2]

        neuron_centers_old = None
        neuron_radii_old = None
        neuron_magnitudes_old = None
        neuron_images_old = None
        neuron_masks_old = None

        assert (len(neuron_centers) == len(neurons))
        assert (numpy.abs(neurons["image"].max(axis=0) - neuron_images.max(axis=0)).max() < 1.0e-6)
        assert
        (numpy.abs(neurons["image"] - neuron_images).max() < 1.0e-6)

    def test_extract_neurons_1(self):
        # 2D: extracting neurons from two disjoint circular masks populates
        # every structured field (mask, image, area, max_F, Gaussian stats).
        image = 5 * numpy.ones((100, 100))

        xy = numpy.indices(image.shape)

        circle_centers = numpy.array([[25, 25], [74, 74]])

        circle_radii = numpy.array([25, 25])

        circle_offsets = nanshe.util.xnumpy.expand_view(circle_centers, image.shape) - \
                         nanshe.util.xnumpy.expand_view(xy, reps_before=len(circle_centers))

        circle_offsets_squared = circle_offsets**2

        circle_masks = (circle_offsets_squared.sum(axis=1)**.5 < nanshe.util.xnumpy.expand_view(circle_radii, image.shape))

        circle_images = circle_masks * image

        # Ground-truth mean and covariance of each mask's pixel coordinates.
        circle_mask_mean = numpy.zeros((len(circle_masks), image.ndim,))
        circle_mask_cov = numpy.zeros((len(circle_masks), image.ndim, image.ndim,))
        for circle_mask_i in nanshe.util.iters.irange(len(circle_masks)):
            each_circle_mask_points = numpy.array(circle_masks[circle_mask_i].nonzero(), dtype=float)

            circle_mask_mean[circle_mask_i] = each_circle_mask_points.mean(axis=1)
            circle_mask_cov[circle_mask_i] = numpy.cov(each_circle_mask_points)

        neurons = nanshe.imp.segment.extract_neurons(image, circle_masks)

        assert (len(circle_masks) == len(neurons))
        assert (circle_masks == neurons["mask"]).all()
        assert (circle_images == neurons["image"]).all()
        assert (numpy.apply_over_axes(numpy.sum, circle_masks, range(1, circle_masks.ndim)) == neurons["area"]).all()
        assert (numpy.apply_over_axes(numpy.max, circle_images, range(1, circle_masks.ndim)) == neurons["max_F"]).all()
        assert (circle_mask_mean == neurons["gaussian_mean"]).all()
        assert (circle_mask_cov == neurons["gaussian_cov"]).all()
        assert (neurons["centroid"] == neurons["gaussian_mean"]).all()

    @nose.plugins.attrib.attr("3D")
    def test_extract_neurons_2(self):
        # 3D analogue of test_extract_neurons_1.
        image = 5 * numpy.ones((100, 100, 100))

        xyz = numpy.indices(image.shape)

        circle_centers = numpy.array([[25, 25, 25], [74, 74, 74]])

        circle_radii = numpy.array([25, 25])

        circle_offsets = nanshe.util.xnumpy.expand_view(circle_centers, image.shape) - \
                         nanshe.util.xnumpy.expand_view(xyz, reps_before=len(circle_centers))

        circle_offsets_squared = circle_offsets**2

        circle_masks = (circle_offsets_squared.sum(axis=1)**.5 < nanshe.util.xnumpy.expand_view(circle_radii, image.shape))

        circle_images = circle_masks * image

        circle_mask_mean = numpy.zeros((len(circle_masks), image.ndim,))
        circle_mask_cov = numpy.zeros((len(circle_masks), image.ndim, image.ndim,))
        for circle_mask_i in nanshe.util.iters.irange(len(circle_masks)):
            each_circle_mask_points = numpy.array(circle_masks[circle_mask_i].nonzero(), dtype=float)

            circle_mask_mean[circle_mask_i] = each_circle_mask_points.mean(axis=1)
            circle_mask_cov[circle_mask_i] = numpy.cov(each_circle_mask_points)

        neurons = nanshe.imp.segment.extract_neurons(image, circle_masks)

        assert (len(circle_masks) == len(neurons))
        assert (circle_masks == neurons["mask"]).all()
        assert (circle_images == neurons["image"]).all()
        assert (numpy.apply_over_axes(numpy.sum, circle_masks, range(1, circle_masks.ndim)) == neurons["area"]).all()
        assert (numpy.apply_over_axes(numpy.max, circle_images, range(1, circle_masks.ndim)) == neurons["max_F"]).all()
        assert (circle_mask_mean == neurons["gaussian_mean"]).all()
        assert (circle_mask_cov == neurons["gaussian_cov"]).all()
        assert (neurons["centroid"] == neurons["gaussian_mean"]).all()

    def test_fuse_neurons_1(self):
        # 2D: fusing two extracted neurons unions masks, averages images,
        # and sums areas.
        fraction_mean_neuron_max_threshold = 0.01

        image = 5 * numpy.ones((100, 100))

        xy = numpy.indices(image.shape)

        circle_centers = numpy.array([[25, 25], [74, 74]])

        circle_radii = numpy.array([25, 25])

        circle_offsets = nanshe.util.xnumpy.expand_view(circle_centers, image.shape) - \
                         nanshe.util.xnumpy.expand_view(xy, reps_before=len(circle_centers))

        circle_offsets_squared = circle_offsets**2

        circle_masks = (circle_offsets_squared.sum(axis=1)**.5 < nanshe.util.xnumpy.expand_view(circle_radii, image.shape))

        circle_mask_mean = numpy.zeros((len(circle_masks), image.ndim,))
        circle_mask_cov = numpy.zeros((len(circle_masks), image.ndim, image.ndim,))
        for circle_mask_i in
nanshe.util.iters.irange(len(circle_masks)): each_circle_mask_points = numpy.array(circle_masks[circle_mask_i].nonzero(), dtype=float) circle_mask_mean[circle_mask_i] = each_circle_mask_points.mean(axis=1) circle_mask_cov[circle_mask_i] = numpy.cov(each_circle_mask_points) neurons = nanshe.imp.segment.extract_neurons(image, circle_masks) fused_neurons = nanshe.imp.segment.fuse_neurons(neurons[0], neurons[1], fraction_mean_neuron_max_threshold) assert (neurons["mask"].sum(axis=0) == fused_neurons["mask"]).all() assert (neurons["image"].mean(axis=0) == fused_neurons["image"]).all() assert (numpy.array(neurons["area"].sum()) == fused_neurons["area"]) assert (fused_neurons["image"].max() == fused_neurons["max_F"]) assert (neurons["gaussian_mean"].mean(axis=0) == fused_neurons["gaussian_mean"]).all() assert (fused_neurons["centroid"] == fused_neurons["gaussian_mean"]).all() @nose.plugins.attrib.attr("3D") def test_fuse_neurons_2(self): fraction_mean_neuron_max_threshold = 0.01 image = 5 * numpy.ones((100, 100, 100)) xy = numpy.indices(image.shape) circle_centers = numpy.array([[25, 25, 25], [74, 74, 74]]) circle_radii = numpy.array([25, 25]) circle_offsets = nanshe.util.xnumpy.expand_view(circle_centers, image.shape) - \ nanshe.util.xnumpy.expand_view(xy, reps_before=len(circle_centers)) circle_offsets_squared = circle_offsets**2 circle_masks = (circle_offsets_squared.sum(axis=1)**.5 < nanshe.util.xnumpy.expand_view(circle_radii, image.shape)) circle_mask_mean = numpy.zeros((len(circle_masks), image.ndim,)) circle_mask_cov = numpy.zeros((len(circle_masks), image.ndim, image.ndim,)) for circle_mask_i in nanshe.util.iters.irange(len(circle_masks)): each_circle_mask_points = numpy.array(circle_masks[circle_mask_i].nonzero(), dtype=float) circle_mask_mean[circle_mask_i] = each_circle_mask_points.mean(axis=1) circle_mask_cov[circle_mask_i] = numpy.cov(each_circle_mask_points) neurons = nanshe.imp.segment.extract_neurons(image, circle_masks) fused_neurons = 
nanshe.imp.segment.fuse_neurons(neurons[0], neurons[1], fraction_mean_neuron_max_threshold) assert (neurons["mask"].sum(axis=0) == fused_neurons["mask"]).all() assert (neurons["image"].mean(axis=0) == fused_neurons["image"]).all() assert (numpy.array(neurons["area"].sum()) == fused_neurons["area"]) assert (fused_neurons["image"].max() == fused_neurons["max_F"]) assert (neurons["gaussian_mean"].mean(axis=0) == fused_neurons["gaussian_mean"]).all() assert (fused_neurons["centroid"] == fused_neurons["gaussian_mean"]).all() def test_merge_neuron_sets_1(self): alignment_min_threshold = 0.6 overlap_min_threshold = 0.6 fuse_neurons = {"fraction_mean_neuron_max_threshold" : 0.01} image = 5 * numpy.ones((100, 100)) xy = numpy.indices(image.shape) circle_centers = numpy.array([[25, 25], [74, 74]]) circle_radii = numpy.array([25, 25]) circle_offsets = nanshe.util.xnumpy.expand_view(circle_centers, image.shape) - \ nanshe.util.xnumpy.expand_view(xy, reps_before=len(circle_centers)) circle_offsets_squared = circle_offsets**2 circle_masks = (circle_offsets_squared.sum(axis=1)**.5 < nanshe.util.xnumpy.expand_view(circle_radii, image.shape)) neurons = nanshe.imp.segment.extract_neurons(image, circle_masks) merged_neurons = nanshe.imp.segment.merge_neuron_sets(neurons[:1], neurons[1:], alignment_min_threshold, overlap_min_threshold, fuse_neurons=fuse_neurons) assert (len(neurons) == len(circle_centers)) assert (neurons == merged_neurons).all() def test_merge_neuron_sets_2(self): alignment_min_threshold = 0.6 overlap_min_threshold = 0.6 fuse_neurons = {"fraction_mean_neuron_max_threshold" : 0.01} image = 5 * numpy.ones((100, 100)) xy = numpy.indices(image.shape) circle_centers = numpy.array([[25, 25]]) circle_radii = numpy.array([25]) circle_offsets = nanshe.util.xnumpy.expand_view(circle_centers, image.shape) - \ nanshe.util.xnumpy.expand_view(xy, reps_before=len(circle_centers)) circle_offsets_squared = circle_offsets**2 circle_masks = (circle_offsets_squared.sum(axis=1)**.5 < 
nanshe.util.xnumpy.expand_view(circle_radii, image.shape)) neurons = nanshe.imp.segment.extract_neurons(image, circle_masks) merged_neurons = nanshe.imp.segment.merge_neuron_sets(neurons, neurons, alignment_min_threshold, overlap_min_threshold, fuse_neurons=fuse_neurons) assert (len(neurons) == len(circle_centers)) assert (neurons == merged_neurons).all() @nose.plugins.attrib.attr("3D") def test_merge_neuron_sets_3(self): alignment_min_threshold = 0.6 overlap_min_threshold = 0.6 fuse_neurons = {"fraction_mean_neuron_max_threshold" : 0.01} image = 5 * numpy.ones((100, 100, 100)) xyz = numpy.indices(image.shape) circle_centers = numpy.array([[25, 25, 25], [74, 74, 74]]) circle_radii = numpy.array([25, 25]) circle_offsets = nanshe.util.xnumpy.expand_view(circle_centers, image.shape) - \ nanshe.util.xnumpy.expand_view(xyz, reps_before=len(circle_centers)) circle_offsets_squared = circle_offsets**2 circle_masks = (circle_offsets_squared.sum(axis=1)**.5 < nanshe.util.xnumpy.expand_view(circle_radii, image.shape)) neurons = nanshe.imp.segment.extract_neurons(image, circle_masks) merged_neurons = nanshe.imp.segment.merge_neuron_sets(neurons[:1], neurons[1:], alignment_min_threshold, overlap_min_threshold, fuse_neurons=fuse_neurons) assert (len(neurons) == len(circle_centers)) assert (neurons == merged_neurons).all() @nose.plugins.attrib.attr("3D") def test_merge_neuron_sets_4(self): alignment_min_threshold = 0.6 overlap_min_threshold = 0.6 fuse_neurons = {"fraction_mean_neuron_max_threshold" : 0.01} image = 5 * numpy.ones((100, 100, 100)) xyz = numpy.indices(image.shape) circle_centers = numpy.array([[25, 25, 25]]) circle_radii = numpy.array([25]) circle_offsets = nanshe.util.xnumpy.expand_view(circle_centers, image.shape) - \ nanshe.util.xnumpy.expand_view(xyz, reps_before=len(circle_centers)) circle_offsets_squared = circle_offsets**2 circle_masks = (circle_offsets_squared.sum(axis=1)**.5 < nanshe.util.xnumpy.expand_view(circle_radii, image.shape)) neurons = 
nanshe.imp.segment.extract_neurons(image, circle_masks) merged_neurons = nanshe.imp.segment.merge_neuron_sets(neurons, neurons, alignment_min_threshold, overlap_min_threshold, fuse_neurons=fuse_neurons) assert (len(neurons) == len(circle_centers)) assert (neurons == merged_neurons).all() def test_postprocess_data_1(self): config = { "wavelet_denoising" : { "remove_low_intensity_local_maxima" : { "percentage_pixels_below_max" : 0.0 }, "wavelet.transform" : { "scale" : 4 }, "accepted_region_shape_constraints" : { "major_axis_length" : { "max" : 25.0, "min" : 0.0 } }, "accepted_neuron_shape_constraints" : { "eccentricity" : { "max" : 0.9, "min" : 0.0 }, "area" : { "max" : 600, "min" : 30 } }, "estimate_noise" : { "significance_threshold" : 3.0 }, "significant_mask" : { "noise_threshold" : 3.0 }, "remove_too_close_local_maxima" : { "min_local_max_distance" : 10.0 }, "use_watershed" : True }, "merge_neuron_sets" : { "alignment_min_threshold" : 0.6, "fuse_neurons" : { "fraction_mean_neuron_max_threshold" : 0.01 }, "overlap_min_threshold" : 0.6 } } space = numpy.array([100, 100]) radii = numpy.array([7, 6, 6, 6, 7, 6]) magnitudes = numpy.array([15, 16, 15, 17, 16, 16]) points = numpy.array([[30, 24], [59, 65], [21, 65], [13, 12], [72, 16], [45, 32]]) masks = nanshe.syn.data.generate_hypersphere_masks(space, points, radii) images = nanshe.syn.data.generate_gaussian_images(space, points, radii/3.0, magnitudes) * masks bases_indices = [[1,3,4], [0,2], [5]] bases_masks = numpy.zeros((len(bases_indices),) + masks.shape[1:], dtype=masks.dtype) bases_images = numpy.zeros((len(bases_indices),) + images.shape[1:], dtype=images.dtype) for i, each_basis_indices in enumerate(bases_indices): bases_masks[i] = masks[list(each_basis_indices)].max(axis=0) bases_images[i] = images[list(each_basis_indices)].max(axis=0) neurons = nanshe.imp.segment.postprocess_data(bases_images, **config) assert (len(points) == len(neurons)) neuron_max_matches = 
nanshe.util.xnumpy.all_permutations_equal(neurons["max_F"], neurons["image"]) neuron_max_matches = neuron_max_matches.max(axis=0).max(axis=0) neuron_points = numpy.array(neuron_max_matches.nonzero()).T.copy() matched = dict() unmatched_points = numpy.arange(len(points)) for i in nanshe.util.iters.irange(len(neuron_points)): new_unmatched_points = [] for j in unmatched_points: if not (neuron_points[i] == points[j]).all(): new_unmatched_points.append(j) else: matched[i] = j unmatched_points = new_unmatched_points assert (len(unmatched_points) == 0) def test_postprocess_data_2(self): config = { "wavelet_denoising" : { "remove_low_intensity_local_maxima" : { "percentage_pixels_below_max" : 0.0 }, "wavelet.transform" : { "scale" : 4 }, "accepted_region_shape_constraints" : { "major_axis_length" : { "max" : 25.0, "min" : 0.0 } }, "accepted_neuron_shape_constraints" : { "eccentricity" : { "max" : 0.9, "min" : 0.0 }, "area" : { "max" : 600, "min" : 30 } }, "estimate_noise" : { "significance_threshold" : 3.0 }, "significant_mask" : { "noise_threshold" : 3.0 }, "remove_too_close_local_maxima" : { "min_local_max_distance" : 10.0 }, "use_watershed" : True }, "merge_neuron_sets" : { "alignment_min_threshold" : 0.6, "fuse_neurons" : { "fraction_mean_neuron_max_threshold" : 0.01 }, "overlap_min_threshold" : 0.6 } } space = numpy.array([100, 100]) radii = numpy.array([25]) magnitudes = numpy.array([15]) points = numpy.array([[25, 25]]) masks = nanshe.syn.data.generate_hypersphere_masks(space, numpy.vstack([points, points]), numpy.hstack([radii, radii])) images = nanshe.syn.data.generate_gaussian_images(space, numpy.vstack([points, points]), numpy.hstack([radii, radii])/3.0, numpy.hstack([magnitudes, magnitudes])) * masks print(masks.shape) bases_indices = [[0], [1]] bases_masks = numpy.zeros((len(bases_indices),) + masks.shape[1:], dtype=masks.dtype) bases_images = numpy.zeros((len(bases_indices),) + images.shape[1:], dtype=images.dtype) for i, each_basis_indices in 
enumerate(bases_indices): bases_masks[i] = masks[list(each_basis_indices)].max(axis=0) bases_images[i] = images[list(each_basis_indices)].max(axis=0) neurons = nanshe.imp.segment.postprocess_data(bases_images, **config) assert (len(points) == len(neurons)) neuron_max_matches = nanshe.util.xnumpy.all_permutations_equal(neurons["max_F"], neurons["image"]) neuron_max_matches = neuron_max_matches.max(axis=0).max(axis=0) neuron_points = numpy.array(neuron_max_matches.nonzero()).T.copy() matched = dict() unmatched_points = numpy.arange(len(points)) for i in nanshe.util.iters.irange(len(neuron_points)): new_unmatched_points = [] for j in unmatched_points: if not (neuron_points[i] == points[j]).all(): new_unmatched_points.append(j) else: matched[i] = j unmatched_points = new_unmatched_points assert (len(unmatched_points) == 0) @nose.plugins.attrib.attr("3D") def test_postprocess_data_3(self): config = { "wavelet_denoising" : { "remove_low_intensity_local_maxima" : { "percentage_pixels_below_max" : 0.0 }, "wavelet.transform" : { "scale" : 4 }, "accepted_region_shape_constraints" : { "major_axis_length" : { "max" : 30.0, "min" : 0.0 } }, "accepted_neuron_shape_constraints" : { "eccentricity" : { "max" : 0.9, "min" : 0.0 }, "area" : { "max" : 6000.0, "min" : 1000.0 } }, "estimate_noise" : { "significance_threshold" : 3.0 }, "significant_mask" : { "noise_threshold" : 3.0 }, "remove_too_close_local_maxima" : { "min_local_max_distance" : 20.0 }, "use_watershed" : True }, "merge_neuron_sets" : { "alignment_min_threshold" : 0.6, "fuse_neurons" : { "fraction_mean_neuron_max_threshold" : 0.01 }, "overlap_min_threshold" : 0.6 } } space = numpy.array([100, 100, 100]) radii = numpy.array([7, 6, 6, 6, 7, 6]) magnitudes = numpy.array([15, 16, 15, 17, 16, 16]) points = numpy.array([[30, 24, 68], [59, 65, 47], [21, 65, 21], [13, 12, 21], [72, 16, 67], [45, 32, 27]]) masks = nanshe.syn.data.generate_hypersphere_masks(space, points, radii) images = 
nanshe.syn.data.generate_gaussian_images(space, points, radii/3.0, magnitudes) * masks bases_indices = [[1,3,4], [0,2], [5]] bases_masks = numpy.zeros((len(bases_indices),) + masks.shape[1:], dtype=masks.dtype) bases_images = numpy.zeros((len(bases_indices),) + images.shape[1:], dtype=images.dtype) for i, each_basis_indices in enumerate(bases_indices): bases_masks[i] = masks[list(each_basis_indices)].max(axis=0) bases_images[i] = images[list(each_basis_indices)].max(axis=0) neurons = nanshe.imp.segment.postprocess_data(bases_images, **config) assert (len(points) == len(neurons)) neuron_max_matches = nanshe.util.xnumpy.all_permutations_equal(neurons["max_F"], neurons["image"]) neuron_max_matches = neuron_max_matches.max(axis=0).max(axis=0) neuron_points = numpy.array(neuron_max_matches.nonzero()).T.copy() matched = dict() unmatched_points = numpy.arange(len(points)) for i in nanshe.util.iters.irange(len(neuron_points)): new_unmatched_points = [] for j in unmatched_points: if not (neuron_points[i] == points[j]).all(): new_unmatched_points.append(j) else: matched[i] = j unmatched_points = new_unmatched_points assert (len(unmatched_points) == 0) @nose.plugins.attrib.attr("3D") def test_postprocess_data_4(self): config = { "wavelet_denoising" : { "remove_low_intensity_local_maxima" : { "percentage_pixels_below_max" : 0.0 }, "wavelet.transform" : { "scale" : 4 }, "accepted_region_shape_constraints" : { "major_axis_length" : { "max" : 30.0, "min" : 0.0 } }, "accepted_neuron_shape_constraints" : { "eccentricity" : { "max" : 0.9, "min" : 0.0 }, "area" : { "max" : 70000.0, "min" : 10000.0 } }, "estimate_noise" : { "significance_threshold" : 3.0 }, "significant_mask" : { "noise_threshold" : 3.0 }, "remove_too_close_local_maxima" : { "min_local_max_distance" : 20.0 }, "use_watershed" : True }, "merge_neuron_sets" : { "alignment_min_threshold" : 0.6, "fuse_neurons" : { "fraction_mean_neuron_max_threshold" : 0.01 }, "overlap_min_threshold" : 0.6 } } space = numpy.array([100, 
100, 100]) radii = numpy.array([25]) magnitudes = numpy.array([15]) points = numpy.array([[25, 25, 25]]) masks = nanshe.syn.data.generate_hypersphere_masks(space, numpy.vstack([points, points]), numpy.hstack([radii, radii])) images = nanshe.syn.data.generate_gaussian_images(space, numpy.vstack([points, points]), numpy.hstack([radii, radii])/3.0, numpy.hstack([magnitudes, magnitudes])) * masks bases_indices = [[0], [1]] bases_masks = numpy.zeros((len(bases_indices),) + masks.shape[1:], dtype=masks.dtype) bases_images = numpy.zeros((len(bases_indices),) + images.shape[1:], dtype=images.dtype) for i, each_basis_indices in enumerate(bases_indices): bases_masks[i] = masks[list(each_basis_indices)].max(axis=0) bases_images[i] = images[list(each_basis_indices)].max(axis=0) neurons = nanshe.imp.segment.postprocess_data(bases_images, **config) assert (len(points) == len(neurons)) neuron_max_matches = nanshe.util.xnumpy.all_permutations_equal(neurons["max_F"], neurons["image"]) neuron_max_matches = neuron_max_matches.max(axis=0).max(axis=0) neuron_points = numpy.array(neuron_max_matches.nonzero()).T.copy() matched = dict() unmatched_points = numpy.arange(len(points)) for i in nanshe.util.iters.irange(len(neuron_points)): new_unmatched_points = [] for j in unmatched_points: if not (neuron_points[i] == points[j]).all(): new_unmatched_points.append(j) else: matched[i] = j unmatched_points = new_unmatched_points assert (len(unmatched_points) == 0)
codeparrot/github-code-clean
"""Tests for events.py.""" import functools import gc import io import os import platform import re import signal import socket try: import ssl except ImportError: ssl = None import subprocess import sys import threading import time import errno import unittest from unittest import mock import weakref import asyncio from asyncio import proactor_events from asyncio import selector_events from asyncio import sslproto from asyncio import test_utils try: from test import support except ImportError: from asyncio import test_support as support def data_file(filename): if hasattr(support, 'TEST_HOME_DIR'): fullname = os.path.join(support.TEST_HOME_DIR, filename) if os.path.isfile(fullname): return fullname fullname = os.path.join(os.path.dirname(__file__), filename) if os.path.isfile(fullname): return fullname raise FileNotFoundError(filename) def osx_tiger(): """Return True if the platform is Mac OS 10.4 or older.""" if sys.platform != 'darwin': return False version = platform.mac_ver()[0] version = tuple(map(int, version.split('.'))) return version < (10, 5) ONLYCERT = data_file('ssl_cert.pem') ONLYKEY = data_file('ssl_key.pem') SIGNED_CERTFILE = data_file('keycert3.pem') SIGNING_CA = data_file('pycacert.pem') PEERCERT = {'serialNumber': 'B09264B1F2DA21D1', 'version': 1, 'subject': ((('countryName', 'XY'),), (('localityName', 'Castle Anthrax'),), (('organizationName', 'Python Software Foundation'),), (('commonName', 'localhost'),)), 'issuer': ((('countryName', 'XY'),), (('organizationName', 'Python Software Foundation CA'),), (('commonName', 'our-ca-server'),)), 'notAfter': 'Nov 13 19:47:07 2022 GMT', 'notBefore': 'Jan 4 19:47:07 2013 GMT'} class MyBaseProto(asyncio.Protocol): connected = None done = None def __init__(self, loop=None): self.transport = None self.state = 'INITIAL' self.nbytes = 0 if loop is not None: self.connected = asyncio.Future(loop=loop) self.done = asyncio.Future(loop=loop) def connection_made(self, transport): self.transport = transport assert 
self.state == 'INITIAL', self.state self.state = 'CONNECTED' if self.connected: self.connected.set_result(None) def data_received(self, data): assert self.state == 'CONNECTED', self.state self.nbytes += len(data) def eof_received(self): assert self.state == 'CONNECTED', self.state self.state = 'EOF' def connection_lost(self, exc): assert self.state in ('CONNECTED', 'EOF'), self.state self.state = 'CLOSED' if self.done: self.done.set_result(None) class MyProto(MyBaseProto): def connection_made(self, transport): super().connection_made(transport) transport.write(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n') class MyDatagramProto(asyncio.DatagramProtocol): done = None def __init__(self, loop=None): self.state = 'INITIAL' self.nbytes = 0 if loop is not None: self.done = asyncio.Future(loop=loop) def connection_made(self, transport): self.transport = transport assert self.state == 'INITIAL', self.state self.state = 'INITIALIZED' def datagram_received(self, data, addr): assert self.state == 'INITIALIZED', self.state self.nbytes += len(data) def error_received(self, exc): assert self.state == 'INITIALIZED', self.state def connection_lost(self, exc): assert self.state == 'INITIALIZED', self.state self.state = 'CLOSED' if self.done: self.done.set_result(None) class MyReadPipeProto(asyncio.Protocol): done = None def __init__(self, loop=None): self.state = ['INITIAL'] self.nbytes = 0 self.transport = None if loop is not None: self.done = asyncio.Future(loop=loop) def connection_made(self, transport): self.transport = transport assert self.state == ['INITIAL'], self.state self.state.append('CONNECTED') def data_received(self, data): assert self.state == ['INITIAL', 'CONNECTED'], self.state self.nbytes += len(data) def eof_received(self): assert self.state == ['INITIAL', 'CONNECTED'], self.state self.state.append('EOF') def connection_lost(self, exc): if 'EOF' not in self.state: self.state.append('EOF') # It is okay if EOF is missed. 
assert self.state == ['INITIAL', 'CONNECTED', 'EOF'], self.state self.state.append('CLOSED') if self.done: self.done.set_result(None) class MyWritePipeProto(asyncio.BaseProtocol): done = None def __init__(self, loop=None): self.state = 'INITIAL' self.transport = None if loop is not None: self.done = asyncio.Future(loop=loop) def connection_made(self, transport): self.transport = transport assert self.state == 'INITIAL', self.state self.state = 'CONNECTED' def connection_lost(self, exc): assert self.state == 'CONNECTED', self.state self.state = 'CLOSED' if self.done: self.done.set_result(None) class MySubprocessProtocol(asyncio.SubprocessProtocol): def __init__(self, loop): self.state = 'INITIAL' self.transport = None self.connected = asyncio.Future(loop=loop) self.completed = asyncio.Future(loop=loop) self.disconnects = {fd: asyncio.Future(loop=loop) for fd in range(3)} self.data = {1: b'', 2: b''} self.returncode = None self.got_data = {1: asyncio.Event(loop=loop), 2: asyncio.Event(loop=loop)} def connection_made(self, transport): self.transport = transport assert self.state == 'INITIAL', self.state self.state = 'CONNECTED' self.connected.set_result(None) def connection_lost(self, exc): assert self.state == 'CONNECTED', self.state self.state = 'CLOSED' self.completed.set_result(None) def pipe_data_received(self, fd, data): assert self.state == 'CONNECTED', self.state self.data[fd] += data self.got_data[fd].set() def pipe_connection_lost(self, fd, exc): assert self.state == 'CONNECTED', self.state if exc: self.disconnects[fd].set_exception(exc) else: self.disconnects[fd].set_result(exc) def process_exited(self): assert self.state == 'CONNECTED', self.state self.returncode = self.transport.get_returncode() class EventLoopTestsMixin: def setUp(self): super().setUp() self.loop = self.create_event_loop() self.set_event_loop(self.loop) def tearDown(self): # just in case if we have transport close callbacks if not self.loop.is_closed(): test_utils.run_briefly(self.loop) 
self.loop.close() gc.collect() super().tearDown() def test_run_until_complete_nesting(self): @asyncio.coroutine def coro1(): yield @asyncio.coroutine def coro2(): self.assertTrue(self.loop.is_running()) self.loop.run_until_complete(coro1()) self.assertRaises( RuntimeError, self.loop.run_until_complete, coro2()) # Note: because of the default Windows timing granularity of # 15.6 msec, we use fairly long sleep times here (~100 msec). def test_run_until_complete(self): t0 = self.loop.time() self.loop.run_until_complete(asyncio.sleep(0.1, loop=self.loop)) t1 = self.loop.time() self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0) def test_run_until_complete_stopped(self): @asyncio.coroutine def cb(): self.loop.stop() yield from asyncio.sleep(0.1, loop=self.loop) task = cb() self.assertRaises(RuntimeError, self.loop.run_until_complete, task) def test_call_later(self): results = [] def callback(arg): results.append(arg) self.loop.stop() self.loop.call_later(0.1, callback, 'hello world') t0 = time.monotonic() self.loop.run_forever() t1 = time.monotonic() self.assertEqual(results, ['hello world']) self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0) def test_call_soon(self): results = [] def callback(arg1, arg2): results.append((arg1, arg2)) self.loop.stop() self.loop.call_soon(callback, 'hello', 'world') self.loop.run_forever() self.assertEqual(results, [('hello', 'world')]) def test_call_soon_threadsafe(self): results = [] lock = threading.Lock() def callback(arg): results.append(arg) if len(results) >= 2: self.loop.stop() def run_in_thread(): self.loop.call_soon_threadsafe(callback, 'hello') lock.release() lock.acquire() t = threading.Thread(target=run_in_thread) t.start() with lock: self.loop.call_soon(callback, 'world') self.loop.run_forever() t.join() self.assertEqual(results, ['hello', 'world']) def test_call_soon_threadsafe_same_thread(self): results = [] def callback(arg): results.append(arg) if len(results) >= 2: self.loop.stop() self.loop.call_soon_threadsafe(callback, 'hello') 
self.loop.call_soon(callback, 'world') self.loop.run_forever() self.assertEqual(results, ['hello', 'world']) def test_run_in_executor(self): def run(arg): return (arg, threading.get_ident()) f2 = self.loop.run_in_executor(None, run, 'yo') res, thread_id = self.loop.run_until_complete(f2) self.assertEqual(res, 'yo') self.assertNotEqual(thread_id, threading.get_ident()) def test_reader_callback(self): r, w = test_utils.socketpair() r.setblocking(False) bytes_read = bytearray() def reader(): try: data = r.recv(1024) except BlockingIOError: # Spurious readiness notifications are possible # at least on Linux -- see man select. return if data: bytes_read.extend(data) else: self.assertTrue(self.loop.remove_reader(r.fileno())) r.close() self.loop.add_reader(r.fileno(), reader) self.loop.call_soon(w.send, b'abc') test_utils.run_until(self.loop, lambda: len(bytes_read) >= 3) self.loop.call_soon(w.send, b'def') test_utils.run_until(self.loop, lambda: len(bytes_read) >= 6) self.loop.call_soon(w.close) self.loop.call_soon(self.loop.stop) self.loop.run_forever() self.assertEqual(bytes_read, b'abcdef') def test_writer_callback(self): r, w = test_utils.socketpair() w.setblocking(False) def writer(data): w.send(data) self.loop.stop() data = b'x' * 1024 self.loop.add_writer(w.fileno(), writer, data) self.loop.run_forever() self.assertTrue(self.loop.remove_writer(w.fileno())) self.assertFalse(self.loop.remove_writer(w.fileno())) w.close() read = r.recv(len(data) * 2) r.close() self.assertEqual(read, data) def _basetest_sock_client_ops(self, httpd, sock): if not isinstance(self.loop, proactor_events.BaseProactorEventLoop): # in debug mode, socket operations must fail # if the socket is not in blocking mode self.loop.set_debug(True) sock.setblocking(True) with self.assertRaises(ValueError): self.loop.run_until_complete( self.loop.sock_connect(sock, httpd.address)) with self.assertRaises(ValueError): self.loop.run_until_complete( self.loop.sock_sendall(sock, b'GET / HTTP/1.0\r\n\r\n')) 
with self.assertRaises(ValueError): self.loop.run_until_complete( self.loop.sock_recv(sock, 1024)) with self.assertRaises(ValueError): self.loop.run_until_complete( self.loop.sock_accept(sock)) # test in non-blocking mode sock.setblocking(False) self.loop.run_until_complete( self.loop.sock_connect(sock, httpd.address)) self.loop.run_until_complete( self.loop.sock_sendall(sock, b'GET / HTTP/1.0\r\n\r\n')) data = self.loop.run_until_complete( self.loop.sock_recv(sock, 1024)) # consume data self.loop.run_until_complete( self.loop.sock_recv(sock, 1024)) sock.close() self.assertTrue(data.startswith(b'HTTP/1.0 200 OK')) def test_sock_client_ops(self): with test_utils.run_test_server() as httpd: sock = socket.socket() self._basetest_sock_client_ops(httpd, sock) @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets') def test_unix_sock_client_ops(self): with test_utils.run_test_unix_server() as httpd: sock = socket.socket(socket.AF_UNIX) self._basetest_sock_client_ops(httpd, sock) def test_sock_client_fail(self): # Make sure that we will get an unused port address = None try: s = socket.socket() s.bind(('127.0.0.1', 0)) address = s.getsockname() finally: s.close() sock = socket.socket() sock.setblocking(False) with self.assertRaises(ConnectionRefusedError): self.loop.run_until_complete( self.loop.sock_connect(sock, address)) sock.close() def test_sock_accept(self): listener = socket.socket() listener.setblocking(False) listener.bind(('127.0.0.1', 0)) listener.listen(1) client = socket.socket() client.connect(listener.getsockname()) f = self.loop.sock_accept(listener) conn, addr = self.loop.run_until_complete(f) self.assertEqual(conn.gettimeout(), 0) self.assertEqual(addr, client.getsockname()) self.assertEqual(client.getpeername(), listener.getsockname()) client.close() conn.close() listener.close() @unittest.skipUnless(hasattr(signal, 'SIGKILL'), 'No SIGKILL') def test_add_signal_handler(self): caught = 0 def my_handler(): nonlocal caught caught += 1 # Check 
error behavior first. self.assertRaises( TypeError, self.loop.add_signal_handler, 'boom', my_handler) self.assertRaises( TypeError, self.loop.remove_signal_handler, 'boom') self.assertRaises( ValueError, self.loop.add_signal_handler, signal.NSIG+1, my_handler) self.assertRaises( ValueError, self.loop.remove_signal_handler, signal.NSIG+1) self.assertRaises( ValueError, self.loop.add_signal_handler, 0, my_handler) self.assertRaises( ValueError, self.loop.remove_signal_handler, 0) self.assertRaises( ValueError, self.loop.add_signal_handler, -1, my_handler) self.assertRaises( ValueError, self.loop.remove_signal_handler, -1) self.assertRaises( RuntimeError, self.loop.add_signal_handler, signal.SIGKILL, my_handler) # Removing SIGKILL doesn't raise, since we don't call signal(). self.assertFalse(self.loop.remove_signal_handler(signal.SIGKILL)) # Now set a handler and handle it. self.loop.add_signal_handler(signal.SIGINT, my_handler) os.kill(os.getpid(), signal.SIGINT) test_utils.run_until(self.loop, lambda: caught) # Removing it should restore the default handler. self.assertTrue(self.loop.remove_signal_handler(signal.SIGINT)) self.assertEqual(signal.getsignal(signal.SIGINT), signal.default_int_handler) # Removing again returns False. self.assertFalse(self.loop.remove_signal_handler(signal.SIGINT)) @unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM') def test_signal_handling_while_selecting(self): # Test with a signal actually arriving during a select() call. caught = 0 def my_handler(): nonlocal caught caught += 1 self.loop.stop() self.loop.add_signal_handler(signal.SIGALRM, my_handler) signal.setitimer(signal.ITIMER_REAL, 0.01, 0) # Send SIGALRM once. 
self.loop.run_forever() self.assertEqual(caught, 1) @unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM') def test_signal_handling_args(self): some_args = (42,) caught = 0 def my_handler(*args): nonlocal caught caught += 1 self.assertEqual(args, some_args) self.loop.add_signal_handler(signal.SIGALRM, my_handler, *some_args) signal.setitimer(signal.ITIMER_REAL, 0.1, 0) # Send SIGALRM once. self.loop.call_later(0.5, self.loop.stop) self.loop.run_forever() self.assertEqual(caught, 1) def _basetest_create_connection(self, connection_fut, check_sockname=True): tr, pr = self.loop.run_until_complete(connection_fut) self.assertIsInstance(tr, asyncio.Transport) self.assertIsInstance(pr, asyncio.Protocol) self.assertIs(pr.transport, tr) if check_sockname: self.assertIsNotNone(tr.get_extra_info('sockname')) self.loop.run_until_complete(pr.done) self.assertGreater(pr.nbytes, 0) tr.close() def test_create_connection(self): with test_utils.run_test_server() as httpd: conn_fut = self.loop.create_connection( lambda: MyProto(loop=self.loop), *httpd.address) self._basetest_create_connection(conn_fut) @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets') def test_create_unix_connection(self): # Issue #20682: On Mac OS X Tiger, getsockname() returns a # zero-length address for UNIX socket. 
        # (Continuation of test_create_unix_connection.)
        # Issue #20682: on Mac OS X Tiger getsockname() returns a zero-length
        # address for UNIX sockets, so skip the sockname check there.
        check_sockname = not osx_tiger()

        with test_utils.run_test_unix_server() as httpd:
            conn_fut = self.loop.create_unix_connection(
                lambda: MyProto(loop=self.loop), httpd.address)
            self._basetest_create_connection(conn_fut, check_sockname)

    def test_create_connection_sock(self):
        # create_connection() must accept a pre-connected socket via sock=.
        with test_utils.run_test_server() as httpd:
            sock = None
            infos = self.loop.run_until_complete(
                self.loop.getaddrinfo(
                    *httpd.address, type=socket.SOCK_STREAM))
            for family, type, proto, cname, address in infos:
                try:
                    sock = socket.socket(family=family, type=type, proto=proto)
                    sock.setblocking(False)
                    self.loop.run_until_complete(
                        self.loop.sock_connect(sock, address))
                # NOTE(review): bare except is a deliberate best-effort probe
                # over every getaddrinfo() result; the for/else asserts if no
                # candidate worked.
                except:
                    pass
                else:
                    break
            else:
                assert False, 'Can not create socket.'

            f = self.loop.create_connection(
                lambda: MyProto(loop=self.loop), sock=sock)
            tr, pr = self.loop.run_until_complete(f)
            self.assertIsInstance(tr, asyncio.Transport)
            self.assertIsInstance(pr, asyncio.Protocol)
            self.loop.run_until_complete(pr.done)
            self.assertGreater(pr.nbytes, 0)
            tr.close()

    # Shared assertions on the extra info exposed by an SSL transport
    # (sockname/peername/peercert, cipher tuple, ssl_object).
    # NOTE(review): peercert={} is a mutable default argument; harmless here
    # because it is only read, never mutated.
    def check_ssl_extra_info(self, client, check_sockname=True,
                             peername=None, peercert={}):
        if check_sockname:
            self.assertIsNotNone(client.get_extra_info('sockname'))
        if peername:
            self.assertEqual(peername,
                             client.get_extra_info('peername'))
        else:
            self.assertIsNotNone(client.get_extra_info('peername'))
        self.assertEqual(peercert,
                         client.get_extra_info('peercert'))

        # test SSL cipher: a (name, protocol, secret_bits) triple
        cipher = client.get_extra_info('cipher')
        self.assertIsInstance(cipher, tuple)
        self.assertEqual(len(cipher), 3, cipher)
        self.assertIsInstance(cipher[0], str)
        self.assertIsInstance(cipher[1], str)
        self.assertIsInstance(cipher[2], int)

        # test SSL object: must agree with the individual extra-info keys
        sslobj = client.get_extra_info('ssl_object')
        self.assertIsNotNone(sslobj)
        self.assertEqual(sslobj.compression(),
                         client.get_extra_info('compression'))
        self.assertEqual(sslobj.cipher(),
                         client.get_extra_info('cipher'))
        self.assertEqual(sslobj.getpeercert(),
                         client.get_extra_info('peercert'))
        self.assertEqual(sslobj.compression(),
                         client.get_extra_info('compression'))

    # Drive one SSL connection future to completion and verify the
    # transport/protocol pair plus the SSL extra info.
    def _basetest_create_ssl_connection(self, connection_fut,
                                        check_sockname=True,
                                        peername=None):
        tr, pr = self.loop.run_until_complete(connection_fut)
        self.assertIsInstance(tr, asyncio.Transport)
        self.assertIsInstance(pr, asyncio.Protocol)
        self.assertTrue('ssl' in tr.__class__.__name__.lower())
        self.check_ssl_extra_info(tr, check_sockname, peername)
        self.loop.run_until_complete(pr.done)
        self.assertGreater(pr.nbytes, 0)
        tr.close()

    # Exercise the three ssl= spellings: an explicit context, ssl=True with a
    # patched ssl.create_default_context(), and ssl=True with the real
    # default context (which must fail certificate verification).
    def _test_create_ssl_connection(self, httpd, create_connection,
                                    check_sockname=True, peername=None):
        conn_fut = create_connection(ssl=test_utils.dummy_ssl_context())
        self._basetest_create_ssl_connection(conn_fut, check_sockname,
                                             peername)

        # ssl.Purpose was introduced in Python 3.4
        if hasattr(ssl, 'Purpose'):
            def _dummy_ssl_create_context(purpose=ssl.Purpose.SERVER_AUTH, *,
                                          cafile=None, capath=None,
                                          cadata=None):
                """
                A ssl.create_default_context() replacement that doesn't enable
                cert validation.
                """
                self.assertEqual(purpose, ssl.Purpose.SERVER_AUTH)
                return test_utils.dummy_ssl_context()

            # With ssl=True, ssl.create_default_context() should be called
            with mock.patch('ssl.create_default_context',
                            side_effect=_dummy_ssl_create_context) as m:
                conn_fut = create_connection(ssl=True)
                self._basetest_create_ssl_connection(conn_fut, check_sockname,
                                                     peername)
                self.assertEqual(m.call_count, 1)

        # With the real ssl.create_default_context(), certificate
        # validation will fail
        with self.assertRaises(ssl.SSLError) as cm:
            conn_fut = create_connection(ssl=True)
            # Ignore the "SSL handshake failed" log in debug mode
            with test_utils.disable_logger():
                self._basetest_create_ssl_connection(conn_fut, check_sockname,
                                                     peername)

        self.assertEqual(cm.exception.reason, 'CERTIFICATE_VERIFY_FAILED')

    @unittest.skipIf(ssl is None, 'No ssl module')
    def test_create_ssl_connection(self):
        with test_utils.run_test_server(use_ssl=True) as httpd:
            create_connection = functools.partial(
                self.loop.create_connection,
                lambda: MyProto(loop=self.loop),
                *httpd.address)
            self._test_create_ssl_connection(httpd, create_connection,
                                             peername=httpd.address)

    def test_legacy_create_ssl_connection(self):
        # Re-run the SSL test with the pre-3.5 (non-sslproto) implementation.
        with test_utils.force_legacy_ssl_support():
            self.test_create_ssl_connection()

    @unittest.skipIf(ssl is None, 'No ssl module')
    @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
    def test_create_ssl_unix_connection(self):
        # Issue #20682: On Mac OS X Tiger, getsockname() returns a
        # zero-length address for UNIX socket.
        check_sockname = not osx_tiger()

        with test_utils.run_test_unix_server(use_ssl=True) as httpd:
            create_connection = functools.partial(
                self.loop.create_unix_connection,
                lambda: MyProto(loop=self.loop), httpd.address,
                server_hostname='127.0.0.1')

            self._test_create_ssl_connection(httpd, create_connection,
                                             check_sockname,
                                             peername=httpd.address)

    def test_legacy_create_ssl_unix_connection(self):
        with test_utils.force_legacy_ssl_support():
            self.test_create_ssl_unix_connection()

    def test_create_connection_local_addr(self):
        # local_addr= must bind the client socket to the requested port.
        with test_utils.run_test_server() as httpd:
            port = support.find_unused_port()
            f = self.loop.create_connection(
                lambda: MyProto(loop=self.loop),
                *httpd.address, local_addr=(httpd.address[0], port))
            tr, pr = self.loop.run_until_complete(f)
            expected = pr.transport.get_extra_info('sockname')[1]
            self.assertEqual(port, expected)
            tr.close()

    def test_create_connection_local_addr_in_use(self):
        # Binding local_addr to the server's own address must fail with
        # EADDRINUSE and mention the address in the error text.
        with test_utils.run_test_server() as httpd:
            f = self.loop.create_connection(
                lambda: MyProto(loop=self.loop),
                *httpd.address, local_addr=httpd.address)
            with self.assertRaises(OSError) as cm:
                self.loop.run_until_complete(f)
            self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
            self.assertIn(str(httpd.address), cm.exception.strerror)

    # Helper (not itself a test): with the socket module mocked out, check
    # that create_server() binds one listening socket per requested host.
    @mock.patch('asyncio.base_events.socket')
    def create_server_multiple_hosts(self, family, hosts, mock_sock):
        @asyncio.coroutine
        def getaddrinfo(host, port, *args, **kw):
            if family == socket.AF_INET:
                return [[family, socket.SOCK_STREAM, 6, '', (host, port)]]
            else:
                return [[family, socket.SOCK_STREAM, 6, '',
                         (host, port, 0, 0)]]

        def getaddrinfo_task(*args, **kwds):
            return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)

        if family == socket.AF_INET:
            mock_sock.socket().getsockbyname.side_effect = [
                (host, 80) for host in hosts]
        else:
            mock_sock.socket().getsockbyname.side_effect = [
                (host, 80, 0, 0) for host in hosts]
        self.loop.getaddrinfo = getaddrinfo_task
        # Avoid touching real sockets/selectors in this mocked scenario.
        self.loop._start_serving = mock.Mock()
        self.loop._stop_serving = mock.Mock()
        f = self.loop.create_server(lambda: MyProto(self.loop), hosts, 80)
        server = self.loop.run_until_complete(f)
        self.addCleanup(server.close)
        server_hosts = [sock.getsockbyname()[0] for sock in server.sockets]
        self.assertEqual(server_hosts, hosts)

    def test_create_server_multiple_hosts_ipv4(self):
        self.create_server_multiple_hosts(socket.AF_INET,
                                          ['1.2.3.4', '5.6.7.8'])

    def test_create_server_multiple_hosts_ipv6(self):
        self.create_server_multiple_hosts(socket.AF_INET6, ['::1', '::2'])

    def test_create_server(self):
        # End-to-end TCP server: accept, receive, expose extra info, close.
        proto = MyProto(self.loop)
        f = self.loop.create_server(lambda: proto, '0.0.0.0', 0)
        server = self.loop.run_until_complete(f)
        self.assertEqual(len(server.sockets), 1)
        sock = server.sockets[0]
        host, port = sock.getsockname()
        self.assertEqual(host, '0.0.0.0')
        client = socket.socket()
        client.connect(('127.0.0.1', port))
        client.sendall(b'xxx')

        self.loop.run_until_complete(proto.connected)
        self.assertEqual('CONNECTED', proto.state)

        test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
        self.assertEqual(3, proto.nbytes)

        # extra info is available
        self.assertIsNotNone(proto.transport.get_extra_info('sockname'))
        self.assertEqual('127.0.0.1',
                         proto.transport.get_extra_info('peername')[0])

        # close connection
        proto.transport.close()
        self.loop.run_until_complete(proto.done)

        self.assertEqual('CLOSED', proto.state)

        # the client socket must be closed after to avoid ECONNRESET upon
        # recv()/send() on the serving socket
        client.close()

        # close server
        server.close()

    @unittest.skipUnless(hasattr(socket, 'SO_REUSEPORT'), 'No SO_REUSEPORT')
    def test_create_server_reuse_port(self):
        # SO_REUSEPORT must be off by default and on with reuse_port=True.
        proto = MyProto(self.loop)
        f = self.loop.create_server(
            lambda: proto, '0.0.0.0', 0)
        server = self.loop.run_until_complete(f)
        self.assertEqual(len(server.sockets), 1)
        sock = server.sockets[0]
        self.assertFalse(
            sock.getsockopt(
                socket.SOL_SOCKET, socket.SO_REUSEPORT))
        server.close()

        test_utils.run_briefly(self.loop)

        proto = MyProto(self.loop)
        f = self.loop.create_server(
            lambda: proto, '0.0.0.0', 0, reuse_port=True)
        server = self.loop.run_until_complete(f)
        self.assertEqual(len(server.sockets), 1)
        sock = server.sockets[0]
        self.assertTrue(
            sock.getsockopt(
                socket.SOL_SOCKET, socket.SO_REUSEPORT))
        server.close()

    # Create a UNIX-domain server on a throwaway path; the path is removed
    # again via addCleanup. Returns (server, path).
    def _make_unix_server(self, factory, **kwargs):
        path = test_utils.gen_unix_socket_path()
        self.addCleanup(lambda: os.path.exists(path) and os.unlink(path))

        f = self.loop.create_unix_server(factory, path, **kwargs)
        server = self.loop.run_until_complete(f)

        return server, path

    @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
    def test_create_unix_server(self):
        proto = MyProto(loop=self.loop)
        server, path = self._make_unix_server(lambda: proto)
        self.assertEqual(len(server.sockets), 1)

        client = socket.socket(socket.AF_UNIX)
        client.connect(path)
        client.sendall(b'xxx')

        self.loop.run_until_complete(proto.connected)
        self.assertEqual('CONNECTED', proto.state)
        test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
        self.assertEqual(3, proto.nbytes)

        # close connection
        proto.transport.close()
        self.loop.run_until_complete(proto.done)

        self.assertEqual('CLOSED', proto.state)

        # the client socket must be closed after to avoid ECONNRESET upon
        # recv()/send() on the serving socket
        client.close()

        # close server
        server.close()

    @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
    def test_create_unix_server_path_socket_error(self):
        # Passing both path= and sock= is a usage error.
        proto = MyProto(loop=self.loop)
        sock = socket.socket()
        with sock:
            f = self.loop.create_unix_server(lambda: proto, '/test', sock=sock)
            with self.assertRaisesRegex(ValueError,
                                        'path and sock can not be specified '
                                        'at the same time'):
                self.loop.run_until_complete(f)

    # Build a server-side SSL context from a certificate/key pair.
    def _create_ssl_context(self, certfile, keyfile=None):
        sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        sslcontext.options |= ssl.OP_NO_SSLv2
        sslcontext.load_cert_chain(certfile, keyfile)
        return sslcontext

    # Start an SSL TCP server on 127.0.0.1; returns (server, host, port).
    def _make_ssl_server(self, factory, certfile, keyfile=None):
        sslcontext = self._create_ssl_context(certfile, keyfile)

        f = self.loop.create_server(factory, '127.0.0.1', 0, ssl=sslcontext)
        server = self.loop.run_until_complete(f)

        sock = server.sockets[0]
        host, port = sock.getsockname()
        self.assertEqual(host, '127.0.0.1')
        return server, host, port

    # Start an SSL UNIX-domain server; returns (server, path).
    def _make_ssl_unix_server(self, factory, certfile, keyfile=None):
        sslcontext = self._create_ssl_context(certfile, keyfile)
        return self._make_unix_server(factory, ssl=sslcontext)

    @unittest.skipIf(ssl is None, 'No ssl module')
    def test_create_server_ssl(self):
        proto = MyProto(loop=self.loop)
        server, host, port = self._make_ssl_server(
            lambda: proto, ONLYCERT, ONLYKEY)

        f_c = self.loop.create_connection(MyBaseProto, host, port,
                                          ssl=test_utils.dummy_ssl_context())
        client, pr = self.loop.run_until_complete(f_c)

        client.write(b'xxx')
        self.loop.run_until_complete(proto.connected)
        self.assertEqual('CONNECTED', proto.state)

        test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
        self.assertEqual(3, proto.nbytes)

        # extra info is available
        self.check_ssl_extra_info(client, peername=(host, port))

        # close connection
        proto.transport.close()
        self.loop.run_until_complete(proto.done)
        self.assertEqual('CLOSED', proto.state)

        # the client socket must be closed after to avoid ECONNRESET upon
        # recv()/send() on the serving socket
        client.close()

        # stop serving
        server.close()

    def test_legacy_create_server_ssl(self):
        with test_utils.force_legacy_ssl_support():
            self.test_create_server_ssl()

    @unittest.skipIf(ssl is None, 'No ssl module')
    @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
    def test_create_unix_server_ssl(self):
        proto = MyProto(loop=self.loop)
        server, path = self._make_ssl_unix_server(
            lambda: proto, ONLYCERT, ONLYKEY)

        f_c = self.loop.create_unix_connection(
            MyBaseProto, path, ssl=test_utils.dummy_ssl_context(),
            server_hostname='')

        client, pr = self.loop.run_until_complete(f_c)

        client.write(b'xxx')
        self.loop.run_until_complete(proto.connected)
        self.assertEqual('CONNECTED', proto.state)
        test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
        self.assertEqual(3, proto.nbytes)

        # close connection
        proto.transport.close()
        self.loop.run_until_complete(proto.done)
        self.assertEqual('CLOSED', proto.state)

        # the client socket must be closed after to avoid ECONNRESET upon
        # recv()/send() on the serving socket
        client.close()

        # stop serving
        server.close()

    def test_legacy_create_unix_server_ssl(self):
        with test_utils.force_legacy_ssl_support():
            self.test_create_unix_server_ssl()

    @unittest.skipIf(ssl is None, 'No ssl module')
    def test_create_server_ssl_verify_failed(self):
        # Client requires verification but loads no CA: handshake must fail.
        proto = MyProto(loop=self.loop)
        server, host, port = self._make_ssl_server(
            lambda: proto, SIGNED_CERTFILE)

        sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        sslcontext_client.options |= ssl.OP_NO_SSLv2
        sslcontext_client.verify_mode = ssl.CERT_REQUIRED
        if hasattr(sslcontext_client, 'check_hostname'):
            sslcontext_client.check_hostname = True

        # no CA loaded
        f_c = self.loop.create_connection(MyProto, host, port,
                                          ssl=sslcontext_client)
        with mock.patch.object(self.loop, 'call_exception_handler'):
            with test_utils.disable_logger():
                with self.assertRaisesRegex(ssl.SSLError,
                                            'certificate verify failed '):
                    self.loop.run_until_complete(f_c)

            # execute the loop to log the connection error
            test_utils.run_briefly(self.loop)

        # close connection
        self.assertIsNone(proto.transport)
        server.close()

    def test_legacy_create_server_ssl_verify_failed(self):
        with test_utils.force_legacy_ssl_support():
            self.test_create_server_ssl_verify_failed()

    @unittest.skipIf(ssl is None, 'No ssl module')
    @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
    def test_create_unix_server_ssl_verify_failed(self):
        proto = MyProto(loop=self.loop)
        server, path = self._make_ssl_unix_server(
            lambda: proto, SIGNED_CERTFILE)

        sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        sslcontext_client.options |= ssl.OP_NO_SSLv2
        sslcontext_client.verify_mode = ssl.CERT_REQUIRED
        if hasattr(sslcontext_client, 'check_hostname'):
            sslcontext_client.check_hostname = True

        # no CA loaded
        f_c = self.loop.create_unix_connection(MyProto, path,
                                               ssl=sslcontext_client,
                                               server_hostname='invalid')
        with mock.patch.object(self.loop, 'call_exception_handler'):
            with test_utils.disable_logger():
                with self.assertRaisesRegex(ssl.SSLError,
                                            'certificate verify failed '):
                    self.loop.run_until_complete(f_c)

            # execute the loop to log the connection error
            test_utils.run_briefly(self.loop)

        # close connection
        self.assertIsNone(proto.transport)
        server.close()

    def test_legacy_create_unix_server_ssl_verify_failed(self):
        with test_utils.force_legacy_ssl_support():
            self.test_create_unix_server_ssl_verify_failed()

    @unittest.skipIf(ssl is None, 'No ssl module')
    def test_create_server_ssl_match_failed(self):
        # Correct CA but the certificate's hostname does not match the peer.
        proto = MyProto(loop=self.loop)
        server, host, port = self._make_ssl_server(
            lambda: proto, SIGNED_CERTFILE)

        sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        sslcontext_client.options |= ssl.OP_NO_SSLv2
        sslcontext_client.verify_mode = ssl.CERT_REQUIRED
        sslcontext_client.load_verify_locations(
            cafile=SIGNING_CA)
        if hasattr(sslcontext_client, 'check_hostname'):
            sslcontext_client.check_hostname = True

        # incorrect server_hostname
        f_c = self.loop.create_connection(MyProto, host, port,
                                          ssl=sslcontext_client)
        with mock.patch.object(self.loop, 'call_exception_handler'):
            with test_utils.disable_logger():
                with self.assertRaisesRegex(
                        ssl.CertificateError,
                        "hostname '127.0.0.1' doesn't match 'localhost'"):
                    self.loop.run_until_complete(f_c)

        # close connection
        proto.transport.close()
        server.close()

    def test_legacy_create_server_ssl_match_failed(self):
        with test_utils.force_legacy_ssl_support():
            self.test_create_server_ssl_match_failed()

    @unittest.skipIf(ssl is None, 'No ssl module')
    @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
    def test_create_unix_server_ssl_verified(self):
        proto = MyProto(loop=self.loop)
        server, path = self._make_ssl_unix_server(
            lambda: proto, SIGNED_CERTFILE)

        sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        sslcontext_client.options |= ssl.OP_NO_SSLv2
        sslcontext_client.verify_mode = ssl.CERT_REQUIRED
        sslcontext_client.load_verify_locations(cafile=SIGNING_CA)
        if hasattr(sslcontext_client, 'check_hostname'):
            sslcontext_client.check_hostname = True

        # Connection succeeds with correct CA and server hostname.
        f_c = self.loop.create_unix_connection(MyProto, path,
                                               ssl=sslcontext_client,
                                               server_hostname='localhost')
        client, pr = self.loop.run_until_complete(f_c)

        # close connection
        proto.transport.close()
        client.close()
        server.close()
        self.loop.run_until_complete(proto.done)

    def test_legacy_create_unix_server_ssl_verified(self):
        with test_utils.force_legacy_ssl_support():
            self.test_create_unix_server_ssl_verified()

    @unittest.skipIf(ssl is None, 'No ssl module')
    def test_create_server_ssl_verified(self):
        proto = MyProto(loop=self.loop)
        server, host, port = self._make_ssl_server(
            lambda: proto, SIGNED_CERTFILE)

        sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        sslcontext_client.options |= ssl.OP_NO_SSLv2
        sslcontext_client.verify_mode = ssl.CERT_REQUIRED
        sslcontext_client.load_verify_locations(cafile=SIGNING_CA)
        if hasattr(sslcontext_client, 'check_hostname'):
            sslcontext_client.check_hostname = True

        # Connection succeeds with correct CA and server hostname.
        f_c = self.loop.create_connection(MyProto, host, port,
                                          ssl=sslcontext_client,
                                          server_hostname='localhost')
        client, pr = self.loop.run_until_complete(f_c)

        # extra info is available
        self.check_ssl_extra_info(client, peername=(host, port),
                                  peercert=PEERCERT)

        # close connection
        proto.transport.close()
        client.close()
        server.close()
        self.loop.run_until_complete(proto.done)

    def test_legacy_create_server_ssl_verified(self):
        with test_utils.force_legacy_ssl_support():
            self.test_create_server_ssl_verified()

    def test_create_server_sock(self):
        # create_server() must serve on a caller-supplied, pre-bound socket.
        proto = asyncio.Future(loop=self.loop)

        class TestMyProto(MyProto):
            def connection_made(self, transport):
                super().connection_made(transport)
                proto.set_result(self)

        sock_ob = socket.socket(type=socket.SOCK_STREAM)
        sock_ob.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock_ob.bind(('0.0.0.0', 0))

        f = self.loop.create_server(TestMyProto, sock=sock_ob)
        server = self.loop.run_until_complete(f)
        sock = server.sockets[0]
        self.assertIs(sock, sock_ob)

        host, port = sock.getsockname()
        self.assertEqual(host, '0.0.0.0')
        client = socket.socket()
        client.connect(('127.0.0.1', port))
        client.send(b'xxx')
        client.close()
        server.close()

    def test_create_server_addr_in_use(self):
        # A second server on the same (host, port) must fail with EADDRINUSE.
        sock_ob = socket.socket(type=socket.SOCK_STREAM)
        sock_ob.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock_ob.bind(('0.0.0.0', 0))

        f = self.loop.create_server(MyProto, sock=sock_ob)
        server = self.loop.run_until_complete(f)
        sock = server.sockets[0]
        host, port = sock.getsockname()

        f = self.loop.create_server(MyProto, host=host, port=port)
        with self.assertRaises(OSError) as cm:
            self.loop.run_until_complete(f)
        self.assertEqual(cm.exception.errno, errno.EADDRINUSE)

        server.close()

    @unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 not supported or enabled')
    def test_create_server_dual_stack(self):
        # host=None must listen on both IPv4 and IPv6 on the same port.
        f_proto = asyncio.Future(loop=self.loop)

        class TestMyProto(MyProto):
            def connection_made(self, transport):
                super().connection_made(transport)
                f_proto.set_result(self)

        # find_unused_port() is racy; retry on EADDRINUSE up to 5 times.
        try_count = 0
        while True:
            try:
                port = support.find_unused_port()
                f = self.loop.create_server(TestMyProto, host=None, port=port)
                server = self.loop.run_until_complete(f)
            except OSError as ex:
                if ex.errno == errno.EADDRINUSE:
                    try_count += 1
                    self.assertGreaterEqual(5, try_count)
                    continue
                else:
                    raise
            else:
                break
        client = socket.socket()
        client.connect(('127.0.0.1', port))
        client.send(b'xxx')
        proto = self.loop.run_until_complete(f_proto)
        proto.transport.close()
        client.close()

        f_proto = asyncio.Future(loop=self.loop)
        client = socket.socket(socket.AF_INET6)
        client.connect(('::1', port))
        client.send(b'xxx')
        proto = self.loop.run_until_complete(f_proto)
        proto.transport.close()
        client.close()

        server.close()

    def test_server_close(self):
        # After server.close(), new connections must be refused.
        f = self.loop.create_server(MyProto, '0.0.0.0', 0)
        server = self.loop.run_until_complete(f)
        sock = server.sockets[0]
        host, port = sock.getsockname()

        client = socket.socket()
        client.connect(('127.0.0.1', port))
        client.send(b'xxx')
        client.close()

        server.close()

        client = socket.socket()
        self.assertRaises(
            ConnectionRefusedError, client.connect, ('127.0.0.1', port))
        client.close()

    def test_create_datagram_endpoint(self):
        # UDP echo: server prefixes b'resp:' to each datagram it receives.
        class TestMyDatagramProto(MyDatagramProto):
            # NOTE: named inner_self to keep the outer test's self in scope.
            def __init__(inner_self):
                super().__init__(loop=self.loop)

            def datagram_received(self, data, addr):
                super().datagram_received(data, addr)
                self.transport.sendto(b'resp:'+data, addr)

        coro = self.loop.create_datagram_endpoint(
            TestMyDatagramProto, local_addr=('127.0.0.1', 0))
        s_transport, server = self.loop.run_until_complete(coro)
        host, port = s_transport.get_extra_info('sockname')

        self.assertIsInstance(s_transport, asyncio.Transport)
        self.assertIsInstance(server, TestMyDatagramProto)
        self.assertEqual('INITIALIZED', server.state)
        self.assertIs(server.transport, s_transport)

        coro = self.loop.create_datagram_endpoint(
            lambda: MyDatagramProto(loop=self.loop),
            remote_addr=(host, port))
        transport, client = self.loop.run_until_complete(coro)

        self.assertIsInstance(transport, asyncio.Transport)
        self.assertIsInstance(client, MyDatagramProto)
        self.assertEqual('INITIALIZED', client.state)
        self.assertIs(client.transport, transport)

        transport.sendto(b'xxx')
        test_utils.run_until(self.loop, lambda: server.nbytes)
        self.assertEqual(3, server.nbytes)
        test_utils.run_until(self.loop, lambda: client.nbytes)

        # received: b'resp:xxx' is 8 bytes
        self.assertEqual(8, client.nbytes)

        # extra info is available
        self.assertIsNotNone(transport.get_extra_info('sockname'))

        # close connection
        transport.close()
        self.loop.run_until_complete(client.done)
        self.assertEqual('CLOSED', client.state)
        server.transport.close()

    def test_create_datagram_endpoint_sock(self):
        sock = None
        local_address = ('127.0.0.1', 0)
        infos = self.loop.run_until_complete(
            self.loop.getaddrinfo(
                *local_address, type=socket.SOCK_DGRAM))
        for family, type, proto, cname, address in infos:
            try:
                sock = socket.socket(family=family, type=type, proto=proto)
                sock.setblocking(False)
                sock.bind(address)
            # best-effort probe over getaddrinfo() results (see above)
            except:
                pass
            else:
                break
        else:
            assert False, 'Can not create socket.'

        # NOTE(review): this wraps a SOCK_DGRAM socket with
        # create_connection(); presumably create_datagram_endpoint(sock=sock)
        # was intended -- confirm against upstream history.
        f = self.loop.create_connection(
            lambda: MyDatagramProto(loop=self.loop), sock=sock)
        tr, pr = self.loop.run_until_complete(f)
        self.assertIsInstance(tr, asyncio.Transport)
        self.assertIsInstance(pr, MyDatagramProto)
        tr.close()
        self.loop.run_until_complete(pr.done)

    def test_internal_fds(self):
        # Selector loops hold one internal fd pair (self-pipe); closing the
        # loop must release it.
        loop = self.create_event_loop()
        if not isinstance(loop, selector_events.BaseSelectorEventLoop):
            loop.close()
            self.skipTest('loop is not a BaseSelectorEventLoop')

        self.assertEqual(1, loop._internal_fds)
        loop.close()
        self.assertEqual(0, loop._internal_fds)
        self.assertIsNone(loop._csock)
        self.assertIsNone(loop._ssock)

    @unittest.skipUnless(sys.platform != 'win32',
                         "Don't support pipes for Windows")
    def test_read_pipe(self):
        proto = MyReadPipeProto(loop=self.loop)

        rpipe, wpipe = os.pipe()
        pipeobj = io.open(rpipe, 'rb', 1024)

        @asyncio.coroutine
        def connect():
            t, p = yield from self.loop.connect_read_pipe(
                lambda: proto, pipeobj)
            self.assertIs(p, proto)
            self.assertIs(t, proto.transport)
            self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
            self.assertEqual(0, proto.nbytes)

        self.loop.run_until_complete(connect())

        os.write(wpipe, b'1')
        test_utils.run_until(self.loop, lambda: proto.nbytes >= 1)
        self.assertEqual(1, proto.nbytes)

        os.write(wpipe, b'2345')
        test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
        self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
        self.assertEqual(5, proto.nbytes)

        # closing the write end delivers EOF to the reading protocol
        os.close(wpipe)
        self.loop.run_until_complete(proto.done)
        self.assertEqual(
            ['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
        # extra info is available
        self.assertIsNotNone(proto.transport.get_extra_info('pipe'))

    @unittest.skipUnless(sys.platform != 'win32',
                         "Don't support pipes for Windows")
    # select, poll and kqueue don't support character devices (PTY) on Mac OS X
    # older than 10.6 (Snow Leopard)
    @support.requires_mac_ver(10, 6)
    # Issue #20495: The test hangs on FreeBSD 7.2 but pass on FreeBSD 9
    @support.requires_freebsd_version(8)
    def test_read_pty_output(self):
        proto = MyReadPipeProto(loop=self.loop)

        master, slave = os.openpty()
        master_read_obj = io.open(master, 'rb', 0)

        @asyncio.coroutine
        def connect():
            t, p = yield from self.loop.connect_read_pipe(lambda: proto,
                                                          master_read_obj)
            self.assertIs(p, proto)
            self.assertIs(t, proto.transport)
            self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
            self.assertEqual(0, proto.nbytes)

        self.loop.run_until_complete(connect())

        os.write(slave, b'1')
        test_utils.run_until(self.loop, lambda: proto.nbytes)
        self.assertEqual(1, proto.nbytes)

        os.write(slave, b'2345')
        test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
        self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
        self.assertEqual(5, proto.nbytes)

        os.close(slave)
        self.loop.run_until_complete(proto.done)
        self.assertEqual(
            ['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
        # extra info is available
        self.assertIsNotNone(proto.transport.get_extra_info('pipe'))

    @unittest.skipUnless(sys.platform != 'win32',
                         "Don't support pipes for Windows")
    def test_write_pipe(self):
        rpipe, wpipe = os.pipe()
        pipeobj = io.open(wpipe, 'wb', 1024)

        proto = MyWritePipeProto(loop=self.loop)
        connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
        transport, p = self.loop.run_until_complete(connect)
        self.assertIs(p, proto)
        self.assertIs(transport, proto.transport)
        self.assertEqual('CONNECTED', proto.state)

        transport.write(b'1')

        data = bytearray()

        # Drain whatever is available from the read end into `data`.
        def reader(data):
            chunk = os.read(rpipe, 1024)
            data += chunk
            return len(data)

        test_utils.run_until(self.loop, lambda: reader(data) >= 1)
        self.assertEqual(b'1', data)

        transport.write(b'2345')
        test_utils.run_until(self.loop, lambda: reader(data) >= 5)
        self.assertEqual(b'12345', data)
        self.assertEqual('CONNECTED', proto.state)

        os.close(rpipe)

        # extra info is available
        self.assertIsNotNone(proto.transport.get_extra_info('pipe'))

        # close connection
        proto.transport.close()
        self.loop.run_until_complete(proto.done)
        self.assertEqual('CLOSED', proto.state)

    @unittest.skipUnless(sys.platform != 'win32',
                         "Don't support pipes for Windows")
    def test_write_pipe_disconnect_on_close(self):
        # Closing the read end must disconnect the write-pipe transport.
        rsock, wsock = test_utils.socketpair()
        rsock.setblocking(False)
        pipeobj = io.open(wsock.detach(), 'wb', 1024)

        proto = MyWritePipeProto(loop=self.loop)
        connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
        transport, p = self.loop.run_until_complete(connect)
        self.assertIs(p, proto)
        self.assertIs(transport, proto.transport)
        self.assertEqual('CONNECTED', proto.state)

        transport.write(b'1')
        data = self.loop.run_until_complete(self.loop.sock_recv(rsock, 1024))
        self.assertEqual(b'1', data)

        rsock.close()

        self.loop.run_until_complete(proto.done)
        self.assertEqual('CLOSED', proto.state)

    @unittest.skipUnless(sys.platform != 'win32',
                         "Don't support pipes for Windows")
    # select, poll and kqueue don't support character devices (PTY) on Mac OS X
    # older than 10.6 (Snow Leopard)
    @support.requires_mac_ver(10, 6)
    def test_write_pty(self):
        master, slave = os.openpty()
        slave_write_obj = io.open(slave, 'wb', 0)

        proto = MyWritePipeProto(loop=self.loop)
        connect = self.loop.connect_write_pipe(lambda: proto, slave_write_obj)
        transport, p = self.loop.run_until_complete(connect)
        self.assertIs(p, proto)
        self.assertIs(transport, proto.transport)
        self.assertEqual('CONNECTED', proto.state)

        transport.write(b'1')

        data = bytearray()

        # Drain whatever is available from the PTY master into `data`.
        def reader(data):
            chunk = os.read(master, 1024)
            data += chunk
            return len(data)

        test_utils.run_until(self.loop, lambda: reader(data) >= 1,
                             timeout=10)
        self.assertEqual(b'1', data)

        transport.write(b'2345')
        test_utils.run_until(self.loop, lambda: reader(data) >= 5,
                             timeout=10)
        self.assertEqual(b'12345', data)
        self.assertEqual('CONNECTED', proto.state)

        os.close(master)

        # extra info is available
        self.assertIsNotNone(proto.transport.get_extra_info('pipe'))

        # close connection
        proto.transport.close()
        self.loop.run_until_complete(proto.done)
        self.assertEqual('CLOSED', proto.state)

    def test_prompt_cancellation(self):
        # Cancelling a pending sock_recv() must take effect promptly
        # (well under 0.1s), not wait for the next I/O event.
        r, w = test_utils.socketpair()
        r.setblocking(False)
        f = self.loop.sock_recv(r, 1)
        # 'ov' is the overlapped object on proactor (Windows) futures only.
        ov = getattr(f, 'ov', None)
        if ov is not None:
            self.assertTrue(ov.pending)

        @asyncio.coroutine
        def main():
            try:
                self.loop.call_soon(f.cancel)
                yield from f
            except asyncio.CancelledError:
                res = 'cancelled'
            else:
                res = None
            finally:
                self.loop.stop()
            return res

        start = time.monotonic()
        t = asyncio.Task(main(), loop=self.loop)
        self.loop.run_forever()
        elapsed = time.monotonic() - start

        self.assertLess(elapsed, 0.1)
        self.assertEqual(t.result(), 'cancelled')
        self.assertRaises(asyncio.CancelledError, f.result)
        if ov is not None:
            self.assertFalse(ov.pending)
        self.loop._stop_serving(r)

        r.close()
        w.close()

    def test_timeout_rounding(self):
        # Count _run_once() iterations to verify timeouts are not rounded
        # down to zero (which would cause busy-looping).
        def _run_once():
            self.loop._run_once_counter += 1
            orig_run_once()

        orig_run_once = self.loop._run_once
        self.loop._run_once_counter = 0
        self.loop._run_once = _run_once

        @asyncio.coroutine
        def wait():
            loop = self.loop
            yield from asyncio.sleep(1e-2, loop=loop)
            yield from asyncio.sleep(1e-4, loop=loop)
            yield from asyncio.sleep(1e-6, loop=loop)
            yield from asyncio.sleep(1e-8, loop=loop)
            yield from asyncio.sleep(1e-10, loop=loop)

        self.loop.run_until_complete(wait())
        # The ideal number of calls is 12, but on some platforms, the selector
        # may sleep a little bit less than the timeout depending on the
        # resolution of the clock used by the kernel. Tolerate a few useless
        # calls on these platforms.
        self.assertLessEqual(self.loop._run_once_counter, 20,
                             {'clock_resolution': self.loop._clock_resolution,
                              'selector': self.loop._selector.__class__.__name__})

    def test_sock_connect_address(self):
        # In debug mode, sock_connect() must ensure that the address is already
        # resolved (call _check_resolved_address())
        self.loop.set_debug(True)

        addresses = [(socket.AF_INET, ('www.python.org', 80))]
        if support.IPV6_ENABLED:
            addresses.extend((
                (socket.AF_INET6, ('www.python.org', 80)),
                (socket.AF_INET6, ('www.python.org', 80, 0, 0)),
            ))

        for family, address in addresses:
            for sock_type in (socket.SOCK_STREAM, socket.SOCK_DGRAM):
                sock = socket.socket(family, sock_type)
                with sock:
                    sock.setblocking(False)
                    connect = self.loop.sock_connect(sock, address)
                    with self.assertRaises(ValueError) as cm:
                        self.loop.run_until_complete(connect)
                    self.assertIn('address must be resolved',
                                  str(cm.exception))

    def test_remove_fds_after_closing(self):
        loop = self.create_event_loop()
        callback = lambda: None
        r, w = test_utils.socketpair()
        self.addCleanup(r.close)
        self.addCleanup(w.close)
        loop.add_reader(r, callback)
        loop.add_writer(w, callback)
        loop.close()
        # remove_* on a closed loop returns False instead of raising
        self.assertFalse(loop.remove_reader(r))
        self.assertFalse(loop.remove_writer(w))

    def test_add_fds_after_closing(self):
        loop = self.create_event_loop()
        callback = lambda: None
        r, w = test_utils.socketpair()
        self.addCleanup(r.close)
        self.addCleanup(w.close)
        loop.close()
        with self.assertRaises(RuntimeError):
            loop.add_reader(r, callback)
        with self.assertRaises(RuntimeError):
            loop.add_writer(w, callback)

    def test_close_running_event_loop(self):
        # Closing the loop from inside a running coroutine is forbidden.
        @asyncio.coroutine
        def close_loop(loop):
            self.loop.close()

        coro = close_loop(self.loop)
        with self.assertRaises(RuntimeError):
            self.loop.run_until_complete(coro)

    def test_close(self):
        # Every scheduling entry point must raise RuntimeError once the loop
        # is closed.
        self.loop.close()

        @asyncio.coroutine
        def test():
            pass

        func = lambda: False
        coro = test()
        self.addCleanup(coro.close)

        # operation blocked when the loop is closed
        with self.assertRaises(RuntimeError):
            self.loop.run_forever()
        with self.assertRaises(RuntimeError):
            fut = asyncio.Future(loop=self.loop)
            self.loop.run_until_complete(fut)
        with self.assertRaises(RuntimeError):
            self.loop.call_soon(func)
        with self.assertRaises(RuntimeError):
            self.loop.call_soon_threadsafe(func)
        with self.assertRaises(RuntimeError):
            self.loop.call_later(1.0, func)
        with self.assertRaises(RuntimeError):
            self.loop.call_at(self.loop.time() + .0, func)
        with self.assertRaises(RuntimeError):
            self.loop.run_in_executor(None, func)
        with self.assertRaises(RuntimeError):
            self.loop.create_task(coro)
        with self.assertRaises(RuntimeError):
            self.loop.add_signal_handler(signal.SIGTERM, func)


class SubprocessTestsMixin:
    """Event-loop subprocess tests, mixed into per-loop TestCase classes."""

    # Assert the child exited due to SIGTERM (POSIX) or just exited (Windows,
    # where there is no signal-based exit status).
    def check_terminated(self, returncode):
        if sys.platform == 'win32':
            self.assertIsInstance(returncode, int)
            # expect 1 but sometimes get 0
        else:
            self.assertEqual(-signal.SIGTERM, returncode)

    # Assert the child exited due to SIGKILL (POSIX) or just exited (Windows).
    def check_killed(self, returncode):
        if sys.platform == 'win32':
            self.assertIsInstance(returncode, int)
            # expect 1 but sometimes get 0
        else:
            self.assertEqual(-signal.SIGKILL, returncode)

    def test_subprocess_exec(self):
        # echo.py echoes stdin to stdout; proto.data[1] collects stdout.
        prog = os.path.join(os.path.dirname(__file__), 'echo.py')

        connect = self.loop.subprocess_exec(
            functools.partial(MySubprocessProtocol, self.loop),
            sys.executable, prog)
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.connected)
        self.assertEqual('CONNECTED', proto.state)

        stdin = transp.get_pipe_transport(0)
        stdin.write(b'Python The Winner')
        self.loop.run_until_complete(proto.got_data[1].wait())
        with test_utils.disable_logger():
            transp.close()
        self.loop.run_until_complete(proto.completed)
        self.check_killed(proto.returncode)
        self.assertEqual(b'Python The Winner', proto.data[1])

    def test_subprocess_interactive(self):
        # Two writes, each echoed back before the next is sent.
        prog = os.path.join(os.path.dirname(__file__), 'echo.py')

        connect = self.loop.subprocess_exec(
            functools.partial(MySubprocessProtocol, self.loop),
            sys.executable, prog)
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.connected)
        self.assertEqual('CONNECTED', proto.state)

        stdin = transp.get_pipe_transport(0)
        stdin.write(b'Python ')
        self.loop.run_until_complete(proto.got_data[1].wait())
        proto.got_data[1].clear()
        self.assertEqual(b'Python ', proto.data[1])

        stdin.write(b'The Winner')
        self.loop.run_until_complete(proto.got_data[1].wait())
        self.assertEqual(b'Python The Winner', proto.data[1])

        with test_utils.disable_logger():
            transp.close()
        self.loop.run_until_complete(proto.completed)
        self.check_killed(proto.returncode)

    def test_subprocess_shell(self):
        connect = self.loop.subprocess_shell(
            functools.partial(MySubprocessProtocol, self.loop),
            'echo Python')
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.connected)

        transp.get_pipe_transport(0).close()
        self.loop.run_until_complete(proto.completed)
        self.assertEqual(0, proto.returncode)
        self.assertTrue(all(f.done() for f in proto.disconnects.values()))
        self.assertEqual(proto.data[1].rstrip(b'\r\n'), b'Python')
        self.assertEqual(proto.data[2], b'')
        transp.close()

    def test_subprocess_exitcode(self):
        connect = self.loop.subprocess_shell(
            functools.partial(MySubprocessProtocol, self.loop),
            'exit 7', stdin=None, stdout=None, stderr=None)
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.completed)
        self.assertEqual(7, proto.returncode)
        transp.close()

    def test_subprocess_close_after_finish(self):
        # With all stdio disabled, pipe transports are None and close() on an
        # already-finished transport returns None without error.
        connect = self.loop.subprocess_shell(
            functools.partial(MySubprocessProtocol, self.loop),
            'exit 7', stdin=None, stdout=None, stderr=None)
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.assertIsNone(transp.get_pipe_transport(0))
        self.assertIsNone(transp.get_pipe_transport(1))
        self.assertIsNone(transp.get_pipe_transport(2))
        self.loop.run_until_complete(proto.completed)
        self.assertEqual(7, proto.returncode)
        self.assertIsNone(transp.close())

    def test_subprocess_kill(self):
        prog = os.path.join(os.path.dirname(__file__), 'echo.py')

        connect = self.loop.subprocess_exec(
            functools.partial(MySubprocessProtocol, self.loop),
            sys.executable, prog)
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.connected)

        transp.kill()
        self.loop.run_until_complete(proto.completed)
        self.check_killed(proto.returncode)
        transp.close()

    def test_subprocess_terminate(self):
        prog = os.path.join(os.path.dirname(__file__), 'echo.py')

        connect = self.loop.subprocess_exec(
            functools.partial(MySubprocessProtocol, self.loop),
            sys.executable, prog)
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.connected)

        transp.terminate()
        self.loop.run_until_complete(proto.completed)
        self.check_terminated(proto.returncode)
        transp.close()

    @unittest.skipIf(sys.platform == 'win32', "Don't have SIGHUP")
    def test_subprocess_send_signal(self):
        prog = os.path.join(os.path.dirname(__file__), 'echo.py')

        connect = self.loop.subprocess_exec(
            functools.partial(MySubprocessProtocol, self.loop),
            sys.executable, prog)
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.connected)

        transp.send_signal(signal.SIGHUP)
        self.loop.run_until_complete(proto.completed)
        self.assertEqual(-signal.SIGHUP, proto.returncode)
        transp.close()

    def test_subprocess_stderr(self):
        # echo2.py writes 'OUT:' + input to stdout and 'ERR:' + input to
        # stderr; data[1]/data[2] collect stdout/stderr respectively.
        prog = os.path.join(os.path.dirname(__file__), 'echo2.py')

        connect = self.loop.subprocess_exec(
            functools.partial(MySubprocessProtocol, self.loop),
            sys.executable, prog)
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.connected)

        stdin = transp.get_pipe_transport(0)
        stdin.write(b'test')

        self.loop.run_until_complete(proto.completed)
        transp.close()

        self.assertEqual(b'OUT:test', proto.data[1])
        self.assertTrue(proto.data[2].startswith(b'ERR:test'), proto.data[2])
        self.assertEqual(0, proto.returncode)

    def test_subprocess_stderr_redirect_to_stdout(self):
        # With stderr=subprocess.STDOUT both streams arrive on fd 1 and
        # there is no stderr pipe transport at all.
        prog = os.path.join(os.path.dirname(__file__), 'echo2.py')

        connect = self.loop.subprocess_exec(
            functools.partial(MySubprocessProtocol, self.loop),
            sys.executable, prog, stderr=subprocess.STDOUT)
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.connected)

        stdin = transp.get_pipe_transport(0)
        self.assertIsNotNone(transp.get_pipe_transport(1))
        self.assertIsNone(transp.get_pipe_transport(2))

        stdin.write(b'test')
        self.loop.run_until_complete(proto.completed)
        self.assertTrue(proto.data[1].startswith(b'OUT:testERR:test'),
                        proto.data[1])
        self.assertEqual(b'', proto.data[2])

        transp.close()
        self.assertEqual(0, proto.returncode)

    def test_subprocess_close_client_stream(self):
        # echo3.py reports write failures on stderr; closing our read end of
        # the child's stdout makes the child's writes fail with BrokenPipeError.
        prog = os.path.join(os.path.dirname(__file__), 'echo3.py')

        connect = self.loop.subprocess_exec(
            functools.partial(MySubprocessProtocol, self.loop),
            sys.executable, prog)
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.connected)

        stdin = transp.get_pipe_transport(0)
        stdout = transp.get_pipe_transport(1)
        stdin.write(b'test')
        self.loop.run_until_complete(proto.got_data[1].wait())
        self.assertEqual(b'OUT:test', proto.data[1])

        stdout.close()
        self.loop.run_until_complete(proto.disconnects[1])
        stdin.write(b'xxx')
        self.loop.run_until_complete(proto.got_data[2].wait())
        if sys.platform != 'win32':
            self.assertEqual(b'ERR:BrokenPipeError', proto.data[2])
        else:
            # After closing the read-end of a pipe, writing to the
            # write-end using os.write() fails with errno==EINVAL and
GetLastError()==ERROR_INVALID_NAME on Windows!?! (Using # WriteFile() we get ERROR_BROKEN_PIPE as expected.) self.assertEqual(b'ERR:OSError', proto.data[2]) with test_utils.disable_logger(): transp.close() self.loop.run_until_complete(proto.completed) self.check_killed(proto.returncode) def test_subprocess_wait_no_same_group(self): # start the new process in a new session connect = self.loop.subprocess_shell( functools.partial(MySubprocessProtocol, self.loop), 'exit 7', stdin=None, stdout=None, stderr=None, start_new_session=True) _, proto = yield self.loop.run_until_complete(connect) self.assertIsInstance(proto, MySubprocessProtocol) self.loop.run_until_complete(proto.completed) self.assertEqual(7, proto.returncode) def test_subprocess_exec_invalid_args(self): @asyncio.coroutine def connect(**kwds): yield from self.loop.subprocess_exec( asyncio.SubprocessProtocol, 'pwd', **kwds) with self.assertRaises(ValueError): self.loop.run_until_complete(connect(universal_newlines=True)) with self.assertRaises(ValueError): self.loop.run_until_complete(connect(bufsize=4096)) with self.assertRaises(ValueError): self.loop.run_until_complete(connect(shell=True)) def test_subprocess_shell_invalid_args(self): @asyncio.coroutine def connect(cmd=None, **kwds): if not cmd: cmd = 'pwd' yield from self.loop.subprocess_shell( asyncio.SubprocessProtocol, cmd, **kwds) with self.assertRaises(ValueError): self.loop.run_until_complete(connect(['ls', '-l'])) with self.assertRaises(ValueError): self.loop.run_until_complete(connect(universal_newlines=True)) with self.assertRaises(ValueError): self.loop.run_until_complete(connect(bufsize=4096)) with self.assertRaises(ValueError): self.loop.run_until_complete(connect(shell=False)) if sys.platform == 'win32': class SelectEventLoopTests(EventLoopTestsMixin, test_utils.TestCase): def create_event_loop(self): return asyncio.SelectorEventLoop() class ProactorEventLoopTests(EventLoopTestsMixin, SubprocessTestsMixin, test_utils.TestCase): def 
create_event_loop(self): return asyncio.ProactorEventLoop() if not sslproto._is_sslproto_available(): def test_create_ssl_connection(self): raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)") def test_create_server_ssl(self): raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)") def test_create_server_ssl_verify_failed(self): raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)") def test_create_server_ssl_match_failed(self): raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)") def test_create_server_ssl_verified(self): raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)") def test_legacy_create_ssl_connection(self): raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL") def test_legacy_create_server_ssl(self): raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL") def test_legacy_create_server_ssl_verify_failed(self): raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL") def test_legacy_create_server_ssl_match_failed(self): raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL") def test_legacy_create_server_ssl_verified(self): raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL") def test_reader_callback(self): raise unittest.SkipTest("IocpEventLoop does not have add_reader()") def test_reader_callback_cancel(self): raise unittest.SkipTest("IocpEventLoop does not have add_reader()") def test_writer_callback(self): raise unittest.SkipTest("IocpEventLoop does not have add_writer()") def test_writer_callback_cancel(self): raise unittest.SkipTest("IocpEventLoop does not have add_writer()") def test_create_datagram_endpoint(self): raise unittest.SkipTest( "IocpEventLoop does not have create_datagram_endpoint()") def test_remove_fds_after_closing(self): raise unittest.SkipTest("IocpEventLoop does not have add_reader()") else: from asyncio import selectors class UnixEventLoopTestsMixin(EventLoopTestsMixin): def setUp(self): super().setUp() watcher = 
asyncio.SafeChildWatcher() watcher.attach_loop(self.loop) asyncio.set_child_watcher(watcher) def tearDown(self): asyncio.set_child_watcher(None) super().tearDown() if hasattr(selectors, 'KqueueSelector'): class KqueueEventLoopTests(UnixEventLoopTestsMixin, SubprocessTestsMixin, test_utils.TestCase): def create_event_loop(self): return asyncio.SelectorEventLoop( selectors.KqueueSelector()) # kqueue doesn't support character devices (PTY) on Mac OS X older # than 10.9 (Maverick) @support.requires_mac_ver(10, 9) # Issue #20667: KqueueEventLoopTests.test_read_pty_output() # hangs on OpenBSD 5.5 @unittest.skipIf(sys.platform.startswith('openbsd'), 'test hangs on OpenBSD') def test_read_pty_output(self): super().test_read_pty_output() # kqueue doesn't support character devices (PTY) on Mac OS X older # than 10.9 (Maverick) @support.requires_mac_ver(10, 9) def test_write_pty(self): super().test_write_pty() if hasattr(selectors, 'EpollSelector'): class EPollEventLoopTests(UnixEventLoopTestsMixin, SubprocessTestsMixin, test_utils.TestCase): def create_event_loop(self): return asyncio.SelectorEventLoop(selectors.EpollSelector()) if hasattr(selectors, 'PollSelector'): class PollEventLoopTests(UnixEventLoopTestsMixin, SubprocessTestsMixin, test_utils.TestCase): def create_event_loop(self): return asyncio.SelectorEventLoop(selectors.PollSelector()) # Should always exist. 
    # Plain select()-based loop: available on every POSIX platform.
    class SelectEventLoopTests(UnixEventLoopTestsMixin,
                               SubprocessTestsMixin,
                               test_utils.TestCase):

        def create_event_loop(self):
            return asyncio.SelectorEventLoop(selectors.SelectSelector())


def noop(*args):
    """Do nothing; used as a trivial callback in Handle/Timer tests."""
    pass


class HandleTests(test_utils.TestCase):
    """Unit tests for asyncio.Handle using a mocked event loop."""

    def setUp(self):
        self.loop = mock.Mock()
        # Debug mode on by default so handles record a source traceback.
        self.loop.get_debug.return_value = True

    def test_handle(self):
        """A Handle stores its callback/args and can be cancelled."""
        def callback(*args):
            return args

        args = ()
        h = asyncio.Handle(callback, args, self.loop)
        self.assertIs(h._callback, callback)
        self.assertIs(h._args, args)
        self.assertFalse(h._cancelled)

        h.cancel()
        self.assertTrue(h._cancelled)

    def test_handle_from_handle(self):
        """Wrapping a Handle in another Handle is rejected (AssertionError)."""
        def callback(*args):
            return args
        h1 = asyncio.Handle(callback, (), loop=self.loop)
        self.assertRaises(
            AssertionError, asyncio.Handle, h1, (), self.loop)

    def test_callback_with_exception(self):
        """An exception raised by the callback goes to the loop's exception handler."""
        def callback():
            raise ValueError()

        self.loop = mock.Mock()
        self.loop.call_exception_handler = mock.Mock()

        h = asyncio.Handle(callback, (), self.loop)
        h._run()

        # The handler receives a context dict describing the failure.
        self.loop.call_exception_handler.assert_called_with({
            'message': test_utils.MockPattern('Exception in callback.*'),
            'exception': mock.ANY,
            'handle': h,
            'source_traceback': h._source_traceback,
        })

    def test_handle_weakref(self):
        """Handles support weak references."""
        wd = weakref.WeakValueDictionary()
        h = asyncio.Handle(lambda: None, (), self.loop)
        wd['h'] = h  # Would fail without __weakref__ slot.
def test_handle_repr(self): self.loop.get_debug.return_value = False # simple function h = asyncio.Handle(noop, (1, 2), self.loop) filename, lineno = test_utils.get_function_source(noop) self.assertEqual(repr(h), '<Handle noop(1, 2) at %s:%s>' % (filename, lineno)) # cancelled handle h.cancel() self.assertEqual(repr(h), '<Handle cancelled>') # decorated function cb = asyncio.coroutine(noop) h = asyncio.Handle(cb, (), self.loop) self.assertEqual(repr(h), '<Handle noop() at %s:%s>' % (filename, lineno)) # partial function cb = functools.partial(noop, 1, 2) h = asyncio.Handle(cb, (3,), self.loop) regex = (r'^<Handle noop\(1, 2\)\(3\) at %s:%s>$' % (re.escape(filename), lineno)) self.assertRegex(repr(h), regex) # partial method if sys.version_info >= (3, 4): method = HandleTests.test_handle_repr cb = functools.partialmethod(method) filename, lineno = test_utils.get_function_source(method) h = asyncio.Handle(cb, (), self.loop) cb_regex = r'<function HandleTests.test_handle_repr .*>' cb_regex = (r'functools.partialmethod\(%s, , \)\(\)' % cb_regex) regex = (r'^<Handle %s at %s:%s>$' % (cb_regex, re.escape(filename), lineno)) self.assertRegex(repr(h), regex) def test_handle_repr_debug(self): self.loop.get_debug.return_value = True # simple function create_filename = __file__ create_lineno = sys._getframe().f_lineno + 1 h = asyncio.Handle(noop, (1, 2), self.loop) filename, lineno = test_utils.get_function_source(noop) self.assertEqual(repr(h), '<Handle noop(1, 2) at %s:%s created at %s:%s>' % (filename, lineno, create_filename, create_lineno)) # cancelled handle h.cancel() self.assertEqual( repr(h), '<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>' % (filename, lineno, create_filename, create_lineno)) # double cancellation won't overwrite _repr h.cancel() self.assertEqual( repr(h), '<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>' % (filename, lineno, create_filename, create_lineno)) def test_handle_source_traceback(self): loop = 
asyncio.get_event_loop_policy().new_event_loop() loop.set_debug(True) self.set_event_loop(loop) def check_source_traceback(h): lineno = sys._getframe(1).f_lineno - 1 self.assertIsInstance(h._source_traceback, list) self.assertEqual(h._source_traceback[-1][:3], (__file__, lineno, 'test_handle_source_traceback')) # call_soon h = loop.call_soon(noop) check_source_traceback(h) # call_soon_threadsafe h = loop.call_soon_threadsafe(noop) check_source_traceback(h) # call_later h = loop.call_later(0, noop) check_source_traceback(h) # call_at h = loop.call_later(0, noop) check_source_traceback(h) class TimerTests(unittest.TestCase): def setUp(self): self.loop = mock.Mock() def test_hash(self): when = time.monotonic() h = asyncio.TimerHandle(when, lambda: False, (), mock.Mock()) self.assertEqual(hash(h), hash(when)) def test_timer(self): def callback(*args): return args args = (1, 2, 3) when = time.monotonic() h = asyncio.TimerHandle(when, callback, args, mock.Mock()) self.assertIs(h._callback, callback) self.assertIs(h._args, args) self.assertFalse(h._cancelled) # cancel h.cancel() self.assertTrue(h._cancelled) self.assertIsNone(h._callback) self.assertIsNone(h._args) # when cannot be None self.assertRaises(AssertionError, asyncio.TimerHandle, None, callback, args, self.loop) def test_timer_repr(self): self.loop.get_debug.return_value = False # simple function h = asyncio.TimerHandle(123, noop, (), self.loop) src = test_utils.get_function_source(noop) self.assertEqual(repr(h), '<TimerHandle when=123 noop() at %s:%s>' % src) # cancelled handle h.cancel() self.assertEqual(repr(h), '<TimerHandle cancelled when=123>') def test_timer_repr_debug(self): self.loop.get_debug.return_value = True # simple function create_filename = __file__ create_lineno = sys._getframe().f_lineno + 1 h = asyncio.TimerHandle(123, noop, (), self.loop) filename, lineno = test_utils.get_function_source(noop) self.assertEqual(repr(h), '<TimerHandle when=123 noop() ' 'at %s:%s created at %s:%s>' % 
(filename, lineno, create_filename, create_lineno)) # cancelled handle h.cancel() self.assertEqual(repr(h), '<TimerHandle cancelled when=123 noop() ' 'at %s:%s created at %s:%s>' % (filename, lineno, create_filename, create_lineno)) def test_timer_comparison(self): def callback(*args): return args when = time.monotonic() h1 = asyncio.TimerHandle(when, callback, (), self.loop) h2 = asyncio.TimerHandle(when, callback, (), self.loop) # TODO: Use assertLess etc. self.assertFalse(h1 < h2) self.assertFalse(h2 < h1) self.assertTrue(h1 <= h2) self.assertTrue(h2 <= h1) self.assertFalse(h1 > h2) self.assertFalse(h2 > h1) self.assertTrue(h1 >= h2) self.assertTrue(h2 >= h1) self.assertTrue(h1 == h2) self.assertFalse(h1 != h2) h2.cancel() self.assertFalse(h1 == h2) h1 = asyncio.TimerHandle(when, callback, (), self.loop) h2 = asyncio.TimerHandle(when + 10.0, callback, (), self.loop) self.assertTrue(h1 < h2) self.assertFalse(h2 < h1) self.assertTrue(h1 <= h2) self.assertFalse(h2 <= h1) self.assertFalse(h1 > h2) self.assertTrue(h2 > h1) self.assertFalse(h1 >= h2) self.assertTrue(h2 >= h1) self.assertFalse(h1 == h2) self.assertTrue(h1 != h2) h3 = asyncio.Handle(callback, (), self.loop) self.assertIs(NotImplemented, h1.__eq__(h3)) self.assertIs(NotImplemented, h1.__ne__(h3)) class AbstractEventLoopTests(unittest.TestCase): def test_not_implemented(self): f = mock.Mock() loop = asyncio.AbstractEventLoop() self.assertRaises( NotImplementedError, loop.run_forever) self.assertRaises( NotImplementedError, loop.run_until_complete, None) self.assertRaises( NotImplementedError, loop.stop) self.assertRaises( NotImplementedError, loop.is_running) self.assertRaises( NotImplementedError, loop.is_closed) self.assertRaises( NotImplementedError, loop.close) self.assertRaises( NotImplementedError, loop.create_task, None) self.assertRaises( NotImplementedError, loop.call_later, None, None) self.assertRaises( NotImplementedError, loop.call_at, f, f) self.assertRaises( NotImplementedError, 
loop.call_soon, None) self.assertRaises( NotImplementedError, loop.time) self.assertRaises( NotImplementedError, loop.call_soon_threadsafe, None) self.assertRaises( NotImplementedError, loop.run_in_executor, f, f) self.assertRaises( NotImplementedError, loop.set_default_executor, f) self.assertRaises( NotImplementedError, loop.getaddrinfo, 'localhost', 8080) self.assertRaises( NotImplementedError, loop.getnameinfo, ('localhost', 8080)) self.assertRaises( NotImplementedError, loop.create_connection, f) self.assertRaises( NotImplementedError, loop.create_server, f) self.assertRaises( NotImplementedError, loop.create_datagram_endpoint, f) self.assertRaises( NotImplementedError, loop.add_reader, 1, f) self.assertRaises( NotImplementedError, loop.remove_reader, 1) self.assertRaises( NotImplementedError, loop.add_writer, 1, f) self.assertRaises( NotImplementedError, loop.remove_writer, 1) self.assertRaises( NotImplementedError, loop.sock_recv, f, 10) self.assertRaises( NotImplementedError, loop.sock_sendall, f, 10) self.assertRaises( NotImplementedError, loop.sock_connect, f, f) self.assertRaises( NotImplementedError, loop.sock_accept, f) self.assertRaises( NotImplementedError, loop.add_signal_handler, 1, f) self.assertRaises( NotImplementedError, loop.remove_signal_handler, 1) self.assertRaises( NotImplementedError, loop.remove_signal_handler, 1) self.assertRaises( NotImplementedError, loop.connect_read_pipe, f, mock.sentinel.pipe) self.assertRaises( NotImplementedError, loop.connect_write_pipe, f, mock.sentinel.pipe) self.assertRaises( NotImplementedError, loop.subprocess_shell, f, mock.sentinel) self.assertRaises( NotImplementedError, loop.subprocess_exec, f) self.assertRaises( NotImplementedError, loop.set_exception_handler, f) self.assertRaises( NotImplementedError, loop.default_exception_handler, f) self.assertRaises( NotImplementedError, loop.call_exception_handler, f) self.assertRaises( NotImplementedError, loop.get_debug) self.assertRaises( NotImplementedError, 
loop.set_debug, f) class ProtocolsAbsTests(unittest.TestCase): def test_empty(self): f = mock.Mock() p = asyncio.Protocol() self.assertIsNone(p.connection_made(f)) self.assertIsNone(p.connection_lost(f)) self.assertIsNone(p.data_received(f)) self.assertIsNone(p.eof_received()) dp = asyncio.DatagramProtocol() self.assertIsNone(dp.connection_made(f)) self.assertIsNone(dp.connection_lost(f)) self.assertIsNone(dp.error_received(f)) self.assertIsNone(dp.datagram_received(f, f)) sp = asyncio.SubprocessProtocol() self.assertIsNone(sp.connection_made(f)) self.assertIsNone(sp.connection_lost(f)) self.assertIsNone(sp.pipe_data_received(1, f)) self.assertIsNone(sp.pipe_connection_lost(1, f)) self.assertIsNone(sp.process_exited()) class PolicyTests(unittest.TestCase): def test_event_loop_policy(self): policy = asyncio.AbstractEventLoopPolicy() self.assertRaises(NotImplementedError, policy.get_event_loop) self.assertRaises(NotImplementedError, policy.set_event_loop, object()) self.assertRaises(NotImplementedError, policy.new_event_loop) self.assertRaises(NotImplementedError, policy.get_child_watcher) self.assertRaises(NotImplementedError, policy.set_child_watcher, object()) def test_get_event_loop(self): policy = asyncio.DefaultEventLoopPolicy() self.assertIsNone(policy._local._loop) loop = policy.get_event_loop() self.assertIsInstance(loop, asyncio.AbstractEventLoop) self.assertIs(policy._local._loop, loop) self.assertIs(loop, policy.get_event_loop()) loop.close() def test_get_event_loop_calls_set_event_loop(self): policy = asyncio.DefaultEventLoopPolicy() with mock.patch.object( policy, "set_event_loop", wraps=policy.set_event_loop) as m_set_event_loop: loop = policy.get_event_loop() # policy._local._loop must be set through .set_event_loop() # (the unix DefaultEventLoopPolicy needs this call to attach # the child watcher correctly) m_set_event_loop.assert_called_with(loop) loop.close() def test_get_event_loop_after_set_none(self): policy = asyncio.DefaultEventLoopPolicy() 
        policy.set_event_loop(None)
        # Once the loop is explicitly unset, get_event_loop must refuse
        # to create a new one implicitly.
        self.assertRaises(RuntimeError, policy.get_event_loop)

    @mock.patch('asyncio.events.threading.current_thread')
    def test_get_event_loop_thread(self, m_current_thread):
        """get_event_loop raises in a non-main thread with no loop set."""

        def f():
            policy = asyncio.DefaultEventLoopPolicy()
            self.assertRaises(RuntimeError, policy.get_event_loop)

        th = threading.Thread(target=f)
        th.start()
        th.join()

    def test_new_event_loop(self):
        """new_event_loop returns a fresh AbstractEventLoop instance."""
        policy = asyncio.DefaultEventLoopPolicy()

        loop = policy.new_event_loop()
        self.assertIsInstance(loop, asyncio.AbstractEventLoop)
        loop.close()

    def test_set_event_loop(self):
        """set_event_loop installs a loop and rejects non-loop objects."""
        policy = asyncio.DefaultEventLoopPolicy()
        old_loop = policy.get_event_loop()

        # Arbitrary objects are rejected up front.
        self.assertRaises(AssertionError, policy.set_event_loop, object())

        loop = policy.new_event_loop()
        policy.set_event_loop(loop)
        self.assertIs(loop, policy.get_event_loop())
        self.assertIsNot(old_loop, policy.get_event_loop())
        loop.close()
        old_loop.close()

    def test_get_event_loop_policy(self):
        """The global policy is a singleton AbstractEventLoopPolicy."""
        policy = asyncio.get_event_loop_policy()
        self.assertIsInstance(policy, asyncio.AbstractEventLoopPolicy)
        self.assertIs(policy, asyncio.get_event_loop_policy())

    def test_set_event_loop_policy(self):
        """set_event_loop_policy replaces the global policy; rejects non-policies."""
        self.assertRaises(
            AssertionError, asyncio.set_event_loop_policy, object())

        old_policy = asyncio.get_event_loop_policy()

        policy = asyncio.DefaultEventLoopPolicy()
        asyncio.set_event_loop_policy(policy)
        self.assertIs(policy, asyncio.get_event_loop_policy())
        self.assertIsNot(policy, old_policy)


if __name__ == '__main__':
    unittest.main()
# NOTE(review): dataset artifact — boundary between two concatenated source
# files (asyncio test_events.py above, tornado/web.py below). As a bare
# expression this raised NameError at import time; kept as a comment.
#!/usr/bin/env python # # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """``tornado.web`` provides a simple web framework with asynchronous features that allow it to scale to large numbers of open connections, making it ideal for `long polling <http://en.wikipedia.org/wiki/Push_technology#Long_polling>`_. Here is a simple "Hello, world" example app: .. testcode:: import tornado.ioloop import tornado.web class MainHandler(tornado.web.RequestHandler): def get(self): self.write("Hello, world") if __name__ == "__main__": application = tornado.web.Application([ (r"/", MainHandler), ]) application.listen(8888) tornado.ioloop.IOLoop.current().start() .. testoutput:: :hide: See the :doc:`guide` for additional information. Thread-safety notes ------------------- In general, methods on `RequestHandler` and elsewhere in Tornado are not thread-safe. In particular, methods such as `~RequestHandler.write()`, `~RequestHandler.finish()`, and `~RequestHandler.flush()` must only be called from the main thread. If you use multiple threads it is important to use `.IOLoop.add_callback` to transfer control back to the main thread before finishing the request. 
""" from __future__ import (absolute_import, division, print_function, with_statement) import base64 import binascii import datetime import email.utils import functools import gzip import hashlib import hmac import mimetypes import numbers import os.path import re import stat import sys import threading import time import tornado import traceback import types from io import BytesIO from tornado.concurrent import Future, is_future from tornado import escape from tornado import gen from tornado import httputil from tornado import iostream from tornado import locale from tornado.log import access_log, app_log, gen_log from tornado import stack_context from tornado import template from tornado.escape import utf8, _unicode from tornado.util import (import_object, ObjectDict, raise_exc_info, unicode_type, _websocket_mask) from tornado.httputil import split_host_and_port try: import Cookie # py2 except ImportError: import http.cookies as Cookie # py3 try: import urlparse # py2 except ImportError: import urllib.parse as urlparse # py3 try: from urllib import urlencode # py2 except ImportError: from urllib.parse import urlencode # py3 MIN_SUPPORTED_SIGNED_VALUE_VERSION = 1 """The oldest signed value version supported by this version of Tornado. Signed values older than this version cannot be decoded. .. versionadded:: 3.2.1 """ MAX_SUPPORTED_SIGNED_VALUE_VERSION = 2 """The newest signed value version supported by this version of Tornado. Signed values newer than this version cannot be decoded. .. versionadded:: 3.2.1 """ DEFAULT_SIGNED_VALUE_VERSION = 2 """The signed value version produced by `.RequestHandler.create_signed_value`. May be overridden by passing a ``version`` keyword argument. .. versionadded:: 3.2.1 """ DEFAULT_SIGNED_VALUE_MIN_VERSION = 1 """The oldest signed value accepted by `.RequestHandler.get_secure_cookie`. May be overridden by passing a ``min_version`` keyword argument. .. 
versionadded:: 3.2.1 """ class RequestHandler(object): """Base class for HTTP request handlers. Subclasses must define at least one of the methods defined in the "Entry points" section below. """ SUPPORTED_METHODS = ("GET", "HEAD", "POST", "DELETE", "PATCH", "PUT", "OPTIONS") _template_loaders = {} # {path: template.BaseLoader} _template_loader_lock = threading.Lock() _remove_control_chars_regex = re.compile(r"[\x00-\x08\x0e-\x1f]") def __init__(self, application, request, **kwargs): super(RequestHandler, self).__init__() self.application = application self.request = request self._headers_written = False self._finished = False self._auto_finish = True self._transforms = None # will be set in _execute self._prepared_future = None self.path_args = None self.path_kwargs = None self.ui = ObjectDict((n, self._ui_method(m)) for n, m in application.ui_methods.items()) # UIModules are available as both `modules` and `_tt_modules` in the # template namespace. Historically only `modules` was available # but could be clobbered by user additions to the namespace. # The template {% module %} directive looks in `_tt_modules` to avoid # possible conflicts. self.ui["_tt_modules"] = _UIModuleNamespace(self, application.ui_modules) self.ui["modules"] = self.ui["_tt_modules"] self.clear() self.request.connection.set_close_callback(self.on_connection_close) self.initialize(**kwargs) def initialize(self): """Hook for subclass initialization. A dictionary passed as the third argument of a url spec will be supplied as keyword arguments to initialize(). Example:: class ProfileHandler(RequestHandler): def initialize(self, database): self.database = database def get(self, username): ... 
app = Application([ (r'/user/(.*)', ProfileHandler, dict(database=database)), ]) """ pass @property def settings(self): """An alias for `self.application.settings <Application.settings>`.""" return self.application.settings def head(self, *args, **kwargs): raise HTTPError(405) def get(self, *args, **kwargs): raise HTTPError(405) def post(self, *args, **kwargs): raise HTTPError(405) def delete(self, *args, **kwargs): raise HTTPError(405) def patch(self, *args, **kwargs): raise HTTPError(405) def put(self, *args, **kwargs): raise HTTPError(405) def options(self, *args, **kwargs): raise HTTPError(405) def prepare(self): """Called at the beginning of a request before `get`/`post`/etc. Override this method to perform common initialization regardless of the request method. Asynchronous support: Decorate this method with `.gen.coroutine` or `.return_future` to make it asynchronous (the `asynchronous` decorator cannot be used on `prepare`). If this method returns a `.Future` execution will not proceed until the `.Future` is done. .. versionadded:: 3.1 Asynchronous support. """ pass def on_finish(self): """Called after the end of a request. Override this method to perform cleanup, logging, etc. This method is a counterpart to `prepare`. ``on_finish`` may not produce any output, as it is called after the response has been sent to the client. """ pass def on_connection_close(self): """Called in async handlers if the client closed the connection. Override this to clean up resources associated with long-lived connections. Note that this method is called only if the connection was closed during asynchronous processing; if you need to do cleanup after every request override `on_finish` instead. Proxies may keep a connection open for a time (perhaps indefinitely) after the client has gone away, so this method may not be called promptly after the end user closes their connection. 
""" if _has_stream_request_body(self.__class__): if not self.request.body.done(): self.request.body.set_exception(iostream.StreamClosedError()) self.request.body.exception() def clear(self): """Resets all headers and content for this response.""" self._headers = httputil.HTTPHeaders({ "Server": "TornadoServer/%s" % tornado.version, "Content-Type": "text/html; charset=UTF-8", "Date": httputil.format_timestamp(time.time()), }) self.set_default_headers() self._write_buffer = [] self._status_code = 200 self._reason = httputil.responses[200] def set_default_headers(self): """Override this to set HTTP headers at the beginning of the request. For example, this is the place to set a custom ``Server`` header. Note that setting such headers in the normal flow of request processing may not do what you want, since headers may be reset during error handling. """ pass def set_status(self, status_code, reason=None): """Sets the status code for our response. :arg int status_code: Response status code. If ``reason`` is ``None``, it must be present in `httplib.responses <http.client.responses>`. :arg string reason: Human-readable reason phrase describing the status code. If ``None``, it will be filled in from `httplib.responses <http.client.responses>`. """ self._status_code = status_code if reason is not None: self._reason = escape.native_str(reason) else: try: self._reason = httputil.responses[status_code] except KeyError: raise ValueError("unknown status code %d", status_code) def get_status(self): """Returns the status code for our response.""" return self._status_code def set_header(self, name, value): """Sets the given response header name and value. If a datetime is given, we automatically format it according to the HTTP specification. If the value is not a string, we convert it to a string. All header values are then encoded as UTF-8. """ self._headers[name] = self._convert_header_value(value) def add_header(self, name, value): """Adds the given response header and value. 
Unlike `set_header`, `add_header` may be called multiple times to return multiple values for the same header. """ self._headers.add(name, self._convert_header_value(value)) def clear_header(self, name): """Clears an outgoing header, undoing a previous `set_header` call. Note that this method does not apply to multi-valued headers set by `add_header`. """ if name in self._headers: del self._headers[name] _INVALID_HEADER_CHAR_RE = re.compile(br"[\x00-\x1f]") def _convert_header_value(self, value): if isinstance(value, bytes): pass elif isinstance(value, unicode_type): value = value.encode('utf-8') elif isinstance(value, numbers.Integral): # return immediately since we know the converted value will be safe return str(value) elif isinstance(value, datetime.datetime): return httputil.format_timestamp(value) else: raise TypeError("Unsupported header value %r" % value) # If \n is allowed into the header, it is possible to inject # additional headers or split the request. Also cap length to # prevent obviously erroneous values. if (len(value) > 4000 or RequestHandler._INVALID_HEADER_CHAR_RE.search(value)): raise ValueError("Unsafe header value %r", value) return value _ARG_DEFAULT = [] def get_argument(self, name, default=_ARG_DEFAULT, strip=True): """Returns the value of the argument with the given name. If default is not provided, the argument is considered to be required, and we raise a `MissingArgumentError` if it is missing. If the argument appears in the url more than once, we return the last value. The returned value is always unicode. """ return self._get_argument(name, default, self.request.arguments, strip) def get_arguments(self, name, strip=True): """Returns a list of the arguments with the given name. If the argument is not present, returns an empty list. The returned values are always unicode. """ # Make sure `get_arguments` isn't accidentally being called with a # positional argument that's assumed to be a default (like in # `get_argument`.) 
        assert isinstance(strip, bool)
        return self._get_arguments(name, self.request.arguments, strip)

    def get_body_argument(self, name, default=_ARG_DEFAULT, strip=True):
        """Returns the value of the argument with the given name
        from the request body.

        If default is not provided, the argument is considered to be
        required, and we raise a `MissingArgumentError` if it is missing.

        If the argument appears in the url more than once, we return the
        last value.

        The returned value is always unicode.

        .. versionadded:: 3.2
        """
        return self._get_argument(name, default, self.request.body_arguments,
                                  strip)

    def get_body_arguments(self, name, strip=True):
        """Returns a list of the body arguments with the given name.

        If the argument is not present, returns an empty list.

        The returned values are always unicode.

        .. versionadded:: 3.2
        """
        return self._get_arguments(name, self.request.body_arguments, strip)

    def get_query_argument(self, name, default=_ARG_DEFAULT, strip=True):
        """Returns the value of the argument with the given name
        from the request query string.

        If default is not provided, the argument is considered to be
        required, and we raise a `MissingArgumentError` if it is missing.

        If the argument appears in the url more than once, we return the
        last value.

        The returned value is always unicode.

        .. versionadded:: 3.2
        """
        return self._get_argument(name, default,
                                  self.request.query_arguments, strip)

    def get_query_arguments(self, name, strip=True):
        """Returns a list of the query arguments with the given name.

        If the argument is not present, returns an empty list.

        The returned values are always unicode.

        .. versionadded:: 3.2
        """
        return self._get_arguments(name, self.request.query_arguments, strip)

    def _get_argument(self, name, default, source, strip=True):
        # Shared implementation behind get_argument / get_body_argument /
        # get_query_argument.  "Last value wins" when the argument repeats.
        args = self._get_arguments(name, source, strip=strip)
        if not args:
            # _ARG_DEFAULT is compared by identity: no default supplied
            # means the argument is required.
            if default is self._ARG_DEFAULT:
                raise MissingArgumentError(name)
            return default
        return args[-1]

    def _get_arguments(self, name, source, strip=True):
        # Decode every raw value for ``name`` from ``source`` (a dict of
        # name -> list of byte strings); missing names yield [].
        values = []
        for v in source.get(name, []):
            v = self.decode_argument(v, name=name)
            if isinstance(v, unicode_type):
                # Get rid of any weird control chars (unless decoding gave
                # us bytes, in which case leave it alone)
                v = RequestHandler._remove_control_chars_regex.sub(" ", v)
            if strip:
                v = v.strip()
            values.append(v)
        return values

    def decode_argument(self, value, name=None):
        """Decodes an argument from the request.

        The argument has been percent-decoded and is now a byte string.
        By default, this method decodes the argument as utf-8 and returns
        a unicode string, but this may be overridden in subclasses.

        This method is used as a filter for both `get_argument()` and for
        values extracted from the url and passed to `get()`/`post()`/etc.

        The name of the argument is provided if known, but may be None
        (e.g. for unnamed groups in the url regex).
        """
        try:
            return _unicode(value)
        except UnicodeDecodeError:
            # Truncate the value in the message to avoid dumping huge or
            # attacker-controlled payloads into logs/responses.
            raise HTTPError(400, "Invalid unicode in %s: %r" %
                            (name or "url", value[:40]))

    @property
    def cookies(self):
        """An alias for
        `self.request.cookies <.httputil.HTTPServerRequest.cookies>`."""
        return self.request.cookies

    def get_cookie(self, name, default=None):
        """Gets the value of the cookie with the given name, else default."""
        if self.request.cookies is not None and name in self.request.cookies:
            return self.request.cookies[name].value
        return default

    def set_cookie(self, name, value, domain=None, expires=None, path="/",
                   expires_days=None, **kwargs):
        """Sets the given cookie name/value with the given options.

        Additional keyword arguments are set on the Cookie.Morsel
        directly.
        See http://docs.python.org/library/cookie.html#morsel-objects
        for available attributes.
        """
        # The cookie library only accepts type str, in both python 2 and 3
        name = escape.native_str(name)
        value = escape.native_str(value)
        if re.search(r"[\x00-\x20]", name + value):
            # Don't let us accidentally inject bad stuff
            raise ValueError("Invalid cookie %r: %r" % (name, value))
        # Outgoing cookies are accumulated in a side object (_new_cookie)
        # so they can be overwritten before the headers are flushed.
        if not hasattr(self, "_new_cookie"):
            self._new_cookie = Cookie.SimpleCookie()
        if name in self._new_cookie:
            del self._new_cookie[name]
        self._new_cookie[name] = value
        morsel = self._new_cookie[name]
        if domain:
            morsel["domain"] = domain
        if expires_days is not None and not expires:
            # An explicit ``expires`` timestamp takes precedence over
            # ``expires_days``.
            expires = datetime.datetime.utcnow() + datetime.timedelta(
                days=expires_days)
        if expires:
            morsel["expires"] = httputil.format_timestamp(expires)
        if path:
            morsel["path"] = path
        for k, v in kwargs.items():
            if k == 'max_age':
                k = 'max-age'

            # skip falsy values for httponly and secure flags because
            # SimpleCookie sets them regardless
            if k in ['httponly', 'secure'] and not v:
                continue

            morsel[k] = v

    def clear_cookie(self, name, path="/", domain=None):
        """Deletes the cookie with the given name.

        Due to limitations of the cookie protocol, you must pass the same
        path and domain to clear a cookie as were used when that cookie
        was set (but there is no way to find out on the server side
        which values were used for a given cookie).
        """
        # Cookies are "deleted" by setting an empty value with an
        # expiration date in the past.
        expires = datetime.datetime.utcnow() - datetime.timedelta(days=365)
        self.set_cookie(name, value="", path=path, expires=expires,
                        domain=domain)

    def clear_all_cookies(self, path="/", domain=None):
        """Deletes all the cookies the user sent with this request.

        See `clear_cookie` for more information on the path and domain
        parameters.

        .. versionchanged:: 3.2

           Added the ``path`` and ``domain`` parameters.
        """
        for name in self.request.cookies:
            self.clear_cookie(name, path=path, domain=domain)

    def set_secure_cookie(self, name, value, expires_days=30, version=None,
                          **kwargs):
        """Signs and timestamps a cookie so it cannot be forged.

        You must specify the ``cookie_secret`` setting in your Application
        to use this method. It should be a long, random sequence of bytes
        to be used as the HMAC secret for the signature.

        To read a cookie set with this method, use `get_secure_cookie()`.

        Note that the ``expires_days`` parameter sets the lifetime of the
        cookie in the browser, but is independent of the ``max_age_days``
        parameter to `get_secure_cookie`.

        Secure cookies may contain arbitrary byte values, not just unicode
        strings (unlike regular cookies)

        .. versionchanged:: 3.2.1

           Added the ``version`` argument.  Introduced cookie version 2
           and made it the default.
        """
        self.set_cookie(name, self.create_signed_value(name, value,
                                                       version=version),
                        expires_days=expires_days, **kwargs)

    def create_signed_value(self, name, value, version=None):
        """Signs and timestamps a string so it cannot be forged.

        Normally used via set_secure_cookie, but provided as a separate
        method for non-cookie uses.  To decode a value not stored
        as a cookie use the optional value argument to get_secure_cookie.

        .. versionchanged:: 3.2.1

           Added the ``version`` argument.  Introduced cookie version 2
           and made it the default.
        """
        self.require_setting("cookie_secret", "secure cookies")
        secret = self.application.settings["cookie_secret"]
        key_version = None
        if isinstance(secret, dict):
            # A dict of secrets enables key rotation; key_version selects
            # which key signs new values.
            if self.application.settings.get("key_version") is None:
                raise Exception("key_version setting must be used for secret_key dicts")
            key_version = self.application.settings["key_version"]

        return create_signed_value(secret, name, value, version=version,
                                   key_version=key_version)

    def get_secure_cookie(self, name, value=None, max_age_days=31,
                          min_version=None):
        """Returns the given signed cookie if it validates, or None.
        The decoded cookie value is returned as a byte string (unlike
        `get_cookie`).

        .. versionchanged:: 3.2.1

           Added the ``min_version`` argument.  Introduced cookie version 2;
           both versions 1 and 2 are accepted by default.
        """
        self.require_setting("cookie_secret", "secure cookies")
        if value is None:
            value = self.get_cookie(name)
        return decode_signed_value(self.application.settings["cookie_secret"],
                                   name, value, max_age_days=max_age_days,
                                   min_version=min_version)

    def get_secure_cookie_key_version(self, name, value=None):
        """Returns the signing key version of the secure cookie.

        The version is returned as int.
        """
        self.require_setting("cookie_secret", "secure cookies")
        if value is None:
            value = self.get_cookie(name)
        return get_signature_key_version(value)

    def redirect(self, url, permanent=False, status=None):
        """Sends a redirect to the given (optionally relative) URL.

        If the ``status`` argument is specified, that value is used as the
        HTTP status code; otherwise either 301 (permanent) or 302
        (temporary) is chosen based on the ``permanent`` argument.
        The default is 302 (temporary).
        """
        if self._headers_written:
            raise Exception("Cannot redirect after headers have been written")
        if status is None:
            status = 301 if permanent else 302
        else:
            assert isinstance(status, int) and 300 <= status <= 399
        self.set_status(status)
        self.set_header("Location", utf8(url))
        self.finish()

    def write(self, chunk):
        """Writes the given chunk to the output buffer.

        To write the output to the network, use the flush() method below.

        If the given chunk is a dictionary, we write it as JSON and set
        the Content-Type of the response to be ``application/json``.
        (if you want to send JSON as a different ``Content-Type``, call
        set_header *after* calling write()).

        Note that lists are not converted to JSON because of a potential
        cross-site security vulnerability. All JSON output should be
        wrapped in a dictionary. More details at
        http://haacked.com/archive/2009/06/25/json-hijacking.aspx/ and
        https://github.com/facebook/tornado/issues/1009
        """
        if self._finished:
            raise RuntimeError("Cannot write() after finish()")
        if not isinstance(chunk, (bytes, unicode_type, dict)):
            message = "write() only accepts bytes, unicode, and dict objects"
            if isinstance(chunk, list):
                message += ". Lists not accepted for security reasons; see http://www.tornadoweb.org/en/stable/web.html#tornado.web.RequestHandler.write"
            raise TypeError(message)
        if isinstance(chunk, dict):
            chunk = escape.json_encode(chunk)
            self.set_header("Content-Type", "application/json; charset=UTF-8")
        chunk = utf8(chunk)
        self._write_buffer.append(chunk)

    def render(self, template_name, **kwargs):
        """Renders the template with the given arguments as the response."""
        html = self.render_string(template_name, **kwargs)

        # Insert the additional JS and CSS added by the modules on the page
        js_embed = []
        js_files = []
        css_embed = []
        css_files = []
        html_heads = []
        html_bodies = []
        for module in getattr(self, "_active_modules", {}).values():
            embed_part = module.embedded_javascript()
            if embed_part:
                js_embed.append(utf8(embed_part))
            file_part = module.javascript_files()
            if file_part:
                # A module may return a single path or a list of paths.
                if isinstance(file_part, (unicode_type, bytes)):
                    js_files.append(file_part)
                else:
                    js_files.extend(file_part)
            embed_part = module.embedded_css()
            if embed_part:
                css_embed.append(utf8(embed_part))
            file_part = module.css_files()
            if file_part:
                if isinstance(file_part, (unicode_type, bytes)):
                    css_files.append(file_part)
                else:
                    css_files.extend(file_part)
            head_part = module.html_head()
            if head_part:
                html_heads.append(utf8(head_part))
            body_part = module.html_body()
            if body_part:
                html_bodies.append(utf8(body_part))

        def is_absolute(path):
            return any(path.startswith(x) for x in ["/", "http:", "https:"])
        if js_files:
            # Maintain order of JavaScript files given by modules
            paths = []
            unique_paths = set()
            for path in js_files:
                if not is_absolute(path):
                    path = self.static_url(path)
                if path not in unique_paths:
                    paths.append(path)
                    unique_paths.add(path)
            js = ''.join('<script src="' + escape.xhtml_escape(p) +
                         '" type="text/javascript"></script>'
                         for p in paths)
            # Scripts are spliced in just before the closing </body> tag.
            sloc = html.rindex(b'</body>')
            html = html[:sloc] + utf8(js) + b'\n' + html[sloc:]
        if js_embed:
            js = b'<script type="text/javascript">\n//<![CDATA[\n' + \
                b'\n'.join(js_embed) + b'\n//]]>\n</script>'
            sloc = html.rindex(b'</body>')
            html = html[:sloc] + js + b'\n' + html[sloc:]
        if css_files:
            paths = []
            unique_paths = set()
            for path in css_files:
                if not is_absolute(path):
                    path = self.static_url(path)
                if path not in unique_paths:
                    paths.append(path)
                    unique_paths.add(path)
            css = ''.join('<link href="' + escape.xhtml_escape(p) + '" '
                          'type="text/css" rel="stylesheet"/>'
                          for p in paths)
            # Stylesheets and head/body fragments are spliced in before
            # </head> / </body> respectively.
            hloc = html.index(b'</head>')
            html = html[:hloc] + utf8(css) + b'\n' + html[hloc:]
        if css_embed:
            css = b'<style type="text/css">\n' + b'\n'.join(css_embed) + \
                b'\n</style>'
            hloc = html.index(b'</head>')
            html = html[:hloc] + css + b'\n' + html[hloc:]
        if html_heads:
            hloc = html.index(b'</head>')
            html = html[:hloc] + b''.join(html_heads) + b'\n' + html[hloc:]
        if html_bodies:
            hloc = html.index(b'</body>')
            html = html[:hloc] + b''.join(html_bodies) + b'\n' + html[hloc:]
        self.finish(html)

    def render_string(self, template_name, **kwargs):
        """Generate the given template with the given arguments.

        We return the generated byte string (in utf8). To generate and
        write a template as a response, use render() above.
""" # If no template_path is specified, use the path of the calling file template_path = self.get_template_path() if not template_path: frame = sys._getframe(0) web_file = frame.f_code.co_filename while frame.f_code.co_filename == web_file: frame = frame.f_back template_path = os.path.dirname(frame.f_code.co_filename) with RequestHandler._template_loader_lock: if template_path not in RequestHandler._template_loaders: loader = self.create_template_loader(template_path) RequestHandler._template_loaders[template_path] = loader else: loader = RequestHandler._template_loaders[template_path] t = loader.load(template_name) namespace = self.get_template_namespace() namespace.update(kwargs) return t.generate(**namespace) def get_template_namespace(self): """Returns a dictionary to be used as the default template namespace. May be overridden by subclasses to add or modify values. The results of this method will be combined with additional defaults in the `tornado.template` module and keyword arguments to `render` or `render_string`. """ namespace = dict( handler=self, request=self.request, current_user=self.current_user, locale=self.locale, _=self.locale.translate, pgettext=self.locale.pgettext, static_url=self.static_url, xsrf_form_html=self.xsrf_form_html, reverse_url=self.reverse_url ) namespace.update(self.ui) return namespace def create_template_loader(self, template_path): """Returns a new template loader for the given path. May be overridden by subclasses. By default returns a directory-based loader on the given path, using the ``autoescape`` application setting. If a ``template_loader`` application setting is supplied, uses that instead. """ settings = self.application.settings if "template_loader" in settings: return settings["template_loader"] kwargs = {} if "autoescape" in settings: # autoescape=None means "no escaping", so we have to be sure # to only pass this kwarg if the user asked for it. 
kwargs["autoescape"] = settings["autoescape"] return template.Loader(template_path, **kwargs) def flush(self, include_footers=False, callback=None): """Flushes the current output buffer to the network. The ``callback`` argument, if given, can be used for flow control: it will be run when all flushed data has been written to the socket. Note that only one flush callback can be outstanding at a time; if another flush occurs before the previous flush's callback has been run, the previous callback will be discarded. .. versionchanged:: 4.0 Now returns a `.Future` if no callback is given. """ chunk = b"".join(self._write_buffer) self._write_buffer = [] if not self._headers_written: self._headers_written = True for transform in self._transforms: self._status_code, self._headers, chunk = \ transform.transform_first_chunk( self._status_code, self._headers, chunk, include_footers) # Ignore the chunk and only write the headers for HEAD requests if self.request.method == "HEAD": chunk = None # Finalize the cookie headers (which have been stored in a side # object so an outgoing cookie could be overwritten before it # is sent). 
if hasattr(self, "_new_cookie"): for cookie in self._new_cookie.values(): self.add_header("Set-Cookie", cookie.OutputString(None)) start_line = httputil.ResponseStartLine('', self._status_code, self._reason) return self.request.connection.write_headers( start_line, self._headers, chunk, callback=callback) else: for transform in self._transforms: chunk = transform.transform_chunk(chunk, include_footers) # Ignore the chunk and only write the headers for HEAD requests if self.request.method != "HEAD": return self.request.connection.write(chunk, callback=callback) else: future = Future() future.set_result(None) return future def finish(self, chunk=None): """Finishes this response, ending the HTTP request.""" if self._finished: raise RuntimeError("finish() called twice") if chunk is not None: self.write(chunk) # Automatically support ETags and add the Content-Length header if # we have not flushed any content yet. if not self._headers_written: if (self._status_code == 200 and self.request.method in ("GET", "HEAD") and "Etag" not in self._headers): self.set_etag_header() if self.check_etag_header(): self._write_buffer = [] self.set_status(304) if self._status_code == 304: assert not self._write_buffer, "Cannot send body with 304" self._clear_headers_for_304() elif "Content-Length" not in self._headers: content_length = sum(len(part) for part in self._write_buffer) self.set_header("Content-Length", content_length) if hasattr(self.request, "connection"): # Now that the request is finished, clear the callback we # set on the HTTPConnection (which would otherwise prevent the # garbage collection of the RequestHandler when there # are keepalive connections) self.request.connection.set_close_callback(None) self.flush(include_footers=True) self.request.finish() self._log() self._finished = True self.on_finish() # Break up a reference cycle between this handler and the # _ui_module closures to allow for faster GC on CPython. 
        self.ui = None

    def send_error(self, status_code=500, **kwargs):
        """Sends the given HTTP error code to the browser.

        If `flush()` has already been called, it is not possible to send
        an error, so this method will simply terminate the response.
        If output has been written but not yet flushed, it will be discarded
        and replaced with the error page.

        Override `write_error()` to customize the error page that is returned.
        Additional keyword arguments are passed through to `write_error`.
        """
        if self._headers_written:
            gen_log.error("Cannot send error response after headers written")
            if not self._finished:
                # If we get an error between writing headers and finishing,
                # we are unlikely to be able to finish due to a
                # Content-Length mismatch. Try anyway to release the
                # socket.
                try:
                    self.finish()
                except Exception:
                    gen_log.error("Failed to flush partial response",
                                  exc_info=True)
            return
        # Discard any buffered output/headers and start the response over.
        self.clear()

        reason = kwargs.get('reason')
        if 'exc_info' in kwargs:
            exception = kwargs['exc_info'][1]
            if isinstance(exception, HTTPError) and exception.reason:
                reason = exception.reason
        self.set_status(status_code, reason=reason)
        try:
            self.write_error(status_code, **kwargs)
        except Exception:
            app_log.error("Uncaught exception in write_error", exc_info=True)
        if not self._finished:
            self.finish()

    def write_error(self, status_code, **kwargs):
        """Override to implement custom error pages.

        ``write_error`` may call `write`, `render`, `set_header`, etc
        to produce output as usual.

        If this error was caused by an uncaught exception (including
        HTTPError), an ``exc_info`` triple will be available as
        ``kwargs["exc_info"]``.  Note that this exception may not be
        the "current" exception for purposes of methods like
        ``sys.exc_info()`` or ``traceback.format_exc``.
        """
        if self.settings.get("serve_traceback") and "exc_info" in kwargs:
            # in debug mode, try to send a traceback
            self.set_header('Content-Type', 'text/plain')
            for line in traceback.format_exception(*kwargs["exc_info"]):
                self.write(line)
            self.finish()
        else:
            self.finish("<html><title>%(code)d: %(message)s</title>"
                        "<body>%(code)d: %(message)s</body></html>" % {
                            "code": status_code,
                            "message": self._reason,
                        })

    @property
    def locale(self):
        """The locale for the current session.

        Determined by either `get_user_locale`, which you can override to
        set the locale based on, e.g., a user preference stored in a
        database, or `get_browser_locale`, which uses the ``Accept-Language``
        header.

        .. versionchanged: 4.1
           Added a property setter.
        """
        if not hasattr(self, "_locale"):
            self._locale = self.get_user_locale()
            if not self._locale:
                self._locale = self.get_browser_locale()
                assert self._locale
        return self._locale

    @locale.setter
    def locale(self, value):
        self._locale = value

    def get_user_locale(self):
        """Override to determine the locale from the authenticated user.

        If None is returned, we fall back to `get_browser_locale()`.

        This method should return a `tornado.locale.Locale` object,
        most likely obtained via a call like ``tornado.locale.get("en")``
        """
        return None

    def get_browser_locale(self, default="en_US"):
        """Determines the user's locale from ``Accept-Language`` header.
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.4 """ if "Accept-Language" in self.request.headers: languages = self.request.headers["Accept-Language"].split(",") locales = [] for language in languages: parts = language.strip().split(";") if len(parts) > 1 and parts[1].startswith("q="): try: score = float(parts[1][2:]) except (ValueError, TypeError): score = 0.0 else: score = 1.0 locales.append((parts[0], score)) if locales: locales.sort(key=lambda pair: pair[1], reverse=True) codes = [l[0] for l in locales] return locale.get(*codes) return locale.get(default) @property def current_user(self): """The authenticated user for this request. This is a cached version of `get_current_user`, which you can override to set the user based on, e.g., a cookie. If that method is not overridden, this method always returns None. We lazy-load the current user the first time this method is called and cache the result after that. """ if not hasattr(self, "_current_user"): self._current_user = self.get_current_user() return self._current_user @current_user.setter def current_user(self, value): self._current_user = value def get_current_user(self): """Override to determine the current user from, e.g., a cookie.""" return None def get_login_url(self): """Override to customize the login URL based on the request. By default, we use the ``login_url`` application setting. """ self.require_setting("login_url", "@tornado.web.authenticated") return self.application.settings["login_url"] def get_template_path(self): """Override to customize template path for each handler. By default, we use the ``template_path`` application setting. Return None to load templates relative to the calling file. """ return self.application.settings.get("template_path") @property def xsrf_token(self): """The XSRF-prevention token for the current user/session. To prevent cross-site request forgery, we set an '_xsrf' cookie and include the same '_xsrf' value as an argument with all POST requests. 
If the two do not match, we reject the form submission as a potential forgery. See http://en.wikipedia.org/wiki/Cross-site_request_forgery .. versionchanged:: 3.2.2 The xsrf token will now be have a random mask applied in every request, which makes it safe to include the token in pages that are compressed. See http://breachattack.com for more information on the issue fixed by this change. Old (version 1) cookies will be converted to version 2 when this method is called unless the ``xsrf_cookie_version`` `Application` setting is set to 1. """ if not hasattr(self, "_xsrf_token"): version, token, timestamp = self._get_raw_xsrf_token() output_version = self.settings.get("xsrf_cookie_version", 2) if output_version == 1: self._xsrf_token = binascii.b2a_hex(token) elif output_version == 2: mask = os.urandom(4) self._xsrf_token = b"|".join([ b"2", binascii.b2a_hex(mask), binascii.b2a_hex(_websocket_mask(mask, token)), utf8(str(int(timestamp)))]) else: raise ValueError("unknown xsrf cookie version %d", output_version) if version is None: expires_days = 30 if self.current_user else None self.set_cookie("_xsrf", self._xsrf_token, expires_days=expires_days) return self._xsrf_token def _get_raw_xsrf_token(self): """Read or generate the xsrf token in its raw form. The raw_xsrf_token is a tuple containing: * version: the version of the cookie from which this token was read, or None if we generated a new token in this request. * token: the raw token data; random (non-ascii) bytes. 
        * timestamp: the time this token was generated (will not be
          accurate for version 1 cookies)
        """
        if not hasattr(self, '_raw_xsrf_token'):
            cookie = self.get_cookie("_xsrf")
            if cookie:
                version, token, timestamp = self._decode_xsrf_token(cookie)
            else:
                version, token, timestamp = None, None, None
            if token is None:
                # No (valid) cookie: mint a fresh random token for this
                # request; version None signals "newly generated".
                version = None
                token = os.urandom(16)
                timestamp = time.time()
            self._raw_xsrf_token = (version, token, timestamp)
        return self._raw_xsrf_token

    def _decode_xsrf_token(self, cookie):
        """Convert a cookie string into a the tuple form returned by
        _get_raw_xsrf_token.
        """

        try:
            m = _signed_value_version_re.match(utf8(cookie))

            if m:
                version = int(m.group(1))
                if version == 2:
                    _, mask, masked_token, timestamp = cookie.split("|")

                    # Undo the per-request mask to recover the stable token.
                    mask = binascii.a2b_hex(utf8(mask))
                    token = _websocket_mask(
                        mask, binascii.a2b_hex(utf8(masked_token)))
                    timestamp = int(timestamp)
                    return version, token, timestamp
                else:
                    # Treat unknown versions as not present instead of failing.
                    raise Exception("Unknown xsrf cookie version")
            else:
                version = 1
                try:
                    token = binascii.a2b_hex(utf8(cookie))
                except (binascii.Error, TypeError):
                    token = utf8(cookie)
                # We don't have a usable timestamp in older versions.
                timestamp = int(time.time())
                return (version, token, timestamp)
        except Exception:
            # Catch exceptions and return nothing instead of failing.
            gen_log.debug("Uncaught exception in _decode_xsrf_token",
                          exc_info=True)
            return None, None, None

    def check_xsrf_cookie(self):
        """Verifies that the ``_xsrf`` cookie matches the ``_xsrf`` argument.

        To prevent cross-site request forgery, we set an ``_xsrf``
        cookie and include the same value as a non-cookie
        field with all ``POST`` requests. If the two do not match, we
        reject the form submission as a potential forgery.

        The ``_xsrf`` value may be set as either a form field named ``_xsrf``
        or in a custom HTTP header named ``X-XSRFToken`` or ``X-CSRFToken``
        (the latter is accepted for compatibility with Django).

        See http://en.wikipedia.org/wiki/Cross-site_request_forgery

        Prior to release 1.1.1, this check was ignored if the HTTP header
        ``X-Requested-With: XMLHTTPRequest`` was present.  This exception
        has been shown to be insecure and has been removed.  For more
        information please see
        http://www.djangoproject.com/weblog/2011/feb/08/security/
        http://weblog.rubyonrails.org/2011/2/8/csrf-protection-bypass-in-ruby-on-rails

        .. versionchanged:: 3.2.2
           Added support for cookie version 2.  Both versions 1 and 2 are
           supported.
        """
        token = (self.get_argument("_xsrf", None) or
                 self.request.headers.get("X-Xsrftoken") or
                 self.request.headers.get("X-Csrftoken"))
        if not token:
            raise HTTPError(403, "'_xsrf' argument missing from POST")
        # Compare decoded tokens with a constant-time comparison to avoid
        # leaking information through timing.
        _, token, _ = self._decode_xsrf_token(token)
        _, expected_token, _ = self._get_raw_xsrf_token()
        if not _time_independent_equals(utf8(token), utf8(expected_token)):
            raise HTTPError(403, "XSRF cookie does not match POST argument")

    def xsrf_form_html(self):
        """An HTML ``<input/>`` element to be included with all POST forms.

        It defines the ``_xsrf`` input value, which we check on all POST
        requests to prevent cross-site request forgery. If you have set
        the ``xsrf_cookies`` application setting, you must include this
        HTML within all of your HTML forms.

        In a template, this method should be called with ``{% module
        xsrf_form_html() %}``

        See `check_xsrf_cookie()` above for more information.
        """
        return '<input type="hidden" name="_xsrf" value="' + \
            escape.xhtml_escape(self.xsrf_token) + '"/>'

    def static_url(self, path, include_host=None, **kwargs):
        """Returns a static URL for the given relative static file path.

        This method requires you set the ``static_path`` setting in your
        application (which specifies the root directory of your static
        files).

        This method returns a versioned url (by default appending
        ``?v=<signature>``), which allows the static files to be
        cached indefinitely.
        This can be disabled by passing ``include_version=False``
        (in the default implementation; other static file implementations
        are not required to support this, but they may support other
        options).

        By default this method returns URLs relative to the current
        host, but if ``include_host`` is true the URL returned will be
        absolute.  If this handler has an ``include_host`` attribute,
        that value will be used as the default for all `static_url`
        calls that do not pass ``include_host`` as a keyword argument.
        """
        self.require_setting("static_path", "static_url")
        # Delegate URL construction to the configured static handler class
        # so custom implementations can control versioning/CDN prefixes.
        get_url = self.settings.get("static_handler_class",
                                    StaticFileHandler).make_static_url

        if include_host is None:
            include_host = getattr(self, "include_host", False)

        if include_host:
            base = self.request.protocol + "://" + self.request.host
        else:
            base = ""

        return base + get_url(self.settings, path, **kwargs)

    def require_setting(self, name, feature="this feature"):
        """Raises an exception if the given app setting is not defined."""
        if not self.application.settings.get(name):
            raise Exception("You must define the '%s' setting in your "
                            "application to use %s" % (name, feature))

    def reverse_url(self, name, *args):
        """Alias for `Application.reverse_url`."""
        return self.application.reverse_url(name, *args)

    def compute_etag(self):
        """Computes the etag header to be used for this request.

        By default uses a hash of the content written so far.

        May be overridden to provide custom etag implementations,
        or may return None to disable tornado's default etag support.
        """
        hasher = hashlib.sha1()
        for part in self._write_buffer:
            hasher.update(part)
        return '"%s"' % hasher.hexdigest()

    def set_etag_header(self):
        """Sets the response's Etag header using ``self.compute_etag()``.

        Note: no header will be set if ``compute_etag()`` returns ``None``.

        This method is called automatically when the request is finished.
        """
        etag = self.compute_etag()
        if etag is not None:
            self.set_header("Etag", etag)

    def check_etag_header(self):
        """Checks the ``Etag`` header against requests's ``If-None-Match``.

        Returns ``True`` if the request's Etag matches and a 304 should be
        returned. For example::

            self.set_etag_header()
            if self.check_etag_header():
                self.set_status(304)
                return

        This method is called automatically when the request is finished,
        but may be called earlier for applications that override
        `compute_etag` and want to do an early check for ``If-None-Match``
        before completing the request.  The ``Etag`` header should be set
        (perhaps with `set_etag_header`) before calling this method.
        """
        computed_etag = utf8(self._headers.get("Etag", ""))
        # Find all weak and strong etag values from If-None-Match header
        # because RFC 7232 allows multiple etag values in a single header.
        etags = re.findall(
            br'\*|(?:W/)?"[^"]*"',
            utf8(self.request.headers.get("If-None-Match", ""))
        )
        if not computed_etag or not etags:
            return False

        match = False
        if etags[0] == b'*':
            match = True
        else:
            # Use a weak comparison when comparing entity-tags.
            # Strip the "W/" weak-validator prefix before comparing.
            val = lambda x: x[2:] if x.startswith(b'W/') else x
            for etag in etags:
                if val(etag) == val(computed_etag):
                    match = True
                    break
        return match

    def _stack_context_handle_exception(self, type, value, traceback):
        try:
            # For historical reasons _handle_request_exception only takes
            # the exception value instead of the full triple,
            # so re-raise the exception to ensure that it's in
            # sys.exc_info()
            raise_exc_info((type, value, traceback))
        except Exception:
            self._handle_request_exception(value)
        return True

    @gen.coroutine
    def _execute(self, transforms, *args, **kwargs):
        """Executes this request with the given output transforms."""
        self._transforms = transforms
        try:
            if self.request.method not in self.SUPPORTED_METHODS:
                raise HTTPError(405)
            self.path_args = [self.decode_argument(arg) for arg in args]
            self.path_kwargs = dict((k, self.decode_argument(v, name=k))
                                    for (k, v) in kwargs.items())
            # If XSRF cookies are turned on, reject form submissions without
            # the proper cookie
            if self.request.method not in ("GET", "HEAD", "OPTIONS") and \
                    self.application.settings.get("xsrf_cookies"):
                self.check_xsrf_cookie()

            result = self.prepare()
            if is_future(result):
                result = yield result
            if result is not None:
                raise TypeError("Expected None, got %r" % result)
            if self._prepared_future is not None:
                # Tell the Application we've finished with prepare()
                # and are ready for the body to arrive.
                self._prepared_future.set_result(None)
            if self._finished:
                return

            if _has_stream_request_body(self.__class__):
                # In streaming mode request.body is a Future that signals
                # the body has been completely received.  The Future has no
                # result; the data has been passed to self.data_received
                # instead.
                try:
                    yield self.request.body
                except iostream.StreamClosedError:
                    return

            # Dispatch to get()/post()/etc. by the (lowercased) HTTP method.
            method = getattr(self, self.request.method.lower())
            result = method(*self.path_args, **self.path_kwargs)
            if is_future(result):
                result = yield result
            if result is not None:
                raise TypeError("Expected None, got %r" % result)
            if self._auto_finish and not self._finished:
                self.finish()
        except Exception as e:
            try:
                self._handle_request_exception(e)
            except Exception:
                app_log.error("Exception in exception handler", exc_info=True)
            if (self._prepared_future is not None and
                    not self._prepared_future.done()):
                # In case we failed before setting _prepared_future, do it
                # now (to unblock the HTTP server).  Note that this is not
                # in a finally block to avoid GC issues prior to Python 3.4.
                self._prepared_future.set_result(None)

    def data_received(self, chunk):
        """Implement this method to handle streamed request data.

        Requires the `.stream_request_body` decorator.
        """
        raise NotImplementedError()

    def _log(self):
        """Logs the current request.

        Sort of deprecated since this functionality was moved to the
        Application, but left in place for the benefit of existing apps
        that have overridden this method.
        """
        self.application.log_request(self)

    def _request_summary(self):
        return "%s %s (%s)" % (self.request.method, self.request.uri,
                               self.request.remote_ip)

    def _handle_request_exception(self, e):
        if isinstance(e, Finish):
            # Not an error; just finish the request without logging.
            if not self._finished:
                self.finish()
            return
        try:
            self.log_exception(*sys.exc_info())
        except Exception:
            # An error here should still get a best-effort send_error()
            # to avoid leaking the connection.
            app_log.error("Error in exception logger", exc_info=True)
        if self._finished:
            # Extra errors after the request has been finished should
            # be logged, but there is no reason to continue to try and
            # send a response.
            return
        if isinstance(e, HTTPError):
            if e.status_code not in httputil.responses and not e.reason:
                # An unknown status code with no reason phrase cannot be
                # rendered; degrade to a plain 500.
                gen_log.error("Bad HTTP status code: %d", e.status_code)
                self.send_error(500, exc_info=sys.exc_info())
            else:
                self.send_error(e.status_code, exc_info=sys.exc_info())
        else:
            # Any non-HTTPError exception becomes a 500 response.
            self.send_error(500, exc_info=sys.exc_info())

    def log_exception(self, typ, value, tb):
        """Override to customize logging of uncaught exceptions.

        By default logs instances of `HTTPError` as warnings without
        stack traces (on the ``tornado.general`` logger), and all
        other exceptions as errors with stack traces (on the
        ``tornado.application`` logger).

        .. versionadded:: 3.1
        """
        if isinstance(value, HTTPError):
            if value.log_message:
                # HTTPError.log_message may contain %-placeholders filled
                # from value.args; let the logging module interpolate.
                format = "%d %s: " + value.log_message
                args = ([value.status_code, self._request_summary()] +
                        list(value.args))
                gen_log.warning(format, *args)
        else:
            app_log.error("Uncaught exception %s\n%r", self._request_summary(),
                          self.request, exc_info=(typ, value, tb))

    def _ui_module(self, name, module):
        # Returns a render callable for a UI module; instances are cached
        # per-handler in self._active_modules (created lazily).
        def render(*args, **kwargs):
            if not hasattr(self, "_active_modules"):
                self._active_modules = {}
            if name not in self._active_modules:
                self._active_modules[name] = module(self)
            rendered = self._active_modules[name].render(*args, **kwargs)
            return rendered
        return render

    def _ui_method(self, method):
        # Binds a ui_method to this handler instance for use in templates.
        return lambda *args, **kwargs: method(self, *args, **kwargs)

    def _clear_headers_for_304(self):
        # 304 responses should not contain entity headers (defined in
        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec7.html#sec7.1)
        # not explicitly allowed by
        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
        headers = ["Allow", "Content-Encoding", "Content-Language",
                   "Content-Length", "Content-MD5", "Content-Range",
                   "Content-Type", "Last-Modified"]
        for h in headers:
            self.clear_header(h)


def asynchronous(method):
    """Wrap request handler methods with this if they are asynchronous.

    This decorator is for callback-style asynchronous methods; for
    coroutines, use the ``@gen.coroutine`` decorator without
    ``@asynchronous``. (It is legal for legacy reasons to use the two
    decorators together provided ``@asynchronous`` is first, but
    ``@asynchronous`` will be ignored in this case)

    This decorator should only be applied to the :ref:`HTTP verb
    methods <verbs>`; its behavior is undefined for any other method.
    This decorator does not *make* a method asynchronous; it tells
    the framework that the method *is* asynchronous.  For this decorator
    to be useful the method must (at least sometimes) do something
    asynchronous.

    If this decorator is given, the response is not finished when the
    method returns. It is up to the request handler to call
    `self.finish() <RequestHandler.finish>` to finish the HTTP
    request. Without this decorator, the request is automatically
    finished when the ``get()`` or ``post()`` method returns. Example:

    .. testcode::

       class MyRequestHandler(RequestHandler):
           @asynchronous
           def get(self):
              http = httpclient.AsyncHTTPClient()
              http.fetch("http://friendfeed.com/", self._on_download)

           def _on_download(self, response):
              self.write("Downloaded!")
              self.finish()

    .. testoutput::
       :hide:

    .. versionadded:: 3.1
       The ability to use ``@gen.coroutine`` without ``@asynchronous``.
    """
    # Delay the IOLoop import because it's not available on app engine.
    from tornado.ioloop import IOLoop

    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        self._auto_finish = False
        with stack_context.ExceptionStackContext(
                self._stack_context_handle_exception):
            result = method(self, *args, **kwargs)
            if is_future(result):
                # If @asynchronous is used with @gen.coroutine, (but
                # not @gen.engine), we can automatically finish the
                # request when the future resolves.  Additionally,
                # the Future will swallow any exceptions so we need
                # to throw them back out to the stack context to finish
                # the request.
def future_complete(f): f.result() if not self._finished: self.finish() IOLoop.current().add_future(result, future_complete) # Once we have done this, hide the Future from our # caller (i.e. RequestHandler._when_complete), which # would otherwise set up its own callback and # exception handler (resulting in exceptions being # logged twice). return None return result return wrapper def stream_request_body(cls): """Apply to `RequestHandler` subclasses to enable streaming body support. This decorator implies the following changes: * `.HTTPServerRequest.body` is undefined, and body arguments will not be included in `RequestHandler.get_argument`. * `RequestHandler.prepare` is called when the request headers have been read instead of after the entire body has been read. * The subclass must define a method ``data_received(self, data):``, which will be called zero or more times as data is available. Note that if the request has an empty body, ``data_received`` may not be called. * ``prepare`` and ``data_received`` may return Futures (such as via ``@gen.coroutine``, in which case the next method will not be called until those futures have completed. * The regular HTTP method (``post``, ``put``, etc) will be called after the entire body has been read. There is a subtle interaction between ``data_received`` and asynchronous ``prepare``: The first call to ``data_received`` may occur at any point after the call to ``prepare`` has returned *or yielded*. """ if not issubclass(cls, RequestHandler): raise TypeError("expected subclass of RequestHandler, got %r", cls) cls._stream_request_body = True return cls def _has_stream_request_body(cls): if not issubclass(cls, RequestHandler): raise TypeError("expected subclass of RequestHandler, got %r", cls) return getattr(cls, '_stream_request_body', False) def removeslash(method): """Use this decorator to remove trailing slashes from the request path. For example, a request to ``/foo/`` would redirect to ``/foo`` with this decorator. 
    Your request handler mapping should use a regular expression like
    ``r'/foo/*'`` in conjunction with using the decorator.
    """
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        if self.request.path.endswith("/"):
            if self.request.method in ("GET", "HEAD"):
                uri = self.request.path.rstrip("/")
                if uri:  # don't try to redirect '/' to ''
                    if self.request.query:
                        uri += "?" + self.request.query
                    self.redirect(uri, permanent=True)
                    return
            else:
                # Non-idempotent methods cannot be safely redirected.
                raise HTTPError(404)
        return method(self, *args, **kwargs)
    return wrapper


def addslash(method):
    """Use this decorator to add a missing trailing slash to the request path.

    For example, a request to ``/foo`` would redirect to ``/foo/`` with this
    decorator. Your request handler mapping should use a regular expression
    like ``r'/foo/?'`` in conjunction with using the decorator.
    """
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        if not self.request.path.endswith("/"):
            if self.request.method in ("GET", "HEAD"):
                uri = self.request.path + "/"
                if self.request.query:
                    uri += "?" + self.request.query
                self.redirect(uri, permanent=True)
                return
            raise HTTPError(404)
        return method(self, *args, **kwargs)
    return wrapper


class Application(httputil.HTTPServerConnectionDelegate):
    """A collection of request handlers that make up a web application.

    Instances of this class are callable and can be passed directly to
    HTTPServer to serve the application::

        application = web.Application([
            (r"/", MainPageHandler),
        ])
        http_server = httpserver.HTTPServer(application)
        http_server.listen(8080)
        ioloop.IOLoop.current().start()

    The constructor for this class takes in a list of `URLSpec` objects
    or (regexp, request_class) tuples. When we receive requests, we
    iterate over the list in order and instantiate an instance of the
    first request class whose regexp matches the request path.
    The request class can be specified as either a class object or a
    (fully-qualified) name.

    Each tuple can contain additional elements, which correspond to the
    arguments to the `URLSpec` constructor.  (Prior to Tornado 3.2, only
    tuples of two or three elements were allowed).

    A dictionary may be passed as the third element of the tuple,
    which will be used as keyword arguments to the handler's
    constructor and `~RequestHandler.initialize` method.  This pattern
    is used for the `StaticFileHandler` in this example (note that a
    `StaticFileHandler` can be installed automatically with the
    static_path setting described below)::

        application = web.Application([
            (r"/static/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
        ])

    We support virtual hosts with the `add_handlers` method, which takes in
    a host regular expression as the first argument::

        application.add_handlers(r"www\.myhost\.com", [
            (r"/article/([0-9]+)", ArticleHandler),
        ])

    You can serve static files by sending the ``static_path`` setting
    as a keyword argument. We will serve those files from the
    ``/static/`` URI (this is configurable with the
    ``static_url_prefix`` setting), and we will serve
    ``/favicon.ico`` and ``/robots.txt`` from the same directory.
    A custom subclass of `StaticFileHandler` can be specified with the
    ``static_handler_class`` setting.
    """
    def __init__(self, handlers=None, default_host="", transforms=None,
                 **settings):
        if transforms is None:
            self.transforms = []
            # Enable gzip output compression when requested ("gzip" is the
            # pre-4.0 spelling of the "compress_response" setting).
            if settings.get("compress_response") or settings.get("gzip"):
                self.transforms.append(GZipContentEncoding)
        else:
            self.transforms = transforms
        self.handlers = []          # list of (host regex, [URLSpec, ...])
        self.named_handlers = {}    # name -> URLSpec, for reverse_url()
        self.default_host = default_host
        self.settings = settings
        self.ui_modules = {'linkify': _linkify,
                           'xsrf_form_html': _xsrf_form_html,
                           'Template': TemplateModule,
                           }
        self.ui_methods = {}
        self._load_ui_modules(settings.get("ui_modules", {}))
        self._load_ui_methods(settings.get("ui_methods", {}))
        if self.settings.get("static_path"):
            # Install static-file routes (prefix, favicon, robots) at the
            # front of the handler list so they win over user routes.
            path = self.settings["static_path"]
            handlers = list(handlers or [])
            static_url_prefix = settings.get("static_url_prefix",
                                             "/static/")
            static_handler_class = settings.get("static_handler_class",
                                                StaticFileHandler)
            static_handler_args = settings.get("static_handler_args", {})
            static_handler_args['path'] = path
            for pattern in [re.escape(static_url_prefix) + r"(.*)",
                            r"/(favicon\.ico)", r"/(robots\.txt)"]:
                handlers.insert(0, (pattern, static_handler_class,
                                    static_handler_args))
        if handlers:
            self.add_handlers(".*$", handlers)

        if self.settings.get('debug'):
            # Debug mode implies autoreload and disables caches so changes
            # are picked up without a restart.
            self.settings.setdefault('autoreload', True)
            self.settings.setdefault('compiled_template_cache', False)
            self.settings.setdefault('static_hash_cache', False)
            self.settings.setdefault('serve_traceback', True)

        # Automatically reload modified modules
        if self.settings.get('autoreload'):
            from tornado import autoreload
            autoreload.start()

    def listen(self, port, address="", **kwargs):
        """Starts an HTTP server for this application on the given port.

        This is a convenience alias for creating an `.HTTPServer`
        object and calling its listen method.  Keyword arguments not
        supported by `HTTPServer.listen <.TCPServer.listen>` are passed to the
        `.HTTPServer` constructor.  For advanced uses (e.g.
        multi-process mode), do not use this method; create an
        `.HTTPServer` and call its
        `.TCPServer.bind`/`.TCPServer.start` methods directly.

        Note that after calling this method you still need to call
        ``IOLoop.current().start()`` to start the server.
        """
        # import is here rather than top level because HTTPServer
        # is not importable on appengine
        from tornado.httpserver import HTTPServer
        server = HTTPServer(self, **kwargs)
        server.listen(port, address)

    def add_handlers(self, host_pattern, host_handlers):
        """Appends the given handlers to our handler list.

        Host patterns are processed sequentially in the order they were
        added. All matching patterns will be considered.
        """
        if not host_pattern.endswith("$"):
            # Anchor the host pattern so it must match the whole host.
            host_pattern += "$"
        handlers = []
        # The handlers with the wildcard host_pattern are a special
        # case - they're added in the constructor but should have lower
        # precedence than the more-precise handlers added later.
        # If a wildcard handler group exists, it should always be last
        # in the list, so insert new groups just before it.
        if self.handlers and self.handlers[-1][0].pattern == '.*$':
            self.handlers.insert(-1, (re.compile(host_pattern), handlers))
        else:
            self.handlers.append((re.compile(host_pattern), handlers))

        # Note: ``handlers`` is the (mutable) list already stored above,
        # so appending here extends the registered group in place.
        for spec in host_handlers:
            if isinstance(spec, (tuple, list)):
                assert len(spec) in (2, 3, 4)
                spec = URLSpec(*spec)
            handlers.append(spec)
            if spec.name:
                if spec.name in self.named_handlers:
                    app_log.warning(
                        "Multiple handlers named %s; replacing previous value",
                        spec.name)
                self.named_handlers[spec.name] = spec

    def add_transform(self, transform_class):
        # Registers an additional output transform (e.g. gzip encoding).
        self.transforms.append(transform_class)

    def _get_host_handlers(self, request):
        # Returns the list of URLSpecs whose host pattern matches the
        # request's host, or None if nothing matched.
        host = split_host_and_port(request.host.lower())[0]
        matches = []
        for pattern, handlers in self.handlers:
            if pattern.match(host):
                matches.extend(handlers)
        # Look for default host if not behind load balancer (for debugging)
        if not matches and "X-Real-Ip" not in request.headers:
            for pattern, handlers in self.handlers:
                if pattern.match(self.default_host):
                    matches.extend(handlers)
        return matches or None

    def _load_ui_methods(self, methods):
        # Accepts a module, a list, or a dict of name -> callable; only
        # public, lowercase-named callables are registered.
        if isinstance(methods, types.ModuleType):
            self._load_ui_methods(dict((n, getattr(methods, n))
                                       for n in dir(methods)))
        elif isinstance(methods, list):
            for m in methods:
                self._load_ui_methods(m)
        else:
            for name, fn in methods.items():
                if not name.startswith("_") and hasattr(fn, "__call__") \
                        and name[0].lower() == name[0]:
                    self.ui_methods[name] = fn

    def _load_ui_modules(self, modules):
        # Accepts a module, a list, or a dict of name -> UIModule subclass.
        if isinstance(modules, types.ModuleType):
            self._load_ui_modules(dict((n, getattr(modules, n))
                                       for n in dir(modules)))
        elif isinstance(modules, list):
            for m in modules:
                self._load_ui_modules(m)
        else:
            assert isinstance(modules, dict)
            for name, cls in modules.items():
                try:
                    if issubclass(cls, UIModule):
                        self.ui_modules[name] = cls
                except TypeError:
                    # issubclass raises for non-class values; skip them.
                    pass

    def start_request(self, server_conn, request_conn):
        # Modern HTTPServer interface
        return _RequestDispatcher(self, request_conn)

    def __call__(self, request):
        # Legacy HTTPServer interface
        dispatcher = _RequestDispatcher(self, None)
        dispatcher.set_request(request)
        return dispatcher.execute()

    def reverse_url(self, name, *args):
        """Returns a URL path for handler named ``name``

        The handler must be added to the application as a named `URLSpec`.

        Args will be substituted for capturing groups in the `URLSpec` regex.
        They will be converted to strings if necessary, encoded as utf8,
        and url-escaped.
        """
        if name in self.named_handlers:
            return self.named_handlers[name].reverse(*args)
        raise KeyError("%s not found in named urls" % name)

    def log_request(self, handler):
        """Writes a completed HTTP request to the logs.

        By default writes to the python root logger.  To change
        this behavior either subclass Application and override this method,
        or pass a function in the application settings dictionary as
        ``log_function``.
        """
        if "log_function" in self.settings:
            self.settings["log_function"](handler)
            return
        # Log level follows the response class: 2xx/3xx info,
        # 4xx warning, 5xx error.
        if handler.get_status() < 400:
            log_method = access_log.info
        elif handler.get_status() < 500:
            log_method = access_log.warning
        else:
            log_method = access_log.error
        request_time = 1000.0 * handler.request.request_time()
        log_method("%d %s %.2fms", handler.get_status(),
                   handler._request_summary(), request_time)


class _RequestDispatcher(httputil.HTTPMessageDelegate):
    # Per-request delegate: routes a request to a handler and feeds it
    # the request body (buffered or streaming).
    def __init__(self, application, connection):
        self.application = application
        self.connection = connection
        self.request = None
        self.chunks = []            # buffered body chunks (non-streaming)
        self.handler_class = None
        self.handler_kwargs = None
        self.path_args = []         # positional captures from the URL regex
        self.path_kwargs = {}       # named captures from the URL regex

    def headers_received(self, start_line, headers):
        self.set_request(httputil.HTTPServerRequest(
            connection=self.connection, start_line=start_line,
            headers=headers))
        if self.stream_request_body:
            # In streaming mode, body is a Future resolved when the whole
            # body has been received; start executing immediately.
            self.request.body = Future()
            return self.execute()

    def set_request(self, request):
        self.request = request
        self._find_handler()
        self.stream_request_body = _has_stream_request_body(self.handler_class)

    def _find_handler(self):
        # Identify the handler to use as soon as we have the request.
        # Save url path arguments for later.
        app = self.application
        handlers = app._get_host_handlers(self.request)
        if not handlers:
            # No host pattern matched; redirect to the default host.
            self.handler_class = RedirectHandler
            self.handler_kwargs = dict(url="%s://%s/"
                                       % (self.request.protocol,
                                          app.default_host))
            return
        for spec in handlers:
            match = spec.regex.match(self.request.path)
            if match:
                self.handler_class = spec.handler_class
                self.handler_kwargs = spec.kwargs
                if spec.regex.groups:
                    # Pass matched groups to the handler.  Since
                    # match.groups() includes both named and
                    # unnamed groups, we want to use either groups
                    # or groupdict but not both.
                    if spec.regex.groupindex:
                        self.path_kwargs = dict(
                            (str(k), _unquote_or_none(v))
                            for (k, v) in match.groupdict().items())
                    else:
                        self.path_args = [_unquote_or_none(s)
                                          for s in match.groups()]
                return
        # Nothing matched the path: fall back to the configured default
        # handler, or a plain 404.
        if app.settings.get('default_handler_class'):
            self.handler_class = app.settings['default_handler_class']
            self.handler_kwargs = app.settings.get(
                'default_handler_args', {})
        else:
            self.handler_class = ErrorHandler
            self.handler_kwargs = dict(status_code=404)

    def data_received(self, data):
        if self.stream_request_body:
            # Streaming mode: hand each chunk directly to the handler.
            return self.handler.data_received(data)
        else:
            # Buffered mode: accumulate until finish().
            self.chunks.append(data)

    def finish(self):
        if self.stream_request_body:
            # Resolve the body Future that _execute is waiting on.
            self.request.body.set_result(None)
        else:
            self.request.body = b''.join(self.chunks)
            self.request._parse_body()
            self.execute()

    def on_connection_close(self):
        if self.stream_request_body:
            self.handler.on_connection_close()
        else:
            # Drop any buffered body data.
            self.chunks = None

    def execute(self):
        # If template cache is disabled (usually in the debug mode),
        # re-compile templates and reload static files on every
        # request so you don't need to restart to see changes
        if not self.application.settings.get("compiled_template_cache", True):
            with RequestHandler._template_loader_lock:
                for loader in RequestHandler._template_loaders.values():
                    loader.reset()
        if not self.application.settings.get('static_hash_cache', True):
            StaticFileHandler.reset()

        self.handler = self.handler_class(self.application, self.request,
                                          **self.handler_kwargs)
        transforms = [t(self.request) for t in self.application.transforms]

        if self.stream_request_body:
            self.handler._prepared_future = Future()
        # Note that if an exception escapes handler._execute it will be
        # trapped in the Future it returns (which we are ignoring here,
        # leaving it to be logged when the Future is GC'd).
        # However, that shouldn't happen because _execute has a blanket
        # except handler, and we cannot easily access the IOLoop here to
        # call add_future (because of the requirement to remain compatible
        # with WSGI)
        f = self.handler._execute(transforms, *self.path_args,
                                  **self.path_kwargs)
        # If we are streaming the request body, then execute() is finished
        # when the handler has prepared to receive the body.  If not,
        # it doesn't matter when execute() finishes (so we return None)
        return self.handler._prepared_future


class HTTPError(Exception):
    """An exception that will turn into an HTTP error response.

    Raising an `HTTPError` is a convenient alternative to calling
    `RequestHandler.send_error` since it automatically ends the
    current function.

    To customize the response sent with an `HTTPError`, override
    `RequestHandler.write_error`.

    :arg int status_code: HTTP status code.  Must be listed in
        `httplib.responses <http.client.responses>` unless the ``reason``
        keyword argument is given.
    :arg string log_message: Message to be written to the log for this error
        (will not be shown to the user unless the `Application` is in debug
        mode).  May contain ``%s``-style placeholders, which will be filled
        in with remaining positional parameters.
    :arg string reason: Keyword-only argument.  The HTTP "reason" phrase
        to pass in the status line along with ``status_code``.  Normally
        determined automatically from ``status_code``, but can be used
        to use a non-standard numeric code.
    """
    def __init__(self, status_code, log_message=None, *args, **kwargs):
        self.status_code = status_code
        self.log_message = log_message
        # ``args`` are %-interpolated into log_message by __str__ and by
        # RequestHandler.log_exception.
        self.args = args
        self.reason = kwargs.get('reason', None)
        if log_message and not args:
            # With no args, escape any literal % so the later
            # "log_message % self.args" interpolation is a no-op.
            self.log_message = log_message.replace('%', '%%')

    def __str__(self):
        message = "HTTP %d: %s" % (
            self.status_code,
            self.reason or httputil.responses.get(self.status_code, 'Unknown'))
        if self.log_message:
            return message + " (" + (self.log_message % self.args) + ")"
        else:
            return message


class Finish(Exception):
    """An exception that ends the request without producing an error response.

    When `Finish` is raised in a `RequestHandler`, the request will end
    (calling `RequestHandler.finish` if it hasn't already been called),
    but the outgoing response will not be modified and the error-handling
    methods (including `RequestHandler.write_error`) will not be called.

    This can be a more convenient way to implement custom error pages
    than overriding ``write_error`` (especially in library code)::

        if self.current_user is None:
            self.set_status(401)
            self.set_header('WWW-Authenticate', 'Basic realm="something"')
            raise Finish()
    """
    pass


class MissingArgumentError(HTTPError):
    """Exception raised by `RequestHandler.get_argument`.

    This is a subclass of `HTTPError`, so if it is uncaught a 400 response
    code will be used instead of 500 (and a stack trace will not be logged).

    .. versionadded:: 3.1
    """
    def __init__(self, arg_name):
        super(MissingArgumentError, self).__init__(
            400, 'Missing argument %s' % arg_name)
        self.arg_name = arg_name


class ErrorHandler(RequestHandler):
    """Generates an error response with ``status_code`` for all requests."""
    def initialize(self, status_code):
        self.set_status(status_code)

    def prepare(self):
        # Raising in prepare() short-circuits every HTTP method.
        raise HTTPError(self._status_code)

    def check_xsrf_cookie(self):
        # POSTs to an ErrorHandler don't actually have side effects,
        # so we don't need to check the xsrf token.  This allows POSTs
        # to the wrong url to return a 404 instead of 403.
        pass


class RedirectHandler(RequestHandler):
    """Redirects the client to the given URL for all GET requests.

    You should provide the keyword argument ``url`` to the handler, e.g.::

        application = web.Application([
            (r"/oldpath", web.RedirectHandler, {"url": "/newpath"}),
        ])
    """
    def initialize(self, url, permanent=True):
        self._url = url
        self._permanent = permanent

    def get(self):
        self.redirect(self._url, permanent=self._permanent)


class StaticFileHandler(RequestHandler):
    """A simple handler that can serve static content from a directory.

    A `StaticFileHandler` is configured automatically if you pass the
    ``static_path`` keyword argument to `Application`.  This handler
    can be customized with the ``static_url_prefix``, ``static_handler_class``,
    and ``static_handler_args`` settings.

    To map an additional path to this handler for a static data directory
    you would add a line to your application like::

        application = web.Application([
            (r"/content/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
        ])

    The handler constructor requires a ``path`` argument, which specifies the
    local root directory of the content to be served.

    Note that a capture group in the regex is required to parse the value for
    the ``path`` argument to the get() method (different than the constructor
    argument above); see `URLSpec` for details.

    To maximize the effectiveness of browser caching, this class supports
    versioned urls (by default using the argument ``?v=``).  If a version
    is given, we instruct the browser to cache this file indefinitely.
    `make_static_url` (also available as `RequestHandler.static_url`) can
    be used to construct a versioned url.

    This handler is intended primarily for use in development and light-duty
    file serving; for heavy traffic it will be more efficient to use
    a dedicated static file server (such as nginx or Apache).
    We support the HTTP ``Accept-Ranges`` mechanism to return partial
    content (because some browsers require this functionality to be
    present to seek in HTML5 audio or video), but this handler should
    not be used with files that are too large to fit comfortably in
    memory.

    **Subclassing notes**

    This class is designed to be extensible by subclassing, but because
    of the way static urls are generated with class methods rather than
    instance methods, the inheritance patterns are somewhat unusual.
    Be sure to use the ``@classmethod`` decorator when overriding a
    class method.  Instance methods may use the attributes ``self.path``,
    ``self.absolute_path``, and ``self.modified``.

    Subclasses should only override methods discussed in this section;
    overriding other methods is error-prone.  Overriding
    ``StaticFileHandler.get`` is particularly problematic due to the
    tight coupling with ``compute_etag`` and other methods.

    To change the way static urls are generated (e.g. to match the behavior
    of another server or CDN), override `make_static_url`, `parse_url_path`,
    `get_cache_time`, and/or `get_version`.

    To replace all interaction with the filesystem (e.g. to serve
    static content from a database), override `get_content`,
    `get_content_size`, `get_modified_time`, `get_absolute_path`, and
    `validate_absolute_path`.

    .. versionchanged:: 3.1
       Many of the methods for subclasses were added in Tornado 3.1.
    """
    CACHE_MAX_AGE = 86400 * 365 * 10  # 10 years

    # Cache of absolute path -> content version hash, shared across
    # all instances of this handler class.
    _static_hashes = {}
    _lock = threading.Lock()  # protects _static_hashes

    def initialize(self, path, default_filename=None):
        # ``path`` is the local root directory of the served content.
        self.root = path
        self.default_filename = default_filename

    @classmethod
    def reset(cls):
        # Clears the cached static hashes (used when static_hash_cache
        # is disabled, e.g. in debug mode).
        with cls._lock:
            cls._static_hashes = {}

    def head(self, path):
        # HEAD is GET without the body.
        return self.get(path, include_body=False)

    @gen.coroutine
    def get(self, path, include_body=True):
        # Set up our path instance variables.
        self.path = self.parse_url_path(path)
        del path  # make sure we don't refer to path instead of self.path again
        absolute_path = self.get_absolute_path(self.root, self.path)
        self.absolute_path = self.validate_absolute_path(
            self.root, absolute_path)
        if self.absolute_path is None:
            # validate_absolute_path already redirected or errored.
            return

        self.modified = self.get_modified_time()
        self.set_headers()

        if self.should_return_304():
            self.set_status(304)
            return

        request_range = None
        range_header = self.request.headers.get("Range")
        if range_header:
            # As per RFC 2616 14.16, if an invalid Range header is specified,
            # the request will be treated as if the header didn't exist.
            request_range = httputil._parse_request_range(range_header)

        size = self.get_content_size()
        if request_range:
            start, end = request_range
            if (start is not None and start >= size) or end == 0:
                # As per RFC 2616 14.35.1, a range is not satisfiable only if
                # the first requested byte is equal to or greater than the
                # content, or when a suffix with length 0 is specified
                self.set_status(416)  # Range Not Satisfiable
                self.set_header("Content-Type", "text/plain")
                self.set_header("Content-Range", "bytes */%s" % (size, ))
                return
            if start is not None and start < 0:
                # Negative start is a suffix range ("last N bytes").
                start += size
            if end is not None and end > size:
                # Clients sometimes blindly use a large range to limit their
                # download size; cap the endpoint at the actual file size.
                end = size
            # Note: only return HTTP 206 if less than the entire range has been
            # requested. Not only is this semantically correct, but Chrome
            # refuses to play audio if it gets an HTTP 206 in response to
            # ``Range: bytes=0-``.
            if size != (end or size) - (start or 0):
                self.set_status(206)  # Partial Content
                self.set_header("Content-Range",
                                httputil._get_content_range(start, end, size))
        else:
            start = end = None

        if start is not None and end is not None:
            content_length = end - start
        elif end is not None:
            content_length = end
        elif start is not None:
            content_length = size - start
        else:
            content_length = size
        self.set_header("Content-Length", content_length)

        if include_body:
            content = self.get_content(self.absolute_path, start, end)
            if isinstance(content, bytes):
                content = [content]
            for chunk in content:
                try:
                    self.write(chunk)
                    yield self.flush()
                except iostream.StreamClosedError:
                    # Client went away mid-download; stop writing.
                    return
        else:
            assert self.request.method == "HEAD"

    def compute_etag(self):
        """Sets the ``Etag`` header based on static url version.

        This allows efficient ``If-None-Match`` checks against cached
        versions, and sends the correct ``Etag`` for a partial response
        (i.e. the same ``Etag`` as the full file).

        .. versionadded:: 3.1
        """
        version_hash = self._get_cached_version(self.absolute_path)
        if not version_hash:
            return None
        return '"%s"' % (version_hash, )

    def set_headers(self):
        """Sets the content and caching headers on the response.

        .. versionadded:: 3.1
        """
        self.set_header("Accept-Ranges", "bytes")
        self.set_etag_header()

        if self.modified is not None:
            self.set_header("Last-Modified", self.modified)

        content_type = self.get_content_type()
        if content_type:
            self.set_header("Content-Type", content_type)

        cache_time = self.get_cache_time(self.path, self.modified,
                                         content_type)
        if cache_time > 0:
            self.set_header("Expires", datetime.datetime.utcnow() +
                            datetime.timedelta(seconds=cache_time))
            self.set_header("Cache-Control", "max-age=" + str(cache_time))

        self.set_extra_headers(self.path)

    def should_return_304(self):
        """Returns True if the headers indicate that we should return 304.

        ..
        versionadded:: 3.1
        """
        if self.check_etag_header():
            return True

        # Check the If-Modified-Since, and don't send the result if the
        # content has not been modified
        ims_value = self.request.headers.get("If-Modified-Since")
        if ims_value is not None:
            date_tuple = email.utils.parsedate(ims_value)
            if date_tuple is not None:
                if_since = datetime.datetime(*date_tuple[:6])
                if if_since >= self.modified:
                    return True

        return False

    @classmethod
    def get_absolute_path(cls, root, path):
        """Returns the absolute location of ``path`` relative to ``root``.

        ``root`` is the path configured for this `StaticFileHandler`
        (in most cases the ``static_path`` `Application` setting).

        This class method may be overridden in subclasses.  By default
        it returns a filesystem path, but other strings may be used
        as long as they are unique and understood by the subclass's
        overridden `get_content`.

        .. versionadded:: 3.1
        """
        abspath = os.path.abspath(os.path.join(root, path))
        return abspath

    def validate_absolute_path(self, root, absolute_path):
        """Validate and return the absolute path.

        ``root`` is the configured path for the `StaticFileHandler`,
        and ``path`` is the result of `get_absolute_path`

        This is an instance method called during request processing,
        so it may raise `HTTPError` or use methods like
        `RequestHandler.redirect` (return None after redirecting to
        halt further processing).  This is where 404 errors for missing files
        are generated.

        This method may modify the path before returning it, but note that
        any such modifications will not be understood by `make_static_url`.

        In instance methods, this method's result is available as
        ``self.absolute_path``.

        ..
versionadded:: 3.1 """ root = os.path.abspath(root) # os.path.abspath strips a trailing / # it needs to be temporarily added back for requests to root/ if not (absolute_path + os.path.sep).startswith(root): raise HTTPError(403, "%s is not in root static directory", self.path) if (os.path.isdir(absolute_path) and self.default_filename is not None): # need to look at the request.path here for when path is empty # but there is some prefix to the path that was already # trimmed by the routing if not self.request.path.endswith("/"): self.redirect(self.request.path + "/", permanent=True) return absolute_path = os.path.join(absolute_path, self.default_filename) if not os.path.exists(absolute_path): raise HTTPError(404) if not os.path.isfile(absolute_path): raise HTTPError(403, "%s is not a file", self.path) return absolute_path @classmethod def get_content(cls, abspath, start=None, end=None): """Retrieve the content of the requested resource which is located at the given absolute path. This class method may be overridden by subclasses. Note that its signature is different from other overridable class methods (no ``settings`` argument); this is deliberate to ensure that ``abspath`` is able to stand on its own as a cache key. This method should either return a byte string or an iterator of byte strings. The latter is preferred for large files as it helps reduce memory fragmentation. .. versionadded:: 3.1 """ with open(abspath, "rb") as file: if start is not None: file.seek(start) if end is not None: remaining = end - (start or 0) else: remaining = None while True: chunk_size = 64 * 1024 if remaining is not None and remaining < chunk_size: chunk_size = remaining chunk = file.read(chunk_size) if chunk: if remaining is not None: remaining -= len(chunk) yield chunk else: if remaining is not None: assert remaining == 0 return @classmethod def get_content_version(cls, abspath): """Returns a version string for the resource at the given path. 
This class method may be overridden by subclasses. The default implementation is a hash of the file's contents. .. versionadded:: 3.1 """ data = cls.get_content(abspath) hasher = hashlib.md5() if isinstance(data, bytes): hasher.update(data) else: for chunk in data: hasher.update(chunk) return hasher.hexdigest() def _stat(self): if not hasattr(self, '_stat_result'): self._stat_result = os.stat(self.absolute_path) return self._stat_result def get_content_size(self): """Retrieve the total size of the resource at the given path. This method may be overridden by subclasses. .. versionadded:: 3.1 .. versionchanged:: 4.0 This method is now always called, instead of only when partial results are requested. """ stat_result = self._stat() return stat_result[stat.ST_SIZE] def get_modified_time(self): """Returns the time that ``self.absolute_path`` was last modified. May be overridden in subclasses. Should return a `~datetime.datetime` object or None. .. versionadded:: 3.1 """ stat_result = self._stat() modified = datetime.datetime.utcfromtimestamp( stat_result[stat.ST_MTIME]) return modified def get_content_type(self): """Returns the ``Content-Type`` header to be used for this request. .. versionadded:: 3.1 """ mime_type, encoding = mimetypes.guess_type(self.absolute_path) return mime_type def set_extra_headers(self, path): """For subclass to add extra headers to the response""" pass def get_cache_time(self, path, modified, mime_type): """Override to customize cache control behavior. Return a positive number of seconds to make the result cacheable for that amount of time or 0 to mark resource as cacheable for an unspecified amount of time (subject to browser heuristics). By default returns cache expiry of 10 years for resources requested with ``v`` argument. """ return self.CACHE_MAX_AGE if "v" in self.request.arguments else 0 @classmethod def make_static_url(cls, settings, path, include_version=True): """Constructs a versioned url for the given path. 
This method may be overridden in subclasses (but note that it is a class method rather than an instance method). Subclasses are only required to implement the signature ``make_static_url(cls, settings, path)``; other keyword arguments may be passed through `~RequestHandler.static_url` but are not standard. ``settings`` is the `Application.settings` dictionary. ``path`` is the static path being requested. The url returned should be relative to the current host. ``include_version`` determines whether the generated URL should include the query string containing the version hash of the file corresponding to the given ``path``. """ url = settings.get('static_url_prefix', '/static/') + path if not include_version: return url version_hash = cls.get_version(settings, path) if not version_hash: return url return '%s?v=%s' % (url, version_hash) def parse_url_path(self, url_path): """Converts a static URL path into a filesystem path. ``url_path`` is the path component of the URL with ``static_url_prefix`` removed. The return value should be filesystem path relative to ``static_path``. This is the inverse of `make_static_url`. """ if os.path.sep != "/": url_path = url_path.replace("/", os.path.sep) return url_path @classmethod def get_version(cls, settings, path): """Generate the version string to be used in static URLs. ``settings`` is the `Application.settings` dictionary and ``path`` is the relative location of the requested asset on the filesystem. The returned value should be a string, or ``None`` if no version could be determined. .. versionchanged:: 3.1 This method was previously recommended for subclasses to override; `get_content_version` is now preferred as it allows the base class to handle caching of the result. 
""" abs_path = cls.get_absolute_path(settings['static_path'], path) return cls._get_cached_version(abs_path) @classmethod def _get_cached_version(cls, abs_path): with cls._lock: hashes = cls._static_hashes if abs_path not in hashes: try: hashes[abs_path] = cls.get_content_version(abs_path) except Exception: gen_log.error("Could not open static file %r", abs_path) hashes[abs_path] = None hsh = hashes.get(abs_path) if hsh: return hsh return None class FallbackHandler(RequestHandler): """A `RequestHandler` that wraps another HTTP server callback. The fallback is a callable object that accepts an `~.httputil.HTTPServerRequest`, such as an `Application` or `tornado.wsgi.WSGIContainer`. This is most useful to use both Tornado ``RequestHandlers`` and WSGI in the same server. Typical usage:: wsgi_app = tornado.wsgi.WSGIContainer( django.core.handlers.wsgi.WSGIHandler()) application = tornado.web.Application([ (r"/foo", FooHandler), (r".*", FallbackHandler, dict(fallback=wsgi_app), ]) """ def initialize(self, fallback): self.fallback = fallback def prepare(self): self.fallback(self.request) self._finished = True class OutputTransform(object): """A transform modifies the result of an HTTP request (e.g., GZip encoding) Applications are not expected to create their own OutputTransforms or interact with them directly; the framework chooses which transforms (if any) to apply. """ def __init__(self, request): pass def transform_first_chunk(self, status_code, headers, chunk, finishing): return status_code, headers, chunk def transform_chunk(self, chunk, finishing): return chunk class GZipContentEncoding(OutputTransform): """Applies the gzip content encoding to the response. See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11 .. versionchanged:: 4.0 Now compresses all mime types beginning with ``text/``, instead of just a whitelist. (the whitelist is still used for certain non-text mime types). 
""" # Whitelist of compressible mime types (in addition to any types # beginning with "text/"). CONTENT_TYPES = set(["application/javascript", "application/x-javascript", "application/xml", "application/atom+xml", "application/json", "application/xhtml+xml"]) MIN_LENGTH = 5 def __init__(self, request): self._gzipping = "gzip" in request.headers.get("Accept-Encoding", "") def _compressible_type(self, ctype): return ctype.startswith('text/') or ctype in self.CONTENT_TYPES def transform_first_chunk(self, status_code, headers, chunk, finishing): if 'Vary' in headers: headers['Vary'] += b', Accept-Encoding' else: headers['Vary'] = b'Accept-Encoding' if self._gzipping: ctype = _unicode(headers.get("Content-Type", "")).split(";")[0] self._gzipping = self._compressible_type(ctype) and \ (not finishing or len(chunk) >= self.MIN_LENGTH) and \ ("Content-Encoding" not in headers) if self._gzipping: headers["Content-Encoding"] = "gzip" self._gzip_value = BytesIO() self._gzip_file = gzip.GzipFile(mode="w", fileobj=self._gzip_value) chunk = self.transform_chunk(chunk, finishing) if "Content-Length" in headers: # The original content length is no longer correct. # If this is the last (and only) chunk, we can set the new # content-length; otherwise we remove it and fall back to # chunked encoding. if finishing: headers["Content-Length"] = str(len(chunk)) else: del headers["Content-Length"] return status_code, headers, chunk def transform_chunk(self, chunk, finishing): if self._gzipping: self._gzip_file.write(chunk) if finishing: self._gzip_file.close() else: self._gzip_file.flush() chunk = self._gzip_value.getvalue() self._gzip_value.truncate(0) self._gzip_value.seek(0) return chunk def authenticated(method): """Decorate methods with this to require that the user be logged in. If the user is not logged in, they will be redirected to the configured `login url <RequestHandler.get_login_url>`. 
    If you configure a login url with a query parameter, Tornado will
    assume you know what you're doing and use it as-is.  If not, it
    will add a `next` parameter so the login page knows where to send
    you once you're logged in.
    """
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        if not self.current_user:
            if self.request.method in ("GET", "HEAD"):
                url = self.get_login_url()
                if "?" not in url:
                    if urlparse.urlsplit(url).scheme:
                        # if login url is absolute, make next absolute too
                        next_url = self.request.full_url()
                    else:
                        next_url = self.request.uri
                    url += "?" + urlencode(dict(next=next_url))
                self.redirect(url)
                return
            # Non-GET/HEAD requests cannot be transparently replayed
            # through a login redirect; reject them outright.
            raise HTTPError(403)
        return method(self, *args, **kwargs)
    return wrapper


class UIModule(object):
    """A re-usable, modular UI unit on a page.

    UI modules often execute additional queries, and they can include
    additional CSS and JavaScript that will be included in the output
    page, which is automatically inserted on page render.

    Subclasses of UIModule must override the `render` method.
    """
    def __init__(self, handler):
        self.handler = handler
        self.request = handler.request
        self.ui = handler.ui
        self.locale = handler.locale

    @property
    def current_user(self):
        return self.handler.current_user

    def render(self, *args, **kwargs):
        """Override in subclasses to return this module's output."""
        raise NotImplementedError()

    def embedded_javascript(self):
        """Override to return a JavaScript string to be embedded in the
        page."""
        return None

    def javascript_files(self):
        """Override to return a list of JavaScript files needed by this
        module.

        If the return values are relative paths, they will be passed to
        `RequestHandler.static_url`; otherwise they will be used as-is.
        """
        return None

    def embedded_css(self):
        """Override to return a CSS string that will be embedded in the
        page."""
        return None

    def css_files(self):
        """Override to return a list of CSS files required by this module.

        If the return values are relative paths, they will be passed to
        `RequestHandler.static_url`; otherwise they will be used as-is.
        """
        return None

    def html_head(self):
        """Override to return an HTML string that will be put in the
        <head/> element.
        """
        return None

    def html_body(self):
        """Override to return an HTML string that will be put at the end
        of the <body/> element.
        """
        return None

    def render_string(self, path, **kwargs):
        """Renders a template and returns it as a string."""
        return self.handler.render_string(path, **kwargs)


class _linkify(UIModule):
    # Built-in module backing {% module linkify(...) %}.
    def render(self, text, **kwargs):
        return escape.linkify(text, **kwargs)


class _xsrf_form_html(UIModule):
    # Built-in module backing {% module xsrf_form_html() %}.
    def render(self):
        return self.handler.xsrf_form_html()


class TemplateModule(UIModule):
    """UIModule that simply renders the given template.

    {% module Template("foo.html") %} is similar to {% include "foo.html" %},
    but the module version gets its own namespace (with kwargs passed to
    Template()) instead of inheriting the outer template's namespace.

    Templates rendered through this module also get access to UIModule's
    automatic javascript/css features.  Simply call set_resources
    inside the template and give it keyword arguments corresponding to
    the methods on UIModule: {{ set_resources(js_files=static_url("my.js")) }}
    Note that these resources are output once per template file, not once
    per instantiation of the template, so they must not depend on
    any arguments to the template.
    """
    def __init__(self, handler):
        super(TemplateModule, self).__init__(handler)
        # keep resources in both a list and a dict to preserve order
        self._resource_list = []
        self._resource_dict = {}

    def render(self, path, **kwargs):
        def set_resources(**kwargs):
            if path not in self._resource_dict:
                self._resource_list.append(kwargs)
                self._resource_dict[path] = kwargs
            else:
                # A template must always register the same resources;
                # differing kwargs would make the output order-dependent.
                if self._resource_dict[path] != kwargs:
                    raise ValueError("set_resources called with different "
                                     "resources for the same template")
            return ""
        return self.render_string(path, set_resources=set_resources,
                                  **kwargs)

    def _get_resources(self, key):
        # Yield the value registered under ``key`` by each
        # set_resources() call, in first-seen order.
        return (r[key] for r in self._resource_list if key in r)

    def embedded_javascript(self):
        return "\n".join(self._get_resources("embedded_javascript"))

    def javascript_files(self):
        result = []
        for f in self._get_resources("javascript_files"):
            if isinstance(f, (unicode_type, bytes)):
                # A single filename; lists/tuples are flattened below.
                result.append(f)
            else:
                result.extend(f)
        return result

    def embedded_css(self):
        return "\n".join(self._get_resources("embedded_css"))

    def css_files(self):
        result = []
        for f in self._get_resources("css_files"):
            if isinstance(f, (unicode_type, bytes)):
                result.append(f)
            else:
                result.extend(f)
        return result

    def html_head(self):
        return "".join(self._get_resources("html_head"))

    def html_body(self):
        return "".join(self._get_resources("html_body"))


class _UIModuleNamespace(object):
    """Lazy namespace which creates UIModule proxies bound to a handler."""
    def __init__(self, handler, ui_modules):
        self.handler = handler
        self.ui_modules = ui_modules

    def __getitem__(self, key):
        return self.handler._ui_module(key, self.ui_modules[key])

    def __getattr__(self, key):
        try:
            return self[key]
        except KeyError as e:
            raise AttributeError(str(e))


class URLSpec(object):
    """Specifies mappings between URLs and handlers."""
    def __init__(self, pattern, handler, kwargs=None, name=None):
        """Parameters:

        * ``pattern``: Regular expression to be matched. Any groups
          in the regex will be passed in to the handler's get/post/etc
          methods as arguments.

        * ``handler``: `RequestHandler` subclass to be invoked.

        * ``kwargs`` (optional): A dictionary of additional arguments
          to be passed to the handler's constructor.

        * ``name`` (optional): A name for this handler.  Used by
          `Application.reverse_url`.
        """
        if not pattern.endswith('$'):
            pattern += '$'
        self.regex = re.compile(pattern)
        assert len(self.regex.groupindex) in (0, self.regex.groups), \
            ("groups in url regexes must either be all named or all "
             "positional: %r" % self.regex.pattern)

        if isinstance(handler, str):
            # import the Module and instantiate the class
            # Must be a fully qualified name (module.ClassName)
            handler = import_object(handler)

        self.handler_class = handler
        self.kwargs = kwargs or {}
        self.name = name
        self._path, self._group_count = self._find_groups()

    def __repr__(self):
        return '%s(%r, %s, kwargs=%r, name=%r)' % \
            (self.__class__.__name__, self.regex.pattern,
             self.handler_class, self.kwargs, self.name)

    def _find_groups(self):
        """Returns a tuple (reverse string, group count) for a url.

        For example: Given the url pattern /([0-9]{4})/([a-z-]+)/, this method
        would return ('/%s/%s/', 2).
        """
        pattern = self.regex.pattern
        if pattern.startswith('^'):
            pattern = pattern[1:]
        if pattern.endswith('$'):
            pattern = pattern[:-1]

        if self.regex.groups != pattern.count('('):
            # The pattern is too complicated for our simplistic matching,
            # so we can't support reversing it.
            return (None, None)

        pieces = []
        for fragment in pattern.split('('):
            if ')' in fragment:
                paren_loc = fragment.index(')')
                if paren_loc >= 0:
                    # Everything up to and including the ')' is the group
                    # itself; replace it with a '%s' placeholder.
                    pieces.append('%s' + fragment[paren_loc + 1:])
            else:
                pieces.append(fragment)

        return (''.join(pieces), self.regex.groups)

    def reverse(self, *args):
        # Substitute ``args`` into the reverse template built by
        # _find_groups, url-escaping each value.
        assert self._path is not None, \
            "Cannot reverse url regex " + self.regex.pattern
        assert len(args) == self._group_count, "required number of arguments "\
            "not found"
        if not len(args):
            return self._path
        converted_args = []
        for a in args:
            if not isinstance(a, (unicode_type, bytes)):
                a = str(a)
            converted_args.append(escape.url_escape(utf8(a), plus=False))
        return self._path % tuple(converted_args)

# Convenience alias used in Application route lists.
url = URLSpec


if hasattr(hmac, 'compare_digest'):  # python 3.3
    _time_independent_equals = hmac.compare_digest
else:
    def _time_independent_equals(a, b):
        # Constant-time comparison: XOR-accumulate every byte pair so the
        # running time does not leak how long a matching prefix is.
        if len(a) != len(b):
            return False
        result = 0
        if isinstance(a[0], int):  # python3 byte strings
            for x, y in zip(a, b):
                result |= x ^ y
        else:  # python2
            for x, y in zip(a, b):
                result |= ord(x) ^ ord(y)
        return result == 0


def create_signed_value(secret, name, value, version=None, clock=None,
                        key_version=None):
    # Produce a timestamped, HMAC-signed payload for ``value`` in either
    # the legacy v1 format or the length-prefixed v2 format.
    if version is None:
        version = DEFAULT_SIGNED_VALUE_VERSION
    if clock is None:
        clock = time.time

    timestamp = utf8(str(int(clock())))
    value = base64.b64encode(utf8(value))
    if version == 1:
        signature = _create_signature_v1(secret, name, value, timestamp)
        value = b"|".join([value, timestamp, signature])
        return value
    elif version == 2:
        # The v2 format consists of a version number and a series of
        # length-prefixed fields "%d:%s", the last of which is a
        # signature, all separated by pipes.  All numbers are in
        # decimal format with no leading zeros.  The signature is an
        # HMAC-SHA256 of the whole string up to that point, including
        # the final pipe.
        #
        # The fields are:
        # - format version (i.e. 2; no length prefix)
        # - key version (integer, default is 0)
        # - timestamp (integer seconds since epoch)
        # - name (not encoded; assumed to be ~alphanumeric)
        # - value (base64-encoded)
        # - signature (hex-encoded; no length prefix)
        def format_field(s):
            return utf8("%d:" % len(s)) + utf8(s)
        to_sign = b"|".join([
            b"2",
            format_field(str(key_version or 0)),
            format_field(timestamp),
            format_field(name),
            format_field(value),
            b''])

        if isinstance(secret, dict):
            assert key_version is not None, 'Key version must be set when sign key dict is used'
            assert version >= 2, 'Version must be at least 2 for key version support'
            secret = secret[key_version]

        signature = _create_signature_v2(secret, to_sign)
        return to_sign + signature
    else:
        raise ValueError("Unsupported version %d" % version)


# A leading version number in decimal
# with no leading zeros, followed by a pipe.
_signed_value_version_re = re.compile(br"^([1-9][0-9]*)\|(.*)$")


def _get_version(value):
    # Figures out what version value is.  Version 1 did not include an
    # explicit version field and started with arbitrary base64 data,
    # which makes this tricky.
    m = _signed_value_version_re.match(value)
    if m is None:
        version = 1
    else:
        try:
            version = int(m.group(1))
            if version > 999:
                # Certain payloads from the version-less v1 format may
                # be parsed as valid integers.  Due to base64 padding
                # restrictions, this can only happen for numbers whose
                # length is a multiple of 4, so we can treat all
                # numbers up to 999 as versions, and for the rest we
                # fall back to v1 format.
                version = 1
        except ValueError:
            version = 1
    return version


def decode_signed_value(secret, name, value, max_age_days=31,
                        clock=None, min_version=None):
    # Inverse of create_signed_value: verify signature and age, and
    # return the decoded payload, or None on any failure.
    if clock is None:
        clock = time.time
    if min_version is None:
        min_version = DEFAULT_SIGNED_VALUE_MIN_VERSION
    if min_version > 2:
        raise ValueError("Unsupported min_version %d" % min_version)
    if not value:
        return None

    value = utf8(value)
    version = _get_version(value)

    if version < min_version:
        return None
    if version == 1:
        return _decode_signed_value_v1(secret, name, value,
                                       max_age_days, clock)
    elif version == 2:
        return _decode_signed_value_v2(secret, name, value,
                                       max_age_days, clock)
    else:
        return None


def _decode_signed_value_v1(secret, name, value, max_age_days, clock):
    # v1 layout: base64(value)|timestamp|signature
    parts = utf8(value).split(b"|")
    if len(parts) != 3:
        return None
    signature = _create_signature_v1(secret, name, parts[0], parts[1])
    if not _time_independent_equals(parts[2], signature):
        gen_log.warning("Invalid cookie signature %r", value)
        return None
    timestamp = int(parts[1])
    if timestamp < clock() - max_age_days * 86400:
        gen_log.warning("Expired cookie %r", value)
        return None
    if timestamp > clock() + 31 * 86400:
        # _cookie_signature does not hash a delimiter between the
        # parts of the cookie, so an attacker could transfer trailing
        # digits from the payload to the timestamp without altering the
        # signature.  For backwards compatibility, sanity-check timestamp
        # here instead of modifying _cookie_signature.
        gen_log.warning("Cookie timestamp in future; possible tampering %r",
                        value)
        return None
    if parts[1].startswith(b"0"):
        # Leading zeros are never produced by create_signed_value, so
        # their presence indicates tampering (see comment above).
        gen_log.warning("Tampered cookie %r", value)
        return None
    try:
        return base64.b64decode(parts[0])
    except Exception:
        return None


def _decode_fields_v2(value):
    def _consume_field(s):
        # Each field is "<len>:<bytes>|"; parse one and return the
        # remainder.
        length, _, rest = s.partition(b':')
        n = int(length)
        field_value = rest[:n]
        # In python 3, indexing bytes returns small integers; we must
        # use a slice to get a byte string as in python 2.
        if rest[n:n + 1] != b'|':
            raise ValueError("malformed v2 signed value field")
        rest = rest[n + 1:]
        return field_value, rest

    rest = value[2:]  # remove version number
    key_version, rest = _consume_field(rest)
    timestamp, rest = _consume_field(rest)
    name_field, rest = _consume_field(rest)
    value_field, passed_sig = _consume_field(rest)
    return int(key_version), timestamp, name_field, value_field, passed_sig


def _decode_signed_value_v2(secret, name, value, max_age_days, clock):
    try:
        key_version, timestamp, name_field, value_field, passed_sig = _decode_fields_v2(value)
    except ValueError:
        return None
    # Everything before the signature field is what was signed.
    signed_string = value[:-len(passed_sig)]

    if isinstance(secret, dict):
        try:
            secret = secret[key_version]
        except KeyError:
            return None

    expected_sig = _create_signature_v2(secret, signed_string)
    if not _time_independent_equals(passed_sig, expected_sig):
        return None
    if name_field != utf8(name):
        return None
    timestamp = int(timestamp)
    if timestamp < clock() - max_age_days * 86400:
        # The signature has expired.
        return None
    try:
        return base64.b64decode(value_field)
    except Exception:
        return None


def get_signature_key_version(value):
    # Return the key version embedded in a v2 signed value, or None for
    # v1 values and malformed input.
    value = utf8(value)
    version = _get_version(value)
    if version < 2:
        return None
    try:
        key_version, _, _, _, _ = _decode_fields_v2(value)
    except ValueError:
        return None

    return key_version


def _create_signature_v1(secret, *parts):
    hash = hmac.new(utf8(secret), digestmod=hashlib.sha1)
    for part in parts:
        hash.update(utf8(part))
    return utf8(hash.hexdigest())


def _create_signature_v2(secret, s):
    hash = hmac.new(utf8(secret), digestmod=hashlib.sha256)
    hash.update(utf8(s))
    return utf8(hash.hexdigest())


def _unquote_or_none(s):
    """None-safe wrapper around url_unescape to handle
    unmatched optional groups correctly.

    Note that args are passed as bytes so the handler can decide
    what encoding to use.
    """
    if s is None:
        return s
    return escape.url_unescape(s, encoding=None, plus=False)
codeparrot/github-code-clean
"""Tests for http/cookiejar.py.""" import os import re import test.support import time import unittest import urllib.request from http.cookiejar import (time2isoz, http2time, iso2time, time2netscape, parse_ns_headers, join_header_words, split_header_words, Cookie, CookieJar, DefaultCookiePolicy, LWPCookieJar, MozillaCookieJar, LoadError, lwp_cookie_str, DEFAULT_HTTP_PORT, escape_path, reach, is_HDN, domain_match, user_domain_match, request_path, request_port, request_host) class DateTimeTests(unittest.TestCase): def test_time2isoz(self): base = 1019227000 day = 24*3600 self.assertEqual(time2isoz(base), "2002-04-19 14:36:40Z") self.assertEqual(time2isoz(base+day), "2002-04-20 14:36:40Z") self.assertEqual(time2isoz(base+2*day), "2002-04-21 14:36:40Z") self.assertEqual(time2isoz(base+3*day), "2002-04-22 14:36:40Z") az = time2isoz() bz = time2isoz(500000) for text in (az, bz): self.assertRegex(text, r"^\d{4}-\d\d-\d\d \d\d:\d\d:\d\dZ$", "bad time2isoz format: %s %s" % (az, bz)) def test_http2time(self): def parse_date(text): return time.gmtime(http2time(text))[:6] self.assertEqual(parse_date("01 Jan 2001"), (2001, 1, 1, 0, 0, 0.0)) # this test will break around year 2070 self.assertEqual(parse_date("03-Feb-20"), (2020, 2, 3, 0, 0, 0.0)) # this test will break around year 2048 self.assertEqual(parse_date("03-Feb-98"), (1998, 2, 3, 0, 0, 0.0)) def test_http2time_formats(self): # test http2time for supported dates. Test cases with 2 digit year # will probably break in year 2044. 
tests = [ 'Thu, 03 Feb 1994 00:00:00 GMT', # proposed new HTTP format 'Thursday, 03-Feb-94 00:00:00 GMT', # old rfc850 HTTP format 'Thursday, 03-Feb-1994 00:00:00 GMT', # broken rfc850 HTTP format '03 Feb 1994 00:00:00 GMT', # HTTP format (no weekday) '03-Feb-94 00:00:00 GMT', # old rfc850 (no weekday) '03-Feb-1994 00:00:00 GMT', # broken rfc850 (no weekday) '03-Feb-1994 00:00 GMT', # broken rfc850 (no weekday, no seconds) '03-Feb-1994 00:00', # broken rfc850 (no weekday, no seconds, no tz) '02-Feb-1994 24:00', # broken rfc850 (no weekday, no seconds, # no tz) using hour 24 with yesterday date '03-Feb-94', # old rfc850 HTTP format (no weekday, no time) '03-Feb-1994', # broken rfc850 HTTP format (no weekday, no time) '03 Feb 1994', # proposed new HTTP format (no weekday, no time) # A few tests with extra space at various places ' 03 Feb 1994 0:00 ', ' 03-Feb-1994 ', ] test_t = 760233600 # assume broken POSIX counting of seconds result = time2isoz(test_t) expected = "1994-02-03 00:00:00Z" self.assertEqual(result, expected, "%s => '%s' (%s)" % (test_t, result, expected)) for s in tests: self.assertEqual(http2time(s), test_t, s) self.assertEqual(http2time(s.lower()), test_t, s.lower()) self.assertEqual(http2time(s.upper()), test_t, s.upper()) def test_http2time_garbage(self): for test in [ '', 'Garbage', 'Mandag 16. 
September 1996', '01-00-1980', '01-13-1980', '00-01-1980', '32-01-1980', '01-01-1980 25:00:00', '01-01-1980 00:61:00', '01-01-1980 00:00:62', ]: self.assertIsNone(http2time(test), "http2time(%s) is not None\n" "http2time(test) %s" % (test, http2time(test))) def test_iso2time(self): def parse_date(text): return time.gmtime(iso2time(text))[:6] # ISO 8601 compact format self.assertEqual(parse_date("19940203T141529Z"), (1994, 2, 3, 14, 15, 29)) # ISO 8601 with time behind UTC self.assertEqual(parse_date("1994-02-03 07:15:29 -0700"), (1994, 2, 3, 14, 15, 29)) # ISO 8601 with time ahead of UTC self.assertEqual(parse_date("1994-02-03 19:45:29 +0530"), (1994, 2, 3, 14, 15, 29)) def test_iso2time_formats(self): # test iso2time for supported dates. tests = [ '1994-02-03 00:00:00 -0000', # ISO 8601 format '1994-02-03 00:00:00 +0000', # ISO 8601 format '1994-02-03 00:00:00', # zone is optional '1994-02-03', # only date '1994-02-03T00:00:00', # Use T as separator '19940203', # only date '1994-02-02 24:00:00', # using hour-24 yesterday date '19940203T000000Z', # ISO 8601 compact format # A few tests with extra space at various places ' 1994-02-03 ', ' 1994-02-03T00:00:00 ', ] test_t = 760233600 # assume broken POSIX counting of seconds for s in tests: self.assertEqual(iso2time(s), test_t, s) self.assertEqual(iso2time(s.lower()), test_t, s.lower()) self.assertEqual(iso2time(s.upper()), test_t, s.upper()) def test_iso2time_garbage(self): for test in [ '', 'Garbage', 'Thursday, 03-Feb-94 00:00:00 GMT', '1980-00-01', '1980-13-01', '1980-01-00', '1980-01-32', '1980-01-01 25:00:00', '1980-01-01 00:61:00', '01-01-1980 00:00:62', '01-01-1980T00:00:62', '19800101T250000Z' '1980-01-01 00:00:00 -2500', ]: self.assertIsNone(iso2time(test), "iso2time(%s) is not None\n" "iso2time(test) %s" % (test, iso2time(test))) class HeaderTests(unittest.TestCase): def test_parse_ns_headers(self): # quotes should be stripped expected = [[('foo', 'bar'), ('expires', 2209069412), ('version', '0')]] for hdr 
in [ 'foo=bar; expires=01 Jan 2040 22:23:32 GMT', 'foo=bar; expires="01 Jan 2040 22:23:32 GMT"', ]: self.assertEqual(parse_ns_headers([hdr]), expected) def test_parse_ns_headers_version(self): # quotes should be stripped expected = [[('foo', 'bar'), ('version', '1')]] for hdr in [ 'foo=bar; version="1"', 'foo=bar; Version="1"', ]: self.assertEqual(parse_ns_headers([hdr]), expected) def test_parse_ns_headers_special_names(self): # names such as 'expires' are not special in first name=value pair # of Set-Cookie: header # Cookie with name 'expires' hdr = 'expires=01 Jan 2040 22:23:32 GMT' expected = [[("expires", "01 Jan 2040 22:23:32 GMT"), ("version", "0")]] self.assertEqual(parse_ns_headers([hdr]), expected) def test_join_header_words(self): joined = join_header_words([[("foo", None), ("bar", "baz")]]) self.assertEqual(joined, "foo; bar=baz") self.assertEqual(join_header_words([[]]), "") def test_split_header_words(self): tests = [ ("foo", [[("foo", None)]]), ("foo=bar", [[("foo", "bar")]]), (" foo ", [[("foo", None)]]), (" foo= ", [[("foo", "")]]), (" foo=", [[("foo", "")]]), (" foo= ; ", [[("foo", "")]]), (" foo= ; bar= baz ", [[("foo", ""), ("bar", "baz")]]), ("foo=bar bar=baz", [[("foo", "bar"), ("bar", "baz")]]), # doesn't really matter if this next fails, but it works ATM ("foo= bar=baz", [[("foo", "bar=baz")]]), ("foo=bar;bar=baz", [[("foo", "bar"), ("bar", "baz")]]), ('foo bar baz', [[("foo", None), ("bar", None), ("baz", None)]]), ("a, b, c", [[("a", None)], [("b", None)], [("c", None)]]), (r'foo; bar=baz, spam=, foo="\,\;\"", bar= ', [[("foo", None), ("bar", "baz")], [("spam", "")], [("foo", ',;"')], [("bar", "")]]), ] for arg, expect in tests: try: result = split_header_words([arg]) except: import traceback, io f = io.StringIO() traceback.print_exc(None, f) result = "(error -- traceback follows)\n\n%s" % f.getvalue() self.assertEqual(result, expect, """ When parsing: '%s' Expected: '%s' Got: '%s' """ % (arg, expect, result)) def test_roundtrip(self): 
tests = [ ("foo", "foo"), ("foo=bar", "foo=bar"), (" foo ", "foo"), ("foo=", 'foo=""'), ("foo=bar bar=baz", "foo=bar; bar=baz"), ("foo=bar;bar=baz", "foo=bar; bar=baz"), ('foo bar baz', "foo; bar; baz"), (r'foo="\"" bar="\\"', r'foo="\""; bar="\\"'), ('foo,,,bar', 'foo, bar'), ('foo=bar,bar=baz', 'foo=bar, bar=baz'), ('text/html; charset=iso-8859-1', 'text/html; charset="iso-8859-1"'), ('foo="bar"; port="80,81"; discard, bar=baz', 'foo=bar; port="80,81"; discard, bar=baz'), (r'Basic realm="\"foo\\\\bar\""', r'Basic; realm="\"foo\\\\bar\""') ] for arg, expect in tests: input = split_header_words([arg]) res = join_header_words(input) self.assertEqual(res, expect, """ When parsing: '%s' Expected: '%s' Got: '%s' Input was: '%s' """ % (arg, expect, res, input)) class FakeResponse: def __init__(self, headers=[], url=None): """ headers: list of RFC822-style 'Key: value' strings """ import email self._headers = email.message_from_string("\n".join(headers)) self._url = url def info(self): return self._headers def interact_2965(cookiejar, url, *set_cookie_hdrs): return _interact(cookiejar, url, set_cookie_hdrs, "Set-Cookie2") def interact_netscape(cookiejar, url, *set_cookie_hdrs): return _interact(cookiejar, url, set_cookie_hdrs, "Set-Cookie") def _interact(cookiejar, url, set_cookie_hdrs, hdr_name): """Perform a single request / response cycle, returning Cookie: header.""" req = urllib.request.Request(url) cookiejar.add_cookie_header(req) cookie_hdr = req.get_header("Cookie", "") headers = [] for hdr in set_cookie_hdrs: headers.append("%s: %s" % (hdr_name, hdr)) res = FakeResponse(headers, url) cookiejar.extract_cookies(res, req) return cookie_hdr class FileCookieJarTests(unittest.TestCase): def test_lwp_valueless_cookie(self): # cookies with no value should be saved and loaded consistently filename = test.support.TESTFN c = LWPCookieJar() interact_netscape(c, "http://www.acme.com/", 'boo') self.assertEqual(c._cookies["www.acme.com"]["/"]["boo"].value, None) try: 
c.save(filename, ignore_discard=True) c = LWPCookieJar() c.load(filename, ignore_discard=True) finally: try: os.unlink(filename) except OSError: pass self.assertEqual(c._cookies["www.acme.com"]["/"]["boo"].value, None) def test_bad_magic(self): # OSErrors (eg. file doesn't exist) are allowed to propagate filename = test.support.TESTFN for cookiejar_class in LWPCookieJar, MozillaCookieJar: c = cookiejar_class() try: c.load(filename="for this test to work, a file with this " "filename should not exist") except OSError as exc: # an OSError subclass (likely FileNotFoundError), but not # LoadError self.assertIsNot(exc.__class__, LoadError) else: self.fail("expected OSError for invalid filename") # Invalid contents of cookies file (eg. bad magic string) # causes a LoadError. try: with open(filename, "w") as f: f.write("oops\n") for cookiejar_class in LWPCookieJar, MozillaCookieJar: c = cookiejar_class() self.assertRaises(LoadError, c.load, filename) finally: try: os.unlink(filename) except OSError: pass class CookieTests(unittest.TestCase): # XXX # Get rid of string comparisons where not actually testing str / repr. # .clear() etc. # IP addresses like 50 (single number, no dot) and domain-matching # functions (and is_HDN)? See draft RFC 2965 errata. # Strictness switches # is_third_party() # unverifiability / third-party blocking # Netscape cookies work the same as RFC 2965 with regard to port. # Set-Cookie with negative max age. # If turn RFC 2965 handling off, Set-Cookie2 cookies should not clobber # Set-Cookie cookies. # Cookie2 should be sent if *any* cookies are not V1 (ie. V0 OR V2 etc.). # Cookies (V1 and V0) with no expiry date should be set to be discarded. # RFC 2965 Quoting: # Should accept unquoted cookie-attribute values? check errata draft. # Which are required on the way in and out? # Should always return quoted cookie-attribute values? # Proper testing of when RFC 2965 clobbers Netscape (waiting for errata). # Path-match on return (same for V0 and V1). 
# RFC 2965 acceptance and returning rules # Set-Cookie2 without version attribute is rejected. # Netscape peculiarities list from Ronald Tschalar. # The first two still need tests, the rest are covered. ## - Quoting: only quotes around the expires value are recognized as such ## (and yes, some folks quote the expires value); quotes around any other ## value are treated as part of the value. ## - White space: white space around names and values is ignored ## - Default path: if no path parameter is given, the path defaults to the ## path in the request-uri up to, but not including, the last '/'. Note ## that this is entirely different from what the spec says. ## - Commas and other delimiters: Netscape just parses until the next ';'. ## This means it will allow commas etc inside values (and yes, both ## commas and equals are commonly appear in the cookie value). This also ## means that if you fold multiple Set-Cookie header fields into one, ## comma-separated list, it'll be a headache to parse (at least my head ## starts hurting every time I think of that code). ## - Expires: You'll get all sorts of date formats in the expires, ## including emtpy expires attributes ("expires="). Be as flexible as you ## can, and certainly don't expect the weekday to be there; if you can't ## parse it, just ignore it and pretend it's a session cookie. ## - Domain-matching: Netscape uses the 2-dot rule for _all_ domains, not ## just the 7 special TLD's listed in their spec. And folks rely on ## that... def test_domain_return_ok(self): # test optimization: .domain_return_ok() should filter out most # domains in the CookieJar before we try to access them (because that # may require disk access -- in particular, with MSIECookieJar) # This is only a rough check for performance reasons, so it's not too # critical as long as it's sufficiently liberal. 
pol = DefaultCookiePolicy() for url, domain, ok in [ ("http://foo.bar.com/", "blah.com", False), ("http://foo.bar.com/", "rhubarb.blah.com", False), ("http://foo.bar.com/", "rhubarb.foo.bar.com", False), ("http://foo.bar.com/", ".foo.bar.com", True), ("http://foo.bar.com/", "foo.bar.com", True), ("http://foo.bar.com/", ".bar.com", True), ("http://foo.bar.com/", "com", True), ("http://foo.com/", "rhubarb.foo.com", False), ("http://foo.com/", ".foo.com", True), ("http://foo.com/", "foo.com", True), ("http://foo.com/", "com", True), ("http://foo/", "rhubarb.foo", False), ("http://foo/", ".foo", True), ("http://foo/", "foo", True), ("http://foo/", "foo.local", True), ("http://foo/", ".local", True), ]: request = urllib.request.Request(url) r = pol.domain_return_ok(domain, request) if ok: self.assertTrue(r) else: self.assertFalse(r) def test_missing_value(self): # missing = sign in Cookie: header is regarded by Mozilla as a missing # name, and by http.cookiejar as a missing value filename = test.support.TESTFN c = MozillaCookieJar(filename) interact_netscape(c, "http://www.acme.com/", 'eggs') interact_netscape(c, "http://www.acme.com/", '"spam"; path=/foo/') cookie = c._cookies["www.acme.com"]["/"]["eggs"] self.assertIsNone(cookie.value) self.assertEqual(cookie.name, "eggs") cookie = c._cookies["www.acme.com"]['/foo/']['"spam"'] self.assertIsNone(cookie.value) self.assertEqual(cookie.name, '"spam"') self.assertEqual(lwp_cookie_str(cookie), ( r'"spam"; path="/foo/"; domain="www.acme.com"; ' 'path_spec; discard; version=0')) old_str = repr(c) c.save(ignore_expires=True, ignore_discard=True) try: c = MozillaCookieJar(filename) c.revert(ignore_expires=True, ignore_discard=True) finally: os.unlink(c.filename) # cookies unchanged apart from lost info re. 
whether path was specified self.assertEqual( repr(c), re.sub("path_specified=%s" % True, "path_specified=%s" % False, old_str) ) self.assertEqual(interact_netscape(c, "http://www.acme.com/foo/"), '"spam"; eggs') def test_rfc2109_handling(self): # RFC 2109 cookies are handled as RFC 2965 or Netscape cookies, # dependent on policy settings for rfc2109_as_netscape, rfc2965, version in [ # default according to rfc2965 if not explicitly specified (None, False, 0), (None, True, 1), # explicit rfc2109_as_netscape (False, False, None), # version None here means no cookie stored (False, True, 1), (True, False, 0), (True, True, 0), ]: policy = DefaultCookiePolicy( rfc2109_as_netscape=rfc2109_as_netscape, rfc2965=rfc2965) c = CookieJar(policy) interact_netscape(c, "http://www.example.com/", "ni=ni; Version=1") try: cookie = c._cookies["www.example.com"]["/"]["ni"] except KeyError: self.assertIsNone(version) # didn't expect a stored cookie else: self.assertEqual(cookie.version, version) # 2965 cookies are unaffected interact_2965(c, "http://www.example.com/", "foo=bar; Version=1") if rfc2965: cookie2965 = c._cookies["www.example.com"]["/"]["foo"] self.assertEqual(cookie2965.version, 1) def test_ns_parser(self): c = CookieJar() interact_netscape(c, "http://www.acme.com/", 'spam=eggs; DoMain=.acme.com; port; blArgh="feep"') interact_netscape(c, "http://www.acme.com/", 'ni=ni; port=80,8080') interact_netscape(c, "http://www.acme.com:80/", 'nini=ni') interact_netscape(c, "http://www.acme.com:80/", 'foo=bar; expires=') interact_netscape(c, "http://www.acme.com:80/", 'spam=eggs; ' 'expires="Foo Bar 25 33:22:11 3022"') interact_netscape(c, 'http://www.acme.com/', 'fortytwo=') interact_netscape(c, 'http://www.acme.com/', '=unladenswallow') interact_netscape(c, 'http://www.acme.com/', 'holyhandgrenade') cookie = c._cookies[".acme.com"]["/"]["spam"] self.assertEqual(cookie.domain, ".acme.com") self.assertTrue(cookie.domain_specified) self.assertEqual(cookie.port, DEFAULT_HTTP_PORT) 
self.assertFalse(cookie.port_specified) # case is preserved self.assertTrue(cookie.has_nonstandard_attr("blArgh")) self.assertFalse(cookie.has_nonstandard_attr("blargh")) cookie = c._cookies["www.acme.com"]["/"]["ni"] self.assertEqual(cookie.domain, "www.acme.com") self.assertFalse(cookie.domain_specified) self.assertEqual(cookie.port, "80,8080") self.assertTrue(cookie.port_specified) cookie = c._cookies["www.acme.com"]["/"]["nini"] self.assertIsNone(cookie.port) self.assertFalse(cookie.port_specified) # invalid expires should not cause cookie to be dropped foo = c._cookies["www.acme.com"]["/"]["foo"] spam = c._cookies["www.acme.com"]["/"]["foo"] self.assertIsNone(foo.expires) self.assertIsNone(spam.expires) cookie = c._cookies['www.acme.com']['/']['fortytwo'] self.assertIsNotNone(cookie.value) self.assertEqual(cookie.value, '') # there should be a distinction between a present but empty value # (above) and a value that's entirely missing (below) cookie = c._cookies['www.acme.com']['/']['holyhandgrenade'] self.assertIsNone(cookie.value) def test_ns_parser_special_names(self): # names such as 'expires' are not special in first name=value pair # of Set-Cookie: header c = CookieJar() interact_netscape(c, "http://www.acme.com/", 'expires=eggs') interact_netscape(c, "http://www.acme.com/", 'version=eggs; spam=eggs') cookies = c._cookies["www.acme.com"]["/"] self.assertIn('expires', cookies) self.assertIn('version', cookies) def test_expires(self): # if expires is in future, keep cookie... c = CookieJar() future = time2netscape(time.time()+3600) interact_netscape(c, "http://www.acme.com/", 'spam="bar"; expires=%s' % future) self.assertEqual(len(c), 1) now = time2netscape(time.time()-1) # ... 
and if in past or present, discard it interact_netscape(c, "http://www.acme.com/", 'foo="eggs"; expires=%s' % now) h = interact_netscape(c, "http://www.acme.com/") self.assertEqual(len(c), 1) self.assertIn('spam="bar"', h) self.assertNotIn("foo", h) # max-age takes precedence over expires, and zero max-age is request to # delete both new cookie and any old matching cookie interact_netscape(c, "http://www.acme.com/", 'eggs="bar"; expires=%s' % future) interact_netscape(c, "http://www.acme.com/", 'bar="bar"; expires=%s' % future) self.assertEqual(len(c), 3) interact_netscape(c, "http://www.acme.com/", 'eggs="bar"; ' 'expires=%s; max-age=0' % future) interact_netscape(c, "http://www.acme.com/", 'bar="bar"; ' 'max-age=0; expires=%s' % future) h = interact_netscape(c, "http://www.acme.com/") self.assertEqual(len(c), 1) # test expiry at end of session for cookies with no expires attribute interact_netscape(c, "http://www.rhubarb.net/", 'whum="fizz"') self.assertEqual(len(c), 2) c.clear_session_cookies() self.assertEqual(len(c), 1) self.assertIn('spam="bar"', h) # test if fractional expiry is accepted cookie = Cookie(0, "name", "value", None, False, "www.python.org", True, False, "/", False, False, "1444312383.018307", False, None, None, {}) self.assertEqual(cookie.expires, 1444312383) # XXX RFC 2965 expiry rules (some apply to V0 too) def test_default_path(self): # RFC 2965 pol = DefaultCookiePolicy(rfc2965=True) c = CookieJar(pol) interact_2965(c, "http://www.acme.com/", 'spam="bar"; Version="1"') self.assertIn("/", c._cookies["www.acme.com"]) c = CookieJar(pol) interact_2965(c, "http://www.acme.com/blah", 'eggs="bar"; Version="1"') self.assertIn("/", c._cookies["www.acme.com"]) c = CookieJar(pol) interact_2965(c, "http://www.acme.com/blah/rhubarb", 'eggs="bar"; Version="1"') self.assertIn("/blah/", c._cookies["www.acme.com"]) c = CookieJar(pol) interact_2965(c, "http://www.acme.com/blah/rhubarb/", 'eggs="bar"; Version="1"') self.assertIn("/blah/rhubarb/", 
c._cookies["www.acme.com"]) # Netscape c = CookieJar() interact_netscape(c, "http://www.acme.com/", 'spam="bar"') self.assertIn("/", c._cookies["www.acme.com"]) c = CookieJar() interact_netscape(c, "http://www.acme.com/blah", 'eggs="bar"') self.assertIn("/", c._cookies["www.acme.com"]) c = CookieJar() interact_netscape(c, "http://www.acme.com/blah/rhubarb", 'eggs="bar"') self.assertIn("/blah", c._cookies["www.acme.com"]) c = CookieJar() interact_netscape(c, "http://www.acme.com/blah/rhubarb/", 'eggs="bar"') self.assertIn("/blah/rhubarb", c._cookies["www.acme.com"]) def test_default_path_with_query(self): cj = CookieJar() uri = "http://example.com/?spam/eggs" value = 'eggs="bar"' interact_netscape(cj, uri, value) # Default path does not include query, so is "/", not "/?spam". self.assertIn("/", cj._cookies["example.com"]) # Cookie is sent back to the same URI. self.assertEqual(interact_netscape(cj, uri), value) def test_escape_path(self): cases = [ # quoted safe ("/foo%2f/bar", "/foo%2F/bar"), ("/foo%2F/bar", "/foo%2F/bar"), # quoted % ("/foo%%/bar", "/foo%%/bar"), # quoted unsafe ("/fo%19o/bar", "/fo%19o/bar"), ("/fo%7do/bar", "/fo%7Do/bar"), # unquoted safe ("/foo/bar&", "/foo/bar&"), ("/foo//bar", "/foo//bar"), ("\176/foo/bar", "\176/foo/bar"), # unquoted unsafe ("/foo\031/bar", "/foo%19/bar"), ("/\175foo/bar", "/%7Dfoo/bar"), # unicode, latin-1 range ("/foo/bar\u00fc", "/foo/bar%C3%BC"), # UTF-8 encoded # unicode ("/foo/bar\uabcd", "/foo/bar%EA%AF%8D"), # UTF-8 encoded ] for arg, result in cases: self.assertEqual(escape_path(arg), result) def test_request_path(self): # with parameters req = urllib.request.Request( "http://www.example.com/rheum/rhaponticum;" "foo=bar;sing=song?apples=pears&spam=eggs#ni") self.assertEqual(request_path(req), "/rheum/rhaponticum;foo=bar;sing=song") # without parameters req = urllib.request.Request( "http://www.example.com/rheum/rhaponticum?" 
"apples=pears&spam=eggs#ni") self.assertEqual(request_path(req), "/rheum/rhaponticum") # missing final slash req = urllib.request.Request("http://www.example.com") self.assertEqual(request_path(req), "/") def test_request_port(self): req = urllib.request.Request("http://www.acme.com:1234/", headers={"Host": "www.acme.com:4321"}) self.assertEqual(request_port(req), "1234") req = urllib.request.Request("http://www.acme.com/", headers={"Host": "www.acme.com:4321"}) self.assertEqual(request_port(req), DEFAULT_HTTP_PORT) def test_request_host(self): # this request is illegal (RFC2616, 14.2.3) req = urllib.request.Request("http://1.1.1.1/", headers={"Host": "www.acme.com:80"}) # libwww-perl wants this response, but that seems wrong (RFC 2616, # section 5.2, point 1., and RFC 2965 section 1, paragraph 3) #self.assertEqual(request_host(req), "www.acme.com") self.assertEqual(request_host(req), "1.1.1.1") req = urllib.request.Request("http://www.acme.com/", headers={"Host": "irrelevant.com"}) self.assertEqual(request_host(req), "www.acme.com") # port shouldn't be in request-host req = urllib.request.Request("http://www.acme.com:2345/resource.html", headers={"Host": "www.acme.com:5432"}) self.assertEqual(request_host(req), "www.acme.com") def test_is_HDN(self): self.assertTrue(is_HDN("foo.bar.com")) self.assertTrue(is_HDN("1foo2.3bar4.5com")) self.assertFalse(is_HDN("192.168.1.1")) self.assertFalse(is_HDN("")) self.assertFalse(is_HDN(".")) self.assertFalse(is_HDN(".foo.bar.com")) self.assertFalse(is_HDN("..foo")) self.assertFalse(is_HDN("foo.")) def test_reach(self): self.assertEqual(reach("www.acme.com"), ".acme.com") self.assertEqual(reach("acme.com"), "acme.com") self.assertEqual(reach("acme.local"), ".local") self.assertEqual(reach(".local"), ".local") self.assertEqual(reach(".com"), ".com") self.assertEqual(reach("."), ".") self.assertEqual(reach(""), "") self.assertEqual(reach("192.168.0.1"), "192.168.0.1") def test_domain_match(self): 
self.assertTrue(domain_match("192.168.1.1", "192.168.1.1")) self.assertFalse(domain_match("192.168.1.1", ".168.1.1")) self.assertTrue(domain_match("x.y.com", "x.Y.com")) self.assertTrue(domain_match("x.y.com", ".Y.com")) self.assertFalse(domain_match("x.y.com", "Y.com")) self.assertTrue(domain_match("a.b.c.com", ".c.com")) self.assertFalse(domain_match(".c.com", "a.b.c.com")) self.assertTrue(domain_match("example.local", ".local")) self.assertFalse(domain_match("blah.blah", "")) self.assertFalse(domain_match("", ".rhubarb.rhubarb")) self.assertTrue(domain_match("", "")) self.assertTrue(user_domain_match("acme.com", "acme.com")) self.assertFalse(user_domain_match("acme.com", ".acme.com")) self.assertTrue(user_domain_match("rhubarb.acme.com", ".acme.com")) self.assertTrue(user_domain_match("www.rhubarb.acme.com", ".acme.com")) self.assertTrue(user_domain_match("x.y.com", "x.Y.com")) self.assertTrue(user_domain_match("x.y.com", ".Y.com")) self.assertFalse(user_domain_match("x.y.com", "Y.com")) self.assertTrue(user_domain_match("y.com", "Y.com")) self.assertFalse(user_domain_match(".y.com", "Y.com")) self.assertTrue(user_domain_match(".y.com", ".Y.com")) self.assertTrue(user_domain_match("x.y.com", ".com")) self.assertFalse(user_domain_match("x.y.com", "com")) self.assertFalse(user_domain_match("x.y.com", "m")) self.assertFalse(user_domain_match("x.y.com", ".m")) self.assertFalse(user_domain_match("x.y.com", "")) self.assertFalse(user_domain_match("x.y.com", ".")) self.assertTrue(user_domain_match("192.168.1.1", "192.168.1.1")) # not both HDNs, so must string-compare equal to match self.assertFalse(user_domain_match("192.168.1.1", ".168.1.1")) self.assertFalse(user_domain_match("192.168.1.1", ".")) # empty string is a special case self.assertFalse(user_domain_match("192.168.1.1", "")) def test_wrong_domain(self): # Cookies whose effective request-host name does not domain-match the # domain are rejected. 
# XXX far from complete c = CookieJar() interact_2965(c, "http://www.nasty.com/", 'foo=bar; domain=friendly.org; Version="1"') self.assertEqual(len(c), 0) def test_strict_domain(self): # Cookies whose domain is a country-code tld like .co.uk should # not be set if CookiePolicy.strict_domain is true. cp = DefaultCookiePolicy(strict_domain=True) cj = CookieJar(policy=cp) interact_netscape(cj, "http://example.co.uk/", 'no=problemo') interact_netscape(cj, "http://example.co.uk/", 'okey=dokey; Domain=.example.co.uk') self.assertEqual(len(cj), 2) for pseudo_tld in [".co.uk", ".org.za", ".tx.us", ".name.us"]: interact_netscape(cj, "http://example.%s/" % pseudo_tld, 'spam=eggs; Domain=.co.uk') self.assertEqual(len(cj), 2) def test_two_component_domain_ns(self): # Netscape: .www.bar.com, www.bar.com, .bar.com, bar.com, no domain # should all get accepted, as should .acme.com, acme.com and no domain # for 2-component domains like acme.com. c = CookieJar() # two-component V0 domain is OK interact_netscape(c, "http://foo.net/", 'ns=bar') self.assertEqual(len(c), 1) self.assertEqual(c._cookies["foo.net"]["/"]["ns"].value, "bar") self.assertEqual(interact_netscape(c, "http://foo.net/"), "ns=bar") # *will* be returned to any other domain (unlike RFC 2965)... self.assertEqual(interact_netscape(c, "http://www.foo.net/"), "ns=bar") # ...unless requested otherwise pol = DefaultCookiePolicy( strict_ns_domain=DefaultCookiePolicy.DomainStrictNonDomain) c.set_policy(pol) self.assertEqual(interact_netscape(c, "http://www.foo.net/"), "") # unlike RFC 2965, even explicit two-component domain is OK, # because .foo.net matches foo.net interact_netscape(c, "http://foo.net/foo/", 'spam1=eggs; domain=foo.net') # even if starts with a dot -- in NS rules, .foo.net matches foo.net! 
interact_netscape(c, "http://foo.net/foo/bar/", 'spam2=eggs; domain=.foo.net') self.assertEqual(len(c), 3) self.assertEqual(c._cookies[".foo.net"]["/foo"]["spam1"].value, "eggs") self.assertEqual(c._cookies[".foo.net"]["/foo/bar"]["spam2"].value, "eggs") self.assertEqual(interact_netscape(c, "http://foo.net/foo/bar/"), "spam2=eggs; spam1=eggs; ns=bar") # top-level domain is too general interact_netscape(c, "http://foo.net/", 'nini="ni"; domain=.net') self.assertEqual(len(c), 3) ## # Netscape protocol doesn't allow non-special top level domains (such ## # as co.uk) in the domain attribute unless there are at least three ## # dots in it. # Oh yes it does! Real implementations don't check this, and real # cookies (of course) rely on that behaviour. interact_netscape(c, "http://foo.co.uk", 'nasty=trick; domain=.co.uk') ## self.assertEqual(len(c), 2) self.assertEqual(len(c), 4) def test_two_component_domain_rfc2965(self): pol = DefaultCookiePolicy(rfc2965=True) c = CookieJar(pol) # two-component V1 domain is OK interact_2965(c, "http://foo.net/", 'foo=bar; Version="1"') self.assertEqual(len(c), 1) self.assertEqual(c._cookies["foo.net"]["/"]["foo"].value, "bar") self.assertEqual(interact_2965(c, "http://foo.net/"), "$Version=1; foo=bar") # won't be returned to any other domain (because domain was implied) self.assertEqual(interact_2965(c, "http://www.foo.net/"), "") # unless domain is given explicitly, because then it must be # rewritten to start with a dot: foo.net --> .foo.net, which does # not domain-match foo.net interact_2965(c, "http://foo.net/foo", 'spam=eggs; domain=foo.net; path=/foo; Version="1"') self.assertEqual(len(c), 1) self.assertEqual(interact_2965(c, "http://foo.net/foo"), "$Version=1; foo=bar") # explicit foo.net from three-component domain www.foo.net *does* get # set, because .foo.net domain-matches .foo.net interact_2965(c, "http://www.foo.net/foo/", 'spam=eggs; domain=foo.net; Version="1"') 
self.assertEqual(c._cookies[".foo.net"]["/foo/"]["spam"].value, "eggs") self.assertEqual(len(c), 2) self.assertEqual(interact_2965(c, "http://foo.net/foo/"), "$Version=1; foo=bar") self.assertEqual(interact_2965(c, "http://www.foo.net/foo/"), '$Version=1; spam=eggs; $Domain="foo.net"') # top-level domain is too general interact_2965(c, "http://foo.net/", 'ni="ni"; domain=".net"; Version="1"') self.assertEqual(len(c), 2) # RFC 2965 doesn't require blocking this interact_2965(c, "http://foo.co.uk/", 'nasty=trick; domain=.co.uk; Version="1"') self.assertEqual(len(c), 3) def test_domain_allow(self): c = CookieJar(policy=DefaultCookiePolicy( blocked_domains=["acme.com"], allowed_domains=["www.acme.com"])) req = urllib.request.Request("http://acme.com/") headers = ["Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/"] res = FakeResponse(headers, "http://acme.com/") c.extract_cookies(res, req) self.assertEqual(len(c), 0) req = urllib.request.Request("http://www.acme.com/") res = FakeResponse(headers, "http://www.acme.com/") c.extract_cookies(res, req) self.assertEqual(len(c), 1) req = urllib.request.Request("http://www.coyote.com/") res = FakeResponse(headers, "http://www.coyote.com/") c.extract_cookies(res, req) self.assertEqual(len(c), 1) # set a cookie with non-allowed domain... req = urllib.request.Request("http://www.coyote.com/") res = FakeResponse(headers, "http://www.coyote.com/") cookies = c.make_cookies(res, req) c.set_cookie(cookies[0]) self.assertEqual(len(c), 2) # ... 
and check is doesn't get returned c.add_cookie_header(req) self.assertFalse(req.has_header("Cookie")) def test_domain_block(self): pol = DefaultCookiePolicy( rfc2965=True, blocked_domains=[".acme.com"]) c = CookieJar(policy=pol) headers = ["Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/"] req = urllib.request.Request("http://www.acme.com/") res = FakeResponse(headers, "http://www.acme.com/") c.extract_cookies(res, req) self.assertEqual(len(c), 0) p = pol.set_blocked_domains(["acme.com"]) c.extract_cookies(res, req) self.assertEqual(len(c), 1) c.clear() req = urllib.request.Request("http://www.roadrunner.net/") res = FakeResponse(headers, "http://www.roadrunner.net/") c.extract_cookies(res, req) self.assertEqual(len(c), 1) req = urllib.request.Request("http://www.roadrunner.net/") c.add_cookie_header(req) self.assertTrue(req.has_header("Cookie")) self.assertTrue(req.has_header("Cookie2")) c.clear() pol.set_blocked_domains([".acme.com"]) c.extract_cookies(res, req) self.assertEqual(len(c), 1) # set a cookie with blocked domain... req = urllib.request.Request("http://www.acme.com/") res = FakeResponse(headers, "http://www.acme.com/") cookies = c.make_cookies(res, req) c.set_cookie(cookies[0]) self.assertEqual(len(c), 2) # ... 
and check is doesn't get returned c.add_cookie_header(req) self.assertFalse(req.has_header("Cookie")) def test_secure(self): for ns in True, False: for whitespace in " ", "": c = CookieJar() if ns: pol = DefaultCookiePolicy(rfc2965=False) int = interact_netscape vs = "" else: pol = DefaultCookiePolicy(rfc2965=True) int = interact_2965 vs = "; Version=1" c.set_policy(pol) url = "http://www.acme.com/" int(c, url, "foo1=bar%s%s" % (vs, whitespace)) int(c, url, "foo2=bar%s; secure%s" % (vs, whitespace)) self.assertFalse( c._cookies["www.acme.com"]["/"]["foo1"].secure, "non-secure cookie registered secure") self.assertTrue( c._cookies["www.acme.com"]["/"]["foo2"].secure, "secure cookie registered non-secure") def test_quote_cookie_value(self): c = CookieJar(policy=DefaultCookiePolicy(rfc2965=True)) interact_2965(c, "http://www.acme.com/", r'foo=\b"a"r; Version=1') h = interact_2965(c, "http://www.acme.com/") self.assertEqual(h, r'$Version=1; foo=\\b\"a\"r') def test_missing_final_slash(self): # Missing slash from request URL's abs_path should be assumed present. 
url = "http://www.acme.com" c = CookieJar(DefaultCookiePolicy(rfc2965=True)) interact_2965(c, url, "foo=bar; Version=1") req = urllib.request.Request(url) self.assertEqual(len(c), 1) c.add_cookie_header(req) self.assertTrue(req.has_header("Cookie")) def test_domain_mirror(self): pol = DefaultCookiePolicy(rfc2965=True) c = CookieJar(pol) url = "http://foo.bar.com/" interact_2965(c, url, "spam=eggs; Version=1") h = interact_2965(c, url) self.assertNotIn("Domain", h, "absent domain returned with domain present") c = CookieJar(pol) url = "http://foo.bar.com/" interact_2965(c, url, 'spam=eggs; Version=1; Domain=.bar.com') h = interact_2965(c, url) self.assertIn('$Domain=".bar.com"', h, "domain not returned") c = CookieJar(pol) url = "http://foo.bar.com/" # note missing initial dot in Domain interact_2965(c, url, 'spam=eggs; Version=1; Domain=bar.com') h = interact_2965(c, url) self.assertIn('$Domain="bar.com"', h, "domain not returned") def test_path_mirror(self): pol = DefaultCookiePolicy(rfc2965=True) c = CookieJar(pol) url = "http://foo.bar.com/" interact_2965(c, url, "spam=eggs; Version=1") h = interact_2965(c, url) self.assertNotIn("Path", h, "absent path returned with path present") c = CookieJar(pol) url = "http://foo.bar.com/" interact_2965(c, url, 'spam=eggs; Version=1; Path=/') h = interact_2965(c, url) self.assertIn('$Path="/"', h, "path not returned") def test_port_mirror(self): pol = DefaultCookiePolicy(rfc2965=True) c = CookieJar(pol) url = "http://foo.bar.com/" interact_2965(c, url, "spam=eggs; Version=1") h = interact_2965(c, url) self.assertNotIn("Port", h, "absent port returned with port present") c = CookieJar(pol) url = "http://foo.bar.com/" interact_2965(c, url, "spam=eggs; Version=1; Port") h = interact_2965(c, url) self.assertRegex(h, "\$Port([^=]|$)", "port with no value not returned with no value") c = CookieJar(pol) url = "http://foo.bar.com/" interact_2965(c, url, 'spam=eggs; Version=1; Port="80"') h = interact_2965(c, url) 
self.assertIn('$Port="80"', h, "port with single value not returned with single value") c = CookieJar(pol) url = "http://foo.bar.com/" interact_2965(c, url, 'spam=eggs; Version=1; Port="80,8080"') h = interact_2965(c, url) self.assertIn('$Port="80,8080"', h, "port with multiple values not returned with multiple " "values") def test_no_return_comment(self): c = CookieJar(DefaultCookiePolicy(rfc2965=True)) url = "http://foo.bar.com/" interact_2965(c, url, 'spam=eggs; Version=1; ' 'Comment="does anybody read these?"; ' 'CommentURL="http://foo.bar.net/comment.html"') h = interact_2965(c, url) self.assertNotIn("Comment", h, "Comment or CommentURL cookie-attributes returned to server") def test_Cookie_iterator(self): cs = CookieJar(DefaultCookiePolicy(rfc2965=True)) # add some random cookies interact_2965(cs, "http://blah.spam.org/", 'foo=eggs; Version=1; ' 'Comment="does anybody read these?"; ' 'CommentURL="http://foo.bar.net/comment.html"') interact_netscape(cs, "http://www.acme.com/blah/", "spam=bar; secure") interact_2965(cs, "http://www.acme.com/blah/", "foo=bar; secure; Version=1") interact_2965(cs, "http://www.acme.com/blah/", "foo=bar; path=/; Version=1") interact_2965(cs, "http://www.sol.no", r'bang=wallop; version=1; domain=".sol.no"; ' r'port="90,100, 80,8080"; ' r'max-age=100; Comment = "Just kidding! 
(\"|\\\\) "') versions = [1, 1, 1, 0, 1] names = ["bang", "foo", "foo", "spam", "foo"] domains = [".sol.no", "blah.spam.org", "www.acme.com", "www.acme.com", "www.acme.com"] paths = ["/", "/", "/", "/blah", "/blah/"] for i in range(4): i = 0 for c in cs: self.assertIsInstance(c, Cookie) self.assertEqual(c.version, versions[i]) self.assertEqual(c.name, names[i]) self.assertEqual(c.domain, domains[i]) self.assertEqual(c.path, paths[i]) i = i + 1 def test_parse_ns_headers(self): # missing domain value (invalid cookie) self.assertEqual( parse_ns_headers(["foo=bar; path=/; domain"]), [[("foo", "bar"), ("path", "/"), ("domain", None), ("version", "0")]] ) # invalid expires value self.assertEqual( parse_ns_headers(["foo=bar; expires=Foo Bar 12 33:22:11 2000"]), [[("foo", "bar"), ("expires", None), ("version", "0")]] ) # missing cookie value (valid cookie) self.assertEqual( parse_ns_headers(["foo"]), [[("foo", None), ("version", "0")]] ) # missing cookie values for parsed attributes self.assertEqual( parse_ns_headers(['foo=bar; expires']), [[('foo', 'bar'), ('expires', None), ('version', '0')]]) self.assertEqual( parse_ns_headers(['foo=bar; version']), [[('foo', 'bar'), ('version', None)]]) # shouldn't add version if header is empty self.assertEqual(parse_ns_headers([""]), []) def test_bad_cookie_header(self): def cookiejar_from_cookie_headers(headers): c = CookieJar() req = urllib.request.Request("http://www.example.com/") r = FakeResponse(headers, "http://www.example.com/") c.extract_cookies(r, req) return c future = time2netscape(time.time()+3600) # none of these bad headers should cause an exception to be raised for headers in [ ["Set-Cookie: "], # actually, nothing wrong with this ["Set-Cookie2: "], # ditto # missing domain value ["Set-Cookie2: a=foo; path=/; Version=1; domain"], # bad max-age ["Set-Cookie: b=foo; max-age=oops"], # bad version ["Set-Cookie: b=foo; version=spam"], ["Set-Cookie:; Expires=%s" % future], ]: c = cookiejar_from_cookie_headers(headers) # 
these bad cookies shouldn't be set self.assertEqual(len(c), 0) # cookie with invalid expires is treated as session cookie headers = ["Set-Cookie: c=foo; expires=Foo Bar 12 33:22:11 2000"] c = cookiejar_from_cookie_headers(headers) cookie = c._cookies["www.example.com"]["/"]["c"] self.assertIsNone(cookie.expires) class LWPCookieTests(unittest.TestCase): # Tests taken from libwww-perl, with a few modifications and additions. def test_netscape_example_1(self): #------------------------------------------------------------------- # First we check that it works for the original example at # http://www.netscape.com/newsref/std/cookie_spec.html # Client requests a document, and receives in the response: # # Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/; expires=Wednesday, 09-Nov-99 23:12:40 GMT # # When client requests a URL in path "/" on this server, it sends: # # Cookie: CUSTOMER=WILE_E_COYOTE # # Client requests a document, and receives in the response: # # Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/ # # When client requests a URL in path "/" on this server, it sends: # # Cookie: CUSTOMER=WILE_E_COYOTE; PART_NUMBER=ROCKET_LAUNCHER_0001 # # Client receives: # # Set-Cookie: SHIPPING=FEDEX; path=/fo # # When client requests a URL in path "/" on this server, it sends: # # Cookie: CUSTOMER=WILE_E_COYOTE; PART_NUMBER=ROCKET_LAUNCHER_0001 # # When client requests a URL in path "/foo" on this server, it sends: # # Cookie: CUSTOMER=WILE_E_COYOTE; PART_NUMBER=ROCKET_LAUNCHER_0001; SHIPPING=FEDEX # # The last Cookie is buggy, because both specifications say that the # most specific cookie must be sent first. SHIPPING=FEDEX is the # most specific and should thus be first. 
year_plus_one = time.localtime()[0] + 1 headers = [] c = CookieJar(DefaultCookiePolicy(rfc2965 = True)) #req = urllib.request.Request("http://1.1.1.1/", # headers={"Host": "www.acme.com:80"}) req = urllib.request.Request("http://www.acme.com:80/", headers={"Host": "www.acme.com:80"}) headers.append( "Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/ ; " "expires=Wednesday, 09-Nov-%d 23:12:40 GMT" % year_plus_one) res = FakeResponse(headers, "http://www.acme.com/") c.extract_cookies(res, req) req = urllib.request.Request("http://www.acme.com/") c.add_cookie_header(req) self.assertEqual(req.get_header("Cookie"), "CUSTOMER=WILE_E_COYOTE") self.assertEqual(req.get_header("Cookie2"), '$Version="1"') headers.append("Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/") res = FakeResponse(headers, "http://www.acme.com/") c.extract_cookies(res, req) req = urllib.request.Request("http://www.acme.com/foo/bar") c.add_cookie_header(req) h = req.get_header("Cookie") self.assertIn("PART_NUMBER=ROCKET_LAUNCHER_0001", h) self.assertIn("CUSTOMER=WILE_E_COYOTE", h) headers.append('Set-Cookie: SHIPPING=FEDEX; path=/foo') res = FakeResponse(headers, "http://www.acme.com") c.extract_cookies(res, req) req = urllib.request.Request("http://www.acme.com/") c.add_cookie_header(req) h = req.get_header("Cookie") self.assertIn("PART_NUMBER=ROCKET_LAUNCHER_0001", h) self.assertIn("CUSTOMER=WILE_E_COYOTE", h) self.assertNotIn("SHIPPING=FEDEX", h) req = urllib.request.Request("http://www.acme.com/foo/") c.add_cookie_header(req) h = req.get_header("Cookie") self.assertIn("PART_NUMBER=ROCKET_LAUNCHER_0001", h) self.assertIn("CUSTOMER=WILE_E_COYOTE", h) self.assertTrue(h.startswith("SHIPPING=FEDEX;")) def test_netscape_example_2(self): # Second Example transaction sequence: # # Assume all mappings from above have been cleared. 
# # Client receives: # # Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/ # # When client requests a URL in path "/" on this server, it sends: # # Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001 # # Client receives: # # Set-Cookie: PART_NUMBER=RIDING_ROCKET_0023; path=/ammo # # When client requests a URL in path "/ammo" on this server, it sends: # # Cookie: PART_NUMBER=RIDING_ROCKET_0023; PART_NUMBER=ROCKET_LAUNCHER_0001 # # NOTE: There are two name/value pairs named "PART_NUMBER" due to # the inheritance of the "/" mapping in addition to the "/ammo" mapping. c = CookieJar() headers = [] req = urllib.request.Request("http://www.acme.com/") headers.append("Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/") res = FakeResponse(headers, "http://www.acme.com/") c.extract_cookies(res, req) req = urllib.request.Request("http://www.acme.com/") c.add_cookie_header(req) self.assertEqual(req.get_header("Cookie"), "PART_NUMBER=ROCKET_LAUNCHER_0001") headers.append( "Set-Cookie: PART_NUMBER=RIDING_ROCKET_0023; path=/ammo") res = FakeResponse(headers, "http://www.acme.com/") c.extract_cookies(res, req) req = urllib.request.Request("http://www.acme.com/ammo") c.add_cookie_header(req) self.assertRegex(req.get_header("Cookie"), r"PART_NUMBER=RIDING_ROCKET_0023;\s*" "PART_NUMBER=ROCKET_LAUNCHER_0001") def test_ietf_example_1(self): #------------------------------------------------------------------- # Then we test with the examples from draft-ietf-http-state-man-mec-03.txt # # 5. EXAMPLES c = CookieJar(DefaultCookiePolicy(rfc2965=True)) # # 5.1 Example 1 # # Most detail of request and response headers has been omitted. Assume # the user agent has no stored cookies. # # 1. User Agent -> Server # # POST /acme/login HTTP/1.1 # [form data] # # User identifies self via a form. # # 2. Server -> User Agent # # HTTP/1.1 200 OK # Set-Cookie2: Customer="WILE_E_COYOTE"; Version="1"; Path="/acme" # # Cookie reflects user's identity. 
cookie = interact_2965( c, 'http://www.acme.com/acme/login', 'Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"') self.assertFalse(cookie) # # 3. User Agent -> Server # # POST /acme/pickitem HTTP/1.1 # Cookie: $Version="1"; Customer="WILE_E_COYOTE"; $Path="/acme" # [form data] # # User selects an item for ``shopping basket.'' # # 4. Server -> User Agent # # HTTP/1.1 200 OK # Set-Cookie2: Part_Number="Rocket_Launcher_0001"; Version="1"; # Path="/acme" # # Shopping basket contains an item. cookie = interact_2965(c, 'http://www.acme.com/acme/pickitem', 'Part_Number="Rocket_Launcher_0001"; ' 'Version="1"; Path="/acme"'); self.assertRegex(cookie, r'^\$Version="?1"?; Customer="?WILE_E_COYOTE"?; \$Path="/acme"$') # # 5. User Agent -> Server # # POST /acme/shipping HTTP/1.1 # Cookie: $Version="1"; # Customer="WILE_E_COYOTE"; $Path="/acme"; # Part_Number="Rocket_Launcher_0001"; $Path="/acme" # [form data] # # User selects shipping method from form. # # 6. Server -> User Agent # # HTTP/1.1 200 OK # Set-Cookie2: Shipping="FedEx"; Version="1"; Path="/acme" # # New cookie reflects shipping method. cookie = interact_2965(c, "http://www.acme.com/acme/shipping", 'Shipping="FedEx"; Version="1"; Path="/acme"') self.assertRegex(cookie, r'^\$Version="?1"?;') self.assertRegex(cookie, r'Part_Number="?Rocket_Launcher_0001"?;' '\s*\$Path="\/acme"') self.assertRegex(cookie, r'Customer="?WILE_E_COYOTE"?;' '\s*\$Path="\/acme"') # # 7. User Agent -> Server # # POST /acme/process HTTP/1.1 # Cookie: $Version="1"; # Customer="WILE_E_COYOTE"; $Path="/acme"; # Part_Number="Rocket_Launcher_0001"; $Path="/acme"; # Shipping="FedEx"; $Path="/acme" # [form data] # # User chooses to process order. # # 8. Server -> User Agent # # HTTP/1.1 200 OK # # Transaction is complete. 
cookie = interact_2965(c, "http://www.acme.com/acme/process") self.assertRegex(cookie, r'Shipping="?FedEx"?;\s*\$Path="\/acme"') self.assertIn("WILE_E_COYOTE", cookie) # # The user agent makes a series of requests on the origin server, after # each of which it receives a new cookie. All the cookies have the same # Path attribute and (default) domain. Because the request URLs all have # /acme as a prefix, and that matches the Path attribute, each request # contains all the cookies received so far. def test_ietf_example_2(self): # 5.2 Example 2 # # This example illustrates the effect of the Path attribute. All detail # of request and response headers has been omitted. Assume the user agent # has no stored cookies. c = CookieJar(DefaultCookiePolicy(rfc2965=True)) # Imagine the user agent has received, in response to earlier requests, # the response headers # # Set-Cookie2: Part_Number="Rocket_Launcher_0001"; Version="1"; # Path="/acme" # # and # # Set-Cookie2: Part_Number="Riding_Rocket_0023"; Version="1"; # Path="/acme/ammo" interact_2965( c, "http://www.acme.com/acme/ammo/specific", 'Part_Number="Rocket_Launcher_0001"; Version="1"; Path="/acme"', 'Part_Number="Riding_Rocket_0023"; Version="1"; Path="/acme/ammo"') # A subsequent request by the user agent to the (same) server for URLs of # the form /acme/ammo/... would include the following request header: # # Cookie: $Version="1"; # Part_Number="Riding_Rocket_0023"; $Path="/acme/ammo"; # Part_Number="Rocket_Launcher_0001"; $Path="/acme" # # Note that the NAME=VALUE pair for the cookie with the more specific Path # attribute, /acme/ammo, comes before the one with the less specific Path # attribute, /acme. Further note that the same cookie name appears more # than once. 
cookie = interact_2965(c, "http://www.acme.com/acme/ammo/...") self.assertRegex(cookie, r"Riding_Rocket_0023.*Rocket_Launcher_0001") # A subsequent request by the user agent to the (same) server for a URL of # the form /acme/parts/ would include the following request header: # # Cookie: $Version="1"; Part_Number="Rocket_Launcher_0001"; $Path="/acme" # # Here, the second cookie's Path attribute /acme/ammo is not a prefix of # the request URL, /acme/parts/, so the cookie does not get forwarded to # the server. cookie = interact_2965(c, "http://www.acme.com/acme/parts/") self.assertIn("Rocket_Launcher_0001", cookie) self.assertNotIn("Riding_Rocket_0023", cookie) def test_rejection(self): # Test rejection of Set-Cookie2 responses based on domain, path, port. pol = DefaultCookiePolicy(rfc2965=True) c = LWPCookieJar(policy=pol) max_age = "max-age=3600" # illegal domain (no embedded dots) cookie = interact_2965(c, "http://www.acme.com", 'foo=bar; domain=".com"; version=1') self.assertFalse(c) # legal domain cookie = interact_2965(c, "http://www.acme.com", 'ping=pong; domain="acme.com"; version=1') self.assertEqual(len(c), 1) # illegal domain (host prefix "www.a" contains a dot) cookie = interact_2965(c, "http://www.a.acme.com", 'whiz=bang; domain="acme.com"; version=1') self.assertEqual(len(c), 1) # legal domain cookie = interact_2965(c, "http://www.a.acme.com", 'wow=flutter; domain=".a.acme.com"; version=1') self.assertEqual(len(c), 2) # can't partially match an IP-address cookie = interact_2965(c, "http://125.125.125.125", 'zzzz=ping; domain="125.125.125"; version=1') self.assertEqual(len(c), 2) # illegal path (must be prefix of request path) cookie = interact_2965(c, "http://www.sol.no", 'blah=rhubarb; domain=".sol.no"; path="/foo"; ' 'version=1') self.assertEqual(len(c), 2) # legal path cookie = interact_2965(c, "http://www.sol.no/foo/bar", 'bing=bong; domain=".sol.no"; path="/foo"; ' 'version=1') self.assertEqual(len(c), 3) # illegal port (request-port not in list) 
cookie = interact_2965(c, "http://www.sol.no", 'whiz=ffft; domain=".sol.no"; port="90,100"; ' 'version=1') self.assertEqual(len(c), 3) # legal port cookie = interact_2965( c, "http://www.sol.no", r'bang=wallop; version=1; domain=".sol.no"; ' r'port="90,100, 80,8080"; ' r'max-age=100; Comment = "Just kidding! (\"|\\\\) "') self.assertEqual(len(c), 4) # port attribute without any value (current port) cookie = interact_2965(c, "http://www.sol.no", 'foo9=bar; version=1; domain=".sol.no"; port; ' 'max-age=100;') self.assertEqual(len(c), 5) # encoded path # LWP has this test, but unescaping allowed path characters seems # like a bad idea, so I think this should fail: ## cookie = interact_2965(c, "http://www.sol.no/foo/", ## r'foo8=bar; version=1; path="/%66oo"') # but this is OK, because '<' is not an allowed HTTP URL path # character: cookie = interact_2965(c, "http://www.sol.no/<oo/", r'foo8=bar; version=1; path="/%3coo"') self.assertEqual(len(c), 6) # save and restore filename = test.support.TESTFN try: c.save(filename, ignore_discard=True) old = repr(c) c = LWPCookieJar(policy=pol) c.load(filename, ignore_discard=True) finally: try: os.unlink(filename) except OSError: pass self.assertEqual(old, repr(c)) def test_url_encoding(self): # Try some URL encodings of the PATHs. 
# (the behaviour here has changed from libwww-perl) c = CookieJar(DefaultCookiePolicy(rfc2965=True)) interact_2965(c, "http://www.acme.com/foo%2f%25/" "%3c%3c%0Anew%C3%A5/%C3%A5", "foo = bar; version = 1") cookie = interact_2965( c, "http://www.acme.com/foo%2f%25/<<%0anew\345/\346\370\345", 'bar=baz; path="/foo/"; version=1'); version_re = re.compile(r'^\$version=\"?1\"?', re.I) self.assertIn("foo=bar", cookie) self.assertRegex(cookie, version_re) cookie = interact_2965( c, "http://www.acme.com/foo/%25/<<%0anew\345/\346\370\345") self.assertFalse(cookie) # unicode URL doesn't raise exception cookie = interact_2965(c, "http://www.acme.com/\xfc") def test_mozilla(self): # Save / load Mozilla/Netscape cookie file format. year_plus_one = time.localtime()[0] + 1 filename = test.support.TESTFN c = MozillaCookieJar(filename, policy=DefaultCookiePolicy(rfc2965=True)) interact_2965(c, "http://www.acme.com/", "foo1=bar; max-age=100; Version=1") interact_2965(c, "http://www.acme.com/", 'foo2=bar; port="80"; max-age=100; Discard; Version=1') interact_2965(c, "http://www.acme.com/", "foo3=bar; secure; Version=1") expires = "expires=09-Nov-%d 23:12:40 GMT" % (year_plus_one,) interact_netscape(c, "http://www.foo.com/", "fooa=bar; %s" % expires) interact_netscape(c, "http://www.foo.com/", "foob=bar; Domain=.foo.com; %s" % expires) interact_netscape(c, "http://www.foo.com/", "fooc=bar; Domain=www.foo.com; %s" % expires) def save_and_restore(cj, ignore_discard): try: cj.save(ignore_discard=ignore_discard) new_c = MozillaCookieJar(filename, DefaultCookiePolicy(rfc2965=True)) new_c.load(ignore_discard=ignore_discard) finally: try: os.unlink(filename) except OSError: pass return new_c new_c = save_and_restore(c, True) self.assertEqual(len(new_c), 6) # none discarded self.assertIn("name='foo1', value='bar'", repr(new_c)) new_c = save_and_restore(c, False) self.assertEqual(len(new_c), 4) # 2 of them discarded on save self.assertIn("name='foo1', value='bar'", repr(new_c)) def 
test_netscape_misc(self): # Some additional Netscape cookies tests. c = CookieJar() headers = [] req = urllib.request.Request("http://foo.bar.acme.com/foo") # Netscape allows a host part that contains dots headers.append("Set-Cookie: Customer=WILE_E_COYOTE; domain=.acme.com") res = FakeResponse(headers, "http://www.acme.com/foo") c.extract_cookies(res, req) # and that the domain is the same as the host without adding a leading # dot to the domain. Should not quote even if strange chars are used # in the cookie value. headers.append("Set-Cookie: PART_NUMBER=3,4; domain=foo.bar.acme.com") res = FakeResponse(headers, "http://www.acme.com/foo") c.extract_cookies(res, req) req = urllib.request.Request("http://foo.bar.acme.com/foo") c.add_cookie_header(req) self.assertIn("PART_NUMBER=3,4", req.get_header("Cookie")) self.assertIn("Customer=WILE_E_COYOTE",req.get_header("Cookie")) def test_intranet_domains_2965(self): # Test handling of local intranet hostnames without a dot. c = CookieJar(DefaultCookiePolicy(rfc2965=True)) interact_2965(c, "http://example/", "foo1=bar; PORT; Discard; Version=1;") cookie = interact_2965(c, "http://example/", 'foo2=bar; domain=".local"; Version=1') self.assertIn("foo1=bar", cookie) interact_2965(c, "http://example/", 'foo3=bar; Version=1') cookie = interact_2965(c, "http://example/") self.assertIn("foo2=bar", cookie) self.assertEqual(len(c), 3) def test_intranet_domains_ns(self): c = CookieJar(DefaultCookiePolicy(rfc2965 = False)) interact_netscape(c, "http://example/", "foo1=bar") cookie = interact_netscape(c, "http://example/", 'foo2=bar; domain=.local') self.assertEqual(len(c), 2) self.assertIn("foo1=bar", cookie) cookie = interact_netscape(c, "http://example/") self.assertIn("foo2=bar", cookie) self.assertEqual(len(c), 2) def test_empty_path(self): # Test for empty path # Broken web-server ORION/1.3.38 returns to the client response like # # Set-Cookie: JSESSIONID=ABCDERANDOM123; Path= # # ie. with Path set to nothing. 
# In this case, extract_cookies() must set cookie to / (root) c = CookieJar(DefaultCookiePolicy(rfc2965 = True)) headers = [] req = urllib.request.Request("http://www.ants.com/") headers.append("Set-Cookie: JSESSIONID=ABCDERANDOM123; Path=") res = FakeResponse(headers, "http://www.ants.com/") c.extract_cookies(res, req) req = urllib.request.Request("http://www.ants.com/") c.add_cookie_header(req) self.assertEqual(req.get_header("Cookie"), "JSESSIONID=ABCDERANDOM123") self.assertEqual(req.get_header("Cookie2"), '$Version="1"') # missing path in the request URI req = urllib.request.Request("http://www.ants.com:8080") c.add_cookie_header(req) self.assertEqual(req.get_header("Cookie"), "JSESSIONID=ABCDERANDOM123") self.assertEqual(req.get_header("Cookie2"), '$Version="1"') def test_session_cookies(self): year_plus_one = time.localtime()[0] + 1 # Check session cookies are deleted properly by # CookieJar.clear_session_cookies method req = urllib.request.Request('http://www.perlmeister.com/scripts') headers = [] headers.append("Set-Cookie: s1=session;Path=/scripts") headers.append("Set-Cookie: p1=perm; Domain=.perlmeister.com;" "Path=/;expires=Fri, 02-Feb-%d 23:24:20 GMT" % year_plus_one) headers.append("Set-Cookie: p2=perm;Path=/;expires=Fri, " "02-Feb-%d 23:24:20 GMT" % year_plus_one) headers.append("Set-Cookie: s2=session;Path=/scripts;" "Domain=.perlmeister.com") headers.append('Set-Cookie2: s3=session;Version=1;Discard;Path="/"') res = FakeResponse(headers, 'http://www.perlmeister.com/scripts') c = CookieJar() c.extract_cookies(res, req) # How many session/permanent cookies do we have? counter = {"session_after": 0, "perm_after": 0, "session_before": 0, "perm_before": 0} for cookie in c: key = "%s_before" % cookie.value counter[key] = counter[key] + 1 c.clear_session_cookies() # How many now? 
        # Re-count the cookies remaining after clear_session_cookies(),
        # bucketed by cookie value ("session" or "perm") as above.
        for cookie in c:
            key = "%s_after" % cookie.value
            counter[key] = counter[key] + 1

        # no permanent cookie may have been lost accidentally
        self.assertEqual(counter["perm_after"], counter["perm_before"])
        # every session cookie must have been cleared
        self.assertEqual(counter["session_after"], 0)
        # sanity check: there were session cookies to clear in the first place
        self.assertNotEqual(counter["session_before"], 0)


def test_main(verbose=None):
    """Run all cookie test cases in this module under the regrtest driver.

    ``verbose`` is accepted for regrtest compatibility but is not used
    directly here; run_unittest picks up verbosity from the environment.
    """
    test.support.run_unittest(
        DateTimeTests,
        HeaderTests,
        CookieTests,
        FileCookieJarTests,
        LWPCookieTests,
        )

if __name__ == "__main__":
    test_main(verbose=True)
# codeparrot/github-code-clean  (dataset provenance marker; separates two
# unrelated source files concatenated in this dump -- not part of either file)
# Tests for the low-level rasterization primitives in ``skimage.draw``.
from numpy.testing import assert_array_equal, assert_equal, assert_raises
import numpy as np

from skimage._shared.testing import test_parallel
from skimage.draw import (set_color, line, line_aa, polygon,
                          polygon_perimeter, circle, circle_perimeter,
                          circle_perimeter_aa, ellipse, ellipse_perimeter,
                          _bezier_segment, bezier_curve)


def test_set_color():
    # NOTE(review): the line runs to column 30 on a 10-wide image, so this
    # presumably relies on set_color discarding out-of-bounds coordinates --
    # confirm against the set_color implementation.
    img = np.zeros((10, 10))

    rr, cc = line(0, 0, 0, 30)
    set_color(img, (rr, cc), 1)

    img_ = np.zeros((10, 10))
    img_[0, :] = 1

    assert_array_equal(img, img_)


def test_set_color_with_alpha():
    img = np.zeros((10, 10))

    rr, cc, alpha = line_aa(0, 0, 0, 30)
    set_color(img, (rr, cc), 1, alpha=alpha)

    # Wrong dimensionality color: an RGB triple on a 2-D (grayscale) image
    # must raise ValueError.
    assert_raises(ValueError, set_color, img, (rr, cc),
                  (255, 0, 0), alpha=alpha)

    img = np.zeros((10, 10, 3))

    rr, cc, alpha = line_aa(0, 0, 0, 30)
    set_color(img, (rr, cc), (1, 0, 0), alpha=alpha)


@test_parallel()
def test_line_horizontal():
    # A horizontal line fills exactly the first row.
    img = np.zeros((10, 10))

    rr, cc = line(0, 0, 0, 9)
    img[rr, cc] = 1

    img_ = np.zeros((10, 10))
    img_[0, :] = 1

    assert_array_equal(img, img_)


def test_line_vertical():
    # A vertical line fills exactly the first column.
    img = np.zeros((10, 10))

    rr, cc = line(0, 0, 9, 0)
    img[rr, cc] = 1

    img_ = np.zeros((10, 10))
    img_[:, 0] = 1

    assert_array_equal(img, img_)


def test_line_reverse():
    # Drawing right-to-left gives the same pixels as left-to-right.
    img = np.zeros((10, 10))

    rr, cc = line(0, 9, 0, 0)
    img[rr, cc] = 1

    img_ = np.zeros((10, 10))
    img_[0, :] = 1

    assert_array_equal(img, img_)


def test_line_diag():
    # A 45-degree diagonal is exactly the identity matrix.
    img = np.zeros((5, 5))

    rr, cc = line(0, 0, 4, 4)
    img[rr, cc] = 1

    img_ = np.eye(5)

    assert_array_equal(img, img_)


def test_line_aa_horizontal():
    # A horizontal anti-aliased line has no fractional coverage: it is
    # identical to the plain line.
    img = np.zeros((10, 10))

    rr, cc, val = line_aa(0, 0, 0, 9)
    set_color(img, (rr, cc), 1, alpha=val)

    img_ = np.zeros((10, 10))
    img_[0, :] = 1

    assert_array_equal(img, img_)


def test_line_aa_vertical():
    # Same as above for a vertical anti-aliased line.
    img = np.zeros((10, 10))

    rr, cc, val = line_aa(0, 0, 9, 0)
    img[rr, cc] = val

    img_ = np.zeros((10, 10))
    img_[:, 0] = 1

    assert_array_equal(img, img_)


def test_line_aa_diagonal():
    img = np.zeros((10, 10))

    rr, cc, val = line_aa(0, 0, 9, 6)
    img[rr, cc] = 1

    # Check that
each pixel belonging to line, # also belongs to line_aa r, c = line(0, 0, 9, 6) for x, y in zip(r, c): assert_equal(img[r, c], 1) def test_line_equal_aliasing_horizontally_vertically(): img0 = np.zeros((25, 25)) img1 = np.zeros((25, 25)) # Near-horizontal line rr, cc, val = line_aa(10, 2, 12, 20) img0[rr, cc] = val # Near-vertical (transpose of prior) rr, cc, val = line_aa(2, 10, 20, 12) img1[rr, cc] = val # Difference - should be zero assert_array_equal(img0, img1.T) def test_polygon_rectangle(): img = np.zeros((10, 10), 'uint8') rr, cc = polygon((1, 4, 4, 1, 1), (1, 1, 4, 4, 1)) img[rr, cc] = 1 img_ = np.zeros((10, 10)) img_[1:4, 1:4] = 1 assert_array_equal(img, img_) def test_polygon_rectangle_angular(): img = np.zeros((10, 10), 'uint8') poly = np.array(((0, 3), (4, 7), (7, 4), (3, 0), (0, 3))) rr, cc = polygon(poly[:, 0], poly[:, 1]) img[rr, cc] = 1 img_ = np.array( [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] ) assert_array_equal(img, img_) def test_polygon_parallelogram(): img = np.zeros((10, 10), 'uint8') poly = np.array(((1, 1), (5, 1), (7, 6), (3, 6), (1, 1))) rr, cc = polygon(poly[:, 0], poly[:, 1]) img[rr, cc] = 1 img_ = np.array( [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] ) assert_array_equal(img, img_) def test_polygon_exceed(): img = np.zeros((10, 10), 'uint8') poly = np.array(((1, -1), (100, -1), (100, 100), (1, 100), (1, 1))) rr, cc = polygon(poly[:, 0], poly[:, 1], img.shape) img[rr, cc] = 1 img_ = 
np.zeros((10, 10)) img_[1:, :] = 1 assert_array_equal(img, img_) def test_circle(): img = np.zeros((15, 15), 'uint8') rr, cc = circle(7, 7, 6) img[rr, cc] = 1 img_ = np.array( [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] ) assert_array_equal(img, img_) def test_circle_perimeter_bresenham(): img = np.zeros((15, 15), 'uint8') rr, cc = circle_perimeter(7, 7, 0, method='bresenham') img[rr, cc] = 1 assert(np.sum(img) == 1) assert(img[7][7] == 1) img = np.zeros((17, 15), 'uint8') rr, cc = circle_perimeter(7, 7, 7, method='bresenham') img[rr, cc] = 1 img_ = np.array( [[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0]] ) assert_array_equal(img, img_) def test_circle_perimeter_bresenham_shape(): img = np.zeros((15, 20), 'uint8') rr, cc = circle_perimeter(7, 10, 9, method='bresenham', shape=(15, 20)) img[rr, cc] = 1 shift = 5 img_ = np.zeros((15 + 2 * shift, 20), 'uint8') rr, cc = circle_perimeter(7 + shift, 10, 9, method='bresenham', shape=None) img_[rr, cc] = 1 assert_array_equal(img, img_[shift:-shift, :]) def test_circle_perimeter_andres(): img = np.zeros((15, 15), 'uint8') rr, cc = circle_perimeter(7, 7, 0, method='andres') img[rr, cc] = 1 assert(np.sum(img) == 1) assert(img[7][7] == 1) img = np.zeros((17, 15), 'uint8') rr, cc = circle_perimeter(7, 7, 7, method='andres') img[rr, cc] = 1 img_ = np.array( [[0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0], [0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0], [0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0], [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] ) assert_array_equal(img, img_) def test_circle_perimeter_aa(): img = np.zeros((15, 15), 'uint8') rr, cc, val = circle_perimeter_aa(7, 7, 0) img[rr, cc] = 1 assert(np.sum(img) == 1) assert(img[7][7] == 1) img = np.zeros((17, 17), 'uint8') rr, cc, val = circle_perimeter_aa(8, 8, 7) img[rr, cc] = val * 255 img_ = np.array( [[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [ 0, 0, 0, 0, 0, 82, 180, 236, 255, 236, 180, 82, 0, 0, 0, 0, 0], [ 0, 0, 0, 0, 189, 172, 
74, 18, 0, 18, 74, 172, 189, 0, 0, 0, 0], [ 0, 0, 0, 229, 25, 0, 0, 0, 0, 0, 0, 0, 25, 229, 0, 0, 0], [ 0, 0, 189, 25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 25, 189, 0, 0], [ 0, 82, 172, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 172, 82, 0], [ 0, 180, 74, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 74, 180, 0], [ 0, 236, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 236, 0], [ 0, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 0], [ 0, 236, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 236, 0], [ 0, 180, 74, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 74, 180, 0], [ 0, 82, 172, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 172, 82, 0], [ 0, 0, 189, 25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 25, 189, 0, 0], [ 0, 0, 0, 229, 25, 0, 0, 0, 0, 0, 0, 0, 25, 229, 0, 0, 0], [ 0, 0, 0, 0, 189, 172, 74, 18, 0, 18, 74, 172, 189, 0, 0, 0, 0], [ 0, 0, 0, 0, 0, 82, 180, 236, 255, 236, 180, 82, 0, 0, 0, 0, 0], [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] ) assert_array_equal(img, img_) def test_circle_perimeter_aa_shape(): img = np.zeros((15, 20), 'uint8') rr, cc, val = circle_perimeter_aa(7, 10, 9, shape=(15, 20)) img[rr, cc] = val * 255 shift = 5 img_ = np.zeros((15 + 2 * shift, 20), 'uint8') rr, cc, val = circle_perimeter_aa(7 + shift, 10, 9, shape=None) img_[rr, cc] = val * 255 assert_array_equal(img, img_[shift:-shift, :]) def test_ellipse_trivial(): img = np.zeros((2, 2), 'uint8') rr, cc = ellipse(0.5, 0.5, 0.5, 0.5) img[rr, cc] = 1 img_correct = np.array([ [0, 0], [0, 0] ]) assert_array_equal(img, img_correct) img = np.zeros((2, 2), 'uint8') rr, cc = ellipse(0.5, 0.5, 1.1, 1.1) img[rr, cc] = 1 img_correct = np.array([ [1, 1], [1, 1], ]) assert_array_equal(img, img_correct) img = np.zeros((3, 3), 'uint8') rr, cc = ellipse(1, 1, 0.9, 0.9) img[rr, cc] = 1 img_correct = np.array([ [0, 0, 0], [0, 1, 0], [0, 0, 0], ]) assert_array_equal(img, img_correct) img = np.zeros((3, 3), 'uint8') rr, cc = ellipse(1, 1, 1.1, 1.1) img[rr, cc] = 1 img_correct = np.array([ [0, 1, 0], [1, 1, 1], [0, 1, 0], ]) assert_array_equal(img, img_correct) img = 
np.zeros((3, 3), 'uint8') rr, cc = ellipse(1, 1, 1.5, 1.5) img[rr, cc] = 1 img_correct = np.array([ [1, 1, 1], [1, 1, 1], [1, 1, 1], ]) assert_array_equal(img, img_correct) def test_ellipse_generic(): img = np.zeros((4, 4), 'uint8') rr, cc = ellipse(1.5, 1.5, 1.1, 1.7) img[rr, cc] = 1 img_ = np.array([ [0, 0, 0, 0], [1, 1, 1, 1], [1, 1, 1, 1], [0, 0, 0, 0], ]) assert_array_equal(img, img_) img = np.zeros((5, 5), 'uint8') rr, cc = ellipse(2, 2, 1.7, 1.7) img[rr, cc] = 1 img_ = np.array([ [0, 0, 0, 0, 0], [0, 1, 1, 1, 0], [0, 1, 1, 1, 0], [0, 1, 1, 1, 0], [0, 0, 0, 0, 0], ]) assert_array_equal(img, img_) img = np.zeros((10, 10), 'uint8') rr, cc = ellipse(5, 5, 3, 4) img[rr, cc] = 1 img_ = np.array([ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ]) assert_array_equal(img, img_) img = np.zeros((10, 10), 'uint8') rr, cc = ellipse(4.5, 5, 3.5, 4) img[rr, cc] = 1 img_ = np.array([ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ]) assert_array_equal(img, img_) img = np.zeros((15, 15), 'uint8') rr, cc = ellipse(7, 7, 3, 7) img[rr, cc] = 1 img_ = np.array([ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0], [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ]) assert_array_equal(img, img_) def test_ellipse_with_shape(): img = np.zeros((15, 15), 'uint8') rr, cc = ellipse(7, 7, 3, 10, shape=img.shape) img[rr, cc] = 1 img_ = np.array( [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] ) assert_array_equal(img, img_) def test_ellipse_negative(): rr, cc = ellipse(-3, -3, 1.7, 1.7) rr_, cc_ = np.nonzero(np.array([ [0, 0, 0, 0, 0], [0, 1, 1, 1, 0], [0, 1, 1, 1, 0], [0, 1, 1, 1, 0], [0, 0, 0, 0, 0], ])) assert_array_equal(rr, rr_ - 5) assert_array_equal(cc, cc_ - 5) def test_ellipse_perimeter_dot_zeroangle(): # dot, angle == 0 img = np.zeros((30, 15), 'uint8') rr, cc = ellipse_perimeter(15, 7, 0, 0, 0) img[rr, cc] = 1 assert(np.sum(img) == 1) assert(img[15][7] == 1) def test_ellipse_perimeter_dot_nzeroangle(): # dot, angle != 0 img = np.zeros((30, 15), 'uint8') rr, cc = ellipse_perimeter(15, 7, 0, 0, 1) img[rr, cc] = 1 assert(np.sum(img) == 1) assert(img[15][7] == 1) def test_ellipse_perimeter_flat_zeroangle(): # flat ellipse img = np.zeros((20, 18), 'uint8') img_ = 
np.zeros((20, 18), 'uint8') rr, cc = ellipse_perimeter(6, 7, 0, 5, 0) img[rr, cc] = 1 rr, cc = line(6, 2, 6, 12) img_[rr, cc] = 1 assert_array_equal(img, img_) def test_ellipse_perimeter_zeroangle(): # angle == 0 img = np.zeros((30, 15), 'uint8') rr, cc = ellipse_perimeter(15, 7, 14, 6, 0) img[rr, cc] = 1 img_ = np.array( [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0]] ) assert_array_equal(img, img_) def test_ellipse_perimeter_nzeroangle(): # angle != 0 img = np.zeros((30, 25), 'uint8') rr, cc = ellipse_perimeter(15, 11, 12, 6, 1.1) img[rr, cc] = 1 img_ = np.array( [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] ) assert_array_equal(img, img_) def test_ellipse_perimeter_shape(): img = np.zeros((15, 20), 'uint8') rr, cc = ellipse_perimeter(7, 10, 9, 9, 0, shape=(15, 20)) img[rr, cc] = 1 shift = 5 img_ = np.zeros((15 + 2 * shift, 20), 'uint8') rr, cc = ellipse_perimeter(7 + shift, 10, 9, 9, 0, shape=None) img_[rr, cc] = 1 assert_array_equal(img, img_[shift:-shift, :]) def test_bezier_segment_straight(): image = np.zeros((200, 200), dtype=int) x0 = 50 y0 = 50 x1 = 150 y1 = 50 x2 = 150 y2 = 150 rr, cc = _bezier_segment(x0, y0, x1, y1, x2, y2, 0) image[rr, cc] = 1 image2 = np.zeros((200, 200), dtype=int) rr, cc = line(x0, y0, x2, y2) image2[rr, cc] = 1 assert_array_equal(image, image2) def test_bezier_segment_curved(): img = np.zeros((25, 25), 'uint8') x1, y1 = 20, 20 x2, y2 = 20, 2 x3, y3 = 2, 2 rr, cc = _bezier_segment(x1, y1, x2, y2, x3, y3, 1) img[rr, cc] = 1 img_ = np.array( [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] ) assert_equal(img[x1, y1], 1) assert_equal(img[x3, y3], 1) assert_array_equal(img, img_) def test_bezier_curve_straight(): image = np.zeros((200, 200), dtype=int) x0 = 50 y0 = 50 x1 = 150 y1 = 50 x2 = 150 y2 = 150 rr, cc = bezier_curve(x0, y0, x1, y1, x2, y2, 0) image [rr, cc] = 1 image2 = np.zeros((200, 200), dtype=int) rr, cc = line(x0, y0, x2, y2) image2 [rr, cc] = 1 assert_array_equal(image, image2) def test_bezier_curved_weight_eq_1(): img = np.zeros((23, 8), 'uint8') x1, y1 = (1, 1) x2, y2 = (11, 11) x3, y3 = (21, 1) rr, cc = bezier_curve(x1, y1, x2, y2, x3, y3, 1) img[rr, cc] = 1 assert_equal(img[x1, y1], 1) assert_equal(img[x3, y3], 1) img_ = np.array( [[0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 
0, 1, 0], [0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]] ) assert_equal(img, img_) def test_bezier_curved_weight_neq_1(): img = np.zeros((23, 10), 'uint8') x1, y1 = (1, 1) x2, y2 = (11, 11) x3, y3 = (21, 1) rr, cc = bezier_curve(x1, y1, x2, y2, x3, y3, 2) img[rr, cc] = 1 assert_equal(img[x1, y1], 1) assert_equal(img[x3, y3], 1) img_ = np.array( [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] ) assert_equal(img, img_) def test_bezier_curve_shape(): img = np.zeros((15, 20), 'uint8') x1, y1 = (1, 5) x2, y2 = (6, 11) x3, y3 = (1, 14) rr, cc = bezier_curve(x1, y1, x2, y2, x3, y3, 2, shape=(15, 20)) img[rr, cc] = 1 shift = 5 img_ = np.zeros((15 + 2 * shift, 20), 'uint8') x1, y1 = (1 + shift, 5) x2, y2 = (6 + shift, 11) x3, y3 = (1 + shift, 14) rr, cc = bezier_curve(x1, y1, x2, y2, x3, y3, 2, shape=None) img_[rr, cc] = 1 assert_array_equal(img, img_[shift:-shift, :]) def test_polygon_perimeter(): expected = np.array( [[1, 1, 1, 1], [1, 0, 0, 1], [1, 1, 1, 1]] ) out = np.zeros_like(expected) rr, cc = polygon_perimeter([0, 2, 2, 0], [0, 
0, 3, 3]) out[rr, cc] = 1 assert_array_equal(out, expected) out = np.zeros_like(expected) rr, cc = polygon_perimeter([-1, -1, 3, 3], [-1, 4, 4, -1], shape=out.shape, clip=True) out[rr, cc] = 1 assert_array_equal(out, expected) assert_raises(ValueError, polygon_perimeter, [0], [1], clip=True) def test_polygon_perimeter_outside_image(): rr, cc = polygon_perimeter([-1, -1, 3, 3], [-1, 4, 4, -1], shape=(3, 4)) assert_equal(len(rr), 0) assert_equal(len(cc), 0) if __name__ == "__main__": from numpy.testing import run_module_suite run_module_suite()
codeparrot/github-code-clean
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding unique constraint on 'Cfda', fields ['program_number'] db.create_unique('data_cfda', ['program_number']) def backwards(self, orm): # Removing unique constraint on 'Cfda', fields ['program_number'] db.delete_unique('data_cfda', ['program_number']) models = { 'data.agegroup': { 'Meta': {'object_name': 'AgeGroup'}, 'age_group_desc': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'age_group_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}), 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, 'data.alternativefuelvehicles': { 'Meta': {'object_name': 'AlternativeFuelVehicles'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'value': ('django.db.models.fields.IntegerField', [], {}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.annualstateenergyexpenditures': { 'Meta': {'object_name': 'AnnualStateEnergyExpenditures'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'msn': ('django.db.models.fields.CharField', [], {'max_length': '5'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'value': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.ansicountystate': { 'Meta': {'object_name': 'AnsiCountyState'}, 'ansi_class': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'ansi_state': ('django.db.models.fields.CharField', [], {'max_length': 
'2'}), 'code': ('django.db.models.fields.CharField', [], {'max_length': '3'}), 'county': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}) }, 'data.ansistate': { 'Meta': {'object_name': 'AnsiState'}, 'ansi_state': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'gnisid': ('django.db.models.fields.CharField', [], {'max_length': '8'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'state_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'data.atcodes': { 'Meta': {'object_name': 'AtCodes'}, 'assistance_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'code': ('django.db.models.fields.CharField', [], {'max_length': '1'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, 'data.averageteachersalary': { 'Meta': {'object_name': 'AverageTeacherSalary'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'value': ('django.db.models.fields.IntegerField', [], {}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.bilingualedspending': { 'Meta': {'object_name': 'BilingualEdSpending'}, 'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}), 'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.budgetcategorysubfunctions': { 'Meta': {'object_name': 'BudgetCategorySubfunctions'}, 'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'npp_budget_category': ('django.db.models.fields.TextField', [], {'max_length': '64'}), 'subfunction': ('django.db.models.fields.TextField', [], {'max_length': '3'}) }, 'data.category': { 'Meta': {'object_name': 'Category'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '32'}) }, 'data.cfda': { 'Meta': {'object_name': 'Cfda'}, 'accomplishments': ('django.db.models.fields.TextField', [], {}), 'account_id': ('django.db.models.fields.TextField', [], {}), 'agency_name': ('django.db.models.fields.TextField', [], {}), 'appeals': ('django.db.models.fields.TextField', [], {}), 'applicant_eligibility': ('django.db.models.fields.TextField', [], {}), 'application_procedure': ('django.db.models.fields.TextField', [], {}), 'approval_time_range': ('django.db.models.fields.TextField', [], {}), 'assistance_length_time': ('django.db.models.fields.TextField', [], {}), 'assistance_type': ('django.db.models.fields.TextField', [], {}), 'audits': ('django.db.models.fields.TextField', [], {}), 'authorization': ('django.db.models.fields.TextField', [], {}), 'award_procedure': ('django.db.models.fields.TextField', [], {}), 'beneficiary_eligibility': ('django.db.models.fields.TextField', [], {}), 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'credentials': ('django.db.models.fields.TextField', [], {}), 'deadline': ('django.db.models.fields.TextField', [], {}), 'example_projects': ('django.db.models.fields.TextField', [], {}), 'formula_matching_grant_request': ('django.db.models.fields.TextField', [], {}), 'headquarters_office': ('django.db.models.fields.TextField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'objective': ('django.db.models.fields.TextField', [], {}), 'obligations': ('django.db.models.fields.TextField', [], {}), 
'omb_agency_code': ('django.db.models.fields.CharField', [], {'max_length': '10'}), 'omb_bureau_code': ('django.db.models.fields.CharField', [], {'max_length': '10'}), 'popular_name': ('django.db.models.fields.TextField', [], {}), 'preapplication_coordination': ('django.db.models.fields.TextField', [], {}), 'program_number': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}), 'program_title': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'published_date': ('django.db.models.fields.DateTimeField', [], {}), 'range_average_assistance': ('django.db.models.fields.TextField', [], {}), 'record': ('django.db.models.fields.TextField', [], {}), 'recovery_flag': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'regional_local_office': ('django.db.models.fields.TextField', [], {}), 'regulations': ('django.db.models.fields.TextField', [], {}), 'renewal': ('django.db.models.fields.TextField', [], {}), 'reports': ('django.db.models.fields.TextField', [], {}), 'selection_criteria': ('django.db.models.fields.TextField', [], {}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'url': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'use': ('django.db.models.fields.TextField', [], {}), 'web_address': ('django.db.models.fields.TextField', [], {}) }, 'data.cffr': { 'Meta': {'unique_together': "(('year', 'state', 'county', 'cffrprogram'),)", 'object_name': 'Cffr'}, 'amount': ('django.db.models.fields.BigIntegerField', [], {}), 'amount_per_capita': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}), 'cffrprogram': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.CffrProgram']"}), 'county': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.County']"}), 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.cffragency': { 'Meta': {'object_name': 'CffrAgency'}, 'agency_code': ('django.db.models.fields.CharField', [], {'max_length': '4'}), 'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '90'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.cffrgeo': { 'Meta': {'object_name': 'CffrGeo'}, 'congress_district': ('django.db.models.fields.CharField', [], {'max_length': '34', 'null': 'True'}), 'county_code': ('django.db.models.fields.CharField', [], {'max_length': '3'}), 'county_gu': ('django.db.models.fields.CharField', [], {'max_length': '3'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'place_code': ('django.db.models.fields.CharField', [], {'max_length': '5'}), 'place_gu': ('django.db.models.fields.CharField', [], {'max_length': '3'}), 'place_name': ('django.db.models.fields.CharField', [], {'max_length': '24'}), 'population': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'split_gu': ('django.db.models.fields.CharField', [], {'max_length': '3'}), 'state_code': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'state_gu': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'type_gu': ('django.db.models.fields.CharField', [], {'max_length': '1'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.cffrindividualcounty': { 'Meta': {'unique_together': "(('year', 'state', 'county'),)", 'object_name': 'CffrIndividualCounty'}, 'amount': ('django.db.models.fields.BigIntegerField', [], {}), 'amount_per_capita': ('django.db.models.fields.DecimalField', 
[], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}), 'county': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.County']"}), 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.cffrindividualstate': { 'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'CffrIndividualState'}, 'amount': ('django.db.models.fields.BigIntegerField', [], {}), 'amount_per_capita': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}), 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.cffrobjectcode': { 'Meta': {'object_name': 'CffrObjectCode'}, 'category': ('django.db.models.fields.CharField', [], {'max_length': '80'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'object_code': ('django.db.models.fields.CharField', [], {'max_length': '2'}) }, 'data.cffrprogram': { 'Meta': {'unique_together': "(('year', 'program_code'),)", 'object_name': 'CffrProgram'}, 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'program_code': ('django.db.models.fields.CharField', [], {'max_length': 
'6'}), 'program_desc': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}), 'program_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.cffrprogramraw': { 'Meta': {'object_name': 'CffrProgramRaw'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'program_id_code': ('django.db.models.fields.CharField', [], {'max_length': '6'}), 'program_name': ('django.db.models.fields.CharField', [], {'max_length': '74'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.cffrraw': { 'Meta': {'object_name': 'CffrRaw'}, 'agency_code': ('django.db.models.fields.CharField', [], {'max_length': '4'}), 'amount': ('django.db.models.fields.BigIntegerField', [], {}), 'amount_adjusted': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'}), 'congress_district': ('django.db.models.fields.CharField', [], {'max_length': '34', 'null': 'True'}), 'county_code': ('django.db.models.fields.CharField', [], {'max_length': '3'}), 'county_name': ('django.db.models.fields.CharField', [], {'max_length': '24', 'null': 'True'}), 'funding_sign': ('django.db.models.fields.CharField', [], {'max_length': '1'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'object_type': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'place_code': ('django.db.models.fields.CharField', [], {'max_length': '5'}), 'place_name': ('django.db.models.fields.CharField', [], {'max_length': '24', 'null': 'True'}), 'population': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'program_code': ('django.db.models.fields.CharField', [], {'max_length': '6'}), 'state_code': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'state_postal': ('django.db.models.fields.CharField', [], {'max_length': '2', 
'null': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.cffrstate': { 'Meta': {'unique_together': "(('year', 'state', 'cffrprogram'),)", 'object_name': 'CffrState'}, 'amount': ('django.db.models.fields.BigIntegerField', [], {}), 'amount_per_capita': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}), 'cffrprogram': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.CffrProgram']"}), 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.childrenpovertystate': { 'Meta': {'object_name': 'ChildrenPovertyState'}, 'children_poverty': ('django.db.models.fields.IntegerField', [], {}), 'children_poverty_moe': ('django.db.models.fields.IntegerField', [], {}), 'children_poverty_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}), 'children_poverty_percent_moe': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}), 'children_total': ('django.db.models.fields.IntegerField', [], {}), 'children_total_moe': ('django.db.models.fields.IntegerField', [], {}), 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.childrenpovertystateraw': { 'Meta': 
{'object_name': 'ChildrenPovertyStateRaw'}, 'children_poverty': ('django.db.models.fields.IntegerField', [], {}), 'children_poverty_moe': ('django.db.models.fields.IntegerField', [], {}), 'children_poverty_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}), 'children_poverty_percent_moe': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}), 'children_total': ('django.db.models.fields.IntegerField', [], {}), 'children_total_moe': ('django.db.models.fields.IntegerField', [], {}), 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.county': { 'Meta': {'unique_together': "(('state', 'county_ansi'),)", 'object_name': 'County'}, 'county_abbr': ('django.db.models.fields.CharField', [], {'max_length': '20'}), 'county_ansi': ('django.db.models.fields.CharField', [], {'max_length': '3'}), 'county_desc': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}), 'county_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'sort_order': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}), 'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, 'data.diplomarecipienttotal': { 'Meta': {'object_name': 'DiplomaRecipientTotal'}, 'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.dropoutsrace': { 'Meta': {'object_name': 'DropoutsRace'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.drugfreeschoolspending': { 'Meta': {'object_name': 'DrugFreeSchoolSpending'}, 'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}), 'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.educationalattainment': { 'Meta': {'object_name': 'EducationalAttainment'}, 'category': ('django.db.models.fields.TextField', [], {'max_length': '64'}), 'gender': ('django.db.models.fields.TextField', [], {'max_length': '16'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.TextField', [], {'max_length': '32'}), 'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'value_type': ('django.db.models.fields.TextField', [], {'max_length': '16'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.electricemissionsstate': { 'Meta': {'unique_together': "(('year', 'state', 'producer_type', 'energy_source'),)", 
'object_name': 'ElectricEmissionsState'}, 'co2': ('django.db.models.fields.BigIntegerField', [], {}), 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'energy_source': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'nox': ('django.db.models.fields.BigIntegerField', [], {}), 'producer_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'so2': ('django.db.models.fields.BigIntegerField', [], {}), 'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.electricemissionsstateraw': { 'Meta': {'object_name': 'ElectricEmissionsStateRaw'}, 'co2': ('django.db.models.fields.BigIntegerField', [], {}), 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'energy_source': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'nox': ('django.db.models.fields.BigIntegerField', [], {}), 'producer_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'so2': ('django.db.models.fields.BigIntegerField', [], {}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '16'}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.ellstudentsdistrict': { 'Meta': {'object_name': 'EllStudentsDistrict'}, 'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}), 'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.employment': { 'Meta': {'object_name': 'Employment'}, 'black_civilian_labor_force': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'black_unemployed': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'hispanic_civilian_labor_force': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'hispanic_unemployed': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'total_civilian_labor_force': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'white_civilian_labor_force': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'white_unemployed': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.energyconsumptionstate': { 'Meta': {'unique_together': "(('year', 'state', 'msn'),)", 'object_name': 'EnergyConsumptionState'}, 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'msn': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Msn']"}), 'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'value': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.energyconsumptionstateraw': { 'Meta': {'object_name': 'EnergyConsumptionStateRaw'}, 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 
'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'msn': ('django.db.models.fields.CharField', [], {'max_length': '5'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'value': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.energyproductionstate': { 'Meta': {'unique_together': "(('year', 'state', 'msn'),)", 'object_name': 'EnergyProductionState'}, 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'msn': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Msn']"}), 'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'value': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.energyproductionstateraw': { 'Meta': {'object_name': 'EnergyProductionStateRaw'}, 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'msn': ('django.db.models.fields.CharField', [], {'max_length': '5'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'value': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.enrolledstudentsdistrict': { 'Meta': {'object_name': 'EnrolledStudentsDistrict'}, 'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'year': ('django.db.models.fields.CharField', [], {'max_length': '9'}) }, 'data.enrollmentrace': { 'Meta': {'object_name': 'EnrollmentRace'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.ethnicity': { 'Meta': {'object_name': 'Ethnicity'}, 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'ethnicity_abbr': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '5'}), 'ethnicity_desc': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}), 'ethnicity_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, 'data.expenditureperpupil': { 'Meta': {'object_name': 'ExpenditurePerPupil'}, 'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}), 'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.familiespovertystate': { 'Meta': {'object_name': 'FamiliesPovertyState'}, 'create_date': 
('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'families_poverty_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}), 'families_poverty_percent_moe': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}), 'families_total': ('django.db.models.fields.IntegerField', [], {}), 'families_total_moe': ('django.db.models.fields.IntegerField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.familiespovertystateraw': { 'Meta': {'object_name': 'FamiliesPovertyStateRaw'}, 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'families_poverty_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}), 'families_poverty_percent_moe': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}), 'families_total': ('django.db.models.fields.IntegerField', [], {}), 'families_total_moe': ('django.db.models.fields.IntegerField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.fcnaspending': { 'Meta': {'object_name': 'FcnaSpending'}, 'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}), 'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.federalimpactaid': { 'Meta': {'object_name': 'FederalImpactAid'}, 'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}), 'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.federaltaxcollectionstate': { 'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'FederalTaxCollectionState'}, 'business_income': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}), 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'estate': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}), 'estate_trust_income': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}), 'excise': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}), 'gift': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'individual_total': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}), 'notwitheld_income_and_seca': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}), 'railroad_retirement': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 
'decimal_places': '2'}), 'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}), 'total': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}), 'unemployment_insurance': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'witheld_income_and_fica': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.federaltaxcollectionstateraw': { 'Meta': {'object_name': 'FederalTaxCollectionStateRaw'}, 'business_income_taxes': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}), 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'estate_tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}), 'estate_trust_income_tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}), 'excise_taxes': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}), 'gift_tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'income_employment_estate_trust_total': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}), 'individual_notwitheld_seca': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}), 'individual_witheld_fica': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}), 
'railroad_retirement': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'total_collections': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}), 'unemployment_insurance': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2'}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.fipscountycongressdistrict': { 'Meta': {'object_name': 'FipsCountyCongressDistrict'}, 'congress': ('django.db.models.fields.IntegerField', [], {}), 'county_code': ('django.db.models.fields.CharField', [], {'max_length': '3'}), 'district_code': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state_code': ('django.db.models.fields.CharField', [], {'max_length': '2'}) }, 'data.fipsstate': { 'Meta': {'object_name': 'FipsState'}, 'code': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}) }, 'data.foodsecuritystate': { 'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'FoodSecurityState'}, 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'food_insecure': ('django.db.models.fields.IntegerField', [], {}), 'food_insecure_percent': ('django.db.models.fields.FloatField', [], {}), 'food_secure': ('django.db.models.fields.IntegerField', [], {}), 'food_secure_high': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'food_secure_high_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'food_secure_low': 
('django.db.models.fields.IntegerField', [], {}), 'food_secure_low_percent': ('django.db.models.fields.FloatField', [], {}), 'food_secure_marginal': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'food_secure_marginal_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'food_secure_percent': ('django.db.models.fields.FloatField', [], {}), 'food_secure_very_low': ('django.db.models.fields.IntegerField', [], {}), 'food_secure_very_low_percent': ('django.db.models.fields.FloatField', [], {}), 'household_total': ('django.db.models.fields.IntegerField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'no_response': ('django.db.models.fields.IntegerField', [], {}), 'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.foodsecuritystateraw': { 'Meta': {'object_name': 'FoodSecurityStateRaw'}, 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'food_secure': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'food_secure_high': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'food_secure_low': ('django.db.models.fields.IntegerField', [], {}), 'food_secure_marginal': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'food_secure_very_low': ('django.db.models.fields.IntegerField', [], {}), 'household_total': ('django.db.models.fields.IntegerField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'no_response': ('django.db.models.fields.IntegerField', [], {}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'year': 
('django.db.models.fields.IntegerField', [], {}) }, 'data.freeluncheligible': { 'Meta': {'object_name': 'FreeLunchEligible'}, 'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}), 'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.freereducedluncheligible': { 'Meta': {'object_name': 'FreeReducedLunchEligible'}, 'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}), 'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.freereducedluncheligiblecounty': { 'Meta': {'object_name': 'FreeReducedLunchEligibleCounty'}, 'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'county_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.gender': { 'Meta': {'object_name': 'Gender'}, 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'gender_abbr': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '1'}), 'gender_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}), 'id': ('django.db.models.fields.AutoField', [], 
{'primary_key': 'True'}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, 'data.halfpints': { 'Meta': {'object_name': 'HalfPints'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'value': ('django.db.models.fields.IntegerField', [], {}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.headstartenrollment': { 'Meta': {'object_name': 'HeadStartEnrollment'}, 'enrollment': ('django.db.models.fields.IntegerField', [], {}), 'funding': ('django.db.models.fields.IntegerField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.healthinsurance': { 'Meta': {'object_name': 'HealthInsurance'}, 'all_people': ('django.db.models.fields.IntegerField', [], {}), 'covered': ('django.db.models.fields.IntegerField', [], {}), 'covered_pct': ('django.db.models.fields.FloatField', [], {}), 'covered_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'covered_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'direct_purchase': ('django.db.models.fields.IntegerField', [], {}), 'direct_purchase_pct': ('django.db.models.fields.FloatField', [], {}), 'direct_purchase_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'direct_purchase_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'govt': ('django.db.models.fields.IntegerField', [], {}), 'govt_pct': ('django.db.models.fields.FloatField', [], {}), 'govt_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'govt_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'medicaid': ('django.db.models.fields.IntegerField', [], {}), 
'medicaid_pct': ('django.db.models.fields.FloatField', [], {}), 'medicaid_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'medicaid_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'medicare': ('django.db.models.fields.IntegerField', [], {}), 'medicare_pct': ('django.db.models.fields.FloatField', [], {}), 'medicare_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'medicare_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'military': ('django.db.models.fields.IntegerField', [], {}), 'military_pct': ('django.db.models.fields.FloatField', [], {}), 'military_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'military_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'not_covered': ('django.db.models.fields.IntegerField', [], {}), 'not_covered_pct': ('django.db.models.fields.FloatField', [], {}), 'not_covered_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'not_covered_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'private': ('django.db.models.fields.IntegerField', [], {}), 'private_employment': ('django.db.models.fields.IntegerField', [], {}), 'private_employment_pct': ('django.db.models.fields.FloatField', [], {}), 'private_employment_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'private_employment_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'private_pct': ('django.db.models.fields.FloatField', [], {}), 'private_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'private_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.healthinsurancestate': { 'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'HealthInsuranceState'}, 'create_date': 
('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'pop': ('django.db.models.fields.IntegerField', [], {}), 'pop_18_34': ('django.db.models.fields.IntegerField', [], {}), 'pop_18_34_ins': ('django.db.models.fields.IntegerField', [], {}), 'pop_18_34_ins_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}), 'pop_18_34_moe': ('django.db.models.fields.IntegerField', [], {}), 'pop_18_34_no_ins': ('django.db.models.fields.IntegerField', [], {}), 'pop_18_34_no_ins_moe': ('django.db.models.fields.IntegerField', [], {}), 'pop_18_34_no_ins_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}), 'pop_18_34_private': ('django.db.models.fields.IntegerField', [], {}), 'pop_18_34_private_moe': ('django.db.models.fields.IntegerField', [], {}), 'pop_18_34_private_public': ('django.db.models.fields.IntegerField', [], {}), 'pop_18_34_private_public_moe': ('django.db.models.fields.IntegerField', [], {}), 'pop_18_34_public': ('django.db.models.fields.IntegerField', [], {}), 'pop_18_34_public_moe': ('django.db.models.fields.IntegerField', [], {}), 'pop_18_64': ('django.db.models.fields.IntegerField', [], {}), 'pop_18_64_ins': ('django.db.models.fields.IntegerField', [], {}), 'pop_18_64_ins_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}), 'pop_18_64_no_ins': ('django.db.models.fields.IntegerField', [], {}), 'pop_18_64_no_ins_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}), 'pop_18_64_private': ('django.db.models.fields.IntegerField', [], {}), 'pop_18_64_private_public': ('django.db.models.fields.IntegerField', [], {}), 'pop_18_64_public': ('django.db.models.fields.IntegerField', [], {}), 'pop_35_64': ('django.db.models.fields.IntegerField', [], {}), 'pop_35_64_ins': 
('django.db.models.fields.IntegerField', [], {}), 'pop_35_64_ins_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}), 'pop_35_64_moe': ('django.db.models.fields.IntegerField', [], {}), 'pop_35_64_no_ins': ('django.db.models.fields.IntegerField', [], {}), 'pop_35_64_no_ins_moe': ('django.db.models.fields.IntegerField', [], {}), 'pop_35_64_no_ins_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}), 'pop_35_64_private': ('django.db.models.fields.IntegerField', [], {}), 'pop_35_64_private_moe': ('django.db.models.fields.IntegerField', [], {}), 'pop_35_64_private_public': ('django.db.models.fields.IntegerField', [], {}), 'pop_35_64_private_public_moe': ('django.db.models.fields.IntegerField', [], {}), 'pop_35_64_public': ('django.db.models.fields.IntegerField', [], {}), 'pop_35_64_public_moe': ('django.db.models.fields.IntegerField', [], {}), 'pop_ins': ('django.db.models.fields.IntegerField', [], {}), 'pop_ins_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}), 'pop_moe': ('django.db.models.fields.IntegerField', [], {}), 'pop_no_ins': ('django.db.models.fields.IntegerField', [], {}), 'pop_no_ins_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}), 'pop_over_64': ('django.db.models.fields.IntegerField', [], {}), 'pop_over_64_ins': ('django.db.models.fields.IntegerField', [], {}), 'pop_over_64_ins_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}), 'pop_over_64_moe': ('django.db.models.fields.IntegerField', [], {}), 'pop_over_64_no_ins': ('django.db.models.fields.IntegerField', [], {}), 'pop_over_64_no_ins_moe': ('django.db.models.fields.IntegerField', [], {}), 'pop_over_64_no_ins_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}), 'pop_over_64_private': 
('django.db.models.fields.IntegerField', [], {}), 'pop_over_64_private_moe': ('django.db.models.fields.IntegerField', [], {}), 'pop_over_64_private_public': ('django.db.models.fields.IntegerField', [], {}), 'pop_over_64_private_public_moe': ('django.db.models.fields.IntegerField', [], {}), 'pop_over_64_public': ('django.db.models.fields.IntegerField', [], {}), 'pop_over_64_public_moe': ('django.db.models.fields.IntegerField', [], {}), 'pop_under_18': ('django.db.models.fields.IntegerField', [], {}), 'pop_under_18_ins': ('django.db.models.fields.IntegerField', [], {}), 'pop_under_18_ins_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}), 'pop_under_18_moe': ('django.db.models.fields.IntegerField', [], {}), 'pop_under_18_no_ins': ('django.db.models.fields.IntegerField', [], {}), 'pop_under_18_no_ins_moe': ('django.db.models.fields.IntegerField', [], {}), 'pop_under_18_no_ins_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}), 'pop_under_18_private': ('django.db.models.fields.IntegerField', [], {}), 'pop_under_18_private_moe': ('django.db.models.fields.IntegerField', [], {}), 'pop_under_18_private_public': ('django.db.models.fields.IntegerField', [], {}), 'pop_under_18_private_public_moe': ('django.db.models.fields.IntegerField', [], {}), 'pop_under_18_public': ('django.db.models.fields.IntegerField', [], {}), 'pop_under_18_public_moe': ('django.db.models.fields.IntegerField', [], {}), 'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.healthinsurancestateraw': { 'Meta': {'object_name': 'HealthInsuranceStateRaw'}, 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'geoid': ('django.db.models.fields.CharField', [], 
{'max_length': '15'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'pop': ('django.db.models.fields.IntegerField', [], {}), 'pop_18_34': ('django.db.models.fields.IntegerField', [], {}), 'pop_18_34_moe': ('django.db.models.fields.IntegerField', [], {}), 'pop_18_34_no_ins': ('django.db.models.fields.IntegerField', [], {}), 'pop_18_34_no_ins_moe': ('django.db.models.fields.IntegerField', [], {}), 'pop_18_34_private': ('django.db.models.fields.IntegerField', [], {}), 'pop_18_34_private_moe': ('django.db.models.fields.IntegerField', [], {}), 'pop_18_34_private_public': ('django.db.models.fields.IntegerField', [], {}), 'pop_18_34_private_public_moe': ('django.db.models.fields.IntegerField', [], {}), 'pop_18_34_public': ('django.db.models.fields.IntegerField', [], {}), 'pop_18_34_public_moe': ('django.db.models.fields.IntegerField', [], {}), 'pop_35_64': ('django.db.models.fields.IntegerField', [], {}), 'pop_35_64_moe': ('django.db.models.fields.IntegerField', [], {}), 'pop_35_64_no_ins': ('django.db.models.fields.IntegerField', [], {}), 'pop_35_64_no_ins_moe': ('django.db.models.fields.IntegerField', [], {}), 'pop_35_64_private': ('django.db.models.fields.IntegerField', [], {}), 'pop_35_64_private_moe': ('django.db.models.fields.IntegerField', [], {}), 'pop_35_64_private_public': ('django.db.models.fields.IntegerField', [], {}), 'pop_35_64_private_public_moe': ('django.db.models.fields.IntegerField', [], {}), 'pop_35_64_public': ('django.db.models.fields.IntegerField', [], {}), 'pop_35_64_public_moe': ('django.db.models.fields.IntegerField', [], {}), 'pop_moe': ('django.db.models.fields.IntegerField', [], {}), 'pop_over_64': ('django.db.models.fields.IntegerField', [], {}), 'pop_over_64_moe': ('django.db.models.fields.IntegerField', [], {}), 'pop_over_64_no_ins': ('django.db.models.fields.IntegerField', [], {}), 'pop_over_64_no_ins_moe': ('django.db.models.fields.IntegerField', [], {}), 'pop_over_64_private': 
('django.db.models.fields.IntegerField', [], {}), 'pop_over_64_private_moe': ('django.db.models.fields.IntegerField', [], {}), 'pop_over_64_private_public': ('django.db.models.fields.IntegerField', [], {}), 'pop_over_64_private_public_moe': ('django.db.models.fields.IntegerField', [], {}), 'pop_over_64_public': ('django.db.models.fields.IntegerField', [], {}), 'pop_over_64_public_moe': ('django.db.models.fields.IntegerField', [], {}), 'pop_under_18': ('django.db.models.fields.IntegerField', [], {}), 'pop_under_18_moe': ('django.db.models.fields.IntegerField', [], {}), 'pop_under_18_no_ins': ('django.db.models.fields.IntegerField', [], {}), 'pop_under_18_no_ins_moe': ('django.db.models.fields.IntegerField', [], {}), 'pop_under_18_private': ('django.db.models.fields.IntegerField', [], {}), 'pop_under_18_private_moe': ('django.db.models.fields.IntegerField', [], {}), 'pop_under_18_private_public': ('django.db.models.fields.IntegerField', [], {}), 'pop_under_18_private_public_moe': ('django.db.models.fields.IntegerField', [], {}), 'pop_under_18_public': ('django.db.models.fields.IntegerField', [], {}), 'pop_under_18_public_moe': ('django.db.models.fields.IntegerField', [], {}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.highschooldropouts': { 'Meta': {'object_name': 'HighSchoolDropouts'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.highschoolother': { 'Meta': {'object_name': 'HighSchoolOther'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': 
'32'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.housingoccupancystate': { 'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'HousingOccupancyState'}, 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'occupied_units': ('django.db.models.fields.IntegerField', [], {}), 'occupied_units_moe': ('django.db.models.fields.IntegerField', [], {}), 'occupied_units_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}), 'occupied_units_percent_moe': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}), 'owner_occupied': ('django.db.models.fields.IntegerField', [], {}), 'owner_occupied_moe': ('django.db.models.fields.IntegerField', [], {}), 'owner_occupied_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}), 'owner_occupied_percent_moe': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}), 'owner_vacancy_rate': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}), 'owner_vacancy_rate_moe': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}), 'renter_occupied': ('django.db.models.fields.IntegerField', [], {}), 'renter_occupied_moe': ('django.db.models.fields.IntegerField', [], {}), 'renter_occupied_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}), 'renter_occupied_percent_moe': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}), 'renter_vacancy_rate': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}), 
'renter_vacancy_rate_moe': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}), 'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}), 'total_units': ('django.db.models.fields.IntegerField', [], {}), 'total_units_moe': ('django.db.models.fields.IntegerField', [], {}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'vacant_units': ('django.db.models.fields.IntegerField', [], {}), 'vacant_units_moe': ('django.db.models.fields.IntegerField', [], {}), 'vacant_units_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}), 'vacant_units_percent_moe': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.housingoccupancystateraw': { 'Meta': {'object_name': 'HousingOccupancyStateRaw'}, 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'occupied_units': ('django.db.models.fields.IntegerField', [], {}), 'occupied_units_moe': ('django.db.models.fields.CharField', [], {'max_length': '10'}), 'occupied_units_percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '2'}), 'occupied_units_percent_moe': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}), 'owner_occupied': ('django.db.models.fields.IntegerField', [], {}), 'owner_occupied_moe': ('django.db.models.fields.CharField', [], {'max_length': '10'}), 'owner_occupied_percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '2'}), 'owner_occupied_percent_moe': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}), 'owner_vacancy_rate': ('django.db.models.fields.DecimalField', [], 
{'max_digits': '5', 'decimal_places': '2'}), 'owner_vacancy_rate_moe': ('django.db.models.fields.CharField', [], {'max_length': '10'}), 'renter_occupied': ('django.db.models.fields.IntegerField', [], {}), 'renter_occupied_moe': ('django.db.models.fields.CharField', [], {'max_length': '10'}), 'renter_occupied_percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '2'}), 'renter_occupied_percent_moe': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}), 'renter_vacancy_rate': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}), 'renter_vacancy_rate_moe': ('django.db.models.fields.CharField', [], {'max_length': '10'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'state_fips': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'total_units': ('django.db.models.fields.IntegerField', [], {}), 'total_units_moe': ('django.db.models.fields.CharField', [], {'max_length': '10'}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'vacant_units': ('django.db.models.fields.IntegerField', [], {}), 'vacant_units_moe': ('django.db.models.fields.CharField', [], {'max_length': '10'}), 'vacant_units_percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '2'}), 'vacant_units_percent_moe': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.individualeducationprograms': { 'Meta': {'object_name': 'IndividualEducationPrograms'}, 'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}), 'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.kidshealthinsurance': { 'Meta': {'object_name': 'KidsHealthInsurance'}, 'all_people': ('django.db.models.fields.IntegerField', [], {}), 'covered': ('django.db.models.fields.IntegerField', [], {}), 'covered_pct': ('django.db.models.fields.FloatField', [], {}), 'covered_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'covered_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'direct_purchase': ('django.db.models.fields.IntegerField', [], {}), 'direct_purchase_pct': ('django.db.models.fields.FloatField', [], {}), 'direct_purchase_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'direct_purchase_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'govt': ('django.db.models.fields.IntegerField', [], {}), 'govt_pct': ('django.db.models.fields.FloatField', [], {}), 'govt_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'govt_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'medicaid': ('django.db.models.fields.IntegerField', [], {}), 'medicaid_pct': ('django.db.models.fields.FloatField', [], {}), 'medicaid_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'medicaid_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'medicare': ('django.db.models.fields.IntegerField', [], {}), 'medicare_pct': ('django.db.models.fields.FloatField', [], {}), 'medicare_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'medicare_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'military': ('django.db.models.fields.IntegerField', [], {}), 'military_pct': ('django.db.models.fields.FloatField', [], {}), 'military_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'military_se': 
('django.db.models.fields.FloatField', [], {'null': 'True'}), 'not_covered': ('django.db.models.fields.IntegerField', [], {}), 'not_covered_pct': ('django.db.models.fields.FloatField', [], {}), 'not_covered_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'not_covered_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'private': ('django.db.models.fields.IntegerField', [], {}), 'private_employment': ('django.db.models.fields.IntegerField', [], {}), 'private_employment_pct': ('django.db.models.fields.FloatField', [], {}), 'private_employment_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'private_employment_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'private_pct': ('django.db.models.fields.FloatField', [], {}), 'private_pct_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'private_se': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.laborforcecounty': { 'Meta': {'unique_together': "(('year', 'state', 'county'),)", 'object_name': 'LaborForceCounty'}, 'county': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.County']"}), 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'employment_total': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'labor_force_total': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'laus_code': ('django.db.models.fields.CharField', [], {'max_length': '8'}), 'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}), 'unemployment_rate': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'unemployment_total': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 
'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.laborforcecountyraw': { 'Meta': {'object_name': 'LaborForceCountyRaw'}, 'county_fips': ('django.db.models.fields.CharField', [], {'max_length': '3'}), 'county_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'employed': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'labor_force': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'laus_code': ('django.db.models.fields.CharField', [], {'max_length': '8'}), 'state_fips': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'unemployed': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'unemployment_rate': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.laborforcestate': { 'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'LaborForceState'}, 'civilian_noninstitutional_pop': ('django.db.models.fields.IntegerField', [], {}), 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'employment_pop_rate': ('django.db.models.fields.FloatField', [], {}), 'employment_total': ('django.db.models.fields.IntegerField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'labor_force_participation_rate': ('django.db.models.fields.FloatField', [], {}), 'labor_force_total': ('django.db.models.fields.IntegerField', [], {}), 'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}), 'unemployment_rate': 
('django.db.models.fields.FloatField', [], {}), 'unemployment_total': ('django.db.models.fields.IntegerField', [], {}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.laborforcestateraw': { 'Meta': {'object_name': 'LaborForceStateRaw'}, 'area': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'area_fips': ('django.db.models.fields.CharField', [], {'max_length': '10'}), 'civilian_noninstitutional_pop': ('django.db.models.fields.IntegerField', [], {}), 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'employment_pop_rate': ('django.db.models.fields.FloatField', [], {}), 'employment_total': ('django.db.models.fields.IntegerField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'labor_force_participation_rate': ('django.db.models.fields.FloatField', [], {}), 'labor_force_total': ('django.db.models.fields.IntegerField', [], {}), 'unemployment_rate': ('django.db.models.fields.FloatField', [], {}), 'unemployment_total': ('django.db.models.fields.IntegerField', [], {}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.laborunderutilizationstate': { 'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'LaborUnderutilizationState'}, 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}), 'u1': ('django.db.models.fields.FloatField', [], {}), 'u2': ('django.db.models.fields.FloatField', [], {}), 'u3': ('django.db.models.fields.FloatField', [], {}), 'u4': ('django.db.models.fields.FloatField', [], {}), 'u5': 
('django.db.models.fields.FloatField', [], {}), 'u6': ('django.db.models.fields.FloatField', [], {}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.laborunderutilizationstateraw': { 'Meta': {'object_name': 'LaborUnderutilizationStateRaw'}, 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'u1': ('django.db.models.fields.FloatField', [], {}), 'u2': ('django.db.models.fields.FloatField', [], {}), 'u3': ('django.db.models.fields.FloatField', [], {}), 'u4': ('django.db.models.fields.FloatField', [], {}), 'u5': ('django.db.models.fields.FloatField', [], {}), 'u6': ('django.db.models.fields.FloatField', [], {}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.mathsciencespending': { 'Meta': {'object_name': 'MathScienceSpending'}, 'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}), 'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.medianhouseholdincomestateraw': { 'Meta': {'object_name': 'MedianHouseholdIncomeStateRaw'}, 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'median_household_income': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 
'median_household_income_moe': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'state_fips': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.medianincomestate': { 'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'MedianIncomeState'}, 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'median_household_income': ('django.db.models.fields.FloatField', [], {}), 'median_household_income_moe': ('django.db.models.fields.IntegerField', [], {}), 'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.medicaidparticipation': { 'Meta': {'object_name': 'MedicaidParticipation'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.medicareenrollment': { 'Meta': {'object_name': 'MedicareEnrollment'}, 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'population': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'update_date': 
('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.migrantstudents': { 'Meta': {'object_name': 'MigrantStudents'}, 'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}), 'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.militarypersonnel': { 'Meta': {'object_name': 'MilitaryPersonnel'}, 'civilian_personnel': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'military_personnel': ('django.db.models.fields.IntegerField', [], {}), 'reserve_national_guard_personnel': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.msn': { 'Meta': {'object_name': 'Msn'}, 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'msn_code': ('django.db.models.fields.CharField', [], {'max_length': '5'}), 'msn_desc': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'msn_unit': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, 'data.nativeedspending': { 'Meta': {'object_name': 'NativeEdSpending'}, 'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}), 'agency_name': 
('django.db.models.fields.CharField', [], {'max_length': '128'}), 'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.ncesschooldistrict': { 'Meta': {'object_name': 'NcesSchoolDistrict'}, 'congress_code': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'county_code': ('django.db.models.fields.CharField', [], {'max_length': '4'}), 'county_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'district_code': ('django.db.models.fields.CharField', [], {'max_length': '6'}), 'district_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'state_code': ('django.db.models.fields.CharField', [], {'max_length': '2'}) }, 'data.newaidscases': { 'Meta': {'object_name': 'NewAidsCases'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.otherfederalrevenue': { 'Meta': {'object_name': 'OtherFederalRevenue'}, 'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}), 'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'year': ('django.db.models.fields.CharField', [], {'max_length': '16'}) }, 'data.peoplepovertystate': { 'Meta': {'object_name': 
'PeoplePovertyState'}, 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}), 'percent_standard_error': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}), 'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}), 'total_population': ('django.db.models.fields.IntegerField', [], {}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'value': ('django.db.models.fields.IntegerField', [], {}), 'value_standard_error': ('django.db.models.fields.IntegerField', [], {}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.peoplepovertystateraw': { 'Meta': {'object_name': 'PeoplePovertyStateRaw'}, 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}), 'percent_standard_error': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'total_population': ('django.db.models.fields.IntegerField', [], {}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'value': ('django.db.models.fields.IntegerField', [], {}), 'value_standard_error': ('django.db.models.fields.IntegerField', [], {}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.populationagecounty': { 'Meta': {'unique_together': "(('year', 'state', 'county'),)", 'object_name': 'PopulationAgeCounty'}, 'age_0_19': ('django.db.models.fields.IntegerField', [], {}), 
'age_0_19_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_0_4': ('django.db.models.fields.IntegerField', [], {}), 'age_0_4_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_10_14': ('django.db.models.fields.IntegerField', [], {}), 'age_10_14_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_15_19': ('django.db.models.fields.IntegerField', [], {}), 'age_15_19_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_20_24': ('django.db.models.fields.IntegerField', [], {}), 'age_20_24_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_25_29': ('django.db.models.fields.IntegerField', [], {}), 'age_25_29_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_30_34': ('django.db.models.fields.IntegerField', [], {}), 'age_30_34_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_35_39': ('django.db.models.fields.IntegerField', [], {}), 'age_35_39_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_40_44': ('django.db.models.fields.IntegerField', [], {}), 'age_40_44_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_45_49': ('django.db.models.fields.IntegerField', [], {}), 'age_45_49_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_50_54': ('django.db.models.fields.IntegerField', [], {}), 'age_50_54_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_55_59': ('django.db.models.fields.IntegerField', [], {}), 'age_55_59_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_5_9': ('django.db.models.fields.IntegerField', [], {}), 'age_5_9_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_60_64': ('django.db.models.fields.IntegerField', [], {}), 'age_60_64_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_65_69': 
('django.db.models.fields.IntegerField', [], {}), 'age_65_69_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_65_over': ('django.db.models.fields.IntegerField', [], {}), 'age_65_over_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_70_74': ('django.db.models.fields.IntegerField', [], {}), 'age_70_74_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_75_79': ('django.db.models.fields.IntegerField', [], {}), 'age_75_79_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_80_84': ('django.db.models.fields.IntegerField', [], {}), 'age_80_84_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_85_over': ('django.db.models.fields.IntegerField', [], {}), 'age_85_over_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'county': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.County']"}), 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}), 'total': ('django.db.models.fields.IntegerField', [], {}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.populationagestate': { 'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'PopulationAgeState'}, 'age_0_19': ('django.db.models.fields.IntegerField', [], {}), 'age_0_19_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_0_4': ('django.db.models.fields.IntegerField', [], {}), 'age_0_4_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_10_14': ('django.db.models.fields.IntegerField', [], {}), 'age_10_14_percent': ('django.db.models.fields.FloatField', [], {'null': 
'True'}), 'age_15_19': ('django.db.models.fields.IntegerField', [], {}), 'age_15_19_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_20_24': ('django.db.models.fields.IntegerField', [], {}), 'age_20_24_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_25_29': ('django.db.models.fields.IntegerField', [], {}), 'age_25_29_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_30_34': ('django.db.models.fields.IntegerField', [], {}), 'age_30_34_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_35_39': ('django.db.models.fields.IntegerField', [], {}), 'age_35_39_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_40_44': ('django.db.models.fields.IntegerField', [], {}), 'age_40_44_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_45_49': ('django.db.models.fields.IntegerField', [], {}), 'age_45_49_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_50_54': ('django.db.models.fields.IntegerField', [], {}), 'age_50_54_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_55_59': ('django.db.models.fields.IntegerField', [], {}), 'age_55_59_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_5_9': ('django.db.models.fields.IntegerField', [], {}), 'age_5_9_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_60_64': ('django.db.models.fields.IntegerField', [], {}), 'age_60_64_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_65_69': ('django.db.models.fields.IntegerField', [], {}), 'age_65_69_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_65_over': ('django.db.models.fields.IntegerField', [], {}), 'age_65_over_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_70_74': ('django.db.models.fields.IntegerField', [], {}), 'age_70_74_percent': 
('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_75_79': ('django.db.models.fields.IntegerField', [], {}), 'age_75_79_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_80_84': ('django.db.models.fields.IntegerField', [], {}), 'age_80_84_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_85_over': ('django.db.models.fields.IntegerField', [], {}), 'age_85_over_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}), 'total': ('django.db.models.fields.IntegerField', [], {}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.populationcongressionaldistrict': { 'Meta': {'object_name': 'PopulationCongressionalDistrict'}, 'american_indian_alaskan_alone': ('django.db.models.fields.IntegerField', [], {}), 'asian_alone': ('django.db.models.fields.IntegerField', [], {}), 'black_alone': ('django.db.models.fields.IntegerField', [], {}), 'district': ('django.db.models.fields.IntegerField', [], {}), 'hawaiian_pacific_island_alone': ('django.db.models.fields.IntegerField', [], {}), 'households': ('django.db.models.fields.IntegerField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'other_alone': ('django.db.models.fields.IntegerField', [], {}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'two_or_more_races': ('django.db.models.fields.IntegerField', [], {}), 'white_alone': ('django.db.models.fields.IntegerField', [], {}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.populationest00raw': { 'Meta': {'unique_together': "(('state', 
'county', 'gender', 'ethnic_origin', 'race'),)", 'object_name': 'PopulationEst00Raw'}, 'census2010pop': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'county': ('django.db.models.fields.CharField', [], {'max_length': '3'}), 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'ctyname': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'estimatesbase2000': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'ethnic_origin': ('django.db.models.fields.CharField', [], {'max_length': '1'}), 'gender': ('django.db.models.fields.CharField', [], {'max_length': '1'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'popestimate2000': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'popestimate2001': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'popestimate2002': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'popestimate2003': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'popestimate2004': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'popestimate2005': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'popestimate2006': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'popestimate2007': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'popestimate2008': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'popestimate2009': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'popestimate2010': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'race': ('django.db.models.fields.CharField', [], {'max_length': '1'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'stname': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'sumlev': ('django.db.models.fields.CharField', [], {'max_length': '3'}), 'update_date': 
('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, 'data.populationest10raw': { 'Meta': {'unique_together': "(('state', 'county', 'gender', 'ethnic_origin', 'race'),)", 'object_name': 'PopulationEst10Raw'}, 'census2010pop': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'census2020pop': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'county': ('django.db.models.fields.CharField', [], {'max_length': '3'}), 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'ctyname': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'estimatesbase2010': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'ethnic_origin': ('django.db.models.fields.CharField', [], {'max_length': '1'}), 'gender': ('django.db.models.fields.CharField', [], {'max_length': '1'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'popestimate2010': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'popestimate2011': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'popestimate2012': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'popestimate2013': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'popestimate2014': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'popestimate2015': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'popestimate2016': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'popestimate2017': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'popestimate2018': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'popestimate2019': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'popestimate2020': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'race': ('django.db.models.fields.CharField', [], {'max_length': '1'}), 'state': 
('django.db.models.fields.CharField', [], {'max_length': '2'}), 'stname': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'sumlev': ('django.db.models.fields.CharField', [], {'max_length': '3'}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, 'data.populationest90raw': { 'Meta': {'unique_together': "(('year', 'state', 'county', 'agegrp', 'race_gender', 'ethnic_origin'),)", 'object_name': 'PopulationEst90Raw'}, 'agegrp': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'county': ('django.db.models.fields.CharField', [], {'max_length': '3'}), 'create_date': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'ethnic_origin': ('django.db.models.fields.CharField', [], {'max_length': '1'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'population': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'race_gender': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'update_date': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}), 'year': ('django.db.models.fields.CharField', [], {'max_length': '2'}) }, 'data.populationfamilies': { 'Meta': {'object_name': 'PopulationFamilies'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'value': ('django.db.models.fields.IntegerField', [], {}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.populationgendercounty': { 'Meta': {'unique_together': "(('year', 'state', 'county'),)", 'object_name': 'PopulationGenderCounty'}, 'county': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.County']"}), 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'female': 
('django.db.models.fields.IntegerField', [], {}), 'female_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'male': ('django.db.models.fields.IntegerField', [], {}), 'male_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}), 'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}), 'total': ('django.db.models.fields.IntegerField', [], {}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.populationgenderstate': { 'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'PopulationGenderState'}, 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'female': ('django.db.models.fields.IntegerField', [], {}), 'female_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'male': ('django.db.models.fields.IntegerField', [], {}), 'male_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'}), 'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}), 'total': ('django.db.models.fields.IntegerField', [], {}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.populationracecounty': { 'Meta': {'unique_together': "(('year', 'state', 'county'),)", 'object_name': 'PopulationRaceCounty'}, 'asian_alone': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'asian_alone_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'asian_alone_hispanic_percent': 
('django.db.models.fields.FloatField', [], {'null': 'True'}), 'asian_alone_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'asian_alone_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'asian_alone_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'asian_other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'asian_other_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'asian_other_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'asian_other_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'asian_other_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'asian_other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'asian_pacific_islander_alone': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'asian_pacific_islander_alone_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'black_alone': ('django.db.models.fields.IntegerField', [], {}), 'black_alone_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'black_alone_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'black_alone_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'black_alone_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'black_alone_percent': ('django.db.models.fields.FloatField', [], {}), 'black_other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'black_other_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'black_other_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'black_other_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'black_other_nonhispanic_percent': ('django.db.models.fields.FloatField', 
[], {'null': 'True'}), 'black_other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'county': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.County']"}), 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'multiple_race': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'multiple_race_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'multiple_race_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'multiple_race_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'multiple_race_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'multiple_race_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'native_alone': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'native_alone_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'native_alone_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'native_alone_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'native_alone_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'native_alone_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'native_other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'native_other_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'native_other_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'native_other_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 
'native_other_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'native_other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'pacific_islander_alone': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'pacific_islander_alone_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'pacific_islander_alone_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'pacific_islander_alone_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'pacific_islander_alone_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'pacific_islander_alone_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'pacific_islander_other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'pacific_islander_other_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'pacific_islander_other_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'pacific_islander_other_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'pacific_islander_other_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'pacific_islander_other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}), 'total': ('django.db.models.fields.IntegerField', [], {}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'white_alone': 
('django.db.models.fields.IntegerField', [], {}), 'white_alone_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'white_alone_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'white_alone_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'white_alone_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'white_alone_percent': ('django.db.models.fields.FloatField', [], {}), 'white_other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'white_other_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'white_other_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'white_other_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'white_other_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'white_other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.populationracestate': { 'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'PopulationRaceState'}, 'asian_alone': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'asian_alone_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'asian_alone_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'asian_alone_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'asian_alone_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'asian_alone_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'asian_other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'asian_other_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'asian_other_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 
'True'}), 'asian_other_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'asian_other_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'asian_other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'asian_pacific_islander_alone': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'asian_pacific_islander_alone_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'black_alone': ('django.db.models.fields.IntegerField', [], {}), 'black_alone_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'black_alone_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'black_alone_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'black_alone_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'black_alone_percent': ('django.db.models.fields.FloatField', [], {}), 'black_other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'black_other_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'black_other_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'black_other_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'black_other_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'black_other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'multiple_race': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'multiple_race_hispanic': 
('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'multiple_race_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'multiple_race_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'multiple_race_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'multiple_race_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'native_alone': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'native_alone_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'native_alone_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'native_alone_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'native_alone_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'native_alone_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'native_other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'native_other_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'native_other_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'native_other_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'native_other_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'native_other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'pacific_islander_alone': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'pacific_islander_alone_hispanic': 
('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'pacific_islander_alone_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'pacific_islander_alone_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'pacific_islander_alone_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'pacific_islander_alone_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'pacific_islander_other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'pacific_islander_other_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'pacific_islander_other_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'pacific_islander_other_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'pacific_islander_other_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'pacific_islander_other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}), 'total': ('django.db.models.fields.IntegerField', [], {}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'white_alone': ('django.db.models.fields.IntegerField', [], {}), 'white_alone_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'white_alone_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'white_alone_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'white_alone_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'white_alone_percent': ('django.db.models.fields.FloatField', [], {}), 'white_other': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'white_other_hispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 
'white_other_hispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'white_other_nonhispanic': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'white_other_nonhispanic_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'white_other_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.presidentsbudget': { 'Meta': {'object_name': 'PresidentsBudget'}, 'account_code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'account_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'agency_code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'bea_category': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}), 'budget_type': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'bureau_code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'bureau_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'grant_non_grant': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'on_off_budget': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'source_category_code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'source_category_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}), 'source_subcategory_code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'source_subcategory_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}), 'subfunction_code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'subfunction_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}), 
'treasury_agency_code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}) }, 'data.presidentsbudgetyear': { 'Meta': {'object_name': 'PresidentsBudgetYear'}, 'budget': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'years'", 'to': "orm['data.PresidentsBudget']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'value': ('django.db.models.fields.IntegerField', [], {}), 'year': ('django.db.models.fields.CharField', [], {'max_length': '4'}) }, 'data.pupilteacherdistrict': { 'Meta': {'object_name': 'PupilTeacherDistrict'}, 'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '7'}), 'agency_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'amount': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.pupilteacherstate': { 'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'PupilTeacherState'}, 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ratio': ('django.db.models.fields.FloatField', [], {}), 'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.pupilteacherstateraw': { 'Meta': {'object_name': 'PupilTeacherStateRaw'}, 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ratio': ('django.db.models.fields.FloatField', [], {}), 'state': 
('django.db.models.fields.CharField', [], {'max_length': '2'}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.race': { 'Meta': {'object_name': 'Race'}, 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'race_abbr': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}), 'race_desc': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}), 'race_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, 'data.racecombo': { 'Meta': {'object_name': 'RaceCombo'}, 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'race_combo_flag': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'race_combo_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, 'data.retireddisablednilf': { 'Meta': {'object_name': 'RetiredDisabledNilf'}, 'disabled_not_in_labor_force': ('django.db.models.fields.IntegerField', [], {}), 'employed_absent': ('django.db.models.fields.IntegerField', [], {}), 'employed_at_work': ('django.db.models.fields.IntegerField', [], {}), 'employed_on_layoff': ('django.db.models.fields.IntegerField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'other_not_in_labor_force': ('django.db.models.fields.IntegerField', [], {}), 'retired_not_in_labor_force': ('django.db.models.fields.IntegerField', [], {}), 'state': 
('django.db.models.fields.CharField', [], {'max_length': '32'}), 'total': ('django.db.models.fields.IntegerField', [], {}), 'unemployed_looking': ('django.db.models.fields.IntegerField', [], {}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.saipecountystate': { 'Meta': {'object_name': 'SaipeCountyState'}, 'age_0_17_poverty': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'age_0_17_poverty_90_lower': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'age_0_17_poverty_90_upper': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'age_0_17_poverty_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_0_17_poverty_percent_90_lower': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_0_17_poverty_percent_90_upper': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_0_5_poverty': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'age_0_5_poverty_90_lower': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'age_0_5_poverty_90_upper': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'age_0_5_poverty_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_0_5_poverty_percent_90_lower': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_0_5_poverty_percent_90_upper': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_5_17_related_poverty': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'age_5_17_related_poverty_90_lower': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'age_5_17_related_poverty_90_upper': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'age_5_17_related_poverty_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_5_17_related_poverty_percent_90_lower': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'age_5_17_related_poverty_percent_90_upper': 
('django.db.models.fields.FloatField', [], {'null': 'True'}), 'all_age_poverty': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'all_age_poverty_90_lower': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'all_age_poverty_90_upper': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'all_age_poverty_percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'all_age_poverty_percent_90_lower': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'all_age_poverty_percent_90_upper': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'file_tag': ('django.db.models.fields.CharField', [], {'max_length': '22'}), 'fips_county': ('django.db.models.fields.CharField', [], {'max_length': '3'}), 'fips_state': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'median_household_income': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'median_household_income_90_lower': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'median_household_income_90_upper': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'state_county_name': ('django.db.models.fields.CharField', [], {'max_length': '45'}), 'state_postal_abbreviation': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.saipeschool': { 'Meta': {'object_name': 'SaipeSchool'}, 'ccd_district_id': ('django.db.models.fields.CharField', [], {'max_length': '5'}), 'district_name': ('django.db.models.fields.CharField', [], {'max_length': '65'}), 'file_stamp': ('django.db.models.fields.CharField', [], {'max_length': '21'}), 'fips_state': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'population': ('django.db.models.fields.IntegerField', [], {}), 'relevant_population': 
('django.db.models.fields.IntegerField', [], {}), 'relevant_population_poverty': ('django.db.models.fields.IntegerField', [], {}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.schipenrollmentstate': { 'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'SchipEnrollmentState'}, 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.schipenrollmentstateraw': { 'Meta': {'object_name': 'SchipEnrollmentStateRaw'}, 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.schoolbreakfastparticipationstate': { 'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'SchoolBreakfastParticipationState'}, 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'year': 
('django.db.models.fields.IntegerField', [], {}) }, 'data.schoolbreakfastparticipationstateraw': { 'Meta': {'object_name': 'SchoolBreakfastParticipationStateRaw'}, 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'value': ('django.db.models.fields.IntegerField', [], {}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.schoollunchparticipationstate': { 'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'SchoolLunchParticipationState'}, 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.schoollunchparticipationstateraw': { 'Meta': {'object_name': 'SchoolLunchParticipationStateRaw'}, 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'value': ('django.db.models.fields.IntegerField', [], {}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.shelterpopulation': { 'Meta': {'object_name': 'ShelterPopulation'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 
'True'}), 'percent': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'value': ('django.db.models.fields.IntegerField', [], {}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.snapbenefitsrecipients': { 'Meta': {'object_name': 'SnapBenefitsRecipients'}, 'county_fips': ('django.db.models.fields.CharField', [], {'max_length': '3'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'state_fips': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.snapmonthlybenefitspersonstate': { 'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'SnapMonthlyBenefitsPersonState'}, 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'value': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.snapmonthlybenefitspersonstateraw': { 'Meta': {'object_name': 'SnapMonthlyBenefitsPersonStateRaw'}, 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'value': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'year': 
('django.db.models.fields.IntegerField', [], {}) }, 'data.snapparticipationhouseholdsstate': { 'Meta': {'unique_together': "(('year', 'state'),)", 'object_name': 'SnapParticipationHouseholdsState'}, 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.State']"}), 'update_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'value': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'year': ('django.db.models.fields.IntegerField', [], {}) }, 'data.snapparticipationhouseholdsstateraw': { 'Meta': {'object_name': 'SnapParticipationHouseholdsStateRaw'}, 'create_date': ('django.db.models.fields.DateTimeField', [], {'auto
codeparrot/github-code-clean
# -*- coding: utf-8 -*- ## ## This file is part of Invenio. ## Copyright (C) 2011, 2012 CERN. ## ## Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. ''' bibauthorid_bdinterface This is the only file in bibauthorid which should use the data base. It should have an interface for all other files in the module. ''' import bibauthorid_config as bconfig import sys import numpy import cPickle import zlib from itertools import groupby, count, ifilter, chain, imap from operator import itemgetter from invenio.access_control_engine import acc_authorize_action from bibauthorid_name_utils import split_name_parts from bibauthorid_name_utils import create_canonical_name from bibauthorid_name_utils import create_normalized_name from bibauthorid_general_utils import bibauthor_print from bibauthorid_general_utils import update_status \ , update_status_final from dbquery import run_sql \ , OperationalError \ , ProgrammingError def get_sql_time(): ''' Returns the time acoarding to the database. The type is datetime.datetime. ''' return run_sql("select now()")[0][0] def set_personid_row(person_id, tag, value, opt1=0, opt2=0, opt3=""): ''' Inserts data and the additional options of a person by a given personid and tag. 
''' run_sql("INSERT INTO aidPERSONIDDATA " "(`personid`, `tag`, `data`, `opt1`, `opt2`, `opt3`) " "VALUES (%s, %s, %s, %s, %s, %s)", (person_id, tag, value, opt1, opt2, opt3)) def get_personid_row(person_id, tag): ''' Returns all the records associated to a person and a tag. @param person_id: id of the person to read the attribute from @type person_id: int @param tag: the tag to read. @type tag: string @return: the data associated with a virtual author @rtype: tuple of tuples ''' return run_sql("SELECT data, opt1, opt2, opt3 " "data FROM aidPERSONIDDATA " "WHERE personid = %s AND tag = %s", (person_id, tag)) def del_personid_row(tag, person_id=None, value=None): ''' Change the value associated to the given tag for a certain person. @param person_id: ID of the person @type person_id: int @param tag: tag to be updated @type tag: string @param value: value to be written for the tag @type value: string ''' if person_id: if value: run_sql("delete from aidPERSONIDDATA where personid=%s and tag=%s and data=%s", (person_id, tag, value,)) else: run_sql("delete from aidPERSONIDDATA where personid=%s and tag=%s", (person_id, tag,)) else: if value: run_sql("delete from aidPERSONIDDATA where tag=%s and data=%s", (tag, value,)) else: run_sql("delete from aidPERSONIDDATA where tag=%s", (tag,)) def get_all_papers_of_pids(personid_list): ''' Get all papers of authors in a given list and sorts the results by bibrefrec. @param personid_list: list with the authors. @type personid_list: iteratable of integers. ''' if personid_list: plist = list_2_SQL_str(personid_list, lambda x: str(x)) paps = run_sql("select personid, bibref_table, bibref_value, bibrec, flag " "from aidPERSONIDPAPERS " "where personid in %s " % plist) inner = set(row[1:4] for row in paps if row[4] > -2) return (x for x in paps if x[1:4] in inner) return () def del_person_not_manually_claimed_papers(pid): ''' Deletes papers from a person which have not been manually claimed. 
''' run_sql("delete from aidPERSONIDPAPERS " "where and (flag <> '-2' and flag <> '2') and personid=%s", (pid,)) def get_personid_from_uid(uid): ''' Returns the personID associated with the provided ui. If the personID is already associated with the person the secon parameter is True, false otherwise. @param uid: userID @type uid: ((int,),) ''' pid = run_sql("select personid from aidPERSONIDDATA where tag=%s and data=%s", ('uid', str(uid[0][0]))) if len(pid) == 1: return (pid[0], True) else: return ([-1], False) def get_uid_from_personid(pid): uid = run_sql("select data from aidPERSONIDDATA where tag='uid' and personid = %s", (pid,)) if uid: return uid[0][0] else: return None def get_new_personid(): pids = (run_sql("select max(personid) from aidPERSONIDDATA")[0][0], run_sql("select max(personid) from aidPERSONIDPAPERS")[0][0]) pids = tuple(int(p) for p in pids if p != None) if len(pids) == 2: return max(*pids) + 1 elif len(pids) == 1: return pids[0] + 1 else: return 0 def get_existing_personids(): try: pids_data = set(zip(*run_sql("select distinct personid from aidPERSONIDDATA"))[0]) except IndexError: pids_data = set() try: pids_pap = set(zip(*run_sql("select distinct personid from aidPERSONIDPAPERS"))[0]) except IndexError: pids_pap = set() return pids_data | pids_pap def get_existing_result_clusters(): return run_sql("select distinct personid from aidRESULTS") def create_new_person(uid= -1, uid_is_owner=False): ''' Create a new person. Set the uid as owner if requested. ''' pid = get_new_personid() if uid_is_owner: set_personid_row(pid, 'uid', str(uid)) else: set_personid_row(pid, 'user-created', str(uid)) return pid def create_new_person_from_uid(uid): return create_new_person(uid, uid_is_owner=True) def new_person_from_signature(sig, name=None): ''' Creates a new person from a signature. ''' pid = get_new_personid() add_signature(sig, name, pid) return pid def add_signature(sig, name, pid): ''' Inserts a signature in personid. 
''' if not name: name = get_name_by_bibrecref(sig) name = create_normalized_name(split_name_parts(name)) run_sql("INSERT INTO aidPERSONIDPAPERS " "(personid, bibref_table, bibref_value, bibrec, name) " "VALUES (%s, %s, %s, %s, %s)" , (pid, str(sig[0]), sig[1], sig[2], name)) def move_signature(sig, pid): ''' Inserts a signature in personid. ''' run_sql("update aidPERSONIDPAPERS set personid=%s " "where bibref_table=%s and bibref_value=%s " "and bibrec=%s and flag <> 2 and flag <> -2", (pid,) + sig) def find_conflicts(sig, pid): """ """ return run_sql("select bibref_table, bibref_value, bibrec, flag " "from aidPERSONIDPAPERS where " "personid = %s and " "bibrec = %s and " "flag <> -2" , (pid, sig[2])) def update_request_ticket(person_id, tag_data_tuple, ticket_id=None): ''' Creates / updates a request ticket for a personID @param: personid int @param: tag_data_tuples 'image' of the ticket: (('paper', '700:316,10'), ('owner', 'admin'), ('external_id', 'ticket_18')) @return: ticketid ''' #tags: rt_owner (the owner of the ticket, associating the rt_number to the transaction) # rt_external_id # rt_paper_cornfirm, rt_paper_reject, rt_paper_forget, rt_name, rt_email, rt_whatever #flag: rt_number if not ticket_id: last_id = run_sql("select max(opt1) from aidPERSONIDDATA where personid=%s and tag like %s", (str(person_id), 'rt_%'))[0][0] if last_id: ticket_id = last_id + 1 else: ticket_id = 1 else: delete_request_ticket(person_id, ticket_id) for d in tag_data_tuple: run_sql("insert into aidPERSONIDDATA (personid, tag, data, opt1) " "values (%s,%s,%s,%s)", (str(person_id), 'rt_' + str(d[0]), str(d[1]), str(ticket_id))) return ticket_id def delete_request_ticket(person_id, ticket_id=None): ''' Removes a ticket from a person_id. If ticket_id is not provider removes all the tickets pending on a person. 
''' if ticket_id: run_sql("delete from aidPERSONIDDATA where personid=%s and tag like %s and opt1 =%s", (str(person_id), 'rt_%', str(ticket_id))) else: run_sql("delete from aidPERSONIDDATA where personid=%s and tag like %s", (str(person_id), 'rt_%')) def get_all_personids_by_name(regexpr): return run_sql("select personid, name " "from aidPERSONIDPAPERS " "where name like %s", (regexpr,)) def get_personids_by_canonical_name(target): pid = run_sql("select personid from aidPERSONIDDATA where " "tag='canonical_name' and data like %s", (target,)) if pid: return run_sql("select personid, name from aidPERSONIDPAPERS " "where personid=%s", (pid[0][0],)) else: return [] def get_bibref_modification_status(bibref): ''' Determines if a record attached to a person has been touched by a human by checking the flag. @param pid: The Person ID of the person to check the assignment from @type pid: int @param bibref: The paper identifier to be checked (e.g. "100:12,144") @type bibref: string returns [bool:human_modified, int:lcul] ''' if not bibref: raise ValueError("A bibref is expected!") head, rec = bibref.split(',') table, ref = head.split(':') flags = run_sql("SELECT flag, lcul FROM aidPERSONIDPAPERS WHERE " "bibref_table = %s and bibref_value = %s and bibrec = %s" , (table, ref, rec)) if flags: return flags[0] else: return (False, 0) def get_canonical_id_from_personid(pid): ''' Finds the person id canonical name (e.g. 
Ellis_J_R_1) @param pid @type int @return: sql result of the request @rtype: tuple of tuple ''' return run_sql("SELECT data FROM aidPERSONIDDATA WHERE " "tag = %s AND personid = %s", ('canonical_name', str(pid))) def get_papers_status(paper): ''' Gets the personID and flag assiciated to papers @param papers: list of papers @type papers: '100:7531,9024' @return: (('data','personID','flag',),) @rtype: tuple of tuples ''' head, bibrec = paper.split(',') _table, bibref = head.split(':') rets = run_sql("select PersonID, flag " "from aidPERSONIDPAPERS " "where bibref_table = %s " "and bibref_value = %s " "and bibrec = %s" % (head, bibrec, bibref)) return [[paper] + list(x) for x in rets] def get_persons_from_recids(recids, return_alt_names=False, return_all_person_papers=False): rec_2_pid = dict() pid_2_data = dict() all_pids = set() def get_canonical_name(pid): return run_sql("SELECT data " "FROM aidPERSONIDDATA " "WHERE tag = %s " "AND personid = %s", ('canonical_name', pid)) for rec in recids: pids = run_sql("SELECT personid " "FROM aidPERSONIDPAPERS " "WHERE bibrec = %s " " and flag > -2 ", (rec,)) # for some reason python's set is faster than a mysql distinct pids = set(p[0] for p in pids) all_pids |= pids rec_2_pid[rec] = list(pids) for pid in all_pids: pid_data = {} canonical = get_canonical_name(pid) #We can supposed that this person didn't have a chance to get a canonical name yet #because it was not fully processed by it's creator. Anyway it's safe to try to create one #before failing miserably if not canonical: update_personID_canonical_names([pid]) canonical = get_canonical_name(pid) #assert len(canonical) == 1 #This condition cannot hold in case claims or update daemons are run in parallel #with this, as it can happen that a person with papers exists for wich a canonical name #has not been computed yet. Hence, it will be indexed next time, so it learns. 
#Each person should have at most one canonical name, so: assert len(canonical) <= 1 if len(canonical) == 1: pid_data = {'canonical_id' : canonical[0][0]} if return_alt_names: names = run_sql("SELECT name " "FROM aidPERSONIDPAPERS " "WHERE personid = %s " " and flag > -2 ", (pid,)) names = set(n[0] for n in names) pid_data['alternatative_names'] = list(names) if return_all_person_papers: recs = run_sql("SELECT bibrec " "FROM aidPERSONIDPAPERS " "WHERE personid = %s " " and flag > -2 ", (pid,)) recs = set(r[0] for r in recs) pid_data['person_records'] = list(recs) pid_2_data[pid] = pid_data return (rec_2_pid, pid_2_data) def get_person_db_names_count(pid, sort_by_count=True): ''' Returns the set of name strings and count associated to a person id. The name strings are as found in the database. @param pid: ID of the person @type pid: ('2',) ''' id_2_count = run_sql("select bibref_table, bibref_value " "from aidPERSONIDPAPERS " "where personid = %s " "and flag > -2", (pid,)) ref100 = [refid[1] for refid in id_2_count if refid[0] == '100'] ref700 = [refid[1] for refid in id_2_count if refid[0] == '700'] ref100_count = dict((key, len(list(data))) for key, data in groupby(sorted(ref100))) ref700_count = dict((key, len(list(data))) for key, data in groupby(sorted(ref700))) if ref100: ref100_s = list_2_SQL_str(ref100, str) id100_2_str = run_sql("select id, value " "from bib10x " "where id in %s" % ref100_s) else: id100_2_str = tuple() if ref700: ref700_s = list_2_SQL_str(ref700, str) id700_2_str = run_sql("select id, value " "from bib70x " "where id in %s" % ref700_s) else: id700_2_str = tuple() ret100 = [(name, ref100_count[refid]) for refid, name in id100_2_str] ret700 = [(name, ref700_count[refid]) for refid, name in id700_2_str] ret = ret100 + ret700 if sort_by_count: ret = sorted(ret, key=itemgetter(1), reverse=True) return ret def get_person_id_from_canonical_id(canonical_id): ''' Finds the person id from a canonical name (e.g. 
Ellis_J_R_1) @param canonical_id: the canonical ID @type canonical_id: string @return: sql result of the request @rtype: tuple of tuple ''' return run_sql("SELECT personid FROM aidPERSONIDDATA WHERE " "tag='canonical_name' AND data = %s", (canonical_id,)) def get_person_names_count(pid): ''' Returns the set of name strings and count associated to a person id @param pid: ID of the person @type pid: ('2',) @param value: value to be written for the tag @type value: string ''' return run_sql("select name, count(name) from aidPERSONIDPAPERS where " "personid=%s and flag > -2 group by name", (pid,)) def get_person_db_names_set(pid): ''' Returns the set of db_name strings associated to a person id @param pid: ID of the person @type pid: 2 ''' names = get_person_db_names_count(pid) if names: return zip(set(zip(*names)[0])) else: return [] def get_personids_from_bibrec(bibrec): ''' Returns all the personids associated to a bibrec. ''' pids = run_sql("select distinct personid from aidPERSONIDPAPERS where bibrec=%s and flag > -2", (bibrec,)) if pids: return zip(*pids)[0] else: return [] def get_personids_and_papers_from_bibrecs(bibrecs, limit_by_name=None): ''' ''' if not bibrecs: return [] else: bibrecs = list_2_SQL_str(bibrecs) if limit_by_name: try: surname = split_name_parts(limit_by_name)[0] except IndexError: surname = None else: surname = None if not surname: data = run_sql("select personid,bibrec from aidPERSONIDPAPERS where bibrec in %s" % (bibrecs,)) else: surname = split_name_parts(limit_by_name)[0] data = run_sql(("select personid,bibrec from aidPERSONIDPAPERS where bibrec in %s " "and name like " % bibrecs) + ' %s ', (surname + '%',)) pidlist = [(k, set([s[1] for s in d])) for k, d in groupby(sorted(data, key=lambda x:x[0]), key=lambda x:x[0])] pidlist = sorted(pidlist, key=lambda x:len(x[1]), reverse=True) return pidlist def get_person_bibrecs(pid): ''' Returns bibrecs associated with a personid @param pid: integer personid @return [bibrec1,...,bibrecN] ''' 
papers = run_sql("select bibrec from aidPERSONIDPAPERS where personid=%s", (str(pid),)) if papers: return list(set(zip(*papers)[0])) else: return [] def get_person_papers(pid, flag, show_author_name=False, show_title=False, show_rt_status=False, show_affiliations=False, show_date=False, show_experiment=False): query = "bibref_table, bibref_value, bibrec, flag" if show_author_name: query += ", name" all_papers = run_sql("SELECT " + query + " " "FROM aidPERSONIDPAPERS " "WHERE personid = %s " "AND flag >= %s", (pid, flag)) def format_paper(paper): bibrefrec = "%s:%d,%d" % paper[:3] ret = {'data' : bibrefrec, 'flag' : paper[3] } if show_author_name: ret['authorname'] = paper[4] if show_title: ret['title'] = "" title = get_title_from_rec(paper[2]) if title: ret['title'] = (title, ) if show_rt_status: rt_count = run_sql("SELECT count(personid) " "FROM aidPERSONIDDATA WHERE " "tag like 'rt_%%' and data = %s" , (bibrefrec,)) ret['rt_status'] = (rt_count[0][0] > 0) if show_affiliations: tag = '%s__u' % paper[0] ret['affiliation'] = get_grouped_records(paper[:3], tag)[tag] if show_date: ret['date'] = [] date_id = run_sql("SELECT id_bibxxx " "FROM bibrec_bib26x " "WHERE id_bibrec = %s " , (paper[2],)) if date_id: date_id_s = list_2_SQL_str(date_id, lambda x: x[0]) date = run_sql("SELECT value " "FROM bib26x " "WHERE id in %s " "AND tag = %s" % (date_id_s, "'269__c'")) if date: ret['date'] = zip(*date)[0] if show_experiment: ret['experiment'] = [] experiment_id = run_sql("SELECT id_bibxxx " "FROM bibrec_bib69x " "WHERE id_bibrec = %s " , (paper[2],)) if experiment_id: experiment_id_s = list_2_SQL_str(experiment_id, lambda x: x[0]) experiment = run_sql("SELECT value " "FROM bib69x " "WHERE id in %s " "AND tag = %s" % (experiment_id_s, "'693__e'")) if experiment: ret['experiment'] = zip(*experiment)[0] return ret return [format_paper(paper) for paper in all_papers] def get_persons_with_open_tickets_list(): ''' Finds all the persons with open tickets and returns pids and count 
of tickets @return: [[pid, ticket_count]] ''' return run_sql("select personid, count(distinct opt1) from " "aidPERSONIDDATA where tag like 'rt_%' group by personid") def get_request_ticket(person_id, ticket_id=None): ''' Retrieves one or many requests tickets from a person @param: person_id: person id integer @param: matching: couple of values to match ('tag', 'value') @param: ticket_id: ticket id (flag) value @returns: [[[('tag', 'value')], ticket_id]] [[[('a', 'va'), ('b', 'vb')], 1L], [[('b', 'daOEIaoe'), ('a', 'caaoOUIe')], 2L]] ''' if ticket_id: tstr = " and opt1='%s' " % ticket_id else: tstr = " " tickets = run_sql("select tag,data,opt1 from aidPERSONIDDATA where personid=%s and " " tag like 'rt_%%' " + tstr , (person_id,)) return [[[(s[0][3:], s[1]) for s in d], k] for k, d in groupby(sorted(tickets, key=lambda k: k[2]), key=lambda k: k[2])] def insert_user_log(userinfo, personid, action, tag, value, comment='', transactionid=0, timestamp=None): ''' Instert log entries in the user log table. For example of entres look at the table generation script. 
@param userinfo: username or user identifier @type: string @param personid: personid involved in the transaction @type: longint @param action: action type @type: string @param tag: tag @type: string @param value: value for the transaction @type: string @param comment: optional comment for the transaction @type: string @param transactionid: optional id for the transaction @type: longint @return: the transactionid @rtype: longint ''' # if transactionid == 0: # transactionid = max(run_sql('SELECT MAX(transactionid) FROM `aidUSERINPUTLOG`')[0][0], -1) + 1 if not timestamp: timestamp = run_sql('select now()')[0][0] # run_sql('insert into aidUSERINPUTLOG (transactionid,timestamp,userinfo,personid,action,tag,value,comment) values ' # '(%(transactionid)s,%(timestamp)s,%(userinfo)s,%(personid)s,%(action)s,%(tag)s,%(value)s,%(comment)s)', # ({'transactionid':str(transactionid), # 'timestamp':timestamp.timestamp, # 'userinfo':str(userinfo), # 'personid':str(personid), # 'action':str(action), # 'tag':str(tag), # 'value':str(value), # 'comment':str(comment)})) run_sql('insert into aidUSERINPUTLOG ' '(transactionid,timestamp,userinfo,personid,action,tag,value,comment) values ' '(%s,%s,%s,%s,%s,%s,%s,%s)', (transactionid, timestamp, userinfo, personid, action, tag, value, comment)) return transactionid def person_bibref_is_touched_old(pid, bibref): ''' Determines if a record attached to a person has been touched by a human by checking the flag. @param pid: The Person ID of the person to check the assignment from @type pid: int @param bibref: The paper identifier to be checked (e.g. 
"100:12,144") @type bibref: string ''' bibref, rec = bibref.split(",") table, ref = bibref.split(":") flag = run_sql("SELECT flag " "FROM aidPERSONIDPAPERS " "WHERE personid = %s " "AND bibref_table = %s " "AND bibref_value = %s " "AND bibrec = %s" , (pid, table, ref, rec)) try: flag = flag[0][0] except (IndexError): return False if not flag: return False elif -2 < flag < 2: return False else: return True def confirm_papers_to_person(pid, papers, user_level=0): ''' Confirms the relationship between pid and paper, as from user input. @param pid: id of the person @type pid: ('2',) @param papers: list of papers to confirm @type papers: (('100:7531,9024',),) @param gather_list: list to store the pids to be updated rather than calling update_personID_names_string_set @typer gather_list: set([('2',), ('3',)]) ''' for p in papers: bibref, rec = p[0].split(",") rec = int(rec) table, ref = bibref.split(":") ref = int(ref) run_sql("delete from aidPERSONIDPAPERS where personid=%s and bibrec=%s", (pid[0], rec)) run_sql("delete from aidPERSONIDPAPERS where bibref_table=%s and " " bibref_value = %s and bibrec=%s", (table, ref, rec)) add_signature([table, ref, rec], None, pid[0]) run_sql("update aidPERSONIDPAPERS " "set personid = %s " ", flag = %s " ", lcul = %s " "where bibref_table = %s " "and bibref_value = %s " "and bibrec = %s" , (str(pid[0]), '2', user_level, table, ref, rec)) update_personID_canonical_names(pid) def reject_papers_from_person(pid, papers, user_level=0): ''' Confirms the negative relationship between pid and paper, as from user input. 
@param pid: id of the person @type pid: integer @param papers: list of papers to confirm @type papers: ('100:7531,9024',) ''' new_pid = get_new_personid() for p in papers: brr, rec = p.split(",") table, ref = brr.split(':') sig = (table, ref, rec) records = personid_name_from_signature(sig) assert(records) fpid, name = records[0] assert fpid == pid run_sql("INSERT INTO aidPERSONIDPAPERS " "(personid, bibref_table, bibref_value, bibrec, name, flag, lcul) " "VALUES (%s, %s, %s, %s, %s, %s, %s)" , (pid, table, ref, rec, name, -2, user_level)) move_signature(sig, new_pid) update_personID_canonical_names((pid,)) def reset_papers_flag(pid, papers): ''' Resets the flag associated to the papers to '0' @param papers: list of papers to confirm @type papers: (('100:7531,9024',),) @param gather_list: list to store the pids to be updated rather than calling update_personID_names_string_set @typer gather_list: set([('2',), ('3',)]) ''' for p in papers: bibref, rec = p[0].split(",") table, ref = bibref.split(":") run_sql("update aidPERSONIDPAPERS " "set flag = %s, lcul = %s " "where bibref_table = %s " "and bibref_value = %s " "and bibrec = %s" , ('0', '0', table, ref, rec)) def user_can_modify_data(uid, pid): ''' Return True if the uid can modify data of this personID, false otherwise. @param uid: the user id @type: int @param pid: the person id @type: int @return: can user mofidfy data? 
@rtype: boolean ''' pid_uid = run_sql("select data from aidPERSONIDDATA where tag = %s" " and personid = %s", ('uid', str(pid))) if len(pid_uid) >= 1 and str(uid) == str(pid_uid[0][0]): rights = bconfig.CLAIMPAPER_CHANGE_OWN_DATA else: rights = bconfig.CLAIMPAPER_CHANGE_OTHERS_DATA return acc_authorize_action(uid, rights)[0] == 0 def get_possible_bibrecref(names, bibrec, always_match=False): ''' Returns a list of bibrefs for which the surname is matching @param names: list of names strings @param bibrec: bibrec number @param always_match: match with all the names (full bibrefs list) ''' splitted_names = [split_name_parts(n) for n in names] bibrec_names_100 = run_sql("select o.id, o.value from bib10x o, " "(select i.id_bibxxx as iid from bibrec_bib10x i " "where id_bibrec=%s) as dummy " "where o.tag='100__a' AND o.id = dummy.iid", (str(bibrec),)) bibrec_names_700 = run_sql("select o.id, o.value from bib70x o, " "(select i.id_bibxxx as iid from bibrec_bib70x i " "where id_bibrec=%s) as dummy " "where o.tag='700__a' AND o.id = dummy.iid", (str(bibrec),)) # bibrec_names_100 = run_sql("select id,value from bib10x where tag='100__a' and id in " # "(select id_bibxxx from bibrec_bib10x where id_bibrec=%s)", # (str(bibrec),)) # bibrec_names_700 = run_sql("select id,value from bib70x where tag='700__a' and id in " # "(select id_bibxxx from bibrec_bib70x where id_bibrec=%s)", # (str(bibrec),)) bibreflist = [] for b in bibrec_names_100: spb = split_name_parts(b[1]) for n in splitted_names: if (n[0].lower() == spb[0].lower()) or always_match: if ['100:' + str(b[0]), b[1]] not in bibreflist: bibreflist.append(['100:' + str(b[0]), b[1]]) for b in bibrec_names_700: spb = split_name_parts(b[1]) for n in splitted_names: if (n[0].lower() == spb[0].lower()) or always_match: if ['700:' + str(b[0]), b[1]] not in bibreflist: bibreflist.append(['700:' + str(b[0]), b[1]]) return bibreflist def user_can_modify_paper(uid, paper): ''' Return True if the uid can modify this paper, false 
otherwise. If the paper is assigned more then one time (from algorithms) consider the most privileged assignment. @param uid: the user id @type: int @param paper: the paper bibref,bibrec pair x00:1234,4321 @type: str @return: can user mofidfy paper attribution? @rtype: boolean ''' bibref, rec = paper.split(",") table, ref = bibref.split(":") prow = run_sql("select personid, lcul from aidPERSONIDPAPERS " "where bibref_table = %s and bibref_value = %s and bibrec = %s " "order by lcul desc limit 0,1", (table, ref, rec)) if len(prow) == 0: return ((acc_authorize_action(uid, bconfig.CLAIMPAPER_CLAIM_OWN_PAPERS)[0] == 0) or (acc_authorize_action(uid, bconfig.CLAIMPAPER_CLAIM_OTHERS_PAPERS)[0] == 0)) min_req_acc_n = int(prow[0][1]) req_acc = resolve_paper_access_right(bconfig.CLAIMPAPER_CLAIM_OWN_PAPERS) pid_uid = run_sql("select data from aidPERSONIDDATA where tag = %s and personid = %s", ('uid', str(prow[0][0]))) if len(pid_uid) > 0: if (str(pid_uid[0][0]) != str(uid)) and min_req_acc_n > 0: req_acc = resolve_paper_access_right(bconfig.CLAIMPAPER_CLAIM_OTHERS_PAPERS) if min_req_acc_n < req_acc: min_req_acc_n = req_acc min_req_acc = resolve_paper_access_right(min_req_acc_n) return (acc_authorize_action(uid, min_req_acc)[0] == 0) and (resolve_paper_access_right(min_req_acc) >= min_req_acc_n) def resolve_paper_access_right(acc): ''' Given a string or an integer, resolves to the corresponding integer or string If asked for a wrong/not present parameter falls back to the minimum privilege. 
''' access_dict = {bconfig.CLAIMPAPER_VIEW_PID_UNIVERSE: 0, bconfig.CLAIMPAPER_CLAIM_OWN_PAPERS: 25, bconfig.CLAIMPAPER_CLAIM_OTHERS_PAPERS: 50} if isinstance(acc, str): try: return access_dict[acc] except: return 0 inverse_dict = dict([[v, k] for k, v in access_dict.items()]) lower_accs = [a for a in inverse_dict.keys() if a <= acc] try: return inverse_dict[max(lower_accs)] except: return bconfig.CLAIMPAPER_VIEW_PID_UNIVERSE def get_recently_modified_record_ids(date): ''' Returns the bibrecs with modification date more recent then date. @param date: date ''' return [p[0] for p in run_sql( "select id from bibrec where modification_date > %s", (date,))] def filter_modified_record_ids(bibrecs, date): ''' Returns the bibrecs with modification date before the date. @param date: date ''' return ifilter( lambda x: run_sql("select count(*) from bibrec " "where id = %s and " "modification_date < %s" , (x[2], date))[0][0] , bibrecs) def get_cached_author_page(pageparam): ''' Return cached authorpage @param: pageparam (int personid) @return (id, 'authorpage_cache', personid, authorpage_html, date_cached) ''' #TABLE: id, tag, identifier, data, date caches = run_sql("select id, object_name, object_key, object_value, last_updated \ from aidCACHE \ where object_name='authorpage_cache' and object_key=%s", (str(pageparam),)) if len(caches) >= 1: return caches[0] else: return [] def delete_cached_author_page(personid): ''' Deletes from the author page cache the page concerning one person ''' run_sql("delete from aidCACHE where object_name='authorpage_cache' and object_key=%s", (str(personid),)) def update_cached_author_page_timestamp(pageparam): ''' Updates cached author page timestamp @param pageparam: int personid ''' #TABLE: id, tag, identifier, data, date run_sql("update aidCACHE set last_updated=now() where object_name='authorpage_cache' and object_key=%s", (str(pageparam),)) def update_cached_author_page(pageparam, page): ''' Updates cached author page, deleting old caches 
for same pageparam @param pageparam: int personid @param page: string html authorpage ''' #TABLE: id, tag, identifier, data, date run_sql("delete from aidCACHE where object_name='authorpage_cache' and object_key=%s", (str(pageparam),)) run_sql("insert into aidCACHE values (Null,'authorpage_cache',%s,%s,now())", (str(pageparam), str(page))) def get_user_log(transactionid='', userinfo='', personID='', action='', tag='', value='', comment='', only_most_recent=False): ''' Get user log table entry matching all the given parameters; all of them are optional. IF no parameters are given retuns the complete log table @param transactionid: id of the transaction @param userinfo: user name or identifier @param personid: id of the person involved @param action: action @param tag: tag @param value: value @param comment: comment ''' sql_query = ('select id,transactionid,timestamp,userinfo,personid,action,tag,value,comment ' + 'from aidUSERINPUTLOG where 1 ') if transactionid: sql_query += ' and transactionid=\'' + str(transactionid) + '\'' if userinfo: sql_query += ' and userinfo=\'' + str(userinfo) + '\'' if personID: sql_query += ' and personid=\'' + str(personID) + '\'' if action: sql_query += ' and action=\'' + str(action) + '\'' if tag: sql_query += ' and tag=\'' + str(tag) + '\'' if value: sql_query += ' and value=\'' + str(value) + '\'' if comment: sql_query += ' and comment=\'' + str(comment) + '\'' if only_most_recent: sql_query += ' order by timestamp desc limit 0,1' return run_sql(sql_query) def list_2_SQL_str(items, f=lambda x: x): """ Concatenates all items in items to a sql string using f. @param items: a set of items @param type items: X @param f: a function which transforms each item from items to string @param type f: X:->str @return: "(x1, x2, x3, ... 
xn)" for xi in items @return type: string """ strs = (str(f(x)) for x in items) return "(%s)" % ", ".join(strs) def get_authors_from_paper(paper): ''' selects all author bibrefs by a given papers ''' fullbibrefs100 = run_sql("select id_bibxxx from bibrec_bib10x where id_bibrec=%s", (paper,)) if len(fullbibrefs100) > 0: fullbibrefs100str = list_2_SQL_str(fullbibrefs100, lambda x: str(x[0])) return run_sql("select id from bib10x where tag='100__a' and id in %s" % (fullbibrefs100str,)) return tuple() def get_coauthors_from_paper(paper): ''' selects all coauthor bibrefs by a given papers ''' fullbibrefs700 = run_sql("select id_bibxxx from bibrec_bib70x where id_bibrec=%s", (paper,)) if len(fullbibrefs700) > 0: fullbibrefs700str = list_2_SQL_str(fullbibrefs700, lambda x: str(x[0])) return run_sql("select id from bib70x where tag='700__a' and id in %s" % (fullbibrefs700str,)) return tuple() def get_bibrefrec_subset(table, papers, refs): table = "bibrec_bib%sx" % str(table)[:-1] contents = run_sql("select id_bibrec, id_bibxxx from %s" % table) papers = set(papers) refs = set(refs) # yes, there are duplicates and we must set them return set(ifilter(lambda x: x[0] in papers and x[1] in refs, contents)) def get_deleted_papers(): return run_sql("select o.id_bibrec from bibrec_bib98x o, " "(select i.id as iid from bib98x i " "where value = 'DELETED' " "and tag like '980__a') as dummy " "where o.id_bibxxx = dummy.iid") #bibauthorid_maintenance personid update private methods def update_personID_canonical_names(persons_list=None, overwrite=False, suggested=''): ''' Updates the personID table creating or updating canonical names for persons @param: persons_list: persons to consider for the update (('1'),) @param: overwrite: if to touch already existing canonical names @param: suggested: string to suggest a canonical name for the person ''' if not persons_list: persons_list = [x[0] for x in run_sql('select distinct personid from aidPERSONIDPAPERS')] for idx, pid in 
enumerate(persons_list): update_status(float(idx) / float(len(persons_list)), "Updating canonical_names...") current_canonical = run_sql("select data from aidPERSONIDDATA where " "personid=%s and tag=%s", (pid, 'canonical_name')) if overwrite or len(current_canonical) == 0: names = get_person_names_count(pid) names = sorted(names, key=lambda k: k[1], reverse=True) if len(names) < 1 and not suggested: continue else: if suggested: canonical_name = suggested else: canonical_name = create_canonical_name(names[0][0]) run_sql("delete from aidPERSONIDDATA where personid=%s and tag=%s", (pid, 'canonical_name')) existing_cnames = run_sql("select data from aidPERSONIDDATA " "where tag=%s and data like %s", ('canonical_name', str(canonical_name) + '%')) existing_cnames = set(name[0] for name in existing_cnames) for i in count(1): cur_try = canonical_name + '.' + str(i) if cur_try not in existing_cnames: canonical_name = cur_try break run_sql("insert into aidPERSONIDDATA (personid, tag, data) values (%s,%s,%s) ", (pid, 'canonical_name', canonical_name)) update_status_final("Updating canonical_names finished.") def personid_get_recids_affected_since(last_timestamp): ''' Returns a list of recids which have been manually changed since timestamp @TODO: extend the system to track and signal even automatic updates (unless a full reindex is acceptable in case of magic automatic update) @param: last_timestamp: last update, datetime.datetime ''' vset = set(int(v[0].split(',')[1]) for v in run_sql( "select distinct value from aidUSERINPUTLOG " "where timestamp > %s", (last_timestamp,)) if ',' in v[0] and ':' in v[0]) pids = set(int(p[0]) for p in run_sql( "select distinct personid from aidUSERINPUTLOG " "where timestamp > %s", (last_timestamp,)) if p[0] > 0) if pids: pids_s = list_2_SQL_str(pids) vset |= set(int(b[0]) for b in run_sql( "select bibrec from aidPERSONIDPAPERS " "where personid in %s" % pids_s)) return list(vset) # I'm not sure about this cast. It might work without it. 
def get_all_paper_records(pid, claimed_only=False): if not claimed_only: return run_sql("SELECT distinct bibrec FROM aidPERSONIDPAPERS WHERE personid = %s", (str(pid),)) else: return run_sql("SELECT distinct bibrec FROM aidPERSONIDPAPERS WHERE " "personid = %s and flag=2 or flag=-2", (str(pid),)) def get_all_names_from_personid(): return ((name[0][0], set(n[1] for n in name), len(name)) for name in (run_sql( "SELECT personid, name " "FROM aidPERSONIDPAPERS " "WHERE personid = %s " "AND flag > -2", p) for p in run_sql( "SELECT DISTINCT personid " "FROM aidPERSONIDPAPERS " "WHERE flag > -2") )) def get_grouped_records(bibrefrec, *args): ''' By a given bibrefrec: mark:ref,rec this function will scan bibmarkx table and extract all records with tag in argc, which are grouped togerther with this bibrec. Returns a dictionary with { tag : [extracted_values] } if the values is not found. @type bibrefrec: (mark(int), ref(int), rec(int)) ''' table, ref, rec = bibrefrec target_table = "bib%sx" % (str(table)[:-1]) mapping_table = "bibrec_%s" % target_table group_id = run_sql("SELECT field_number " "FROM %s " "WHERE id_bibrec = %d " "AND id_bibxxx = %d" % (mapping_table, rec, ref)) if len(group_id) == 0: # unfortunately the mapping is not found, so # we cannot find anything return dict((arg, []) for arg in args) elif len(group_id) == 1: # All is fine field_number = group_id[0][0] else: # sounds bad, but ignore the error field_number = group_id[0][0] grouped = run_sql("SELECT id_bibxxx " "FROM %s " "WHERE id_bibrec = %d " "AND field_number = %d" % (mapping_table, rec, int(field_number))) assert len(grouped) > 0 grouped_s = list_2_SQL_str(grouped, lambda x: str(x[0])) ret = {} for arg in args: qry = run_sql("SELECT value " "FROM %s " "WHERE tag LIKE '%s' " "AND id IN %s" % (target_table, arg, grouped_s)) ret[arg] = [q[0] for q in qry] return ret def get_name_by_bibrecref(bib): ''' @param bib: bibrefrec or bibref @type bib: (mark, bibref, bibrec) OR (mark, bibref) ''' table = 
"bib%sx" % (str(bib[0])[:-1]) refid = bib[1] tag = "%s__a" % bib[0] ret = run_sql("select value from %s where id = '%s' and tag = '%s'" % (table, refid, tag)) # if zero - check if the garbage collector has run assert len(ret) == 1 return ret[0][0] def get_collaboration(bibrec): bibxxx = run_sql("select id_bibxxx from bibrec_bib71x where id_bibrec = %s", (str(bibrec),)) if len(bibxxx) == 0: return () bibxxx = list_2_SQL_str(bibxxx, lambda x: str(x[0])) ret = run_sql("select value from bib71x where id in %s and tag like '%s'" % (bibxxx, "710__g")) return [r[0] for r in ret] def get_key_words(bibrec): if bconfig.CFG_ADS_SITE: bibxxx = run_sql("select id_bibxxx from bibrec_bib65x where id_bibrec = %s", (str(bibrec),)) else: bibxxx = run_sql("select id_bibxxx from bibrec_bib69x where id_bibrec = %s", (str(bibrec),)) if len(bibxxx) == 0: return () bibxxx = list_2_SQL_str(bibxxx, lambda x: str(x[0])) if bconfig.CFG_ADS_SITE: ret = run_sql("select value from bib69x where id in %s and tag like '%s'" % (bibxxx, "6531_a")) else: ret = run_sql("select value from bib69x where id in %s and tag like '%s'" % (bibxxx, "695__a")) return [r[0] for r in ret] def get_all_authors(bibrec): bibxxx_1 = run_sql("select id_bibxxx from bibrec_bib10x where id_bibrec = %s", (str(bibrec),)) bibxxx_7 = run_sql("select id_bibxxx from bibrec_bib70x where id_bibrec = %s", (str(bibrec),)) if bibxxx_1: bibxxxs_1 = list_2_SQL_str(bibxxx_1, lambda x: str(x[0])) authors_1 = run_sql("select value from bib10x where tag = '%s' and id in %s" % ('100__a', bibxxxs_1,)) else: authors_1 = [] if bibxxx_7: bibxxxs_7 = list_2_SQL_str(bibxxx_7, lambda x: str(x[0])) authors_7 = run_sql("select value from bib70x where tag = '%s' and id in %s" % ('700__a', bibxxxs_7,)) else: authors_7 = [] return [a[0] for a in authors_1] + [a[0] for a in authors_7] def get_title_from_rec(rec): """ Returns the name of the paper like str if found. Otherwise returns None. 
""" title_id = run_sql("SELECT id_bibxxx " "FROM bibrec_bib24x " "WHERE id_bibrec = %s", (rec,)) if title_id: title_id_s = list_2_SQL_str(title_id, lambda x: x[0]) title = run_sql("SELECT value " "FROM bib24x " "WHERE id in %s " "AND tag = '245__a'" % title_id_s) if title: return title[0][0] def get_bib10x(): return run_sql("select id, value from bib10x where tag like %s", ("100__a",)) def get_bib70x(): return run_sql("select id, value from bib70x where tag like %s", ("700__a",)) class bib_matrix: ''' This small class contains the sparse matrix and encapsulates it. ''' # please increment this value every time you # change the output of the comparison functions current_comparison_version = 9 special_items = ((None, -3., 'N'), ('+', -2., '+'), ('-', -1., '-')) special_symbols = dict((x[0], (x[1], x[2])) for x in special_items) special_numbers = dict((x[1], (x[0], x[2])) for x in special_items) special_strings = dict((x[2], (x[0], x[1])) for x in special_items) def __init__(self, cluster_set=None): if cluster_set: bibs = chain(*(cl.bibs for cl in cluster_set.clusters)) self._bibmap = dict((b[1], b[0]) for b in enumerate(bibs)) width = len(self._bibmap) size = ((width - 1) * width) / 2 self._matrix = bib_matrix.create_empty_matrix(size) else: self._bibmap = dict() @staticmethod def create_empty_matrix(lenght): ret = numpy.ndarray(shape=(lenght, 2), dtype=float, order='C') ret.fill(bib_matrix.special_symbols[None][0]) return ret def _resolve_entry(self, bibs): entry = sorted(self._bibmap[bib] for bib in bibs) assert entry[0] < entry[1] return entry[0] + ((entry[1] - 1) * entry[1]) / 2 def __setitem__(self, bibs, val): entry = self._resolve_entry(bibs) if val in self.special_symbols: num = self.special_symbols[val][0] val = (num, num) self._matrix[entry] = val def __getitem__(self, bibs): entry = self._resolve_entry(bibs) ret = self._matrix[entry] if ret[0] in self.special_numbers: return self.special_numbers[ret[0]][0] return ret[0], ret[1] def __contains__(self, bib): 
return bib in self._bibmap def get_keys(self): return self._bibmap.keys() @staticmethod def __pickle_tuple(tupy): ''' tupy can be a very special iterable. It may contain: * (float, float) * None * '+', '-' or '?' ''' def to_str(elem): if elem[0] in bib_matrix.special_numbers: return "%s" % bib_matrix.special_numbers[elem[0]][1] return "%.2f:%.2f" % (elem[0], elem[1]) return "|".join(imap(to_str, tupy)) @staticmethod def __unpickle_tuple(tupy): ''' tupy must be an object created by pickle_tuple. ''' def from_str(elem): if elem in bib_matrix.special_strings: nummy = bib_matrix.special_strings[elem][1] return (nummy, nummy) fls = elem.split(":") assert len(fls) == 2 return (float(fls[0]), float(fls[1])) strs = tupy.split("|") if strs == ['']: strs = [] ret = bib_matrix.create_empty_matrix(len(strs)) for i, stri in enumerate(strs): if i % 100000 == 0: update_status(float(i) / len(strs), "Loading the cache...") ret[i][0], ret[i][1] = from_str(stri) update_status_final("Probability matrix loaded.") return ret def load(self, name): ''' This method will load the matrix from the database. ''' row = run_sql("select bibmap, matrix " "from aidPROBCACHE " "where cluster like %s", (name,)) if len(row) == 0: self._bibmap = dict() return False elif len(row) == 1: bibmap_vs = zlib.decompress(row[0][0]) bibmap_v = cPickle.loads(bibmap_vs) rec_v, self.creation_time, self._bibmap = bibmap_v if (rec_v != bib_matrix.current_comparison_version or bib_matrix.current_comparison_version < 0): # you can use negative # version to recalculate self._bibmap = dict() return False matrix_s = zlib.decompress(row[0][1]) self._matrix = bib_matrix.__unpickle_tuple(matrix_s) if self._bibmap and self._matrix != None: if len(self._bibmap) * (len(self._bibmap) - 1) / 2 != len(self._matrix): print >> sys.stderr, ("Error: aidPROBCACHE is corrupted! " "Cluster %s has bibmap with %d bibs, " "but matrix with %d entries." 
% (name, len(self._bibmap), len(self._matrix))) print >> sys.stderr, "Try to increase max_packet_size." assert False, "Bibmap: %d, Matrix %d" % (len(self._bibmap), len(self._matrix)) return False return True else: self._bibmap = dict() return False else: assert False, "aidPROBCACHE is corrupted" self._bibmap = dict() return False def store(self, name, creation_time): bibmap_v = (bib_matrix.current_comparison_version, creation_time, self._bibmap) bibmap_vs = cPickle.dumps(bibmap_v) bibmap_vsc = zlib.compress(bibmap_vs) matrix_s = bib_matrix.__pickle_tuple(self._matrix) matrix_sc = zlib.compress(matrix_s) run_sql("delete from aidPROBCACHE where cluster like %s", (name,)) run_sql("insert low_priority " "into aidPROBCACHE " "set cluster = %s, " "bibmap = %s, " "matrix = %s", (name, bibmap_vsc, matrix_sc)) def delete_paper_from_personid(rec): ''' Deletes all information in PERSONID about a given paper ''' run_sql("delete from aidPERSONIDPAPERS where bibrec = %s", (rec,)) def get_signatures_from_rec(bibrec): ''' Retrieves all information in PERSONID about a given bibrec. ''' return run_sql("select personid, bibref_table, bibref_value, bibrec, name " "from aidPERSONIDPAPERS where bibrec = %s" , (bibrec,)) def modify_signature(oldref, oldrec, newref, newname): ''' Modifies a signature in aidPERSONIDpapers. ''' return run_sql("UPDATE aidPERSONIDPAPERS " "SET bibref_table = %s, bibref_value = %s, name = %s " "WHERE bibref_table = %s AND bibref_value = %s AND bibrec = %s" , (str(newref[0]), newref[1], newname, str(oldref[0]), oldref[1], oldrec)) def find_pids_by_name(name): ''' Finds names and personids by a prefix name. ''' return set(run_sql("SELECT personid, name " "FROM aidPERSONIDPAPERS " "WHERE name like %s" , (name + ',%',))) def find_pids_by_exact_name(name): """ Finds names and personids by a name. 
""" return set(run_sql("SELECT personid " "FROM aidPERSONIDPAPERS " "WHERE name = %s" , (name,))) def remove_sigs(signatures): ''' Removes records from aidPERSONIDPAPERS ''' for sig in signatures: run_sql("DELETE FROM aidPERSONIDPAPERS " "WHERE bibref_table like %s AND bibref_value = %s AND bibrec = %s" , (str(sig[0]), sig[1], sig[2])) def remove_personid_papers(pids): ''' Removes all signatures from aidPERSONIDPAPERS with pid in pids ''' if pids: run_sql("delete from aidPERSONIDPAPERS where personid in %s" % list_2_SQL_str(pids)) def get_full_personid_papers(table_name="`aidPERSONIDPAPERS`"): ''' Get all columns and rows from aidPERSONIDPAPERS or any other table with the same structure. ''' return run_sql("select personid, bibref_table, " "bibref_value, bibrec, name, flag, " "lcul from %s" % table_name) def get_full_results(): ''' Depricated. Should be removed soon. ''' return run_sql("select personid, bibref_table, bibref_value, bibrec " "from aidRESULTS") def get_lastname_results(last_name): ''' Returns rows from aidRESULTS which share a common last name. ''' return run_sql("select personid, bibref_table, bibref_value, bibrec " "from aidRESULTS " "where personid like '" + last_name + ".%'") def get_full_personid_data(table_name="`aidPERSONIDDATA`"): ''' Get all columns and rows from aidPERSONIDDATA or any other table with the same structure. ''' return run_sql("select personid, tag, data, " "opt1, opt2, opt3 from %s" % table_name) def get_wrong_names(): ''' Returns a generator with all wrong names in aidPERSONIDPAPERS. Every element is (table, ref, correct_name). 
''' bib100 = dict(((x[0], create_normalized_name(split_name_parts(x[1]))) for x in get_bib10x())) bib700 = dict(((x[0], create_normalized_name(split_name_parts(x[1]))) for x in get_bib70x())) pidnames100 = run_sql("select distinct bibref_value, name from aidPERSONIDPAPERS " " where bibref_table='100'") pidnames700 = run_sql("select distinct bibref_value, name from aidPERSONIDPAPERS " " where bibref_table='700'") wrong100 = set(('100', x[0], bib100.get(x[0], None)) for x in pidnames100 if x[1] != bib100.get(x[0], None)) wrong700 = set(('700', x[0], bib700.get(x[0], None)) for x in pidnames700 if x[1] != bib700.get(x[0], None)) total = len(wrong100) + len(wrong700) return chain(wrong100, wrong700), total def check_personid_papers(output_file=None): ''' Checks all invariants of personid. Writes in stdout if output_file if False. ''' if output_file: fp = open(output_file, "w") printer = lambda x: fp.write(x + '\n') else: printer = bibauthor_print checkers = (check_duplicated_papers, check_duplicated_signatures, check_wrong_names, check_canonical_names, check_empty_personids, check_wrong_rejection, # check_claim_ispireid_contradiction, ) # Avoid writing f(a) or g(a), because one of the calls # might be optimized. 
return all([check(printer) for check in checkers]) def check_duplicated_papers(printer): ret = True pids = run_sql("select distinct personid from aidPERSONIDPAPERS") for pid in pids: pid = pid[0] recs = run_sql("select bibrec from aidPERSONIDPAPERS where personid = %s and flag <> %s", (pid, -2)) recs = [rec[0] for rec in recs] for rec in set(recs): recs.remove(rec) if recs: ret = False printer("Person %d has duplicated papers: %s" % (pid, str(tuple(set(recs))))) return ret def check_duplicated_signatures(printer): ret = True recs = run_sql("select distinct bibrec from aidPERSONIDPAPERS") for rec in recs: rec = rec[0] refs = list(run_sql("select bibref_table, bibref_value from aidPERSONIDPAPERS where bibrec = %s and flag > %s", (rec, "-2"))) for ref in set(refs): refs.remove(ref) if refs: ret = False refs = sorted(refs) refs = groupby(refs) refs = ["Found %s:%s %d times." % (key[0], key[1], len(list(data)) + 1) for key, data in refs] printer("Paper %d has duplicated signatures:" % rec) for ref in refs: printer("\t%s" % ref) return ret def check_wrong_names(printer): ret = True wrong_names, number = get_wrong_names() if number > 0: ret = False printer("%d corrupted names in aidPERSONIDPAPERS." % number) for wrong_name in wrong_names: if wrong_name[2]: printer("Outdated name, '%s'(%s:%d)." % (wrong_name[2], wrong_name[0], wrong_name[1])) else: printer("Invalid id(%s:%d)." % (wrong_name[0], wrong_name[1])) return ret def check_canonical_names(printer): ret = True pid_cn = run_sql("select personid, data from aidPERSONIDDATA where tag = %s", ('canonical_name',)) pid_2_cn = dict((k, len(list(d))) for k, d in groupby(sorted(pid_cn, key=itemgetter(0)), key=itemgetter(0))) for pid in get_existing_personids(): canon = pid_2_cn.get(pid, 0) if canon != 1: if canon == 0: papers = run_sql("select count(*) from aidPERSONIDPAPERS where personid = %s", (pid,))[0][0] if papers != 0: printer("Personid %d does not have a canonical name, but have %d papers." 
% (pid, papers)) ret = False else: printer("Personid %d has %d canonical names.", (pid, canon)) ret = False return ret def check_empty_personids(printer): ret = True paper_pids = set(p[0] for p in run_sql("select personid from aidPERSONIDPAPERS")) data_pids = set(p[0] for p in run_sql("select personid from aidPERSONIDDATA")) for p in data_pids - paper_pids: fields = run_sql("select count(*) from aidPERSONIDDATA where personid = %s and tag <> %s", (p, "canonical_name",))[0][0] if fields == 0: printer("Personid %d has no papers and nothing else than canonical_name." % p) ret = False return ret def check_wrong_rejection(printer): ret = True all_rejections = run_sql("select personid, bibref_table, bibref_value, bibrec " "from aidPERSONIDPAPERS " "where flag = %s", ('-2',)) for rej in all_rejections: sigs = run_sql("select personid from aidPERSONIDPAPERS " "where bibref_table = %s " "and bibref_value = %s " "and bibrec = %s " "and flag <> '-2'", rej[1:]) # To avoid duplication of error messages don't complain # if the papers is assigned to more than one personids. if not sigs: printer("The paper (%s:%s,%s) was rejected from person %d, but never assigned or claimed." % (rej[1:] + rej[:1])) ret = False elif rej[1] in sigs: printer("Personid %d has both assigned and rejected paper (%s:%s,%s)." % rej) ret = False return ret def check_merger(): ''' This function presumes that copy_personid was called before the merger. 
''' is_ok = True old_claims = set(run_sql("select personid, bibref_table, bibref_value, bibrec, flag " "from aidPERSONIDPAPERS_copy " "where flag = -2 or flag = 2")) cur_claims = set(run_sql("select personid, bibref_table, bibref_value, bibrec, flag " "from aidPERSONIDPAPERS " "where flag = -2 or flag = 2")) errors = ((old_claims - cur_claims, "Some claims were lost during the merge."), (cur_claims - old_claims, "Some new claims appeared after the merge.")) act = { -2 : 'Rejection', 2 : 'Claim' } for err_set, err_msg in errors: if err_set: is_ok = False bibauthor_print(err_msg) bibauthor_print("".join(" %s: personid %d %d:%d,%d\n" % (act[cl[6]], cl[0], int(cl[1]), cl[2], cl[3]) for cl in err_set)) old_assigned = set(run_sql("select bibref_table, bibref_value, bibrec " "from aidPERSONIDPAPERS_copy")) #"where flag <> -2 and flag <> 2")) cur_assigned = set(run_sql("select bibref_table, bibref_value, bibrec " "from aidPERSONIDPAPERS")) #"where flag <> -2 and flag <> 2")) errors = ((old_assigned - cur_assigned, "Some signatures were lost during the merge."), (cur_assigned - old_assigned, "Some new signatures appeared after the merge.")) for err_sig, err_msg in errors: if err_sig: is_ok = False bibauthor_print(err_msg) bibauthor_print("".join(" %s:%d,%d\n" % sig for sig in err_sig)) return is_ok def check_results(): is_ok = True all_result_rows = run_sql("select * from aidRESULTS") keyfunc = lambda x: x[1:] duplicated = (d for d in (list(d) for k, d in groupby(sorted(all_result_rows, key=keyfunc), key=keyfunc)) if len(d) > 1) for dd in duplicated: is_ok = False for d in dd: print "%s %s %s %s" % d print clusters = {} for rr in all_result_rows: clusters[rr[0]] = clusters.get(rr[0], []) + [rr[3]] faulty_clusters = dict((cid, len(recs) - len(set(recs))) for cid, recs in clusters.items() if not len(recs) == len(set(recs))) if faulty_clusters: is_ok = False print "Recids NOT unique in clusters!" 
print ("A total of %s clusters hold an average of %.2f duplicates" % (len(faulty_clusters), (sum(faulty_clusters.values()) / float(len(faulty_clusters))))) for c in faulty_clusters: print "Name: %-20s Size: %4d Faulty: %2d" % (c, len(clusters[c]), faulty_clusters[c]) return is_ok def check_claim_inspireid_contradiction(): iids10x = run_sql("select id from bib10x where tag = '100__i'") iids70x = run_sql("select id from bib70x where tag = '700__i'") refs10x = set(x[0] for x in run_sql("select id from bib10x where tag = '100__a'")) refs70x = set(x[0] for x in run_sql("select id from bib70x where tag = '700__a'")) if iids10x: iids10x = list_2_SQL_str(iids10x, lambda x: str(x[0])) iids10x = run_sql("select id_bibxxx, id_bibrec, field_number " "from bibrec_bib10x " "where id_bibxxx in %s" % iids10x) iids10x = ((row[0], [(ref, rec) for ref, rec in run_sql( "select id_bibxxx, id_bibrec " "from bibrec_bib10x " "where id_bibrec = '%s' " "and field_number = '%s'" % row[1:]) if ref in refs10x]) for row in iids10x) else: iids10x = () if iids70x: iids70x = list_2_SQL_str(iids70x, lambda x: str(x[0])) iids70x = run_sql("select id_bibxxx, id_bibrec, field_number " "from bibrec_bib70x " "where id_bibxxx in %s" % iids70x) iids70x = ((row[0], [(ref, rec) for ref, rec in run_sql( "select id_bibxxx, id_bibrec " "from bibrec_bib70x " "where id_bibrec = '%s' " "and field_number = '%s'" % (row[1:])) if ref in refs70x]) for row in iids70x) else: iids70x = () # [(iids, [bibs])] inspired = list(chain(((iid, list(set(('100', ) + bib for bib in bibs))) for iid, bibs in iids10x), ((iid, list(set(('700', ) + bib for bib in bibs))) for iid, bibs in iids70x))) assert all(len(x[1]) == 1 for x in inspired) inspired = ((k, map(itemgetter(0), map(itemgetter(1), d))) for k, d in groupby(sorted(inspired, key=itemgetter(0)), key=itemgetter(0))) # [(inspireid, [bibs])] inspired = [([(run_sql("select personid " "from aidPERSONIDPAPERS " "where bibref_table = %s " "and bibref_value = %s " "and bibrec = %s " 
"and flag = '2'" , bib), bib) for bib in cluster[1]], cluster[0]) for cluster in inspired] # [([([pid], bibs)], inspireid)] for cluster, iid in inspired: pids = set(chain.from_iterable(imap(itemgetter(0), cluster))) if len(pids) > 1: print "InspireID: %s links the following papers:" % iid print map(itemgetter(1), cluster) print "More than one personid claimed them:" print list(pids) print continue if len(pids) == 0: # not even one paper with this inspireid has been # claimed, screw it continue pid = list(pids)[0][0] # The last step is to check all non-claimed papers for being # claimed by the person on some different signature. problem = (run_sql("select bibref_table, bibref_value, bibrec " "from aidPERSONIDPAPERS " "where bibrec = %s " "and personid = %s " "and flag = %s" , (bib[2], pid, 2)) for bib in (bib for lpid, bib in cluster if not lpid)) problem = list(chain.from_iterable(problem)) if problem: print "A personid has claimed a paper from an inspireid cluster and a contradictory paper." print "Personid %d" % pid print "Inspireid cluster %s" % str(map(itemgetter(1), cluster)) print "Contradicting claims: %s" % str(problem) print def repair_personid(): ''' This should make check_personid_papers() to return true. 
''' pids = run_sql("select distinct personid from aidPERSONIDPAPERS") lpids = len(pids) for i, pid in enumerate((p[0] for p in pids)): update_status(float(i) / lpids, "Checking per-pid...") rows = run_sql("select bibrec, bibref_table, bibref_value, flag " "from aidPERSONIDPAPERS where personid = %s", (pid,)) rows = ((k, list(d)) for k, d in groupby(sorted(rows, key=itemgetter(0)), itemgetter(0))) for rec, sigs in rows: if len(sigs) > 1: claimed = [sig for sig in sigs if sig[3] > 1] rejected = [sig for sig in sigs if sig[3] < -1] if len(claimed) == 1: sigs.remove(claimed[0]) elif len(claimed) == 0 and len(rejected) == 1: sigs.remove(rejected[0]) for sig in set(sigs): run_sql("delete from aidPERSONIDPAPERS " "where personid = %s " "and bibrec = %s " "and bibref_table = %s " "and bibref_value = %s " "and flag = %s" , (pid, sig[0], sig[1], sig[2], sig[3])) update_status_final("Done with per-pid fixing.") recs = run_sql("select distinct bibrec from aidPERSONIDPAPERS") lrecs = len(recs) for i, rec in enumerate((r[0] for r in recs)): update_status(float(i) / lrecs, "Checking per-rec...") rows = run_sql("select bibref_table, bibref_value, flag from aidPERSONIDPAPERS " "where bibrec = %s", (rec,)) kfuc = itemgetter(slice(0, 2)) rows = ((k, map(itemgetter(2), d)) for k, d in groupby(sorted(rows), kfuc)) for bibref, flags in rows: if len(flags) > 1: claimed = sum(1 for f in flags if f > 1) rejected = sum(1 for f in flags if f < -1) if claimed == 1: run_sql("delete from aidPERSONIDPAPERS " "where bibrec = %s " "and bibref_table = %s " "and bibref_value = %s " "and flag <> %s" , (rec, bibref[0], bibref[1], 2)) elif claimed == 0 and rejected == 1: run_sql("delete from aidPERSONIDPAPERS " "where bibrec = %s " "and bibref_table = %s " "and bibref_value = %s " "and flag <> %s" , (rec, bibref[0], bibref[1], -2)) else: run_sql("delete from aidPERSONIDPAPERS " "where bibrec = %s " "and bibref_table = %s " "and bibref_value = %s" , (rec, bibref[0], bibref[1])) update_status_final("Done 
with per-rec fixing.") update_status(0 / 1, "Fixing wrong names...") wrong_names, number = get_wrong_names() for i, w in enumerate(wrong_names): update_status(i / number, "Fixing wrong names...") if w[2]: run_sql("update aidPERSONIDPAPERS set name=%s where bibref_table=%s and bibref_value=%s", (w[2], w[0], w[1])) else: run_sql("delete from aidPERSONIDPAPERS where bibref_table=%s and bibref_value=%s", (w[0], w[1])) no_rejs = frozenset(run_sql("select bibref_table, bibref_value, bibrec from aidPERSONIDPAPERS where flag <> -2")) rejs = frozenset(run_sql("select bibref_table, bibref_value, bibrec from aidPERSONIDPAPERS where flag = -2")) floating_rejs = rejs - no_rejs update_personID_canonical_names(map(new_person_from_signature, floating_rejs)) update_status_final("Fixed all wrong names.") update_status(0, "Checking missing canonical names...") paper_pids = run_sql("select distinct personid from aidPERSONIDPAPERS") cname_pids = run_sql("select distinct personid from aidPERSONIDDATA where tag='canonical_name'") missing_cnames = list(set(p[0] for p in paper_pids) - set(p[0] for p in cname_pids)) npids = len(missing_cnames) for pid in missing_cnames: update_status(missing_cnames.index(pid) / float(npids), "Creating missing canonical names...") update_personID_canonical_names([pid]) update_status_final("Done restoring canonical names.") def get_all_bibrecs(): return [x[0] for x in run_sql("select distinct bibrec from aidPERSONIDPAPERS")] def remove_all_bibrecs(bibrecs): bibrecs_s = list_2_SQL_str(bibrecs) run_sql("delete from aidPERSONIDPAPERS where bibrec in %s" % bibrecs_s) def empty_results_table(): run_sql("TRUNCATE aidRESULTS") def save_cluster(named_cluster): name, cluster = named_cluster for bib in cluster.bibs: run_sql("INSERT INTO aidRESULTS " "(personid, bibref_table, bibref_value, bibrec) " "VALUES (%s, %s, %s, %s) " , (name, str(bib[0]), bib[1], bib[2])) def remove_result_cluster(name): run_sql("DELETE FROM aidRESULTS " "WHERE personid like '%s%%'" % name) def 
personid_name_from_signature(sig): ret = run_sql("select personid, name " "from aidPERSONIDPAPERS " "where bibref_table = %s and bibref_value = %s and bibrec = %s " "and flag > '-2'" , sig) assert len(ret) < 2, ret return ret def personid_from_signature(sig): ret = run_sql("select personid, flag " "from aidPERSONIDPAPERS " "where bibref_table = %s and bibref_value = %s and bibrec = %s " "and flag > '-2'" , sig) assert len(ret) < 2, ret return ret def in_results(name): return run_sql("select count(*) " "from aidRESULTS " "where personid like %s" , (name + '.0',))[0][0] > 0 def get_signature_info(sig): ret = run_sql("select personid, flag " "from aidPERSONIDPAPERS " "where bibref_table = %s and bibref_value = %s and bibrec = %s " "order by flag" , sig) return ret def get_claimed_papers(pid): return run_sql("select bibref_table, bibref_value, bibrec " "from aidPERSONIDPAPERS " "where personid = %s " "and flag > %s", (pid, 1)) def copy_personids(): run_sql("DROP TABLE IF EXISTS `aidPERSONIDDATA_copy`") run_sql("CREATE TABLE `aidPERSONIDDATA_copy` ( " "`personid` BIGINT( 16 ) UNSIGNED NOT NULL , " "`tag` VARCHAR( 64 ) NOT NULL , " "`data` VARCHAR( 256 ) NOT NULL , " "`opt1` MEDIUMINT( 8 ) DEFAULT NULL , " "`opt2` MEDIUMINT( 8 ) DEFAULT NULL , " "`opt3` VARCHAR( 256 ) DEFAULT NULL , " "KEY `personid-b` ( `personid` ) , " "KEY `tag-b` ( `tag` ) , " "KEY `data-b` ( `data` ) , " "KEY `opt1` ( `opt1` ) " ") ENGINE = MYISAM DEFAULT CHARSET = utf8") run_sql("INSERT INTO `aidPERSONIDDATA_copy` " "SELECT * " "FROM `aidPERSONIDDATA`") run_sql("DROP TABLE IF EXISTS `aidPERSONIDPAPERS_copy`") run_sql("CREATE TABLE `aidPERSONIDPAPERS_copy` ( " "`personid` bigint( 16 ) unsigned NOT NULL , " "`bibref_table` enum( '100', '700' ) NOT NULL , " "`bibref_value` mediumint( 8 ) unsigned NOT NULL , " "`bibrec` mediumint( 8 ) unsigned NOT NULL , " "`name` varchar( 256 ) NOT NULL , " "`flag` smallint( 2 ) NOT NULL DEFAULT '0', " "`lcul` smallint( 2 ) NOT NULL DEFAULT '0', " "`last_updated` 
timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP , " "KEY `personid-b` ( `personid` ) , " "KEY `reftable-b` ( `bibref_table` ) , " "KEY `refvalue-b` ( `bibref_value` ) , " "KEY `rec-b` ( `bibrec` ) , " "KEY `name-b` ( `name` ) , " "KEY `timestamp-b` ( `last_updated` ) , " "KEY `ptvrf-b` ( `personid` , `bibref_table` , `bibref_value` , `bibrec` , `flag` ) " ") ENGINE = MyISAM DEFAULT CHARSET = utf8") run_sql("INSERT INTO `aidPERSONIDPAPERS_copy` " "SELECT * " "FROM `aidPERSONIDPAPERS") def delete_empty_persons(): pp = run_sql("select personid from aidPERSONIDPAPERS") pp = set(p[0] for p in pp) pd = run_sql("select personid from aidPERSONIDDATA") pd = set(p[0] for p in pd) fpd = run_sql("select personid from aidPERSONIDDATA where tag <> 'canonical_name'") fpd = set(p[0] for p in fpd) to_delete = pd - (pp | fpd) if to_delete: run_sql("delete from aidPERSONIDDATA where personid in %s" % list_2_SQL_str(to_delete)) def restore_personids(): run_sql("TRUNCATE `aidPERSONIDDATA`") run_sql("INSERT INTO `aidPERSONIDDATA` " "SELECT * " "FROM `aidPERSONIDDATA_copy`") run_sql("TRUNCATE `aidPERSONIDPAPERS`") run_sql("INSERT INTO `aidPERSONIDPAPERS` " "SELECT * " "FROM `aidPERSONIDPAPERS_copy") def get_possible_personids_from_paperlist_old(bibrecreflist): ''' @param bibrecreflist: list of bibrecref couples, (('100:123,123',),) or bibrecs (('123',),) returns a list of pids and connected bibrefs in order of number of bibrefs per pid [ [['1'],['123:123.123','123:123.123']] , [['2'],['123:123.123']] ] ''' pid_bibrecref_dict = {} for b in bibrecreflist: pids = [] try: pids = run_sql("select personid from aidPERSONID " "use index (`tdf-b`) where tag=%s and data=%s", ('paper', str(b[0]))) except (OperationalError, ProgrammingError): pids = run_sql("select personid from aidPERSONID " "where tag=%s and data=%s", ('paper', str(b[0]))) for pid in pids: if pid[0] in pid_bibrecref_dict: pid_bibrecref_dict[pid[0]].append(str(b[0])) else: pid_bibrecref_dict[pid[0]] = 
[str(b[0])] pid_list = [[i, pid_bibrecref_dict[i]] for i in pid_bibrecref_dict] return sorted(pid_list, key=lambda k: len(k[2]), reverse=True) def resolve_affiliation(ambiguous_aff_string): """ This is a method available in the context of author disambiguation in ADS only. No other platform provides the db table used by this function. @warning: to be used in an ADS context only. @param ambiguous_aff_string: Ambiguous affiliation string @type ambiguous_aff_string: str @return: The normalized version of the name string as presented in the database @rtype: str """ if not ambiguous_aff_string or not bconfig.CFG_ADS_SITE: return "None" aff_id = run_sql("select aff_id from ads_affiliations where affstring=%s", (ambiguous_aff_string,)) if aff_id: return aff_id[0][0] else: return "None"
# codeparrot/github-code-clean (dataset provenance marker, not part of the original source)